Add sandbox
Browse files
- app.py +32 -15
- prompts/devstral_coding_prompt.py +2 -0
- utils/utils.py +15 -0
app.py
CHANGED
|
@@ -4,7 +4,8 @@ import requests
|
|
| 4 |
import json
|
| 5 |
import os
|
| 6 |
from utils.google_genai_llm import get_response, generate_with_gemini
|
| 7 |
-
from utils.utils import parse_json_codefences
|
|
|
|
| 8 |
from prompts.requirements_gathering import requirements_gathering_system_prompt
|
| 9 |
from prompts.planning import hf_query_gen_prompt, hf_context_gen_prompt
|
| 10 |
from prompts.devstral_coding_prompt import devstral_code_gen_sys_prompt, devstral_code_gen_user_prompt
|
|
@@ -198,7 +199,7 @@ def process_user_input(message, history, uploaded_files, file_cache):
|
|
| 198 |
)
|
| 199 |
|
| 200 |
# Get AI response
|
| 201 |
-
ai_response =
|
| 202 |
|
| 203 |
return ai_response, file_cache
|
| 204 |
|
|
@@ -348,7 +349,21 @@ def generate_code_with_devstral(plan_text, history, file_cache):
|
|
| 348 |
return "β **Error:** No response received from Devstral model."
|
| 349 |
except Exception as e:
|
| 350 |
return f"β **Error:** {str(e)}"
|
| 351 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 352 |
# Custom CSS for a sleek design
|
| 353 |
custom_css = """
|
| 354 |
.gradio-container {
|
|
@@ -447,23 +462,20 @@ with gr.Blocks(css=custom_css, title="Data Science Requirements Gathering Agent"
|
|
| 447 |
elem_classes=["chat-container"]
|
| 448 |
)
|
| 449 |
|
| 450 |
-
plan_output = gr.
|
| 451 |
label="Generated Plan",
|
| 452 |
-
interactive=False,
|
| 453 |
visible=True,
|
| 454 |
-
|
| 455 |
-
max_lines=20
|
| 456 |
)
|
| 457 |
|
| 458 |
-
code_output = gr.
|
| 459 |
label="Generated Code",
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
)
|
| 466 |
-
|
| 467 |
with gr.Row():
|
| 468 |
with gr.Column(scale=4):
|
| 469 |
msg = gr.Textbox(
|
|
@@ -520,6 +532,7 @@ with gr.Blocks(css=custom_css, title="Data Science Requirements Gathering Agent"
|
|
| 520 |
with gr.Column():
|
| 521 |
plan_btn = gr.Button("Generate Plan π", variant="secondary", elem_classes=["btn-secondary"], size="lg")
|
| 522 |
code_btn = gr.Button("Generate Code π»", variant="secondary", elem_classes=["btn-secondary"], size="lg")
|
|
|
|
| 523 |
|
| 524 |
# State for conversation history and file cache
|
| 525 |
chat_history = gr.State([])
|
|
@@ -561,7 +574,11 @@ with gr.Blocks(css=custom_css, title="Data Science Requirements Gathering Agent"
|
|
| 561 |
inputs=[plan_output, chat_history, file_cache],
|
| 562 |
outputs=[code_output]
|
| 563 |
)
|
| 564 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 565 |
file_upload.change(
|
| 566 |
lambda files: files,
|
| 567 |
inputs=[file_upload],
|
|
|
|
| 4 |
import json
|
| 5 |
import os
|
| 6 |
from utils.google_genai_llm import get_response, generate_with_gemini
|
| 7 |
+
from utils.utils import parse_json_codefences, parse_python_codefences
|
| 8 |
+
from utils.code_sandbox import code_eval
|
| 9 |
from prompts.requirements_gathering import requirements_gathering_system_prompt
|
| 10 |
from prompts.planning import hf_query_gen_prompt, hf_context_gen_prompt
|
| 11 |
from prompts.devstral_coding_prompt import devstral_code_gen_sys_prompt, devstral_code_gen_user_prompt
|
|
|
|
| 199 |
)
|
| 200 |
|
| 201 |
# Get AI response
|
| 202 |
+
ai_response = generate_with_gemini(formatted_prompt, purpose="REQUIREMENTS_GATHERING")
|
| 203 |
|
| 204 |
return ai_response, file_cache
|
| 205 |
|
|
|
|
| 349 |
return "β **Error:** No response received from Devstral model."
|
| 350 |
except Exception as e:
|
| 351 |
return f"β **Error:** {str(e)}"
|
| 352 |
+
def execute_code(code_output):
    """Run the generated code in the sandbox and render its result as text.

    Parameters
    ----------
    code_output : str
        Markdown produced by the code-generation step; expected to contain
        a ```python ... ``` fenced code block.

    Returns
    -------
    str
        The sandbox result rendered for display: dicts are pretty-printed
        as JSON, lists joined line-by-line, anything else stringified.
        On failure, an error message is returned instead of raising.
    """
    try:
        snippet = parse_python_codefences(code_output)
        # Echo the extracted code so the extraction step can be debugged
        # from the server logs.
        print(snippet)
        outcome = code_eval(snippet)
        if isinstance(outcome, dict):
            return json.dumps(outcome, indent=4)
        if isinstance(outcome, list):
            return '\n'.join(str(item) for item in outcome)
        return str(outcome)
    except Exception as e:
        # UI boundary: surface the failure to the user rather than crashing.
        return f"β **Error:** {str(e)}"
|
| 366 |
+
|
| 367 |
# Custom CSS for a sleek design
|
| 368 |
custom_css = """
|
| 369 |
.gradio-container {
|
|
|
|
| 462 |
elem_classes=["chat-container"]
|
| 463 |
)
|
| 464 |
|
| 465 |
+
plan_output = gr.Markdown(
|
| 466 |
label="Generated Plan",
|
|
|
|
| 467 |
visible=True,
|
| 468 |
+
max_height=150,
|
|
|
|
| 469 |
)
|
| 470 |
|
| 471 |
+
code_output = gr.Markdown(
|
| 472 |
label="Generated Code",
|
| 473 |
+
visible=True,max_height=150,
|
| 474 |
+
)
|
| 475 |
+
execution_output = gr.Markdown(
|
| 476 |
+
label="Execution Output",
|
| 477 |
+
visible=True,max_height=150,
|
| 478 |
)
|
|
|
|
| 479 |
with gr.Row():
|
| 480 |
with gr.Column(scale=4):
|
| 481 |
msg = gr.Textbox(
|
|
|
|
| 532 |
with gr.Column():
|
| 533 |
plan_btn = gr.Button("Generate Plan π", variant="secondary", elem_classes=["btn-secondary"], size="lg")
|
| 534 |
code_btn = gr.Button("Generate Code π»", variant="secondary", elem_classes=["btn-secondary"], size="lg")
|
| 535 |
+
execute_code_btn = gr.Button("Execute Code π", variant="primary", elem_classes=["btn-primary"], size="lg")
|
| 536 |
|
| 537 |
# State for conversation history and file cache
|
| 538 |
chat_history = gr.State([])
|
|
|
|
| 574 |
inputs=[plan_output, chat_history, file_cache],
|
| 575 |
outputs=[code_output]
|
| 576 |
)
|
| 577 |
+
execute_code_btn.click(
|
| 578 |
+
execute_code,
|
| 579 |
+
inputs=[code_output],
|
| 580 |
+
outputs=[execution_output]
|
| 581 |
+
)
|
| 582 |
file_upload.change(
|
| 583 |
lambda files: files,
|
| 584 |
inputs=[file_upload],
|
prompts/devstral_coding_prompt.py
CHANGED
|
@@ -12,4 +12,6 @@ devstral_code_gen_user_prompt ="""
|
|
| 12 |
|
| 13 |
**Context for Plan Steps**
|
| 14 |
{context}
|
|
|
|
|
|
|
| 15 |
"""
|
|
|
|
| 12 |
|
| 13 |
**Context for Plan Steps**
|
| 14 |
{context}
|
| 15 |
+
|
| 16 |
+
Just return the full execution code block in a python codefence as shown below without any explanation or suffix or prefix text.
|
| 17 |
"""
|
utils/utils.py
CHANGED
|
@@ -21,3 +21,18 @@ def parse_json_codefences(text: str) -> List[Any]:
|
|
| 21 |
except json.JSONDecodeError as e:
|
| 22 |
print(f"β οΈ Failed to parse JSON block:\n{json_str}\nError: {e}")
|
| 23 |
return results
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
except json.JSONDecodeError as e:
|
| 22 |
print(f"β οΈ Failed to parse JSON block:\n{json_str}\nError: {e}")
|
| 23 |
return results
|
| 24 |
+
|
| 25 |
+
def parse_python_codefences(text: str) -> str:
    """Extract the Python source from ```python ... ``` fences in *text*.

    Scans *text* for fenced blocks of the form ```python ... ``` and
    returns the contents of the LAST such block, stripped of surrounding
    whitespace. Returns an empty string when no fenced block is present.

    Note: the annotation is ``str`` — callers (e.g. the sandbox executor)
    pass the result directly to the code evaluator as a single snippet.
    """
    # Non-greedy body so each fence pair is matched separately;
    # DOTALL lets the body span lines, IGNORECASE accepts ```Python etc.
    pattern = re.compile(r"```python\s+(.*?)\s+```", re.DOTALL | re.IGNORECASE)
    matches = pattern.findall(text)
    # Keep only the last block: with several fences, the final one is
    # assumed to be the complete/authoritative snippet.
    return matches[-1].strip() if matches else ""
|
| 38 |
+
|