youssefleb committed on
Commit
e364ce0
·
verified ·
1 Parent(s): be02de2

Update agent_logic.py

Browse files
Files changed (1) hide show
  1. agent_logic.py +87 -87
agent_logic.py CHANGED
@@ -1,4 +1,4 @@
1
- # agent_logic.py (Milestone 5 - FINAL & ROBUST)
2
  import asyncio
3
  from typing import AsyncGenerator, Dict, Optional
4
  import json
@@ -6,13 +6,14 @@ import os
6
  import google.generativeai as genai
7
  from anthropic import AsyncAnthropic
8
  from openai import AsyncOpenAI
 
9
  from personas import PERSONAS_DATA
10
  import config
11
  from utils import load_prompt
12
  from mcp_servers import AgentCalibrator, BusinessSolutionEvaluator, get_llm_response
13
  from self_correction import SelfCorrector
14
- from async_generator import async_generator, yield_
15
 
 
16
  CLASSIFIER_SYSTEM_PROMPT = load_prompt(config.PROMPT_FILES["classifier"])
17
  HOMOGENEOUS_MANAGER_PROMPT = load_prompt(config.PROMPT_FILES["manager_homogeneous"])
18
  HETEROGENEOUS_MANAGER_PROMPT = load_prompt(config.PROMPT_FILES["manager_heterogeneous"])
@@ -79,8 +80,7 @@ class StrategicSelectorAgent:
79
  except Exception as e: print(f"Warning: Anthropic init failed: {e}")
80
  if api_keys.get("sambanova"):
81
  try:
82
- base_url = os.getenv("SAMBANOVA_BASE_URL", "https://api.sambanova.ai/v1")
83
- self.api_clients["SambaNova"] = AsyncOpenAI(api_key=api_keys["sambanova"], base_url=base_url)
84
  except Exception as e: print(f"Warning: SambaNova init failed: {e}")
85
 
86
  if not self.api_clients["Gemini"]:
@@ -102,84 +102,6 @@ class StrategicSelectorAgent:
102
  classification = classification.strip().replace("\"", "")
103
  yield f"Diagnosis: {classification}"
104
 
105
- @async_generator
106
- async def _generate_and_evaluate(self, problem: str, classification: str, correction_prompt: Optional[str] = None):
107
- solution_draft = ""
108
- team_plan = {}
109
-
110
- if correction_prompt:
111
- problem = f"{problem}\n\n{correction_prompt}"
112
-
113
- default_persona = PERSONAS_DATA[config.DEFAULT_PERSONA_KEY]["description"]
114
-
115
- if classification == "Direct_Procedure" or classification == "Holistic_Abstract_Reasoning":
116
- if not correction_prompt:
117
- await yield_("Deploying: Baseline Single Agent (Simplicity Hypothesis)...")
118
- solution_draft = await self.single_agent.solve(problem, default_persona)
119
-
120
- elif classification == "Local_Geometric_Procedural":
121
- if not correction_prompt:
122
- await yield_("Deploying: Static Homogeneous Team (Expert Anomaly)...")
123
- solution_draft = await self.homo_team.solve(problem, default_persona)
124
-
125
- elif classification == "Cognitive_Labyrinth":
126
- if not correction_prompt:
127
- await yield_("Deploying: Static Heterogeneous Team (Cognitive Diversity)...")
128
- team_plan, calibration_errors = await self.calibrator.calibrate_team(problem)
129
- if calibration_errors:
130
- await yield_("--- CALIBRATION WARNINGS ---")
131
- for err in calibration_errors: await yield_(err)
132
- await yield_("-----------------------------")
133
- await yield_(f"Calibration complete. Best Team: {json.dumps({k: v['llm'] for k, v in team_plan.items()})}")
134
- self.current_team_plan = team_plan
135
-
136
- # Reuse the calibrated team
137
- solution_draft = await self.hetero_team.solve(problem, self.current_team_plan)
138
-
139
- else:
140
- if not correction_prompt:
141
- await yield_(f"Diagnosis '{classification}' is unknown. Defaulting to Single Agent.")
142
- solution_draft = await self.single_agent.solve(problem, default_persona)
143
-
144
- if "Error generating response" in solution_draft:
145
- raise Exception(f"The specialist team failed to generate a solution. Error: {solution_draft}")
146
-
147
- await yield_(f"Draft solution received: '{solution_draft[:60]}...'")
148
-
149
- # --- EVALUATE ---
150
- await yield_("Evaluating final draft (live)...")
151
- v_fitness_json = await self.evaluator.evaluate(problem, solution_draft)
152
-
153
- # --- NEW: Robust Normalization of Evaluation Data ---
154
- # This block fixes the "list object has no attribute get" error
155
- normalized_fitness = {}
156
- if isinstance(v_fitness_json, dict):
157
- for k, v in v_fitness_json.items():
158
- if isinstance(v, dict):
159
- normalized_fitness[k] = v
160
- elif isinstance(v, list) and len(v) > 0 and isinstance(v[0], dict):
161
- # If the LLM wrapped the object in a list, unwrap it
162
- normalized_fitness[k] = v[0]
163
- else:
164
- # Fallback for unexpected structure
165
- normalized_fitness[k] = {'score': 0, 'justification': str(v)}
166
- else:
167
- # Fallback if the whole thing isn't a dict
168
- await yield_(f"Warning: Invalid JSON structure from Judge: {type(v_fitness_json)}")
169
- normalized_fitness = {k: {'score': 0, 'justification': "Invalid JSON structure"} for k in ["Novelty", "Usefulness_Feasibility", "Flexibility", "Elaboration", "Cultural_Appropriateness"]}
170
-
171
- v_fitness_json = normalized_fitness
172
- # ----------------------------------------------------
173
-
174
- scores = {k: v.get('score', 0) for k, v in v_fitness_json.items()}
175
- await yield_(f"Evaluation Score: {scores}")
176
-
177
- # Debug info if score is low
178
- if scores.get('Novelty', 0) <= 1:
179
- await yield_(f"⚠️ Low Score Detected. Reason: {v_fitness_json.get('Novelty', {}).get('justification', 'Unknown')}")
180
-
181
- return solution_draft, v_fitness_json, scores
182
-
183
  async def solve(self, problem: str) -> AsyncGenerator[str, None]:
184
  classification_generator = self._classify_problem(problem)
185
  classification = ""
@@ -192,11 +114,14 @@ class StrategicSelectorAgent:
192
  yield "Classifier failed. Defaulting to Single Agent."
193
  classification = "Direct_Procedure"
194
 
195
- solution_draft, v_fitness_json, scores = "", {}, {}
 
 
196
 
197
  try:
198
  # --- MAIN LOOP (Self-Correction) ---
199
  for i in range(2):
 
200
  current_problem = problem
201
  if i > 0:
202
  yield f"--- (Loop {i}) Score is too low. Initiating Self-Correction... ---"
@@ -204,12 +129,87 @@ class StrategicSelectorAgent:
204
  yield f"Diagnosis: {correction_prompt_text.splitlines()[3].strip()}"
205
  current_problem = f"{problem}\n\n{correction_prompt_text}"
206
 
207
- loop_generator = self._generate_and_evaluate(current_problem, classification, None if i==0 else "Correcting...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
 
209
- async for status_update in loop_generator:
210
- yield status_update
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
 
212
- solution_draft, v_fitness_json, scores = await loop_generator.aclose() # Wait for return
 
213
 
214
  # Check if we passed
215
  if self.corrector.is_good_enough(scores):
 
1
+ # agent_logic.py (FINAL, Robust Version)
2
  import asyncio
3
  from typing import AsyncGenerator, Dict, Optional
4
  import json
 
6
  import google.generativeai as genai
7
  from anthropic import AsyncAnthropic
8
  from openai import AsyncOpenAI
9
+ import re # <-- Added: For score parsing fix
10
  from personas import PERSONAS_DATA
11
  import config
12
  from utils import load_prompt
13
  from mcp_servers import AgentCalibrator, BusinessSolutionEvaluator, get_llm_response
14
  from self_correction import SelfCorrector
 
15
 
16
+ # (Configuration and Manager Prompts are loaded here)
17
  CLASSIFIER_SYSTEM_PROMPT = load_prompt(config.PROMPT_FILES["classifier"])
18
  HOMOGENEOUS_MANAGER_PROMPT = load_prompt(config.PROMPT_FILES["manager_homogeneous"])
19
  HETEROGENEOUS_MANAGER_PROMPT = load_prompt(config.PROMPT_FILES["manager_heterogeneous"])
 
80
  except Exception as e: print(f"Warning: Anthropic init failed: {e}")
81
  if api_keys.get("sambanova"):
82
  try:
83
+ self.api_clients["SambaNova"] = AsyncOpenAI(api_key=api_keys["sambanova"], base_url=os.getenv("SAMBANOVA_BASE_URL", "https://api.sambanova.ai/v1"))
 
84
  except Exception as e: print(f"Warning: SambaNova init failed: {e}")
85
 
86
  if not self.api_clients["Gemini"]:
 
102
  classification = classification.strip().replace("\"", "")
103
  yield f"Diagnosis: {classification}"
104
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  async def solve(self, problem: str) -> AsyncGenerator[str, None]:
106
  classification_generator = self._classify_problem(problem)
107
  classification = ""
 
114
  yield "Classifier failed. Defaulting to Single Agent."
115
  classification = "Direct_Procedure"
116
 
117
+ solution_draft = ""
118
+ v_fitness_json = {}
119
+ scores = {}
120
 
121
  try:
122
  # --- MAIN LOOP (Self-Correction) ---
123
  for i in range(2):
124
+
125
  current_problem = problem
126
  if i > 0:
127
  yield f"--- (Loop {i}) Score is too low. Initiating Self-Correction... ---"
 
129
  yield f"Diagnosis: {correction_prompt_text.splitlines()[3].strip()}"
130
  current_problem = f"{problem}\n\n{correction_prompt_text}"
131
 
132
+ # --- DEPLOY ---
133
+ default_persona = PERSONAS_DATA[config.DEFAULT_PERSONA_KEY]["description"]
134
+
135
+ if classification == "Direct_Procedure" or classification == "Holistic_Abstract_Reasoning":
136
+ if i == 0: yield "Deploying: Baseline Single Agent (Simplicity Hypothesis)..."
137
+ solution_draft = await self.single_agent.solve(current_problem, default_persona)
138
+
139
+ elif classification == "Local_Geometric_Procedural":
140
+ if i == 0: yield "Deploying: Static Homogeneous Team (Expert Anomaly)..."
141
+ solution_draft = await self.homo_team.solve(current_problem, default_persona)
142
+
143
+ elif classification == "Cognitive_Labyrinth":
144
+ if i == 0:
145
+ yield "Deploying: Static Heterogeneous Team (Cognitive Diversity)..."
146
+ team_plan, calibration_errors = await self.calibrator.calibrate_team(current_problem)
147
+ if calibration_errors:
148
+ yield "--- CALIBRATION WARNINGS ---"
149
+ for err in calibration_errors: yield err
150
+ yield "-----------------------------"
151
+ yield f"Calibration complete. Best Team: {json.dumps({k: v['llm'] for k, v in team_plan.items()})}"
152
+ self.current_team_plan = team_plan
153
+
154
+ solution_draft = await self.hetero_team.solve(current_problem, self.current_team_plan)
155
 
156
+ else:
157
+ if i == 0: yield f"Diagnosis '{classification}' is unknown. Defaulting to Single Agent."
158
+ solution_draft = await self.single_agent.solve(current_problem, default_persona)
159
+
160
+ if "Error generating response" in solution_draft:
161
+ raise Exception(f"The specialist team failed to generate a solution. Error: {solution_draft}")
162
+
163
+ yield f"Draft solution received: '{solution_draft[:60]}...'"
164
+
165
+ # --- EVALUATE ---
166
+ yield "Evaluating draft (live)..."
167
+ v_fitness_json = await self.evaluator.evaluate(current_problem, solution_draft)
168
+
169
+ # --- Robust Normalization of Evaluation Data ---
170
+ normalized_fitness = {}
171
+ if isinstance(v_fitness_json, dict):
172
+ for k, v in v_fitness_json.items():
173
+ if isinstance(v, dict):
174
+ # Standard format: {"score": 4, "justification": "..."}
175
+ score_value = v.get('score')
176
+ justification_value = v.get('justification', str(v))
177
+ elif isinstance(v, list) and len(v) > 0 and isinstance(v[0], dict):
178
+ # Handles list structure: [{"score": 4, "justification": "..."}]
179
+ score_value = v[0].get('score')
180
+ justification_value = v[0].get('justification', str(v[0]))
181
+ else:
182
+ # Fallback for unexpected structure
183
+ score_value = 0
184
+ justification_value = str(v)
185
+
186
+ # FIX: Extract the integer score from the string (e.g., "4/5" -> 4)
187
+ if isinstance(score_value, str):
188
+ try:
189
+ score_value = int(re.search(r'\d+', score_value).group())
190
+ except:
191
+ score_value = 0
192
+
193
+ # Ensure score is an integer
194
+ try:
195
+ score_value = int(score_value)
196
+ except (ValueError, TypeError):
197
+ score_value = 0
198
+
199
+ normalized_fitness[k] = {'score': score_value, 'justification': justification_value}
200
+
201
+ else:
202
+ # Fallback if the whole thing isn't a dict
203
+ normalized_fitness = {k: {'score': 0, 'justification': "Invalid JSON structure"} for k in ["Novelty", "Usefulness_Feasibility", "Flexibility", "Elaboration", "Cultural_Appropriateness"]}
204
+
205
+ v_fitness_json = normalized_fitness
206
+ # ----------------------------------------------------
207
+
208
+ scores = {k: v.get('score', 0) for k, v in v_fitness_json.items()}
209
+ yield f"Evaluation Score: {scores}"
210
 
211
+ if scores.get('Novelty', 0) <= 1:
212
+ yield f"⚠️ Low Score Detected. Reason: {v_fitness_json.get('Novelty', {}).get('justification', 'Unknown')}"
213
 
214
  # Check if we passed
215
  if self.corrector.is_good_enough(scores):