youssefleb committed
Commit c8b2c9e · verified · 1 Parent(s): 90a835a

Update agent_logic.py

Files changed (1): agent_logic.py (+21 −28)
agent_logic.py CHANGED
@@ -1,4 +1,4 @@
-# agent_logic.py (Now reads prompts from config)
+# agent_logic.py (Now with error logging)
 import asyncio
 from typing import AsyncGenerator, Dict, Optional
 import json
@@ -7,18 +7,15 @@ import google.generativeai as genai
 from anthropic import AsyncAnthropic
 from openai import AsyncOpenAI
 from personas import PERSONAS_DATA
-
-# 1. Import all settings from our new config files
 import config
 from utils import load_prompt
 from mcp_servers import AgentCalibrator, BusinessSolutionEvaluator, get_llm_response
 
-# 2. Load prompts from files AT STARTUP
 CLASSIFIER_SYSTEM_PROMPT = load_prompt(config.PROMPT_FILES["classifier"])
 HOMOGENEOUS_MANAGER_PROMPT = load_prompt(config.PROMPT_FILES["manager_homogeneous"])
 HETEROGENEOUS_MANAGER_PROMPT = load_prompt(config.PROMPT_FILES["manager_heterogeneous"])
 
-# --- 3. Define the Specialist Agent Architectures ---
+# --- (Specialist Agent classes are unchanged) ---
 class Baseline_Single_Agent:
     def __init__(self, api_clients: dict):
         self.gemini_client = api_clients.get("Gemini")
@@ -32,7 +29,7 @@ class Baseline_Single_Agent:
 class Baseline_Static_Homogeneous:
     def __init__(self, api_clients: dict):
         self.api_clients = {name: client for name, client in api_clients.items() if client}
-        self.gemini_client = api_clients.get("Gemini") # Manager is always Gemini
+        self.gemini_client = api_clients.get("Gemini")
 
     async def solve(self, problem: str, persona_prompt: str):
         print(f"--- (Specialist Team: Homogeneous) solving (live)... ---")
@@ -48,7 +45,6 @@ class Baseline_Static_Homogeneous:
 
         responses = await asyncio.gather(*tasks)
 
-        # 4. Use the loaded manager prompt
         manager_system_prompt = HOMOGENEOUS_MANAGER_PROMPT
         reports_str = "\n\n".join(f"Report from Team Member {i+1}:\n{resp}" for i, resp in enumerate(responses))
         manager_user_prompt = f"Original Problem: {problem}\n\n{reports_str}\n\nPlease synthesize these reports into one final, comprehensive solution."
@@ -58,7 +54,7 @@ class Baseline_Static_Homogeneous:
 class Baseline_Static_Heterogeneous:
     def __init__(self, api_clients: dict):
         self.api_clients = api_clients
-        self.gemini_client = api_clients.get("Gemini") # Manager is always Gemini
+        self.gemini_client = api_clients.get("Gemini")
 
     async def solve(self, problem: str, team_plan: dict):
         print(f"--- (Specialist Team: Heterogeneous) solving (live)... ---")
@@ -68,7 +64,7 @@ class Baseline_Static_Heterogeneous:
         tasks = []
         for role, config_data in team_plan.items():
             llm_name = config_data["llm"]
-            persona_key = config_data["persona"] # e.g., "Culture_5"
+            persona_key = config_data["persona"]
             client = self.api_clients.get(llm_name)
 
             if not client:
@@ -82,7 +78,6 @@ class Baseline_Static_Heterogeneous:
 
         responses = await asyncio.gather(*tasks)
 
-        # 5. Use the loaded manager prompt
         manager_system_prompt = HETEROGENEOUS_MANAGER_PROMPT
         reports_str = "\n\n".join(f"Report from {team_plan[role]['llm']} (as {role}):\n{resp}" for (role, resp) in zip(team_plan.keys(), responses))
         manager_user_prompt = f"Original Problem: {problem}\n\n{reports_str}\n\nPlease synthesize these specialist reports into one final, comprehensive solution."
@@ -94,17 +89,11 @@ class StrategicSelectorAgent:
 
     def __init__(self, api_keys: Dict[str, Optional[str]]):
         self.api_keys = api_keys
-
-        self.api_clients = {
-            "Gemini": None,
-            "Anthropic": None,
-            "SambaNova": None
-        }
+        self.api_clients = { "Gemini": None, "Anthropic": None, "SambaNova": None }
 
         if api_keys.get("google"):
             try:
                 genai.configure(api_key=api_keys["google"])
-                # 6. Read model name from config
                 self.api_clients["Gemini"] = genai.GenerativeModel(config.MODELS["Gemini"]["default"])
             except Exception as e:
                 print(f"Warning: Failed to initialize Gemini client. Error: {e}")
@@ -118,10 +107,7 @@ class StrategicSelectorAgent:
         if api_keys.get("sambanova"):
             try:
                 base_url = os.getenv("SAMBANOVA_BASE_URL", "https://api.sambanova.ai/v1")
-                self.api_clients["SambaNova"] = AsyncOpenAI(
-                    api_key=api_keys["sambanova"],
-                    base_url=base_url
-                )
+                self.api_clients["SambaNova"] = AsyncOpenAI(api_key=api_keys["sambanova"], base_url=base_url)
             except Exception as e:
                 print(f"Warning: Failed to initialize SambaNova client. Error: {e}")
 
@@ -135,14 +121,11 @@ class StrategicSelectorAgent:
         self.homo_team = Baseline_Static_Homogeneous(self.api_clients)
         self.hetero_team = Baseline_Static_Heterogeneous(self.api_clients)
 
-        # 7. Check if prompts loaded correctly
        if "ERROR:" in CLASSIFIER_SYSTEM_PROMPT:
            raise FileNotFoundError(CLASSIFIER_SYSTEM_PROMPT)
 
    async def _classify_problem(self, problem: str) -> AsyncGenerator[str, None]:
        yield "Classifying problem archetype (live)..."
-
-        # 8. Use the loaded classifier prompt
        classification = await get_llm_response(
            "Gemini",
            self.api_clients["Gemini"],
@@ -167,7 +150,6 @@ class StrategicSelectorAgent:
        solution_draft = ""

        try:
-            # 9. Read default persona from config
            default_persona = PERSONAS_DATA[config.DEFAULT_PERSONA_KEY]["description"]

            if classification == "Direct_Procedure" or classification == "Holistic_Abstract_Reasoning":
@@ -180,7 +162,17 @@ class StrategicSelectorAgent:

            elif classification == "Cognitive_Labyrinth":
                yield "Deploying: Static Heterogeneous Team (Cognitive Diversity)..."
-                team_plan = await self.calibrator.calibrate_team(problem)
+
+                # --- NEW: Capture errors from calibration ---
+                team_plan, calibration_errors = await self.calibrator.calibrate_team(problem)
+
+                # --- NEW: Yield any calibration errors ---
+                if calibration_errors:
+                    yield "--- CALIBRATION WARNINGS ---"
+                    for err in calibration_errors:
+                        yield err
+                    yield "-----------------------------"
+
                yield f"Calibration complete. Best Team: {json.dumps({k: v['llm'] for k, v in team_plan.items()})}"
                solution_draft = await self.hetero_team.solve(problem, team_plan)

@@ -199,10 +191,11 @@ class StrategicSelectorAgent:
            scores = {k: v.get('score', 0) for k, v in v_fitness_json.items()}
            yield f"Initial Score: {scores}"

-            yield "Skipping self-correction for Milestone 4..."
+            # --- This is where Milestone 5 will go ---
+            yield "Skipping self-correction for now..."

            await asyncio.sleep(0.5)
-            yield "Milestone 4 Complete. All systems are live and stateless."
+            yield "Milestone 4 (with error logging) Complete."

            solution_draft_json_safe = json.dumps(solution_draft)
            yield f"FINAL: {{\"text\": {solution_draft_json_safe}, \"audio\": null}}"
 
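The startup guard `if "ERROR:" in CLASSIFIER_SYSTEM_PROMPT: raise FileNotFoundError(...)` implies that utils.load_prompt returns a sentinel string rather than raising when a prompt file cannot be read. That helper is not part of this diff, so the following is only a sketch of a compatible implementation under that assumption.

# Sketch, assuming load_prompt reports failures as an "ERROR: ..." string so
# module-level loading never raises; StrategicSelectorAgent checks the prefix later.
from pathlib import Path


def load_prompt(path: str) -> str:
    try:
        return Path(path).read_text(encoding="utf-8")
    except OSError as exc:
        return f"ERROR: could not load prompt file '{path}': {exc}"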