This is for the agent workflow project.
I had a bug in the GUI: after you used the run button to test a proc agent, it cached the compiled function for performance reasons and reused that function on later runs. This meant that to test changes to a proc function you had to save, exit the app, restart, and navigate back to the proc agent — after every single change.
The solution is to pass down an optional parameter to tell it to not do that. Being optional means it does not break anything that is using the default value already.
# The only required change is adding ', force_recompile: bool = False'
# to the function definition
def exec_proc_agent(function_name: str, step_params: Dict[str, Any], function_def: str, force_recompile: bool = False) -> tuple[bytes, Dict[str, Dict[str, Union[int, str]]]]:
    """Compile (if needed) and run a dynamically defined proc-agent function.

    Parameters:
        function_name: name the compiled function will have in globals().
        step_params: parameters for this step (logged here; presumably passed
            to the function later in this routine — excerpt is truncated).
        function_def: Python source text that defines the function.
        force_recompile: when True, re-exec function_def even if a function
            with this name is already cached in globals(). Defaults to False,
            so existing callers keep the original caching behavior.

    Returns:
        A (result_bytes, status_dict) tuple.
    """
    spacing = depth_manager.get_spacing()
    logging.info("%sStarting %s" % (spacing, function_name))
    logging.debug("%s******** \n step_params%s" % (spacing, step_params))
    result = b''  # Initialize result as empty bytes
    status = {"status": {"value": 1, "reason": "Function execution not attempted"}}
    try:
        # Recompile when explicitly requested, or compile on first use.
        # This one condition handles both the old (cache) and new (force) logic.
        if force_recompile or function_name not in globals():
            if force_recompile:
                logging.info("%sForce recompile requested for function %s" % (spacing, function_name))
            else:
                logging.info("%sCreating function %s for the first time" % (spacing, function_name))
            logging.debug("%s******** Function definition:\n%s" % (spacing, function_def))
            # Define the function dynamically.
            # NOTE(review): exec on function_def runs arbitrary code — acceptable
            # only because definitions come from the local project config, not
            # untrusted input; confirm that assumption holds for all callers.
            exec(function_def, globals())
        else:
            logging.info("%sUsing existing cached function %s" % (spacing, function_name))
        func = globals()[function_name]
        # ... remainder of the function (invocation, status handling, and the
        # finally-block cleanup mentioned later in the post) is omitted here.
Now, to get the value passed down, we had to accept it in the function that calls this one and forward it along.
def exec_workflow(workflow: Dict[str, Any], config: Dict[str, Any], cli_args: Dict[str, Any],results, force_recompile: bool = False)->bytes:
    """Validate a workflow and execute its steps in order.

    force_recompile is threaded through to exec_proc_agent (and recursively
    into nested workflows) so the GUI run button can force proc functions to
    be rebuilt instead of reusing a cached compiled version. The default of
    False preserves the original caching behavior for existing callers.
    """
    with depth_manager.step() as (depth, spacing):
        logging.info(f"{spacing}Executing workflow at depth {depth}")
        logging.debug(f"{spacing}cli_args: {cli_args}")
        try:
            validate_workflow(workflow, config)
        except ValueError as e:
            logging.error(f"{spacing}Workflow validation failed: {str(e)}")
            raise  # validation failure is fatal for the whole workflow
        steps = prepare_for_steps(workflow, results)
        # results['step_index'] tracks progress through the step list.
        while results['step_index'] < len(steps):
            start_time = time.perf_counter()
            step, agent_name, agent_config, scoped_params, step_params = process_vars(steps, config, cli_args, results)
            logging.info(f"{spacing}Executing step: {agent_name}, type : {agent_config['type']}")
            try:
                if agent_config['type'] == 'template':
                    result, status = build_template(agent_config.get('prompt', ''), scoped_params)
                elif agent_config['type'] == 'proc':
                    # Pass force_recompile down so a changed proc function
                    # definition is recompiled instead of served from cache.
                    result, status = exec_proc_agent(agent_config['function'], step_params, agent_config['function_def'], force_recompile)
                elif agent_config['type'] == 'workflow':
                    nested_cli_args, nested_workflow = get_nested_args (step, scoped_params, spacing, agent_name, config, cli_args)
                    # Nested workflows inherit the flag so proc steps inside
                    # them are also recompiled when requested.
                    result, status = exec_workflow(nested_workflow, config, nested_cli_args, {}, force_recompile)
                else: #unknown agent
                    result = b''
                    status = {"status": {"value": 1, "reason": f"Unknown agent type: {agent_config['type']}"}}
                # ... remainder of the loop body and the return are omitted in this excerpt.
And one level higher requires this to be passed down as well:
def exec_agent(agent: Dict[str, Any], agent_name: str, config: Dict[str, Any], cli_args: Dict[str, Any],results, force_recompile: bool = False)->bytes:
    """Run a single agent by delegating to exec_workflow.

    An agent that is not already a workflow is first promoted to a temporary
    one-off workflow. The force_recompile flag is simply forwarded so callers
    (e.g. the GUI run modal) can force cached proc functions to be rebuilt.
    """
    # Promote anything that isn't a workflow into a temporary workflow wrapper.
    if agent.get('type') != 'workflow':
        agent = create_temp_workflow(agent_name, agent, config, cli_args)
    return exec_workflow(agent, config, cli_args, results, force_recompile)
Then in the ui_run_modal file we had to change the one field to pass this in where we needed it.
try:
    # Run the selected agent with force_recompile=True: the run modal must
    # always execute the latest saved proc function definition, never the
    # cached compile from a previous test run.
    final_result_tape, final_status = self.core_lib["exec_agent"](
        agent=agent_to_run,
        agent_name=self.agent_name,
        config=temp_config,
        cli_args=workflow_inputs,
        results={},
        force_recompile=True
    )
And now we can see the right thing happening in the log tab of the run modal.
2025-08-08 21:18:18,198 [INFO] Creating temporary workflow agent for agent: build_google_url_request
2025-08-08 21:18:18,198 [INFO] Executing workflow at depth 1
2025-08-08 21:18:18,198 [INFO] Starting workflow validation
2025-08-08 21:18:18,198 [INFO] Workflow validation completed successfully and has been blessed.
2025-08-08 21:18:18,198 [INFO] Executing step: build_google_url_request, type : proc
2025-08-08 21:18:18,198 [INFO] Starting build_google_url_request
2025-08-08 21:18:18,198 [INFO] Force recompile requested for function build_google_url_request
2025-08-08 21:18:18,199 [INFO] Completed build_google_url_request with status: {'status': {'value': 0, 'reason': 'Success'}}
2025-08-08 21:18:18,200 [INFO] Step completed. 'build_google_url_request' in 1.3427 milliseconds
There was a related bug in this area: I was not deleting a compiled function when execution was over. This led to some really weird behavior if you mistyped the name of the function you were trying to run. The solution was a finally block that deletes the function, if it exists, when force recompile is turned on.
No comments:
Post a Comment