Pattern: Subchat-Delegating Bot
The Subchat-Delegating Bot pattern uses subchats to perform parallel, specialized work. The main conversation spawns isolated subchats that each focus on a specific aspect of the task.
Reference implementations: LawyerRat (legal analysis), Productman (idea validation)
When to Use
Use this pattern when you need:
- Parallel processing of multiple aspects
- Specialized analysis from different perspectives
- Independent work that shouldn’t pollute the main context
- Results that need to be combined
Architecture
┌─────────────────────────────────────────────────────────────┐
│ MAIN CONVERSATION                                           │
│                                                             │
│ User: "Evaluate this business idea"                         │
│                  │                                          │
│                  ▼                                          │
│ ┌─────────────────────────────────────┐                     │
│ │ Tool: evaluate_idea                 │                     │
│ │ -> Creates 3 subchats               │                     │
│ └─────────────────────────────────────┘                     │
│                  │                                          │
│       ┌──────────┼──────────┐                               │
│       ▼          ▼          ▼                               │
│  ┌─────────┐ ┌─────────┐ ┌─────────┐                        │
│  │Subchat 1│ │Subchat 2│ │Subchat 3│   (parallel)           │
│  │Market   │ │Technical│ │Financial│                        │
│  │Analysis │ │Analysis │ │Analysis │                        │
│  └────┬────┘ └────┬────┘ └────┬────┘                        │
│       │           │           │                             │
│       └──────────┴──────────┘                               │
│                  │                                          │
│                  ▼                                          │
│ ┌─────────────────────────────────────┐                     │
│ │ Results combined as tool result     │                     │
│ └─────────────────────────────────────┘                     │
│                  │                                          │
│                  ▼                                          │
│ Assistant: "Based on the analysis..."                       │
└─────────────────────────────────────────────────────────────┘

Complete Implementation
evaluator_bot.py
import asyncio
import json
from typing import Dict, Any

from flexus_client_kit import ckit_client
from flexus_client_kit import ckit_cloudtool
from flexus_client_kit import ckit_bot_exec
from flexus_client_kit import ckit_shutdown
from flexus_client_kit import ckit_ask_model
from flexus_client_kit.integrations import fi_pdoc
from evaluator import evaluator_install

BOT_NAME = "evaluator"
BOT_VERSION = "1.0.0"

# --- Tool Definitions ---

# Cloud tool exposed to the main-conversation model. Strict JSON schema:
# the model must supply the idea text plus an array of perspective names
# drawn from the fixed enum below.
EVALUATE_IDEA_TOOL = ckit_cloudtool.CloudTool(
    strict=True,
    name="evaluate_idea",
    description="Evaluate a business idea from multiple perspectives in parallel.",
    parameters={
        "type": "object",
        "properties": {
            "idea": {
                "type": "string",
                "description": "The business idea to evaluate",
            },
            "perspectives": {
                "type": "array",
                "items": {
                    "type": "string",
                    "enum": ["market", "technical", "financial", "competitive"],
                },
                "description": "Which perspectives to analyze",
            },
        },
        "required": ["idea", "perspectives"],
        "additionalProperties": False,
    },
)

# Tool set for the bot's default expert.
TOOLS = [
    EVALUATE_IDEA_TOOL,
    fi_pdoc.POLICY_DOCUMENT_TOOL,
]
# --- Main Loop ---

async def evaluator_main_loop(fclient: ckit_client.FlexusClient, rcx: ckit_bot_exec.RobotContext):
    """Per-persona main loop: register tool handlers, then pump events until shutdown.

    :param fclient: connected Flexus client, used to spawn subchats.
    :param rcx: robot context for one persona (tool routing, event queue).
    """
    # Mix install-time schema with this persona's setup. NOTE(review): the
    # result is currently unused here; kept to mirror the reference bots.
    setup = ckit_bot_exec.official_setup_mixing_procedure(
        evaluator_install.evaluator_setup_schema,
        rcx.persona.persona_setup,
    )
    pdoc = fi_pdoc.IntegrationPdoc(rcx, rcx.persona.ws_root_group_id)

    # One (title, question template) pair per perspective. A lookup table
    # replaces four near-identical if/elif branches; "{idea}" is filled in
    # per request. Each prompt ends with the ANALYSIS_COMPLETE marker that
    # the analyst expert's kernel watches for.
    perspective_prompts = {
        "market": ("Market Analysis", """Analyze market potential for this idea:
{idea}

Evaluate:
1. Target market size
2. Customer pain points addressed
3. Market timing
4. Potential barriers

End your analysis with ANALYSIS_COMPLETE."""),
        "technical": ("Technical Analysis", """Analyze technical feasibility for this idea:
{idea}

Evaluate:
1. Technical complexity
2. Required skills/resources
3. Scalability considerations
4. Potential technical risks

End your analysis with ANALYSIS_COMPLETE."""),
        "financial": ("Financial Analysis", """Analyze financial viability for this idea:
{idea}

Evaluate:
1. Revenue model
2. Cost structure
3. Break-even timeline
4. Funding requirements

End your analysis with ANALYSIS_COMPLETE."""),
        "competitive": ("Competitive Analysis", """Analyze competitive landscape for this idea:
{idea}

Evaluate:
1. Direct competitors
2. Indirect alternatives
3. Competitive advantages
4. Defensibility

End your analysis with ANALYSIS_COMPLETE."""),
    }

    # --- Tool Handlers ---

    @rcx.on_tool_call(EVALUATE_IDEA_TOOL.name)
    async def handle_evaluate(toolcall: ckit_cloudtool.FCloudtoolCall, args: Dict[str, Any]) -> str:
        """Spawn one analysis subchat per requested perspective and wait for all of them."""
        idea = args["idea"]
        perspectives = args["perspectives"]
        if not perspectives:
            return "Error: At least one perspective required"
        if len(perspectives) > 4:
            return "Error: Maximum 4 perspectives"

        questions = []
        titles = []
        for p in perspectives:
            if p not in perspective_prompts:
                continue  # schema enum should prevent this; skip unknown values defensively
            title, template = perspective_prompts[p]
            questions.append(template.format(idea=idea))
            titles.append(title)

        # Create the subchats in parallel; each runs the "analyst" expert in isolation.
        subchats = await ckit_ask_model.bot_subchat_create_multiple(
            client=fclient,
            who_is_asking="idea_evaluator",
            persona_id=rcx.persona.persona_id,
            first_question=questions,
            first_calls=["null" for _ in questions],
            title=titles,
            fcall_id=toolcall.fcall_id,
            fexp_name="analyst",  # the analyst expert registered at install time
        )

        # Suspend this tool call until every subchat sets subchat_result;
        # the framework then combines all results into the tool response.
        raise ckit_cloudtool.WaitForSubchats(subchats)

    @rcx.on_tool_call(fi_pdoc.POLICY_DOCUMENT_TOOL.name)
    async def handle_pdoc(toolcall, args):
        """Delegate policy-document tool calls to the pdoc integration."""
        return await pdoc.called_by_model(toolcall, args)

    # --- Event Loop ---
    try:
        while not ckit_shutdown.shutdown_event.is_set():
            await rcx.unpark_collected_events(sleep_if_no_work=10.0)
    finally:
        pass
def main():
    """CLI entry point: parse bot args, build the Flexus client, run the bot group."""
    scenario_fn = ckit_bot_exec.parse_bot_args()
    fclient = ckit_client.FlexusClient(
        ckit_client.bot_service_name(BOT_NAME, BOT_VERSION),
        endpoint="/v1/jailed-bot",
    )
    asyncio.run(ckit_bot_exec.run_bots_in_this_group(
        fclient,
        marketable_name=BOT_NAME,
        marketable_version_str=BOT_VERSION,
        bot_main_loop=evaluator_main_loop,
        inprocess_tools=TOOLS,
        scenario_fn=scenario_fn,
        install_func=evaluator_install.install,
    ))
if __name__ == "__main__": main()evaluator_install.py
import json

from flexus_client_kit import ckit_bot_install
from flexus_client_kit.ckit_bot_install import FMarketplaceExpertInput
# FIX: fi_pdoc was referenced below (POLICY_DOCUMENT_TOOL) but never imported,
# which raised NameError at module import time.
from flexus_client_kit.integrations import fi_pdoc
from flexus_simple_bots import prompts_common
from evaluator import evaluator_bot, evaluator_prompts

# No per-persona setup options yet.
evaluator_setup_schema = {}

# Main expert tools
tools_json = json.dumps([t.openai_style_tool() for t in evaluator_bot.TOOLS])

# Analyst expert has minimal tools (just needs to analyze and write)
analyst_tools_json = json.dumps([
    fi_pdoc.POLICY_DOCUMENT_TOOL.openai_style_tool()
])
def install(ws_id: str):
    """Upsert the evaluator bot (default + analyst experts) into workspace *ws_id*."""
    return ckit_bot_install.marketplace_upsert_dev_bot(
        ws_id=ws_id,
        marketable_name="evaluator",
        marketable_version="1.0.0",
        marketable_title1="Idea Evaluator",
        marketable_title2="Evaluate ideas from multiple perspectives",
        marketable_occupation="Business Analyst",
        marketable_author="Your Name",
        marketable_description="""Evaluator analyzes business ideas from multiple perspectives in parallel.

## Features
- Market analysis
- Technical feasibility
- Financial viability
- Competitive landscape

Results are combined into a comprehensive evaluation.""",
        marketable_typical_group="Business / Analysis",
        marketable_tags=["analysis", "evaluation", "business"],
        marketable_setup_schema=json.dumps(evaluator_setup_schema),
        marketable_picture_big_b64="",
        marketable_picture_small_b64="",
        marketable_experts=[
            # Main expert: full tool set, drives evaluate_idea.
            ("default", FMarketplaceExpertInput(
                fexp_system_prompt=evaluator_prompts.SYSTEM_PROMPT_DEFAULT,
                fexp_app_capture_tools=tools_json,
            )),
            # Subchat expert: minimal tools plus the completion-detecting kernel.
            ("analyst", FMarketplaceExpertInput(
                fexp_system_prompt=evaluator_prompts.SYSTEM_PROMPT_ANALYST,
                fexp_python_kernel=evaluator_prompts.KERNEL_ANALYST,
                fexp_app_capture_tools=analyst_tools_json,
            )),
        ],
        marketable_schedule=[],
    )
if __name__ == "__main__": ckit_bot_install.main_install_dev_bot(install)evaluator_prompts.py
from flexus_simple_bots import prompts_common

# System prompt for the main ("default") expert: instructs it to delegate via
# the evaluate_idea tool, then summarize. The shared policy-document boilerplate
# is appended from prompts_common.
SYSTEM_PROMPT_DEFAULT = """You are an Idea Evaluator that analyzes business ideas.

## Your Process
1. When asked to evaluate an idea, use the `evaluate_idea` tool
2. Select relevant perspectives (market, technical, financial, competitive)
3. Review the combined analysis
4. Provide a summary with recommendations

## Tools

### evaluate_idea
Spawns parallel analysis from multiple perspectives.

### flexus_policy_document
Save evaluations for future reference.
""" + prompts_common.PROMPT_POLICY_DOCUMENTS
# System prompt for the subchat ("analyst") expert. The trailing
# ANALYSIS_COMPLETE instruction pairs with KERNEL_ANALYST's marker detection.
SYSTEM_PROMPT_ANALYST = """You are a specialized analyst conducting focused analysis.

Your task is given to you in the first message. Complete the analysis thoroughly.

Guidelines:
- Be specific and data-driven where possible
- Identify both opportunities and risks
- Provide actionable insights
- End your analysis with ANALYSIS_COMPLETE

You can use flexus_policy_document to reference existing documents if helpful."""
# Kernel that detects completion markerKERNEL_ANALYST = """msg = messages[-1]
# Check if analysis is completeif msg["role"] == "assistant": content = str(msg.get("content", "")) if "ANALYSIS_COMPLETE" in content: # Return the analysis as subchat result subchat_result = content elif not msg.get("tool_calls"): # No tool calls and no completion marker - prompt to finish post_cd_instruction = "Complete your analysis and include ANALYSIS_COMPLETE at the end.""""Key Concepts
Subchat Expert
The expert used for subchats needs:
- Focused system prompt — Specialized for the task
- Lark kernel — Detects completion and sets subchat_result
- Minimal tools — Only what's needed for the analysis
Subchat Completion
Without a kernel that sets subchat_result, subchats will timeout after 10 minutes. Always include completion detection logic.
Completion Markers
Use text markers like ANALYSIS_COMPLETE that the kernel can detect:
KERNEL_ANALYST = """if "ANALYSIS_COMPLETE" in str(messages[-1].get("content", "")):
    subchat_result = messages[-1]["content"]
"""

Result Combination
When all subchats complete, their results are automatically combined and returned as the tool result. The main conversation receives all analyses at once.
Variations
Different Experts per Subchat
# Use different expert for each type
if perspective == "technical":
    fexp_name = "technical_analyst"
elif perspective == "financial":
    fexp_name = "financial_analyst"

Storing Results
# Save each analysis to a document
@rcx.on_tool_call("evaluate_idea")
async def handle(toolcall, args):
    # ... create subchats ...
    # After subchats complete, results are in the tool response
    # Main conversation can then save them:
    # "Save these analyses to /evaluations/{idea_id}/"

Conditional Subchats
Only spawn subchats for relevant perspectives:
if needs_technical_analysis(idea):
    perspectives.append("technical")
if needs_financial_analysis(idea):
    perspectives.append("financial")