Execute multiple tasks simultaneously by fanning a request out to specialized agents in parallel, then fanning their results back in to a single aggregator agent that compiles them into one response.
import asyncio

from mcp_agent.app import MCPApp
from mcp_agent.agents.agent import Agent
from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM

app = MCPApp(name="parallel_example")


async def main():
    async with app.run() as context:
        # Create specialized agents for different tasks
        proofreader = Agent(
            name="proofreader",
            instruction="Review for grammar, spelling, and punctuation errors.",
        )
        fact_checker = Agent(
            name="fact_checker",
            instruction="Verify factual consistency and logical coherence.",
        )
        style_enforcer = Agent(
            name="style_enforcer",
            instruction="Analyze narrative flow and storytelling quality.",
            server_names=["fetch"],
        )

        # Aggregator agent
        grader = Agent(
            name="grader",
            instruction="Compile feedback into a structured report with final grade.",
        )

        # Create parallel LLM with fan-out and fan-in
        parallel = ParallelLLM(
            fan_in_agent=grader,
            fan_out_agents=[proofreader, fact_checker, style_enforcer],
            llm_factory=OpenAIAugmentedLLM,
        )

        # Execute parallel workflow
        result = await parallel.generate_str(
            message="Grade this student's short story submission: [story text]"
        )
        print(result)


if __name__ == "__main__":
    asyncio.run(main())
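Conceptually, the fan-out/fan-in pattern that ParallelLLM manages resembles the plain-asyncio sketch below. The reviewer functions and the aggregation step are hypothetical stand-ins for the agent calls, shown only to illustrate how the concurrent fan-out results feed the single fan-in step; they are not part of the mcp-agent API.

import asyncio


# Hypothetical stand-ins for the three reviewer agents (not mcp-agent APIs).
async def proofread(text: str) -> str:
    return f"Proofreading feedback for: {text}"


async def fact_check(text: str) -> str:
    return f"Fact-checking feedback for: {text}"


async def check_style(text: str) -> str:
    return f"Style feedback for: {text}"


async def grade(story: str) -> str:
    # Fan-out: run all reviewers concurrently on the same input.
    reviews = await asyncio.gather(
        proofread(story), fact_check(story), check_style(story)
    )
    # Fan-in: hand the collected feedback to a single aggregation step,
    # which ParallelLLM would delegate to the grader agent.
    return "\n".join(reviews)


print(asyncio.run(grade("[story text]")))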