diff --git a/.github/workflows/import-time.yml b/.github/workflows/import-time.yml
new file mode 100644
index 0000000000..7c0126b239
--- /dev/null
+++ b/.github/workflows/import-time.yml
@@ -0,0 +1,103 @@
+name: Import Time Guard
+
+on:
+ pull_request:
+ paths:
+ - "lib/crewai/src/**"
+ - "lib/crewai/pyproject.toml"
+ - "pyproject.toml"
+
+permissions:
+ contents: read
+
+jobs:
+ import-time:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.12"]
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - uses: astral-sh/setup-uv@v6
+ with:
+ version: "0.11.3"
+ enable-cache: true
+
+ - name: Install the project
+ run: uv sync --all-extras --no-dev
+ env:
+ UV_PYTHON: ${{ matrix.python-version }}
+
+ - name: Benchmark PR branch
+ id: pr
+ run: |
+ result=$(uv run python scripts/benchmark_import_time.py --runs 5 --json)
+ echo "result=$result" >> "$GITHUB_OUTPUT"
+ echo "pr_median=$(echo $result | python3 -c 'import sys,json; print(json.load(sys.stdin)["median_s"])')" >> "$GITHUB_OUTPUT"
+ echo "### PR Branch Import Time" >> "$GITHUB_STEP_SUMMARY"
+ echo "$result" | python3 -c "
+ import sys, json
+ d = json.load(sys.stdin)
+ print(f'- Median: {d[\"median_s\"]}s')
+ print(f'- Mean: {d[\"mean_s\"]}s ± {d[\"stdev_s\"]}s')
+ print(f'- Range: {d[\"min_s\"]}s – {d[\"max_s\"]}s')
+ " >> "$GITHUB_STEP_SUMMARY"
+ env:
+ UV_PYTHON: ${{ matrix.python-version }}
+
+ - name: Checkout base branch
+ run: git checkout ${{ github.event.pull_request.base.sha }}
+
+ - name: Install base branch
+ run: uv sync --all-extras --no-dev
+ env:
+ UV_PYTHON: ${{ matrix.python-version }}
+
+ - name: Benchmark base branch
+ id: base
+ run: |
+ result=$(uv run python scripts/benchmark_import_time.py --runs 5 --json 2>/dev/null || echo '{"median_s": 0}')
+ echo "result=$result" >> "$GITHUB_OUTPUT"
+ echo "base_median=$(echo $result | python3 -c 'import sys,json; print(json.load(sys.stdin)["median_s"])')" >> "$GITHUB_OUTPUT"
+ echo "### Base Branch Import Time" >> "$GITHUB_STEP_SUMMARY"
+ echo "$result" | python3 -c "
+ import sys, json
+ d = json.load(sys.stdin)
+ if d.get('median_s', 0) > 0:
+ print(f'- Median: {d[\"median_s\"]}s')
+ else:
+ print('- Benchmark script not present on base branch (skip comparison)')
+ " >> "$GITHUB_STEP_SUMMARY"
+ env:
+ UV_PYTHON: ${{ matrix.python-version }}
+
+ - name: Compare and gate
+ run: |
+ pr_median=${{ steps.pr.outputs.pr_median }}
+ base_median=${{ steps.base.outputs.base_median }}
+
+ python3 -c "
+ pr = float('$pr_median')
+ base = float('$base_median')
+
+ if base <= 0:
+ print('⏭️ No base benchmark available — skipping comparison.')
+ exit(0)
+
+ change_pct = ((pr - base) / base) * 100
+ print(f'Base: {base:.3f}s')
+ print(f'PR: {pr:.3f}s')
+ print(f'Change: {change_pct:+.1f}%')
+ print()
+
+ if change_pct > 5:
+ print(f'❌ BLOCKED: Import time regressed by {change_pct:.1f}% (threshold: 5%)')
+ exit(1)
+ elif change_pct > 0:
+ print(f'⚠️ Slight regression ({change_pct:.1f}%) but within 5% threshold.')
+ else:
+ print(f'✅ Import time improved by {abs(change_pct):.1f}%')
+ "
diff --git a/.gitignore b/.gitignore
index 785c2c2994..d7e89fcaa4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,4 @@ chromadb-*.lock
.crewai/memory
blogs/*
secrets/*
+UNKNOWN.egg-info/
diff --git a/docs/ar/guides/coding-tools/build-with-ai.mdx b/docs/ar/guides/coding-tools/build-with-ai.mdx
index 4ea73abb9b..88a94e84d7 100644
--- a/docs/ar/guides/coding-tools/build-with-ai.mdx
+++ b/docs/ar/guides/coding-tools/build-with-ai.mdx
@@ -207,9 +207,6 @@ CrewAI AMP مُصمَّم لفرق الإنتاج. إليك ما تحصل علي
- **Factory (استضافة ذاتية)** — على بنيتك التحتية لسيطرة كاملة على البيانات
- **هجين** — دمج السحابة والاستضافة الذاتية حسب حساسية البيانات
-
- سجّل في [app.crewai.com](https://app.crewai.com) لمعرفة الخطط الحالية. تسعير المؤسسات وFactory متاح عند الطلب.
-
diff --git a/docs/en/guides/coding-tools/build-with-ai.mdx b/docs/en/guides/coding-tools/build-with-ai.mdx
index 2badb284e4..8e6c2b3eac 100644
--- a/docs/en/guides/coding-tools/build-with-ai.mdx
+++ b/docs/en/guides/coding-tools/build-with-ai.mdx
@@ -207,9 +207,6 @@ CrewAI AMP is built for production teams. Here's what you get beyond deployment.
- **Factory (self-hosted)** — run on your own infrastructure for full data control
- **Hybrid** — mix cloud and self-hosted based on sensitivity requirements
-
- Sign up at [app.crewai.com](https://app.crewai.com) to see current plans. Enterprise and Factory pricing is available on request.
-
diff --git a/docs/ko/guides/coding-tools/build-with-ai.mdx b/docs/ko/guides/coding-tools/build-with-ai.mdx
index 22f6b25d85..0e56a06cc9 100644
--- a/docs/ko/guides/coding-tools/build-with-ai.mdx
+++ b/docs/ko/guides/coding-tools/build-with-ai.mdx
@@ -207,9 +207,6 @@ CrewAI AMP는 프로덕션 팀을 위해 만들어졌습니다. 배포 외에
- **Factory(셀프 호스팅)** — 데이터 통제를 위해 자체 인프라에서 실행
- **하이브리드** — 민감도에 따라 클라우드와 셀프 호스팅을 혼합
-
- [app.crewai.com](https://app.crewai.com)에 가입하면 현재 요금제를 확인할 수 있습니다. 엔터프라이즈 및 Factory 가격은 문의 시 안내합니다.
-
diff --git a/docs/pt-BR/guides/coding-tools/build-with-ai.mdx b/docs/pt-BR/guides/coding-tools/build-with-ai.mdx
index bc697ea109..57704aac9d 100644
--- a/docs/pt-BR/guides/coding-tools/build-with-ai.mdx
+++ b/docs/pt-BR/guides/coding-tools/build-with-ai.mdx
@@ -207,9 +207,6 @@ O CrewAI AMP foi feito para equipes em produção. Além da implantação, você
- **Factory (self-hosted)** — na sua infraestrutura para controle total dos dados
- **Híbrido** — combine nuvem e self-hosted conforme a sensibilidade dos dados
-
- Cadastre-se em [app.crewai.com](https://app.crewai.com) para ver os planos atuais. Preços enterprise e Factory sob consulta.
-
diff --git a/lib/crewai-tools/pyproject.toml b/lib/crewai-tools/pyproject.toml
index a43e276531..7cf64465d3 100644
--- a/lib/crewai-tools/pyproject.toml
+++ b/lib/crewai-tools/pyproject.toml
@@ -112,7 +112,7 @@ github = [
]
rag = [
"python-docx>=1.1.0",
- "lxml>=5.3.0,<5.4.0", # Pin to avoid etree import issues in 5.4.0
+ "lxml>=6.1.0,<7", # 6.1.0+ required for GHSA-vfmq-68hx-4jfw (XXE in iterparse)
]
xml = [
"unstructured[local-inference, all-docs]>=0.17.2"
diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py
index 74a3e85ded..a2df8c2b97 100644
--- a/lib/crewai/src/crewai/agent/core.py
+++ b/lib/crewai/src/crewai/agent/core.py
@@ -78,8 +78,7 @@
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.lite_agent_output import LiteAgentOutput
from crewai.llms.base_llm import BaseLLM
-from crewai.mcp import MCPServerConfig
-from crewai.mcp.tool_resolver import MCPToolResolver
+from crewai.mcp.config import MCPServerConfig
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.security.fingerprint import Fingerprint
from crewai.skills.loader import activate_skill, discover_skills
@@ -119,6 +118,7 @@
from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
from crewai.agents.agent_builder.base_agent import PlatformAppOrAction
+ from crewai.mcp.tool_resolver import MCPToolResolver
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
@@ -1120,6 +1120,8 @@ def get_mcp_tools(self, mcps: list[str | MCPServerConfig]) -> list[BaseTool]:
Delegates to :class:`~crewai.mcp.tool_resolver.MCPToolResolver`.
"""
self._cleanup_mcp_clients()
+ from crewai.mcp.tool_resolver import MCPToolResolver
+
self._mcp_resolver = MCPToolResolver(agent=self, logger=self._logger)
return self._mcp_resolver.resolve(mcps)
diff --git a/lib/crewai/src/crewai/events/__init__.py b/lib/crewai/src/crewai/events/__init__.py
index bcdafe49ad..a9c02a243c 100644
--- a/lib/crewai/src/crewai/events/__init__.py
+++ b/lib/crewai/src/crewai/events/__init__.py
@@ -6,112 +6,20 @@
- Build custom logging and analytics
- Extend CrewAI with custom event handlers
- Declare handler dependencies for ordered execution
+
+Event type classes are lazy-loaded on first access to avoid importing
+~12 Pydantic model modules (and their transitive deps) at package init time.
"""
from __future__ import annotations
+import importlib
from typing import TYPE_CHECKING, Any
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.depends import Depends
from crewai.events.event_bus import crewai_event_bus
from crewai.events.handler_graph import CircularDependencyError
-from crewai.events.types.crew_events import (
- CrewKickoffCompletedEvent,
- CrewKickoffFailedEvent,
- CrewKickoffStartedEvent,
- CrewTestCompletedEvent,
- CrewTestFailedEvent,
- CrewTestResultEvent,
- CrewTestStartedEvent,
- CrewTrainCompletedEvent,
- CrewTrainFailedEvent,
- CrewTrainStartedEvent,
-)
-from crewai.events.types.flow_events import (
- FlowCreatedEvent,
- FlowEvent,
- FlowFinishedEvent,
- FlowPlotEvent,
- FlowStartedEvent,
- HumanFeedbackReceivedEvent,
- HumanFeedbackRequestedEvent,
- MethodExecutionFailedEvent,
- MethodExecutionFinishedEvent,
- MethodExecutionStartedEvent,
-)
-from crewai.events.types.knowledge_events import (
- KnowledgeQueryCompletedEvent,
- KnowledgeQueryFailedEvent,
- KnowledgeQueryStartedEvent,
- KnowledgeRetrievalCompletedEvent,
- KnowledgeRetrievalStartedEvent,
- KnowledgeSearchQueryFailedEvent,
-)
-from crewai.events.types.llm_events import (
- LLMCallCompletedEvent,
- LLMCallFailedEvent,
- LLMCallStartedEvent,
- LLMStreamChunkEvent,
-)
-from crewai.events.types.llm_guardrail_events import (
- LLMGuardrailCompletedEvent,
- LLMGuardrailStartedEvent,
-)
-from crewai.events.types.logging_events import (
- AgentLogsExecutionEvent,
- AgentLogsStartedEvent,
-)
-from crewai.events.types.mcp_events import (
- MCPConfigFetchFailedEvent,
- MCPConnectionCompletedEvent,
- MCPConnectionFailedEvent,
- MCPConnectionStartedEvent,
- MCPToolExecutionCompletedEvent,
- MCPToolExecutionFailedEvent,
- MCPToolExecutionStartedEvent,
-)
-from crewai.events.types.memory_events import (
- MemoryQueryCompletedEvent,
- MemoryQueryFailedEvent,
- MemoryQueryStartedEvent,
- MemoryRetrievalCompletedEvent,
- MemoryRetrievalFailedEvent,
- MemoryRetrievalStartedEvent,
- MemorySaveCompletedEvent,
- MemorySaveFailedEvent,
- MemorySaveStartedEvent,
-)
-from crewai.events.types.reasoning_events import (
- AgentReasoningCompletedEvent,
- AgentReasoningFailedEvent,
- AgentReasoningStartedEvent,
- ReasoningEvent,
-)
-from crewai.events.types.skill_events import (
- SkillActivatedEvent,
- SkillDiscoveryCompletedEvent,
- SkillDiscoveryStartedEvent,
- SkillEvent,
- SkillLoadFailedEvent,
- SkillLoadedEvent,
-)
-from crewai.events.types.task_events import (
- TaskCompletedEvent,
- TaskEvaluationEvent,
- TaskFailedEvent,
- TaskStartedEvent,
-)
-from crewai.events.types.tool_usage_events import (
- ToolExecutionErrorEvent,
- ToolSelectionErrorEvent,
- ToolUsageErrorEvent,
- ToolUsageEvent,
- ToolUsageFinishedEvent,
- ToolUsageStartedEvent,
- ToolValidateInputErrorEvent,
-)
-
if TYPE_CHECKING:
from crewai.events.types.agent_events import (
@@ -125,6 +33,223 @@
LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent,
)
+ from crewai.events.types.crew_events import (
+ CrewKickoffCompletedEvent,
+ CrewKickoffFailedEvent,
+ CrewKickoffStartedEvent,
+ CrewTestCompletedEvent,
+ CrewTestFailedEvent,
+ CrewTestResultEvent,
+ CrewTestStartedEvent,
+ CrewTrainCompletedEvent,
+ CrewTrainFailedEvent,
+ CrewTrainStartedEvent,
+ )
+ from crewai.events.types.flow_events import (
+ FlowCreatedEvent,
+ FlowEvent,
+ FlowFinishedEvent,
+ FlowPlotEvent,
+ FlowStartedEvent,
+ HumanFeedbackReceivedEvent,
+ HumanFeedbackRequestedEvent,
+ MethodExecutionFailedEvent,
+ MethodExecutionFinishedEvent,
+ MethodExecutionStartedEvent,
+ )
+ from crewai.events.types.knowledge_events import (
+ KnowledgeQueryCompletedEvent,
+ KnowledgeQueryFailedEvent,
+ KnowledgeQueryStartedEvent,
+ KnowledgeRetrievalCompletedEvent,
+ KnowledgeRetrievalStartedEvent,
+ KnowledgeSearchQueryFailedEvent,
+ )
+ from crewai.events.types.llm_events import (
+ LLMCallCompletedEvent,
+ LLMCallFailedEvent,
+ LLMCallStartedEvent,
+ LLMStreamChunkEvent,
+ )
+ from crewai.events.types.llm_guardrail_events import (
+ LLMGuardrailCompletedEvent,
+ LLMGuardrailStartedEvent,
+ )
+ from crewai.events.types.logging_events import (
+ AgentLogsExecutionEvent,
+ AgentLogsStartedEvent,
+ )
+ from crewai.events.types.mcp_events import (
+ MCPConfigFetchFailedEvent,
+ MCPConnectionCompletedEvent,
+ MCPConnectionFailedEvent,
+ MCPConnectionStartedEvent,
+ MCPToolExecutionCompletedEvent,
+ MCPToolExecutionFailedEvent,
+ MCPToolExecutionStartedEvent,
+ )
+ from crewai.events.types.memory_events import (
+ MemoryQueryCompletedEvent,
+ MemoryQueryFailedEvent,
+ MemoryQueryStartedEvent,
+ MemoryRetrievalCompletedEvent,
+ MemoryRetrievalFailedEvent,
+ MemoryRetrievalStartedEvent,
+ MemorySaveCompletedEvent,
+ MemorySaveFailedEvent,
+ MemorySaveStartedEvent,
+ )
+ from crewai.events.types.reasoning_events import (
+ AgentReasoningCompletedEvent,
+ AgentReasoningFailedEvent,
+ AgentReasoningStartedEvent,
+ ReasoningEvent,
+ )
+ from crewai.events.types.skill_events import (
+ SkillActivatedEvent,
+ SkillDiscoveryCompletedEvent,
+ SkillDiscoveryStartedEvent,
+ SkillEvent,
+ SkillLoadFailedEvent,
+ SkillLoadedEvent,
+ )
+ from crewai.events.types.task_events import (
+ TaskCompletedEvent,
+ TaskEvaluationEvent,
+ TaskFailedEvent,
+ TaskStartedEvent,
+ )
+ from crewai.events.types.tool_usage_events import (
+ ToolExecutionErrorEvent,
+ ToolSelectionErrorEvent,
+ ToolUsageErrorEvent,
+ ToolUsageEvent,
+ ToolUsageFinishedEvent,
+ ToolUsageStartedEvent,
+ ToolValidateInputErrorEvent,
+ )
+
+# Map every event class name → its module path for lazy loading
+_LAZY_EVENT_MAPPING: dict[str, str] = {
+ # agent_events
+ "AgentEvaluationCompletedEvent": "crewai.events.types.agent_events",
+ "AgentEvaluationFailedEvent": "crewai.events.types.agent_events",
+ "AgentEvaluationStartedEvent": "crewai.events.types.agent_events",
+ "AgentExecutionCompletedEvent": "crewai.events.types.agent_events",
+ "AgentExecutionErrorEvent": "crewai.events.types.agent_events",
+ "AgentExecutionStartedEvent": "crewai.events.types.agent_events",
+ "LiteAgentExecutionCompletedEvent": "crewai.events.types.agent_events",
+ "LiteAgentExecutionErrorEvent": "crewai.events.types.agent_events",
+ "LiteAgentExecutionStartedEvent": "crewai.events.types.agent_events",
+ # crew_events
+ "CrewKickoffCompletedEvent": "crewai.events.types.crew_events",
+ "CrewKickoffFailedEvent": "crewai.events.types.crew_events",
+ "CrewKickoffStartedEvent": "crewai.events.types.crew_events",
+ "CrewTestCompletedEvent": "crewai.events.types.crew_events",
+ "CrewTestFailedEvent": "crewai.events.types.crew_events",
+ "CrewTestResultEvent": "crewai.events.types.crew_events",
+ "CrewTestStartedEvent": "crewai.events.types.crew_events",
+ "CrewTrainCompletedEvent": "crewai.events.types.crew_events",
+ "CrewTrainFailedEvent": "crewai.events.types.crew_events",
+ "CrewTrainStartedEvent": "crewai.events.types.crew_events",
+ # flow_events
+ "FlowCreatedEvent": "crewai.events.types.flow_events",
+ "FlowEvent": "crewai.events.types.flow_events",
+ "FlowFinishedEvent": "crewai.events.types.flow_events",
+ "FlowPlotEvent": "crewai.events.types.flow_events",
+ "FlowStartedEvent": "crewai.events.types.flow_events",
+ "HumanFeedbackReceivedEvent": "crewai.events.types.flow_events",
+ "HumanFeedbackRequestedEvent": "crewai.events.types.flow_events",
+ "MethodExecutionFailedEvent": "crewai.events.types.flow_events",
+ "MethodExecutionFinishedEvent": "crewai.events.types.flow_events",
+ "MethodExecutionStartedEvent": "crewai.events.types.flow_events",
+ # knowledge_events
+ "KnowledgeQueryCompletedEvent": "crewai.events.types.knowledge_events",
+ "KnowledgeQueryFailedEvent": "crewai.events.types.knowledge_events",
+ "KnowledgeQueryStartedEvent": "crewai.events.types.knowledge_events",
+ "KnowledgeRetrievalCompletedEvent": "crewai.events.types.knowledge_events",
+ "KnowledgeRetrievalStartedEvent": "crewai.events.types.knowledge_events",
+ "KnowledgeSearchQueryFailedEvent": "crewai.events.types.knowledge_events",
+ # llm_events
+ "LLMCallCompletedEvent": "crewai.events.types.llm_events",
+ "LLMCallFailedEvent": "crewai.events.types.llm_events",
+ "LLMCallStartedEvent": "crewai.events.types.llm_events",
+ "LLMStreamChunkEvent": "crewai.events.types.llm_events",
+ # llm_guardrail_events
+ "LLMGuardrailCompletedEvent": "crewai.events.types.llm_guardrail_events",
+ "LLMGuardrailStartedEvent": "crewai.events.types.llm_guardrail_events",
+ # logging_events
+ "AgentLogsExecutionEvent": "crewai.events.types.logging_events",
+ "AgentLogsStartedEvent": "crewai.events.types.logging_events",
+ # mcp_events
+ "MCPConfigFetchFailedEvent": "crewai.events.types.mcp_events",
+ "MCPConnectionCompletedEvent": "crewai.events.types.mcp_events",
+ "MCPConnectionFailedEvent": "crewai.events.types.mcp_events",
+ "MCPConnectionStartedEvent": "crewai.events.types.mcp_events",
+ "MCPToolExecutionCompletedEvent": "crewai.events.types.mcp_events",
+ "MCPToolExecutionFailedEvent": "crewai.events.types.mcp_events",
+ "MCPToolExecutionStartedEvent": "crewai.events.types.mcp_events",
+ # memory_events
+ "MemoryQueryCompletedEvent": "crewai.events.types.memory_events",
+ "MemoryQueryFailedEvent": "crewai.events.types.memory_events",
+ "MemoryQueryStartedEvent": "crewai.events.types.memory_events",
+ "MemoryRetrievalCompletedEvent": "crewai.events.types.memory_events",
+ "MemoryRetrievalFailedEvent": "crewai.events.types.memory_events",
+ "MemoryRetrievalStartedEvent": "crewai.events.types.memory_events",
+ "MemorySaveCompletedEvent": "crewai.events.types.memory_events",
+ "MemorySaveFailedEvent": "crewai.events.types.memory_events",
+ "MemorySaveStartedEvent": "crewai.events.types.memory_events",
+ # reasoning_events
+ "AgentReasoningCompletedEvent": "crewai.events.types.reasoning_events",
+ "AgentReasoningFailedEvent": "crewai.events.types.reasoning_events",
+ "AgentReasoningStartedEvent": "crewai.events.types.reasoning_events",
+ "ReasoningEvent": "crewai.events.types.reasoning_events",
+ # skill_events
+ "SkillActivatedEvent": "crewai.events.types.skill_events",
+ "SkillDiscoveryCompletedEvent": "crewai.events.types.skill_events",
+ "SkillDiscoveryStartedEvent": "crewai.events.types.skill_events",
+ "SkillEvent": "crewai.events.types.skill_events",
+ "SkillLoadFailedEvent": "crewai.events.types.skill_events",
+ "SkillLoadedEvent": "crewai.events.types.skill_events",
+ # task_events
+ "TaskCompletedEvent": "crewai.events.types.task_events",
+ "TaskEvaluationEvent": "crewai.events.types.task_events",
+ "TaskFailedEvent": "crewai.events.types.task_events",
+ "TaskStartedEvent": "crewai.events.types.task_events",
+ # tool_usage_events
+ "ToolExecutionErrorEvent": "crewai.events.types.tool_usage_events",
+ "ToolSelectionErrorEvent": "crewai.events.types.tool_usage_events",
+ "ToolUsageErrorEvent": "crewai.events.types.tool_usage_events",
+ "ToolUsageEvent": "crewai.events.types.tool_usage_events",
+ "ToolUsageFinishedEvent": "crewai.events.types.tool_usage_events",
+ "ToolUsageStartedEvent": "crewai.events.types.tool_usage_events",
+ "ToolValidateInputErrorEvent": "crewai.events.types.tool_usage_events",
+}
+
+_extension_exports: dict[str, Any] = {}
+
+
+def __getattr__(name: str) -> Any:
+ """Lazy import for event types and registered extensions."""
+ if name in _LAZY_EVENT_MAPPING:
+ module_path = _LAZY_EVENT_MAPPING[name]
+ module = importlib.import_module(module_path)
+ val = getattr(module, name)
+ globals()[name] = val # cache for subsequent access
+ return val
+
+ if name in _extension_exports:
+ value = _extension_exports[name]
+ if isinstance(value, str):
+ module_path, _, attr_name = value.rpartition(".")
+ if module_path:
+ module = importlib.import_module(module_path)
+ return getattr(module, attr_name)
+ return importlib.import_module(value)
+ return value
+
+ msg = f"module {__name__!r} has no attribute {name!r}"
+ raise AttributeError(msg)
__all__ = [
@@ -214,42 +339,3 @@
"_extension_exports",
"crewai_event_bus",
]
-
-_AGENT_EVENT_MAPPING = {
- "AgentEvaluationCompletedEvent": "crewai.events.types.agent_events",
- "AgentEvaluationFailedEvent": "crewai.events.types.agent_events",
- "AgentEvaluationStartedEvent": "crewai.events.types.agent_events",
- "AgentExecutionCompletedEvent": "crewai.events.types.agent_events",
- "AgentExecutionErrorEvent": "crewai.events.types.agent_events",
- "AgentExecutionStartedEvent": "crewai.events.types.agent_events",
- "LiteAgentExecutionCompletedEvent": "crewai.events.types.agent_events",
- "LiteAgentExecutionErrorEvent": "crewai.events.types.agent_events",
- "LiteAgentExecutionStartedEvent": "crewai.events.types.agent_events",
-}
-
-_extension_exports: dict[str, Any] = {}
-
-
-def __getattr__(name: str) -> Any:
- """Lazy import for agent events and registered extensions."""
- if name in _AGENT_EVENT_MAPPING:
- import importlib
-
- module_path = _AGENT_EVENT_MAPPING[name]
- module = importlib.import_module(module_path)
- return getattr(module, name)
-
- if name in _extension_exports:
- import importlib
-
- value = _extension_exports[name]
- if isinstance(value, str):
- module_path, _, attr_name = value.rpartition(".")
- if module_path:
- module = importlib.import_module(module_path)
- return getattr(module, attr_name)
- return importlib.import_module(value)
- return value
-
- msg = f"module {__name__!r} has no attribute {name!r}"
- raise AttributeError(msg)
diff --git a/lib/crewai/src/crewai/mcp/__init__.py b/lib/crewai/src/crewai/mcp/__init__.py
index e078919fdf..bb3dab1990 100644
--- a/lib/crewai/src/crewai/mcp/__init__.py
+++ b/lib/crewai/src/crewai/mcp/__init__.py
@@ -2,9 +2,17 @@
This module provides native MCP client functionality, allowing CrewAI agents
to connect to any MCP-compliant server using various transport types.
+
+Heavy imports (MCPClient, MCPToolResolver, BaseTransport, TransportType) are
+lazy-loaded on first access to avoid pulling in the ``mcp`` SDK (~400ms)
+when only lightweight config/filter types are needed.
"""
-from crewai.mcp.client import MCPClient
+from __future__ import annotations
+
+import importlib
+from typing import TYPE_CHECKING, Any
+
from crewai.mcp.config import (
MCPServerConfig,
MCPServerHTTP,
@@ -18,8 +26,28 @@
create_dynamic_tool_filter,
create_static_tool_filter,
)
-from crewai.mcp.tool_resolver import MCPToolResolver
-from crewai.mcp.transports.base import BaseTransport, TransportType
+
+if TYPE_CHECKING:
+ from crewai.mcp.client import MCPClient
+ from crewai.mcp.tool_resolver import MCPToolResolver
+ from crewai.mcp.transports.base import BaseTransport, TransportType
+
+_LAZY: dict[str, tuple[str, str]] = {
+ "MCPClient": ("crewai.mcp.client", "MCPClient"),
+ "MCPToolResolver": ("crewai.mcp.tool_resolver", "MCPToolResolver"),
+ "BaseTransport": ("crewai.mcp.transports.base", "BaseTransport"),
+ "TransportType": ("crewai.mcp.transports.base", "TransportType"),
+}
+
+
+def __getattr__(name: str) -> Any:
+ if name in _LAZY:
+ mod_path, attr = _LAZY[name]
+ mod = importlib.import_module(mod_path)
+ val = getattr(mod, attr)
+ globals()[name] = val # cache for subsequent access
+ return val
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
__all__ = [
diff --git a/pyproject.toml b/pyproject.toml
index 1b8aea627c..754b4d6353 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -164,7 +164,7 @@ info = "Commits must follow Conventional Commits 1.0.0."
[tool.uv]
# Pinned to include the security patch releases (authlib 1.6.11,
# langchain-text-splitters 1.1.2) uploaded on 2026-04-16.
-exclude-newer = "2026-04-17"
+exclude-newer = "2026-04-22"
# composio-core pins rich<14 but textual requires rich>=14.
# onnxruntime 1.24+ dropped Python 3.10 wheels; cap it so qdrant[fastembed] resolves on 3.10.
diff --git a/scripts/benchmark_import_time.py b/scripts/benchmark_import_time.py
new file mode 100755
index 0000000000..e44b2272a0
--- /dev/null
+++ b/scripts/benchmark_import_time.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+"""Benchmark `import crewai` cold start time.
+
+Usage:
+ python scripts/benchmark_import_time.py [--runs N] [--json]
+
+Spawns a fresh Python subprocess for each run to ensure cold imports.
+Prints median, mean, min, max across all runs.
+With --json, outputs machine-readable results for CI.
+"""
+import argparse
+import json
+import statistics
+import subprocess
+import sys
+
+
+IMPORT_SCRIPT = "import time; t0 = time.perf_counter(); import crewai; print(time.perf_counter() - t0)"
+
+
+def measure_import(python: str = sys.executable) -> float:
+ """Run a single cold-import measurement in a subprocess."""
+ result = subprocess.run(
+ [python, "-c", IMPORT_SCRIPT],
+ capture_output=True,
+ text=True,
+        env={"PATH": "", "VIRTUAL_ENV": "", "PYTHONPATH": ""},  # NOTE(review): replaces the ENTIRE env — HOME/LANG are dropped too; confirm `import crewai` tolerates a near-empty environment
+ timeout=30,
+ )
+ if result.returncode != 0:
+ raise RuntimeError(f"Import failed: {result.stderr.strip()}")
+    return float(result.stdout.strip().splitlines()[-1])  # last line only, so stray prints during import don't break the parse
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Benchmark crewai import time")
+ parser.add_argument("--runs", type=int, default=5, help="Number of runs (default: 5)")
+ parser.add_argument("--json", action="store_true", help="Output JSON for CI")
+ parser.add_argument("--threshold", type=float, default=None,
+ help="Fail if median exceeds this value (seconds)")
+ args = parser.parse_args()
+
+ times = []
+ for i in range(args.runs):
+ t = measure_import()
+ times.append(t)
+ if not args.json:
+ print(f" Run {i + 1}: {t:.3f}s")
+
+ median = statistics.median(times)
+ mean = statistics.mean(times)
+ stdev = statistics.stdev(times) if len(times) > 1 else 0.0
+
+ result = {
+ "runs": args.runs,
+ "median_s": round(median, 3),
+ "mean_s": round(mean, 3),
+ "stdev_s": round(stdev, 3),
+ "min_s": round(min(times), 3),
+ "max_s": round(max(times), 3),
+ }
+
+ if args.json:
+ print(json.dumps(result))
+ else:
+ print(f"\n Median: {median:.3f}s")
+ print(f" Mean: {mean:.3f}s ± {stdev:.3f}s")
+ print(f" Range: {min(times):.3f}s – {max(times):.3f}s")
+
+    if args.threshold is not None and median > args.threshold:  # explicit None check so --threshold 0 is honored
+ print(f"\n ❌ FAILED: median {median:.3f}s exceeds threshold {args.threshold:.3f}s")
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()