Phidata
Static analysis for Phidata applications to detect agent loops, unsafe tools, and knowledge base injection risks.
Quick Start
inkog scan ./my-phidata-app
What Inkog Detects
| Finding | Severity | Description |
|---|---|---|
| Agent Loop | CRITICAL | Agent without run limits |
| Unsafe Tool | CRITICAL | Tools with shell or file access |
| Knowledge Injection | HIGH | Unsafe knowledge base sources |
| Memory Exposure | HIGH | Unencrypted sensitive data in memory |
| Permission Escalation | HIGH | Tools accessing unauthorized resources |
Agent Without Limits
Agents without run limits can execute indefinitely.
Vulnerable
No run limits or timeout
from phi.agent import Agent
from phi.model.openai import OpenAIChat
agent = Agent(
    model=OpenAIChat(id="gpt-4"),
    tools=[...],
    show_tool_calls=True
    # No run limits
)
# Can run forever
agent.run("Complete this task")
Secure
History limits and timeout wrapper
from phi.agent import Agent
from phi.model.openai import OpenAIChat
import asyncio
agent = Agent(
    model=OpenAIChat(id="gpt-4"),
    tools=[...],
    show_tool_calls=False,        # Disable in production
    num_history_responses=10,     # Limit history
    add_history_to_messages=True
)
# Add timeout wrapper
async def safe_run(agent, message, timeout=120):
    try:
        return await asyncio.wait_for(
            agent.arun(message),
            timeout=timeout
        )
    except asyncio.TimeoutError:
        return "Agent timed out"

# Call from an async context
response = await safe_run(agent, "Complete this task")
Unsafe Tool Access
Tools with unrestricted system access are dangerous.
Vulnerable
Unrestricted shell and file access
from phi.agent import Agent
from phi.tools.shell import ShellTools
from phi.tools.file import FileTools
agent = Agent(
    tools=[
        ShellTools(),  # Can run ANY command
        FileTools()    # Can access ANY file
    ]
)
Secure
Custom tools with path restrictions
from phi.agent import Agent
def safe_read_file(path: str) -> str:
    """Read only from allowed directory."""
    from pathlib import Path
    allowed = Path("./data").resolve()
    target = Path(path).resolve()
    if not target.is_relative_to(allowed):
        return "Error: Access denied"
    return target.read_text()[:5000]
def safe_list_files(directory: str = ".") -> str:
    """List files in allowed directory only."""
    from pathlib import Path
    allowed = Path("./data").resolve()
    target = Path(directory).resolve()
    if not target.is_relative_to(allowed):
        return "Error: Access denied"
    return "\n".join(str(f) for f in target.iterdir())
agent = Agent(
    tools=[
        safe_read_file,   # plain functions can be passed directly as tools
        safe_list_files
    ]
)
Knowledge Base Injection
Loading knowledge from untrusted sources risks poisoning.
Vulnerable
Untrusted knowledge sources
from phi.agent import Agent
from phi.knowledge.pdf import PDFKnowledgeBase
from phi.vectordb.pgvector import PgVector
# Load PDFs from user-provided path
knowledge = PDFKnowledgeBase(
    path=user_provided_path,  # Untrusted!
    vector_db=PgVector(...)
)
agent = Agent(knowledge=knowledge)
Secure
Path validation with size limits
from phi.agent import Agent
from phi.knowledge.pdf import PDFKnowledgeBase
from phi.vectordb.pgvector import PgVector
from pathlib import Path
ALLOWED_DIR = Path("./knowledge/approved").resolve()
MAX_FILE_SIZE = 10 * 1024 * 1024 # 10MB
def validate_knowledge_path(path: str) -> str:
    """Validate and sanitize knowledge path."""
    target = Path(path).resolve()
    if not target.is_relative_to(ALLOWED_DIR):
        raise ValueError("Path not in allowed directory")
    # Check file sizes
    for f in target.glob("*.pdf"):
        if f.stat().st_size > MAX_FILE_SIZE:
            raise ValueError(f"File too large: {f}")
    return str(target)
# Validate before loading
safe_path = validate_knowledge_path(user_path)
knowledge = PDFKnowledgeBase(
    path=safe_path,
    vector_db=PgVector(...)
)
agent = Agent(knowledge=knowledge)
Memory Security
Agent memory can store and leak sensitive information.
Vulnerable
Plain text storage of sensitive data
from phi.agent import Agent
from phi.memory.db.postgres import PgMemory
# Stores all conversations unencrypted
memory = PgMemory(
    table_name="agent_memory",
    db_url=db_url
)
agent = Agent(memory=memory)
# Memory may contain API keys, PII, etc.
Secure
Sanitization and encryption
from phi.agent import Agent
from phi.memory.db.postgres import PgMemory
from cryptography.fernet import Fernet
import re
class SecureMemory(PgMemory):
    def __init__(self, encryption_key: bytes, **kwargs):
        super().__init__(**kwargs)
        self.fernet = Fernet(encryption_key)

    def sanitize(self, text: str) -> str:
        """Remove sensitive data before storage."""
        # Remove API keys
        text = re.sub(r'sk-[a-zA-Z0-9]+', '[REDACTED]', text)
        # Remove SSNs
        text = re.sub(r'\d{3}-\d{2}-\d{4}', '[REDACTED]', text)
        return text

    def add(self, message):
        message.content = self.sanitize(message.content)
        encrypted = self.fernet.encrypt(message.content.encode())
        message.content = encrypted.decode()
        super().add(message)
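# Hedged addition (not in the original example): content stored this way is
# ciphertext, so a read-side helper like this would be needed before the
# text can be used again.
def decrypt_content(fernet: Fernet, stored: str) -> str:
    """Decrypt a message body fetched back from the memory table."""
    return fernet.decrypt(stored.encode()).decode()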
memory = SecureMemory(
    encryption_key=key,
    table_name="secure_memory",
    db_url=db_url
)
Team Agent Delegation
Multi-agent teams can create delegation loops.
Vulnerable
Unlimited delegation between agents
from phi.agent import Agent
researcher = Agent(name="Researcher", role="Research topics")
writer = Agent(name="Writer", role="Write content")
team = Agent(
    team=[researcher, writer],
    instructions="Delegate to team members"
    # No delegation limits
)
Secure
Tracked delegation with limits
from phi.agent import Agent
class DelegationTracker:
    def __init__(self, max_delegations=5):
        self.count = 0
        self.max = max_delegations

    def can_delegate(self):
        return self.count < self.max

    def increment(self):
        self.count += 1
tracker = DelegationTracker()
def controlled_delegate(agent, task):
    if not tracker.can_delegate():
        return "Max delegations reached"
    tracker.increment()
    return agent.run(task)
researcher = Agent(name="Researcher", role="Research topics")
writer = Agent(name="Writer", role="Write content")
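# Hypothetical usage (an addition, not in the original example): route sub-tasks
# through the tracker so every hand-off counts against the limit, e.g.
#   summary = controlled_delegate(researcher, "Summarize recent findings")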
team = Agent(
    team=[researcher, writer],
    instructions="Delegate max 5 times total",
    num_history_responses=5
)
Best Practices
- Add timeouts to all agent runs
- Limit history with num_history_responses
- Restrict tool access with custom functions
- Validate knowledge sources before loading
- Encrypt sensitive data in memory
- Track delegations in multi-agent teams
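A minimal sketch combining these practices (it reuses the illustrative safe_read_file and safe_run helpers defined earlier on this page; they are examples, not Phidata APIs):
from phi.agent import Agent
from phi.model.openai import OpenAIChat
import asyncio

agent = Agent(
    model=OpenAIChat(id="gpt-4"),
    tools=[safe_read_file],        # restricted custom tool instead of ShellTools()
    show_tool_calls=False,
    num_history_responses=10       # bounded history
)
# Run with a hard timeout from synchronous code
result = asyncio.run(safe_run(agent, "Complete this task", timeout=120))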
CLI Examples
# Scan Phidata project
inkog scan ./my-phidata-app
# Show only critical findings (e.g., unsafe tools)
inkog scan . -severity critical
# JSON output
inkog scan . -output json
Related
- CrewAI - Similar multi-agent patterns
- Access Control
- Data Exposure