Remove conversation context/recall system from ClaudeTools
Completely removed the database context recall system while preserving the
database tables for safety. This major cleanup removes 80+ files and
16,831 lines of code.

What was removed:
- API layer: 4 routers (conversation-contexts, context-snippets, project-states, decision-logs) with 35+ endpoints
- Database models: 5 models (ConversationContext, ContextSnippet, DecisionLog, ProjectState, ContextTag)
- Services: 4 service layers with business logic
- Schemas: 4 Pydantic schema files
- Claude Code hooks: 13 hook files (user-prompt-submit, task-complete, sync-contexts, periodic saves)
- Scripts: 15+ scripts (import, migration, testing, tombstone checking)
- Tests: 5 test files (context recall, compression, diagnostics)
- Documentation: 30+ markdown files (guides, architecture, quick starts)
- Utilities: context compression, conversation parsing

Files modified:
- api/main.py: Removed router registrations
- api/models/__init__.py: Removed model imports
- api/schemas/__init__.py: Removed schema imports
- api/services/__init__.py: Removed service imports
- .claude/claude.md: Completely rewritten without context references

Database tables preserved:
- conversation_contexts, context_snippets, context_tags, project_states, decision_logs (5 orphaned tables remain for safety)
- Migration created but NOT applied: 20260118_172743_remove_context_system.py
- Tables can be dropped later when confirmed not needed

New files added:
- CONTEXT_SYSTEM_REMOVAL_SUMMARY.md: Detailed removal report
- CONTEXT_SYSTEM_REMOVAL_COMPLETE.md: Final status
- CONTEXT_EXPORT_RESULTS.md: Export attempt results
- scripts/export-tombstoned-contexts.py: Export tool for future use
- migrations/versions/20260118_172743_remove_context_system.py

Impact:
- Reduced from 130 to 95 API endpoints
- Reduced from 43 to 38 active database tables
- Removed 16,831 lines of code
- System fully operational without context recall

Reason for removal:
- System was not actively used (no tombstoned contexts found)
- Reduces codebase complexity
- Focuses on core MSP work tracking functionality
- Database preserved for safety (can rollback if needed)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
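For reference, a minimal sketch of what the unapplied migration might contain, assuming a standard Alembic layout; the table names come from the commit message above, and the revision identifiers are placeholders rather than the real values:

"""Sketch of migrations/versions/20260118_172743_remove_context_system.py.

Assumes an Alembic-style migration; the actual file may differ. The drop
is deferred until the preserved tables are confirmed unneeded.
"""
from alembic import op

revision = "20260118_172743"  # placeholder revision id
down_revision = None          # placeholder: the actual parent revision


def upgrade() -> None:
    # Drop dependent tables first so foreign keys do not block the drop.
    for table in (
        "context_tags",
        "context_snippets",
        "decision_logs",
        "project_states",
        "conversation_contexts",
    ):
        op.drop_table(table)


def downgrade() -> None:
    # Rolling back would require the original CREATE TABLE definitions
    # from the removed models; they are omitted in this sketch.
    raise NotImplementedError("restore table DDL from the removed models")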
--- a/api/main.py
+++ b/api/main.py
@@ -31,10 +31,6 @@ from api.routers import (
     credentials,
     credential_audit_logs,
     security_incidents,
-    conversation_contexts,
-    context_snippets,
-    project_states,
-    decision_logs,
     bulk_import,
     version,
 )
@@ -126,10 +122,6 @@ app.include_router(firewall_rules.router, prefix="/api/firewall-rules", tags=["F
 app.include_router(credentials.router, prefix="/api/credentials", tags=["Credentials"])
 app.include_router(credential_audit_logs.router, prefix="/api/credential-audit-logs", tags=["Credential Audit Logs"])
 app.include_router(security_incidents.router, prefix="/api/security-incidents", tags=["Security Incidents"])
-app.include_router(conversation_contexts.router, prefix="/api/conversation-contexts", tags=["Conversation Contexts"])
-app.include_router(context_snippets.router, prefix="/api/context-snippets", tags=["Context Snippets"])
-app.include_router(project_states.router, prefix="/api/project-states", tags=["Project States"])
-app.include_router(decision_logs.router, prefix="/api/decision-logs", tags=["Decision Logs"])
 app.include_router(bulk_import.router, prefix="/api/bulk-import", tags=["Bulk Import"])
--- a/api/models/__init__.py
+++ b/api/models/__init__.py
@@ -10,13 +10,10 @@ from api.models.base import Base, TimestampMixin, UUIDMixin
 from api.models.billable_time import BillableTime
 from api.models.client import Client
 from api.models.command_run import CommandRun
-from api.models.context_snippet import ContextSnippet
-from api.models.conversation_context import ConversationContext
 from api.models.credential import Credential
 from api.models.credential_audit_log import CredentialAuditLog
 from api.models.credential_permission import CredentialPermission
 from api.models.database_change import DatabaseChange
-from api.models.decision_log import DecisionLog
 from api.models.deployment import Deployment
 from api.models.environmental_insight import EnvironmentalInsight
 from api.models.external_integration import ExternalIntegration
@@ -34,7 +31,6 @@ from api.models.operation_failure import OperationFailure
 from api.models.pending_task import PendingTask
 from api.models.problem_solution import ProblemSolution
 from api.models.project import Project
-from api.models.project_state import ProjectState
 from api.models.schema_migration import SchemaMigration
 from api.models.security_incident import SecurityIncident
 from api.models.service import Service
@@ -55,13 +51,10 @@ __all__ = [
     "BillableTime",
     "Client",
     "CommandRun",
-    "ContextSnippet",
-    "ConversationContext",
     "Credential",
     "CredentialAuditLog",
     "CredentialPermission",
     "DatabaseChange",
-    "DecisionLog",
     "Deployment",
     "EnvironmentalInsight",
     "ExternalIntegration",
@@ -79,7 +72,6 @@ __all__ = [
     "PendingTask",
     "ProblemSolution",
     "Project",
-    "ProjectState",
     "SchemaMigration",
     "SecurityIncident",
     "Service",
--- a/api/models/context_snippet.py
+++ /dev/null
@@ -1,124 +0,0 @@
-"""
-ContextSnippet model for storing reusable context snippets.
-
-Stores small, highly compressed pieces of information like technical decisions,
-configurations, patterns, and lessons learned for quick retrieval.
-"""
-
-from typing import TYPE_CHECKING, Optional
-
-from sqlalchemy import Float, ForeignKey, Index, Integer, String, Text
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-
-from .base import Base, TimestampMixin, UUIDMixin
-
-if TYPE_CHECKING:
-    from .client import Client
-    from .project import Project
-
-
-class ContextSnippet(Base, UUIDMixin, TimestampMixin):
-    """
-    ContextSnippet model for storing reusable context snippets.
-
-    Stores small, highly compressed pieces of information like technical
-    decisions, configurations, patterns, and lessons learned. These snippets
-    are designed for quick retrieval and reuse across conversations.
-
-    Attributes:
-        category: Category of snippet (tech_decision, configuration, pattern, lesson_learned)
-        title: Brief title describing the snippet
-        dense_content: Highly compressed information content
-        structured_data: JSON object for optional structured representation
-        tags: JSON array of tags for retrieval and categorization
-        project_id: Foreign key to projects (optional)
-        client_id: Foreign key to clients (optional)
-        relevance_score: Float score for ranking relevance (default 1.0)
-        usage_count: Integer count of how many times this snippet was retrieved (default 0)
-        project: Relationship to Project model
-        client: Relationship to Client model
-    """
-
-    __tablename__ = "context_snippets"
-
-    # Foreign keys
-    project_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("projects.id", ondelete="SET NULL"),
-        doc="Foreign key to projects (optional)"
-    )
-
-    client_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("clients.id", ondelete="SET NULL"),
-        doc="Foreign key to clients (optional)"
-    )
-
-    # Snippet metadata
-    category: Mapped[str] = mapped_column(
-        String(100),
-        nullable=False,
-        doc="Category: tech_decision, configuration, pattern, lesson_learned"
-    )
-
-    title: Mapped[str] = mapped_column(
-        String(200),
-        nullable=False,
-        doc="Brief title describing the snippet"
-    )
-
-    # Content
-    dense_content: Mapped[str] = mapped_column(
-        Text,
-        nullable=False,
-        doc="Highly compressed information content"
-    )
-
-    structured_data: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON object for optional structured representation"
-    )
-
-    # Retrieval metadata
-    tags: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of tags for retrieval and categorization"
-    )
-
-    relevance_score: Mapped[float] = mapped_column(
-        Float,
-        default=1.0,
-        server_default="1.0",
-        doc="Float score for ranking relevance (default 1.0)"
-    )
-
-    usage_count: Mapped[int] = mapped_column(
-        Integer,
-        default=0,
-        server_default="0",
-        doc="Integer count of how many times this snippet was retrieved"
-    )
-
-    # Relationships
-    project: Mapped[Optional["Project"]] = relationship(
-        "Project",
-        doc="Relationship to Project model"
-    )
-
-    client: Mapped[Optional["Client"]] = relationship(
-        "Client",
-        doc="Relationship to Client model"
-    )
-
-    # Indexes
-    __table_args__ = (
-        Index("idx_context_snippets_project", "project_id"),
-        Index("idx_context_snippets_client", "client_id"),
-        Index("idx_context_snippets_category", "category"),
-        Index("idx_context_snippets_relevance", "relevance_score"),
-        Index("idx_context_snippets_usage", "usage_count"),
-    )
-
-    def __repr__(self) -> str:
-        """String representation of the context snippet."""
-        return f"<ContextSnippet(title='{self.title}', category='{self.category}', usage={self.usage_count})>"
--- a/api/models/conversation_context.py
+++ /dev/null
@@ -1,135 +0,0 @@
-"""
-ConversationContext model for storing Claude's conversation context.
-
-Stores compressed summaries of conversations, sessions, and project states
-for cross-machine recall and context continuity.
-"""
-
-from typing import TYPE_CHECKING, Optional
-
-from sqlalchemy import Float, ForeignKey, Index, String, Text
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-
-from .base import Base, TimestampMixin, UUIDMixin
-
-if TYPE_CHECKING:
-    from .machine import Machine
-    from .project import Project
-    from .session import Session
-
-
-class ConversationContext(Base, UUIDMixin, TimestampMixin):
-    """
-    ConversationContext model for storing Claude's conversation context.
-
-    Stores compressed, structured summaries of conversations, work sessions,
-    and project states to enable Claude to recall important context across
-    different machines and conversation sessions.
-
-    Attributes:
-        session_id: Foreign key to sessions (optional - not all contexts are work sessions)
-        project_id: Foreign key to projects (optional)
-        context_type: Type of context (session_summary, project_state, general_context)
-        title: Brief title describing the context
-        dense_summary: Compressed, structured summary (JSON or dense text)
-        key_decisions: JSON array of important decisions made
-        current_state: JSON object describing what's currently in progress
-        tags: JSON array of tags for retrieval and categorization
-        relevance_score: Float score for ranking relevance (default 1.0)
-        machine_id: Foreign key to machines (which machine created this context)
-        session: Relationship to Session model
-        project: Relationship to Project model
-        machine: Relationship to Machine model
-    """
-
-    __tablename__ = "conversation_contexts"
-
-    # Foreign keys
-    session_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("sessions.id", ondelete="SET NULL"),
-        doc="Foreign key to sessions (optional - not all contexts are work sessions)"
-    )
-
-    project_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("projects.id", ondelete="SET NULL"),
-        doc="Foreign key to projects (optional)"
-    )
-
-    machine_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("machines.id", ondelete="SET NULL"),
-        doc="Foreign key to machines (which machine created this context)"
-    )
-
-    # Context metadata
-    context_type: Mapped[str] = mapped_column(
-        String(50),
-        nullable=False,
-        doc="Type of context: session_summary, project_state, general_context"
-    )
-
-    title: Mapped[str] = mapped_column(
-        String(200),
-        nullable=False,
-        doc="Brief title describing the context"
-    )
-
-    # Context content
-    dense_summary: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="Compressed, structured summary (JSON or dense text)"
-    )
-
-    key_decisions: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of important decisions made"
-    )
-
-    current_state: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON object describing what's currently in progress"
-    )
-
-    # Retrieval metadata
-    tags: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of tags for retrieval and categorization"
-    )
-
-    relevance_score: Mapped[float] = mapped_column(
-        Float,
-        default=1.0,
-        server_default="1.0",
-        doc="Float score for ranking relevance (default 1.0)"
-    )
-
-    # Relationships
-    session: Mapped[Optional["Session"]] = relationship(
-        "Session",
-        doc="Relationship to Session model"
-    )
-
-    project: Mapped[Optional["Project"]] = relationship(
-        "Project",
-        doc="Relationship to Project model"
-    )
-
-    machine: Mapped[Optional["Machine"]] = relationship(
-        "Machine",
-        doc="Relationship to Machine model"
-    )
-
-    # Indexes
-    __table_args__ = (
-        Index("idx_conversation_contexts_session", "session_id"),
-        Index("idx_conversation_contexts_project", "project_id"),
-        Index("idx_conversation_contexts_machine", "machine_id"),
-        Index("idx_conversation_contexts_type", "context_type"),
-        Index("idx_conversation_contexts_relevance", "relevance_score"),
-    )
-
-    def __repr__(self) -> str:
-        """String representation of the conversation context."""
-        return f"<ConversationContext(title='{self.title}', type='{self.context_type}', relevance={self.relevance_score})>"
--- a/api/models/decision_log.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-DecisionLog model for tracking important decisions made during work.
-
-Stores decisions with their rationale, alternatives considered, and impact
-to provide decision history and context for future work.
-"""
-
-from typing import TYPE_CHECKING, Optional
-
-from sqlalchemy import ForeignKey, Index, String, Text
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-
-from .base import Base, TimestampMixin, UUIDMixin
-
-if TYPE_CHECKING:
-    from .project import Project
-    from .session import Session
-
-
-class DecisionLog(Base, UUIDMixin, TimestampMixin):
-    """
-    DecisionLog model for tracking important decisions made during work.
-
-    Stores decisions with their type, rationale, alternatives considered,
-    and impact assessment. This provides a decision history that can be
-    referenced in future conversations and work sessions.
-
-    Attributes:
-        decision_type: Type of decision (technical, architectural, process, security)
-        decision_text: What was decided (the actual decision)
-        rationale: Why this decision was made
-        alternatives_considered: JSON array of other options that were considered
-        impact: Impact level (low, medium, high, critical)
-        project_id: Foreign key to projects (optional)
-        session_id: Foreign key to sessions (optional)
-        tags: JSON array of tags for retrieval and categorization
-        project: Relationship to Project model
-        session: Relationship to Session model
-    """
-
-    __tablename__ = "decision_logs"
-
-    # Foreign keys
-    project_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("projects.id", ondelete="SET NULL"),
-        doc="Foreign key to projects (optional)"
-    )
-
-    session_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("sessions.id", ondelete="SET NULL"),
-        doc="Foreign key to sessions (optional)"
-    )
-
-    # Decision metadata
-    decision_type: Mapped[str] = mapped_column(
-        String(100),
-        nullable=False,
-        doc="Type of decision: technical, architectural, process, security"
-    )
-
-    impact: Mapped[str] = mapped_column(
-        String(50),
-        default="medium",
-        server_default="medium",
-        doc="Impact level: low, medium, high, critical"
-    )
-
-    # Decision content
-    decision_text: Mapped[str] = mapped_column(
-        Text,
-        nullable=False,
-        doc="What was decided (the actual decision)"
-    )
-
-    rationale: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="Why this decision was made"
-    )
-
-    alternatives_considered: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of other options that were considered"
-    )
-
-    # Retrieval metadata
-    tags: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of tags for retrieval and categorization"
-    )
-
-    # Relationships
-    project: Mapped[Optional["Project"]] = relationship(
-        "Project",
-        doc="Relationship to Project model"
-    )
-
-    session: Mapped[Optional["Session"]] = relationship(
-        "Session",
-        doc="Relationship to Session model"
-    )
-
-    # Indexes
-    __table_args__ = (
-        Index("idx_decision_logs_project", "project_id"),
-        Index("idx_decision_logs_session", "session_id"),
-        Index("idx_decision_logs_type", "decision_type"),
-        Index("idx_decision_logs_impact", "impact"),
-    )
-
-    def __repr__(self) -> str:
-        """String representation of the decision log."""
-        decision_preview = self.decision_text[:50] + "..." if len(self.decision_text) > 50 else self.decision_text
-        return f"<DecisionLog(type='{self.decision_type}', impact='{self.impact}', decision='{decision_preview}')>"
--- a/api/models/project_state.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-ProjectState model for tracking current state of projects.
-
-Stores the current phase, progress, blockers, and next actions for each project
-to enable quick context retrieval when resuming work.
-"""
-
-from typing import TYPE_CHECKING, Optional
-
-from sqlalchemy import ForeignKey, Index, Integer, String, Text
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-
-from .base import Base, TimestampMixin, UUIDMixin
-
-if TYPE_CHECKING:
-    from .project import Project
-    from .session import Session
-
-
-class ProjectState(Base, UUIDMixin, TimestampMixin):
-    """
-    ProjectState model for tracking current state of projects.
-
-    Stores the current phase, progress, blockers, next actions, and key
-    information about a project's state. Each project has exactly one
-    ProjectState record that is updated as the project progresses.
-
-    Attributes:
-        project_id: Foreign key to projects (required, unique - one state per project)
-        current_phase: Current phase or stage of the project
-        progress_percentage: Integer percentage of completion (0-100)
-        blockers: JSON array of current blockers preventing progress
-        next_actions: JSON array of next steps to take
-        context_summary: Dense overview text of where the project currently stands
-        key_files: JSON array of important file paths for this project
-        important_decisions: JSON array of key decisions made for this project
-        last_session_id: Foreign key to the last session that updated this state
-        project: Relationship to Project model
-        last_session: Relationship to Session model
-    """
-
-    __tablename__ = "project_states"
-
-    # Foreign keys
-    project_id: Mapped[str] = mapped_column(
-        String(36),
-        ForeignKey("projects.id", ondelete="CASCADE"),
-        nullable=False,
-        unique=True,
-        doc="Foreign key to projects (required, unique - one state per project)"
-    )
-
-    last_session_id: Mapped[Optional[str]] = mapped_column(
-        String(36),
-        ForeignKey("sessions.id", ondelete="SET NULL"),
-        doc="Foreign key to the last session that updated this state"
-    )
-
-    # State metadata
-    current_phase: Mapped[Optional[str]] = mapped_column(
-        String(100),
-        doc="Current phase or stage of the project"
-    )
-
-    progress_percentage: Mapped[int] = mapped_column(
-        Integer,
-        default=0,
-        server_default="0",
-        doc="Integer percentage of completion (0-100)"
-    )
-
-    # State content
-    blockers: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of current blockers preventing progress"
-    )
-
-    next_actions: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of next steps to take"
-    )
-
-    context_summary: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="Dense overview text of where the project currently stands"
-    )
-
-    key_files: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of important file paths for this project"
-    )
-
-    important_decisions: Mapped[Optional[str]] = mapped_column(
-        Text,
-        doc="JSON array of key decisions made for this project"
-    )
-
-    # Relationships
-    project: Mapped["Project"] = relationship(
-        "Project",
-        doc="Relationship to Project model"
-    )
-
-    last_session: Mapped[Optional["Session"]] = relationship(
-        "Session",
-        doc="Relationship to Session model"
-    )
-
-    # Indexes
-    __table_args__ = (
-        Index("idx_project_states_project", "project_id"),
-        Index("idx_project_states_last_session", "last_session_id"),
-        Index("idx_project_states_progress", "progress_percentage"),
-    )
-
-    def __repr__(self) -> str:
-        """String representation of the project state."""
-        return f"<ProjectState(project_id='{self.project_id}', phase='{self.current_phase}', progress={self.progress_percentage}%)>"
--- a/api/routers/context_snippets.py
+++ /dev/null
@@ -1,312 +0,0 @@
-"""
-ContextSnippet API router for ClaudeTools.
-
-Defines all REST API endpoints for managing context snippets,
-reusable pieces of knowledge for quick retrieval.
-"""
-
-from typing import List
-from uuid import UUID
-
-from fastapi import APIRouter, Depends, HTTPException, Query, status
-from sqlalchemy.orm import Session
-
-from api.database import get_db
-from api.middleware.auth import get_current_user
-from api.schemas.context_snippet import (
-    ContextSnippetCreate,
-    ContextSnippetResponse,
-    ContextSnippetUpdate,
-)
-from api.services import context_snippet_service
-
-# Create router with prefix and tags
-router = APIRouter()
-
-
-@router.get(
-    "",
-    response_model=dict,
-    summary="List all context snippets",
-    description="Retrieve a paginated list of all context snippets with optional filtering",
-    status_code=status.HTTP_200_OK,
-)
-def list_context_snippets(
-    skip: int = Query(
-        default=0,
-        ge=0,
-        description="Number of records to skip for pagination"
-    ),
-    limit: int = Query(
-        default=100,
-        ge=1,
-        le=1000,
-        description="Maximum number of records to return (max 1000)"
-    ),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    List all context snippets with pagination.
-
-    Returns snippets ordered by relevance score and usage count.
-    """
-    try:
-        snippets, total = context_snippet_service.get_context_snippets(db, skip, limit)
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "snippets": [ContextSnippetResponse.model_validate(snippet) for snippet in snippets]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve context snippets: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-tags",
-    response_model=dict,
-    summary="Get context snippets by tags",
-    description="Retrieve context snippets filtered by tags",
-    status_code=status.HTTP_200_OK,
-)
-def get_context_snippets_by_tags(
-    tags: List[str] = Query(..., description="Tags to filter by (OR logic - any match)"),
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get context snippets filtered by tags.
-
-    Uses OR logic - snippets matching any of the provided tags will be returned.
-    """
-    try:
-        snippets, total = context_snippet_service.get_context_snippets_by_tags(
-            db, tags, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "tags": tags,
-            "snippets": [ContextSnippetResponse.model_validate(snippet) for snippet in snippets]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve context snippets: {str(e)}"
-        )
-
-
-@router.get(
-    "/top-relevant",
-    response_model=dict,
-    summary="Get top relevant context snippets",
-    description="Retrieve the most relevant context snippets by relevance score",
-    status_code=status.HTTP_200_OK,
-)
-def get_top_relevant_snippets(
-    limit: int = Query(
-        default=10,
-        ge=1,
-        le=50,
-        description="Maximum number of snippets to retrieve (max 50)"
-    ),
-    min_relevance_score: float = Query(
-        default=7.0,
-        ge=0.0,
-        le=10.0,
-        description="Minimum relevance score threshold (0.0-10.0)"
-    ),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get the top most relevant context snippets.
-
-    Returns snippets ordered by relevance score (highest first).
-    """
-    try:
-        snippets = context_snippet_service.get_top_relevant_snippets(
-            db, limit, min_relevance_score
-        )
-
-        return {
-            "total": len(snippets),
-            "limit": limit,
-            "min_relevance_score": min_relevance_score,
-            "snippets": [ContextSnippetResponse.model_validate(snippet) for snippet in snippets]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve top relevant snippets: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-project/{project_id}",
-    response_model=dict,
-    summary="Get context snippets by project",
-    description="Retrieve all context snippets for a specific project",
-    status_code=status.HTTP_200_OK,
-)
-def get_context_snippets_by_project(
-    project_id: UUID,
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get all context snippets for a specific project.
-    """
-    try:
-        snippets, total = context_snippet_service.get_context_snippets_by_project(
-            db, project_id, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "project_id": str(project_id),
-            "snippets": [ContextSnippetResponse.model_validate(snippet) for snippet in snippets]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve context snippets: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-client/{client_id}",
-    response_model=dict,
-    summary="Get context snippets by client",
-    description="Retrieve all context snippets for a specific client",
-    status_code=status.HTTP_200_OK,
-)
-def get_context_snippets_by_client(
-    client_id: UUID,
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get all context snippets for a specific client.
-    """
-    try:
-        snippets, total = context_snippet_service.get_context_snippets_by_client(
-            db, client_id, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "client_id": str(client_id),
-            "snippets": [ContextSnippetResponse.model_validate(snippet) for snippet in snippets]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve context snippets: {str(e)}"
-        )
-
-
-@router.get(
-    "/{snippet_id}",
-    response_model=ContextSnippetResponse,
-    summary="Get context snippet by ID",
-    description="Retrieve a single context snippet by its unique identifier (increments usage_count)",
-    status_code=status.HTTP_200_OK,
-)
-def get_context_snippet(
-    snippet_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get a specific context snippet by ID.
-
-    Note: This automatically increments the usage_count for tracking.
-    """
-    snippet = context_snippet_service.get_context_snippet_by_id(db, snippet_id)
-    return ContextSnippetResponse.model_validate(snippet)
-
-
-@router.post(
-    "",
-    response_model=ContextSnippetResponse,
-    summary="Create new context snippet",
-    description="Create a new context snippet with the provided details",
-    status_code=status.HTTP_201_CREATED,
-)
-def create_context_snippet(
-    snippet_data: ContextSnippetCreate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Create a new context snippet.
-
-    Requires a valid JWT token with appropriate permissions.
-    """
-    snippet = context_snippet_service.create_context_snippet(db, snippet_data)
-    return ContextSnippetResponse.model_validate(snippet)
-
-
-@router.put(
-    "/{snippet_id}",
-    response_model=ContextSnippetResponse,
-    summary="Update context snippet",
-    description="Update an existing context snippet's details",
-    status_code=status.HTTP_200_OK,
-)
-def update_context_snippet(
-    snippet_id: UUID,
-    snippet_data: ContextSnippetUpdate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Update an existing context snippet.
-
-    Only provided fields will be updated. All fields are optional.
-    """
-    snippet = context_snippet_service.update_context_snippet(db, snippet_id, snippet_data)
-    return ContextSnippetResponse.model_validate(snippet)
-
-
-@router.delete(
-    "/{snippet_id}",
-    response_model=dict,
-    summary="Delete context snippet",
-    description="Delete a context snippet by its ID",
-    status_code=status.HTTP_200_OK,
-)
-def delete_context_snippet(
-    snippet_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Delete a context snippet.
-
-    This is a permanent operation and cannot be undone.
-    """
-    return context_snippet_service.delete_context_snippet(db, snippet_id)
--- a/api/routers/conversation_contexts.py
+++ /dev/null
@@ -1,312 +0,0 @@
-"""
-ConversationContext API router for ClaudeTools.
-
-Defines all REST API endpoints for managing conversation contexts,
-including context recall functionality for Claude's memory system.
-"""
-
-from typing import List, Optional
-from uuid import UUID
-
-from fastapi import APIRouter, Depends, HTTPException, Query, status
-from sqlalchemy.orm import Session
-
-from api.database import get_db
-from api.middleware.auth import get_current_user
-from api.schemas.conversation_context import (
-    ConversationContextCreate,
-    ConversationContextResponse,
-    ConversationContextUpdate,
-)
-from api.services import conversation_context_service
-
-# Create router with prefix and tags
-router = APIRouter()
-
-
-@router.get(
-    "",
-    response_model=dict,
-    summary="List all conversation contexts",
-    description="Retrieve a paginated list of all conversation contexts with optional filtering",
-    status_code=status.HTTP_200_OK,
-)
-def list_conversation_contexts(
-    skip: int = Query(
-        default=0,
-        ge=0,
-        description="Number of records to skip for pagination"
-    ),
-    limit: int = Query(
-        default=100,
-        ge=1,
-        le=1000,
-        description="Maximum number of records to return (max 1000)"
-    ),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    List all conversation contexts with pagination.
-
-    Returns contexts ordered by relevance score and recency.
-    """
-    try:
-        contexts, total = conversation_context_service.get_conversation_contexts(db, skip, limit)
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "contexts": [ConversationContextResponse.model_validate(ctx) for ctx in contexts]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve conversation contexts: {str(e)}"
-        )
-
-
-@router.get(
-    "/recall",
-    response_model=dict,
-    summary="Retrieve relevant contexts for injection",
-    description="Get token-efficient context formatted for Claude prompt injection",
-    status_code=status.HTTP_200_OK,
-)
-def recall_context(
-    search_term: Optional[str] = Query(
-        None,
-        max_length=200,
-        pattern=r'^[a-zA-Z0-9\s\-_.,!?()]+$',
-        description="Full-text search term (alphanumeric, spaces, and basic punctuation only)"
-    ),
-    project_id: Optional[UUID] = Query(None, description="Filter by project ID"),
-    tags: Optional[List[str]] = Query(
-        None,
-        description="Filter by tags (OR logic)",
-        max_items=20
-    ),
-    limit: int = Query(
-        default=10,
-        ge=1,
-        le=50,
-        description="Maximum number of contexts to retrieve (max 50)"
-    ),
-    min_relevance_score: float = Query(
-        default=5.0,
-        ge=0.0,
-        le=10.0,
-        description="Minimum relevance score threshold (0.0-10.0)"
-    ),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Retrieve relevant contexts formatted for Claude prompt injection.
-
-    This endpoint returns contexts matching the search criteria with
-    properly formatted JSON response containing the contexts array.
-
-    Query Parameters:
-    - search_term: Full-text search across title and summary (uses FULLTEXT index)
-    - project_id: Filter contexts by project
-    - tags: Filter contexts by tags (any match)
-    - limit: Maximum number of contexts to retrieve
-    - min_relevance_score: Minimum relevance score threshold
-
-    Returns JSON with contexts array and metadata.
-    """
-    # Validate tags to prevent SQL injection
-    if tags:
-        import re
-        tag_pattern = re.compile(r'^[a-zA-Z0-9\-_]+$')
-        for tag in tags:
-            if not tag_pattern.match(tag):
-                raise HTTPException(
-                    status_code=status.HTTP_400_BAD_REQUEST,
-                    detail=f"Invalid tag format: '{tag}'. Tags must be alphanumeric with hyphens or underscores only."
-                )
-
-    try:
-        contexts, total = conversation_context_service.get_recall_context(
-            db=db,
-            search_term=search_term,
-            project_id=project_id,
-            tags=tags,
-            limit=limit,
-            min_relevance_score=min_relevance_score
-        )
-
-        return {
-            "total": total,
-            "limit": limit,
-            "search_term": search_term,
-            "project_id": str(project_id) if project_id else None,
-            "tags": tags,
-            "min_relevance_score": min_relevance_score,
-            "contexts": [ConversationContextResponse.model_validate(ctx) for ctx in contexts]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve recall context: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-project/{project_id}",
-    response_model=dict,
-    summary="Get conversation contexts by project",
-    description="Retrieve all conversation contexts for a specific project",
-    status_code=status.HTTP_200_OK,
-)
-def get_conversation_contexts_by_project(
-    project_id: UUID,
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get all conversation contexts for a specific project.
-    """
-    try:
-        contexts, total = conversation_context_service.get_conversation_contexts_by_project(
-            db, project_id, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "project_id": str(project_id),
-            "contexts": [ConversationContextResponse.model_validate(ctx) for ctx in contexts]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve conversation contexts: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-session/{session_id}",
-    response_model=dict,
-    summary="Get conversation contexts by session",
-    description="Retrieve all conversation contexts for a specific session",
-    status_code=status.HTTP_200_OK,
-)
-def get_conversation_contexts_by_session(
-    session_id: UUID,
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get all conversation contexts for a specific session.
-    """
-    try:
-        contexts, total = conversation_context_service.get_conversation_contexts_by_session(
-            db, session_id, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "session_id": str(session_id),
-            "contexts": [ConversationContextResponse.model_validate(ctx) for ctx in contexts]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve conversation contexts: {str(e)}"
-        )
-
-
-@router.get(
-    "/{context_id}",
-    response_model=ConversationContextResponse,
-    summary="Get conversation context by ID",
-    description="Retrieve a single conversation context by its unique identifier",
-    status_code=status.HTTP_200_OK,
-)
-def get_conversation_context(
-    context_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get a specific conversation context by ID.
-    """
-    context = conversation_context_service.get_conversation_context_by_id(db, context_id)
-    return ConversationContextResponse.model_validate(context)
-
-
-@router.post(
-    "",
-    response_model=ConversationContextResponse,
-    summary="Create new conversation context",
-    description="Create a new conversation context with the provided details",
-    status_code=status.HTTP_201_CREATED,
-)
-def create_conversation_context(
-    context_data: ConversationContextCreate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Create a new conversation context.
-
-    Requires a valid JWT token with appropriate permissions.
-    """
-    context = conversation_context_service.create_conversation_context(db, context_data)
-    return ConversationContextResponse.model_validate(context)
-
-
-@router.put(
-    "/{context_id}",
-    response_model=ConversationContextResponse,
-    summary="Update conversation context",
-    description="Update an existing conversation context's details",
-    status_code=status.HTTP_200_OK,
-)
-def update_conversation_context(
-    context_id: UUID,
-    context_data: ConversationContextUpdate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Update an existing conversation context.
-
-    Only provided fields will be updated. All fields are optional.
-    """
-    context = conversation_context_service.update_conversation_context(db, context_id, context_data)
-    return ConversationContextResponse.model_validate(context)
-
-
-@router.delete(
-    "/{context_id}",
-    response_model=dict,
-    summary="Delete conversation context",
-    description="Delete a conversation context by its ID",
-    status_code=status.HTTP_200_OK,
-)
-def delete_conversation_context(
-    context_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Delete a conversation context.
-
-    This is a permanent operation and cannot be undone.
-    """
-    return conversation_context_service.delete_conversation_context(db, context_id)
--- a/api/routers/decision_logs.py
+++ /dev/null
@@ -1,264 +0,0 @@
-"""
-DecisionLog API router for ClaudeTools.
-
-Defines all REST API endpoints for managing decision logs,
-tracking important decisions made during work.
-"""
-
-from uuid import UUID
-
-from fastapi import APIRouter, Depends, HTTPException, Query, status
-from sqlalchemy.orm import Session
-
-from api.database import get_db
-from api.middleware.auth import get_current_user
-from api.schemas.decision_log import (
-    DecisionLogCreate,
-    DecisionLogResponse,
-    DecisionLogUpdate,
-)
-from api.services import decision_log_service
-
-# Create router with prefix and tags
-router = APIRouter()
-
-
-@router.get(
-    "",
-    response_model=dict,
-    summary="List all decision logs",
-    description="Retrieve a paginated list of all decision logs",
-    status_code=status.HTTP_200_OK,
-)
-def list_decision_logs(
-    skip: int = Query(
-        default=0,
-        ge=0,
-        description="Number of records to skip for pagination"
-    ),
-    limit: int = Query(
-        default=100,
-        ge=1,
-        le=1000,
-        description="Maximum number of records to return (max 1000)"
-    ),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    List all decision logs with pagination.
-
-    Returns decision logs ordered by most recent first.
-    """
-    try:
-        logs, total = decision_log_service.get_decision_logs(db, skip, limit)
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "logs": [DecisionLogResponse.model_validate(log) for log in logs]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve decision logs: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-impact/{impact}",
-    response_model=dict,
-    summary="Get decision logs by impact level",
-    description="Retrieve decision logs filtered by impact level",
-    status_code=status.HTTP_200_OK,
-)
-def get_decision_logs_by_impact(
-    impact: str,
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get decision logs filtered by impact level.
-
-    Valid impact levels: low, medium, high, critical
-    """
-    try:
-        logs, total = decision_log_service.get_decision_logs_by_impact(
-            db, impact, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "impact": impact,
-            "logs": [DecisionLogResponse.model_validate(log) for log in logs]
-        }
-
-    except HTTPException:
-        raise
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve decision logs: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-project/{project_id}",
-    response_model=dict,
-    summary="Get decision logs by project",
-    description="Retrieve all decision logs for a specific project",
-    status_code=status.HTTP_200_OK,
-)
-def get_decision_logs_by_project(
-    project_id: UUID,
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get all decision logs for a specific project.
-    """
-    try:
-        logs, total = decision_log_service.get_decision_logs_by_project(
-            db, project_id, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "project_id": str(project_id),
-            "logs": [DecisionLogResponse.model_validate(log) for log in logs]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve decision logs: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-session/{session_id}",
-    response_model=dict,
-    summary="Get decision logs by session",
-    description="Retrieve all decision logs for a specific session",
-    status_code=status.HTTP_200_OK,
-)
-def get_decision_logs_by_session(
-    session_id: UUID,
-    skip: int = Query(default=0, ge=0),
-    limit: int = Query(default=100, ge=1, le=1000),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get all decision logs for a specific session.
-    """
-    try:
-        logs, total = decision_log_service.get_decision_logs_by_session(
-            db, session_id, skip, limit
-        )
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "session_id": str(session_id),
-            "logs": [DecisionLogResponse.model_validate(log) for log in logs]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve decision logs: {str(e)}"
-        )
-
-
-@router.get(
-    "/{log_id}",
-    response_model=DecisionLogResponse,
-    summary="Get decision log by ID",
-    description="Retrieve a single decision log by its unique identifier",
-    status_code=status.HTTP_200_OK,
-)
-def get_decision_log(
-    log_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get a specific decision log by ID.
-    """
-    log = decision_log_service.get_decision_log_by_id(db, log_id)
-    return DecisionLogResponse.model_validate(log)
-
-
-@router.post(
-    "",
-    response_model=DecisionLogResponse,
-    summary="Create new decision log",
-    description="Create a new decision log with the provided details",
-    status_code=status.HTTP_201_CREATED,
-)
-def create_decision_log(
-    log_data: DecisionLogCreate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Create a new decision log.
-
-    Requires a valid JWT token with appropriate permissions.
-    """
-    log = decision_log_service.create_decision_log(db, log_data)
-    return DecisionLogResponse.model_validate(log)
-
-
-@router.put(
-    "/{log_id}",
-    response_model=DecisionLogResponse,
-    summary="Update decision log",
-    description="Update an existing decision log's details",
-    status_code=status.HTTP_200_OK,
-)
-def update_decision_log(
-    log_id: UUID,
-    log_data: DecisionLogUpdate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Update an existing decision log.
-
-    Only provided fields will be updated. All fields are optional.
-    """
-    log = decision_log_service.update_decision_log(db, log_id, log_data)
-    return DecisionLogResponse.model_validate(log)
-
-
-@router.delete(
-    "/{log_id}",
-    response_model=dict,
-    summary="Delete decision log",
-    description="Delete a decision log by its ID",
-    status_code=status.HTTP_200_OK,
-)
-def delete_decision_log(
-    log_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Delete a decision log.
-
-    This is a permanent operation and cannot be undone.
-    """
-    return decision_log_service.delete_decision_log(db, log_id)
--- a/api/routers/project_states.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""
-ProjectState API router for ClaudeTools.
-
-Defines all REST API endpoints for managing project states,
-tracking the current state of projects for context retrieval.
-"""
-
-from uuid import UUID
-
-from fastapi import APIRouter, Depends, HTTPException, Query, status
-from sqlalchemy.orm import Session
-
-from api.database import get_db
-from api.middleware.auth import get_current_user
-from api.schemas.project_state import (
-    ProjectStateCreate,
-    ProjectStateResponse,
-    ProjectStateUpdate,
-)
-from api.services import project_state_service
-
-# Create router with prefix and tags
-router = APIRouter()
-
-
-@router.get(
-    "",
-    response_model=dict,
-    summary="List all project states",
-    description="Retrieve a paginated list of all project states",
-    status_code=status.HTTP_200_OK,
-)
-def list_project_states(
-    skip: int = Query(
-        default=0,
-        ge=0,
-        description="Number of records to skip for pagination"
-    ),
-    limit: int = Query(
-        default=100,
-        ge=1,
-        le=1000,
-        description="Maximum number of records to return (max 1000)"
-    ),
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    List all project states with pagination.
-
-    Returns project states ordered by most recently updated.
-    """
-    try:
-        states, total = project_state_service.get_project_states(db, skip, limit)
-
-        return {
-            "total": total,
-            "skip": skip,
-            "limit": limit,
-            "states": [ProjectStateResponse.model_validate(state) for state in states]
-        }
-
-    except Exception as e:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Failed to retrieve project states: {str(e)}"
-        )
-
-
-@router.get(
-    "/by-project/{project_id}",
-    response_model=ProjectStateResponse,
-    summary="Get project state by project ID",
-    description="Retrieve the project state for a specific project (unique per project)",
-    status_code=status.HTTP_200_OK,
-)
-def get_project_state_by_project(
-    project_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get the project state for a specific project.
-
-    Each project has exactly one project state.
-    """
-    state = project_state_service.get_project_state_by_project(db, project_id)
-
-    if not state:
-        raise HTTPException(
-            status_code=status.HTTP_404_NOT_FOUND,
-            detail=f"ProjectState for project ID {project_id} not found"
-        )
-
-    return ProjectStateResponse.model_validate(state)
-
-
-@router.get(
-    "/{state_id}",
-    response_model=ProjectStateResponse,
-    summary="Get project state by ID",
-    description="Retrieve a single project state by its unique identifier",
-    status_code=status.HTTP_200_OK,
-)
-def get_project_state(
-    state_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Get a specific project state by ID.
-    """
-    state = project_state_service.get_project_state_by_id(db, state_id)
-    return ProjectStateResponse.model_validate(state)
-
-
-@router.post(
-    "",
-    response_model=ProjectStateResponse,
-    summary="Create new project state",
-    description="Create a new project state with the provided details",
-    status_code=status.HTTP_201_CREATED,
-)
-def create_project_state(
-    state_data: ProjectStateCreate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Create a new project state.
-
-    Each project can only have one project state (enforced by unique constraint).
-    Requires a valid JWT token with appropriate permissions.
-    """
-    state = project_state_service.create_project_state(db, state_data)
-    return ProjectStateResponse.model_validate(state)
-
-
-@router.put(
-    "/{state_id}",
-    response_model=ProjectStateResponse,
-    summary="Update project state",
-    description="Update an existing project state's details",
-    status_code=status.HTTP_200_OK,
-)
-def update_project_state(
-    state_id: UUID,
-    state_data: ProjectStateUpdate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Update an existing project state.
-
-    Only provided fields will be updated. All fields are optional.
-    Uses compression utilities when updating to maintain efficient storage.
-    """
-    state = project_state_service.update_project_state(db, state_id, state_data)
-    return ProjectStateResponse.model_validate(state)
-
-
-@router.put(
-    "/by-project/{project_id}",
-    response_model=ProjectStateResponse,
-    summary="Update project state by project ID",
-    description="Update project state by project ID (creates if doesn't exist)",
-    status_code=status.HTTP_200_OK,
-)
-def update_project_state_by_project(
-    project_id: UUID,
-    state_data: ProjectStateUpdate,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Update project state by project ID.
-
-    Convenience method that creates a new project state if it doesn't exist,
-    or updates the existing one if it does.
-    """
-    state = project_state_service.update_project_state_by_project(db, project_id, state_data)
-    return ProjectStateResponse.model_validate(state)
-
-
-@router.delete(
-    "/{state_id}",
-    response_model=dict,
-    summary="Delete project state",
-    description="Delete a project state by its ID",
-    status_code=status.HTTP_200_OK,
-)
-def delete_project_state(
-    state_id: UUID,
-    db: Session = Depends(get_db),
-    current_user: dict = Depends(get_current_user),
-):
-    """
-    Delete a project state.
-
-    This is a permanent operation and cannot be undone.
-    """
-    return project_state_service.delete_project_state(db, state_id)
@@ -2,13 +2,6 @@

from .billable_time import BillableTimeBase, BillableTimeCreate, BillableTimeResponse, BillableTimeUpdate
from .client import ClientBase, ClientCreate, ClientResponse, ClientUpdate
from .context_snippet import ContextSnippetBase, ContextSnippetCreate, ContextSnippetResponse, ContextSnippetUpdate
from .conversation_context import (
    ConversationContextBase,
    ConversationContextCreate,
    ConversationContextResponse,
    ConversationContextUpdate,
)
from .credential import CredentialBase, CredentialCreate, CredentialResponse, CredentialUpdate
from .credential_audit_log import (
    CredentialAuditLogBase,
@@ -16,14 +9,12 @@ from .credential_audit_log import (
    CredentialAuditLogResponse,
    CredentialAuditLogUpdate,
)
from .decision_log import DecisionLogBase, DecisionLogCreate, DecisionLogResponse, DecisionLogUpdate
from .firewall_rule import FirewallRuleBase, FirewallRuleCreate, FirewallRuleResponse, FirewallRuleUpdate
from .infrastructure import InfrastructureBase, InfrastructureCreate, InfrastructureResponse, InfrastructureUpdate
from .m365_tenant import M365TenantBase, M365TenantCreate, M365TenantResponse, M365TenantUpdate
from .machine import MachineBase, MachineCreate, MachineResponse, MachineUpdate
from .network import NetworkBase, NetworkCreate, NetworkResponse, NetworkUpdate
from .project import ProjectBase, ProjectCreate, ProjectResponse, ProjectUpdate
from .project_state import ProjectStateBase, ProjectStateCreate, ProjectStateResponse, ProjectStateUpdate
from .security_incident import SecurityIncidentBase, SecurityIncidentCreate, SecurityIncidentResponse, SecurityIncidentUpdate
from .service import ServiceBase, ServiceCreate, ServiceResponse, ServiceUpdate
from .session import SessionBase, SessionCreate, SessionResponse, SessionUpdate
@@ -118,24 +109,4 @@ __all__ = [
    "SecurityIncidentCreate",
    "SecurityIncidentUpdate",
    "SecurityIncidentResponse",
    # ConversationContext schemas
    "ConversationContextBase",
    "ConversationContextCreate",
    "ConversationContextUpdate",
    "ConversationContextResponse",
    # ContextSnippet schemas
    "ContextSnippetBase",
    "ContextSnippetCreate",
    "ContextSnippetUpdate",
    "ContextSnippetResponse",
    # ProjectState schemas
    "ProjectStateBase",
    "ProjectStateCreate",
    "ProjectStateUpdate",
    "ProjectStateResponse",
    # DecisionLog schemas
    "DecisionLogBase",
    "DecisionLogCreate",
    "DecisionLogUpdate",
    "DecisionLogResponse",
]

@@ -1,54 +0,0 @@
"""
Pydantic schemas for ContextSnippet model.

Request and response schemas for reusable context snippets.
"""

from datetime import datetime
from typing import Optional
from uuid import UUID

from pydantic import BaseModel, Field


class ContextSnippetBase(BaseModel):
    """Base schema with shared ContextSnippet fields."""

    project_id: Optional[UUID] = Field(None, description="Project ID (optional)")
    client_id: Optional[UUID] = Field(None, description="Client ID (optional)")
    category: str = Field(..., description="Category: tech_decision, configuration, pattern, lesson_learned")
    title: str = Field(..., description="Brief title describing the snippet")
    dense_content: str = Field(..., description="Highly compressed information content")
    structured_data: Optional[str] = Field(None, description="JSON object for optional structured representation")
    tags: Optional[str] = Field(None, description="JSON array of tags for retrieval and categorization")
    relevance_score: float = Field(1.0, ge=0.0, le=10.0, description="Float score for ranking relevance (0.0-10.0)")
    usage_count: int = Field(0, ge=0, description="Integer count of how many times this snippet was retrieved")


class ContextSnippetCreate(ContextSnippetBase):
    """Schema for creating a new ContextSnippet."""
    pass


class ContextSnippetUpdate(BaseModel):
    """Schema for updating an existing ContextSnippet. All fields are optional."""

    project_id: Optional[UUID] = Field(None, description="Project ID (optional)")
    client_id: Optional[UUID] = Field(None, description="Client ID (optional)")
    category: Optional[str] = Field(None, description="Category: tech_decision, configuration, pattern, lesson_learned")
    title: Optional[str] = Field(None, description="Brief title describing the snippet")
    dense_content: Optional[str] = Field(None, description="Highly compressed information content")
    structured_data: Optional[str] = Field(None, description="JSON object for optional structured representation")
    tags: Optional[str] = Field(None, description="JSON array of tags for retrieval and categorization")
    relevance_score: Optional[float] = Field(None, ge=0.0, le=10.0, description="Float score for ranking relevance (0.0-10.0)")
    usage_count: Optional[int] = Field(None, ge=0, description="Integer count of how many times this snippet was retrieved")


class ContextSnippetResponse(ContextSnippetBase):
    """Schema for ContextSnippet responses with ID and timestamps."""

    id: UUID = Field(..., description="Unique identifier for the context snippet")
    created_at: datetime = Field(..., description="Timestamp when the snippet was created")
    updated_at: datetime = Field(..., description="Timestamp when the snippet was last updated")

    model_config = {"from_attributes": True}
@@ -1,56 +0,0 @@
"""
Pydantic schemas for ConversationContext model.

Request and response schemas for conversation context storage and recall.
"""

from datetime import datetime
from typing import Optional
from uuid import UUID

from pydantic import BaseModel, Field


class ConversationContextBase(BaseModel):
    """Base schema with shared ConversationContext fields."""

    session_id: Optional[UUID] = Field(None, description="Session ID (optional)")
    project_id: Optional[UUID] = Field(None, description="Project ID (optional)")
    machine_id: Optional[UUID] = Field(None, description="Machine ID that created this context")
    context_type: str = Field(..., description="Type of context: session_summary, project_state, general_context")
    title: str = Field(..., description="Brief title describing the context")
    dense_summary: Optional[str] = Field(None, description="Compressed, structured summary (JSON or dense text)")
    key_decisions: Optional[str] = Field(None, description="JSON array of important decisions made")
    current_state: Optional[str] = Field(None, description="JSON object describing what's currently in progress")
    tags: Optional[str] = Field(None, description="JSON array of tags for retrieval and categorization")
    relevance_score: float = Field(1.0, ge=0.0, le=10.0, description="Float score for ranking relevance (0.0-10.0)")


class ConversationContextCreate(ConversationContextBase):
    """Schema for creating a new ConversationContext."""
    pass


class ConversationContextUpdate(BaseModel):
    """Schema for updating an existing ConversationContext. All fields are optional."""

    session_id: Optional[UUID] = Field(None, description="Session ID (optional)")
    project_id: Optional[UUID] = Field(None, description="Project ID (optional)")
    machine_id: Optional[UUID] = Field(None, description="Machine ID that created this context")
    context_type: Optional[str] = Field(None, description="Type of context: session_summary, project_state, general_context")
    title: Optional[str] = Field(None, description="Brief title describing the context")
    dense_summary: Optional[str] = Field(None, description="Compressed, structured summary (JSON or dense text)")
    key_decisions: Optional[str] = Field(None, description="JSON array of important decisions made")
    current_state: Optional[str] = Field(None, description="JSON object describing what's currently in progress")
    tags: Optional[str] = Field(None, description="JSON array of tags for retrieval and categorization")
    relevance_score: Optional[float] = Field(None, ge=0.0, le=10.0, description="Float score for ranking relevance (0.0-10.0)")


class ConversationContextResponse(ConversationContextBase):
    """Schema for ConversationContext responses with ID and timestamps."""

    id: UUID = Field(..., description="Unique identifier for the conversation context")
    created_at: datetime = Field(..., description="Timestamp when the context was created")
    updated_at: datetime = Field(..., description="Timestamp when the context was last updated")

    model_config = {"from_attributes": True}
@@ -1,52 +0,0 @@
"""
Pydantic schemas for DecisionLog model.

Request and response schemas for tracking important decisions made during work.
"""

from datetime import datetime
from typing import Optional
from uuid import UUID

from pydantic import BaseModel, Field


class DecisionLogBase(BaseModel):
    """Base schema with shared DecisionLog fields."""

    project_id: Optional[UUID] = Field(None, description="Project ID (optional)")
    session_id: Optional[UUID] = Field(None, description="Session ID (optional)")
    decision_type: str = Field(..., description="Type of decision: technical, architectural, process, security")
    decision_text: str = Field(..., description="What was decided (the actual decision)")
    rationale: Optional[str] = Field(None, description="Why this decision was made")
    alternatives_considered: Optional[str] = Field(None, description="JSON array of other options that were considered")
    impact: str = Field("medium", description="Impact level: low, medium, high, critical")
    tags: Optional[str] = Field(None, description="JSON array of tags for retrieval and categorization")


class DecisionLogCreate(DecisionLogBase):
    """Schema for creating a new DecisionLog."""
    pass


class DecisionLogUpdate(BaseModel):
    """Schema for updating an existing DecisionLog. All fields are optional."""

    project_id: Optional[UUID] = Field(None, description="Project ID (optional)")
    session_id: Optional[UUID] = Field(None, description="Session ID (optional)")
    decision_type: Optional[str] = Field(None, description="Type of decision: technical, architectural, process, security")
    decision_text: Optional[str] = Field(None, description="What was decided (the actual decision)")
    rationale: Optional[str] = Field(None, description="Why this decision was made")
    alternatives_considered: Optional[str] = Field(None, description="JSON array of other options that were considered")
    impact: Optional[str] = Field(None, description="Impact level: low, medium, high, critical")
    tags: Optional[str] = Field(None, description="JSON array of tags for retrieval and categorization")


class DecisionLogResponse(DecisionLogBase):
    """Schema for DecisionLog responses with ID and timestamps."""

    id: UUID = Field(..., description="Unique identifier for the decision log")
    created_at: datetime = Field(..., description="Timestamp when the decision was logged")
    updated_at: datetime = Field(..., description="Timestamp when the decision log was last updated")

    model_config = {"from_attributes": True}
@@ -1,53 +0,0 @@
"""
Pydantic schemas for ProjectState model.

Request and response schemas for tracking current state of projects.
"""

from datetime import datetime
from typing import Optional
from uuid import UUID

from pydantic import BaseModel, Field


class ProjectStateBase(BaseModel):
    """Base schema with shared ProjectState fields."""

    project_id: UUID = Field(..., description="Project ID (required, unique - one state per project)")
    last_session_id: Optional[UUID] = Field(None, description="Last session ID that updated this state")
    current_phase: Optional[str] = Field(None, description="Current phase or stage of the project")
    progress_percentage: int = Field(0, ge=0, le=100, description="Integer percentage of completion (0-100)")
    blockers: Optional[str] = Field(None, description="JSON array of current blockers preventing progress")
    next_actions: Optional[str] = Field(None, description="JSON array of next steps to take")
    context_summary: Optional[str] = Field(None, description="Dense overview text of where the project currently stands")
    key_files: Optional[str] = Field(None, description="JSON array of important file paths for this project")
    important_decisions: Optional[str] = Field(None, description="JSON array of key decisions made for this project")


class ProjectStateCreate(ProjectStateBase):
    """Schema for creating a new ProjectState."""
    pass


class ProjectStateUpdate(BaseModel):
    """Schema for updating an existing ProjectState. All fields are optional; project_id cannot be changed."""

    last_session_id: Optional[UUID] = Field(None, description="Last session ID that updated this state")
    current_phase: Optional[str] = Field(None, description="Current phase or stage of the project")
    progress_percentage: Optional[int] = Field(None, ge=0, le=100, description="Integer percentage of completion (0-100)")
    blockers: Optional[str] = Field(None, description="JSON array of current blockers preventing progress")
    next_actions: Optional[str] = Field(None, description="JSON array of next steps to take")
    context_summary: Optional[str] = Field(None, description="Dense overview text of where the project currently stands")
    key_files: Optional[str] = Field(None, description="JSON array of important file paths for this project")
    important_decisions: Optional[str] = Field(None, description="JSON array of key decisions made for this project")


class ProjectStateResponse(ProjectStateBase):
    """Schema for ProjectState responses with ID and timestamps."""

    id: UUID = Field(..., description="Unique identifier for the project state")
    created_at: datetime = Field(..., description="Timestamp when the state was created")
    updated_at: datetime = Field(..., description="Timestamp when the state was last updated")

    model_config = {"from_attributes": True}
@@ -11,10 +11,6 @@ from . import (
    credential_service,
    credential_audit_log_service,
    security_incident_service,
    conversation_context_service,
    context_snippet_service,
    project_state_service,
    decision_log_service,
)

__all__ = [
@@ -28,8 +24,4 @@ __all__ = [
    "credential_service",
    "credential_audit_log_service",
    "security_incident_service",
    "conversation_context_service",
    "context_snippet_service",
    "project_state_service",
    "decision_log_service",
]

@@ -1,367 +0,0 @@
"""
ContextSnippet service layer for business logic and database operations.

Handles all database operations for context snippets, providing reusable
knowledge storage and retrieval.
"""

import json
from typing import List, Optional
from uuid import UUID

from fastapi import HTTPException, status
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session

from api.models.context_snippet import ContextSnippet
from api.schemas.context_snippet import ContextSnippetCreate, ContextSnippetUpdate


def get_context_snippets(
    db: Session,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ContextSnippet], int]:
    """
    Retrieve a paginated list of context snippets.

    Args:
        db: Database session
        skip: Number of records to skip (for pagination)
        limit: Maximum number of records to return

    Returns:
        tuple: (list of context snippets, total count)
    """
    # Get total count
    total = db.query(ContextSnippet).count()

    # Get paginated results, ordered by relevance and usage
    snippets = (
        db.query(ContextSnippet)
        .order_by(ContextSnippet.relevance_score.desc(), ContextSnippet.usage_count.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return snippets, total


def get_context_snippet_by_id(db: Session, snippet_id: UUID) -> ContextSnippet:
    """
    Retrieve a single context snippet by its ID.

    Automatically increments usage_count when snippet is retrieved.

    Args:
        db: Database session
        snippet_id: UUID of the context snippet to retrieve

    Returns:
        ContextSnippet: The context snippet object

    Raises:
        HTTPException: 404 if context snippet not found
    """
    snippet = db.query(ContextSnippet).filter(ContextSnippet.id == str(snippet_id)).first()

    if not snippet:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"ContextSnippet with ID {snippet_id} not found"
        )

    # Increment usage count
    snippet.usage_count += 1
    db.commit()
    db.refresh(snippet)

    return snippet


def get_context_snippets_by_project(
    db: Session,
    project_id: UUID,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ContextSnippet], int]:
    """
    Retrieve context snippets for a specific project.

    Args:
        db: Database session
        project_id: UUID of the project
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of context snippets, total count)
    """
    # Get total count for project
    total = db.query(ContextSnippet).filter(
        ContextSnippet.project_id == str(project_id)
    ).count()

    # Get paginated results
    snippets = (
        db.query(ContextSnippet)
        .filter(ContextSnippet.project_id == str(project_id))
        .order_by(ContextSnippet.relevance_score.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return snippets, total


def get_context_snippets_by_client(
    db: Session,
    client_id: UUID,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ContextSnippet], int]:
    """
    Retrieve context snippets for a specific client.

    Args:
        db: Database session
        client_id: UUID of the client
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of context snippets, total count)
    """
    # Get total count for client
    total = db.query(ContextSnippet).filter(
        ContextSnippet.client_id == str(client_id)
    ).count()

    # Get paginated results
    snippets = (
        db.query(ContextSnippet)
        .filter(ContextSnippet.client_id == str(client_id))
        .order_by(ContextSnippet.relevance_score.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return snippets, total


def get_context_snippets_by_tags(
    db: Session,
    tags: List[str],
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ContextSnippet], int]:
    """
    Retrieve context snippets filtered by tags.

    Args:
        db: Database session
        tags: List of tags to filter by (OR logic - any tag matches)
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of context snippets, total count)
    """
    # Build tag filters
    tag_filters = []
    for tag in tags:
        tag_filters.append(ContextSnippet.tags.contains(f'"{tag}"'))

    # Get total count
    if tag_filters:
        total = db.query(ContextSnippet).filter(or_(*tag_filters)).count()
    else:
        total = 0

    # Get paginated results
    if tag_filters:
        snippets = (
            db.query(ContextSnippet)
            .filter(or_(*tag_filters))
            .order_by(ContextSnippet.relevance_score.desc())
            .offset(skip)
            .limit(limit)
            .all()
        )
    else:
        snippets = []

    return snippets, total


def get_top_relevant_snippets(
    db: Session,
    limit: int = 10,
    min_relevance_score: float = 7.0
) -> list[ContextSnippet]:
    """
    Get the top most relevant context snippets.

    Args:
        db: Database session
        limit: Maximum number of snippets to return (default 10)
        min_relevance_score: Minimum relevance score threshold (default 7.0)

    Returns:
        list: Top relevant context snippets
    """
    snippets = (
        db.query(ContextSnippet)
        .filter(ContextSnippet.relevance_score >= min_relevance_score)
        .order_by(ContextSnippet.relevance_score.desc())
        .limit(limit)
        .all()
    )

    return snippets


def create_context_snippet(
    db: Session,
    snippet_data: ContextSnippetCreate
) -> ContextSnippet:
    """
    Create a new context snippet.

    Args:
        db: Database session
        snippet_data: Context snippet creation data

    Returns:
        ContextSnippet: The created context snippet object

    Raises:
        HTTPException: 500 if database error occurs
    """
    try:
        # Create new context snippet instance
        db_snippet = ContextSnippet(**snippet_data.model_dump())

        # Add to database
        db.add(db_snippet)
        db.commit()
        db.refresh(db_snippet)

        return db_snippet

    except IntegrityError as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Database error: {str(e)}"
        )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to create context snippet: {str(e)}"
        )


def update_context_snippet(
    db: Session,
    snippet_id: UUID,
    snippet_data: ContextSnippetUpdate
) -> ContextSnippet:
    """
    Update an existing context snippet.

    Args:
        db: Database session
        snippet_id: UUID of the context snippet to update
        snippet_data: Context snippet update data

    Returns:
        ContextSnippet: The updated context snippet object

    Raises:
        HTTPException: 404 if context snippet not found
        HTTPException: 500 if database error occurs
    """
    # Get existing snippet (without incrementing usage count)
    snippet = db.query(ContextSnippet).filter(ContextSnippet.id == str(snippet_id)).first()

    if not snippet:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"ContextSnippet with ID {snippet_id} not found"
        )

    try:
        # Update only provided fields
        update_data = snippet_data.model_dump(exclude_unset=True)

        # Apply updates
        for field, value in update_data.items():
            setattr(snippet, field, value)

        db.commit()
        db.refresh(snippet)

        return snippet

    except HTTPException:
        db.rollback()
        raise
    except IntegrityError as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Database error: {str(e)}"
        )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update context snippet: {str(e)}"
        )


def delete_context_snippet(db: Session, snippet_id: UUID) -> dict:
    """
    Delete a context snippet by its ID.

    Args:
        db: Database session
        snippet_id: UUID of the context snippet to delete

    Returns:
        dict: Success message

    Raises:
        HTTPException: 404 if context snippet not found
        HTTPException: 500 if database error occurs
    """
    # Get existing snippet (without incrementing usage count)
    snippet = db.query(ContextSnippet).filter(ContextSnippet.id == str(snippet_id)).first()

    if not snippet:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"ContextSnippet with ID {snippet_id} not found"
        )

    try:
        db.delete(snippet)
        db.commit()

        return {
            "message": "ContextSnippet deleted successfully",
            "snippet_id": str(snippet_id)
        }

    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to delete context snippet: {str(e)}"
        )
@@ -1,340 +0,0 @@
"""
ConversationContext service layer for business logic and database operations.

Handles all database operations for conversation contexts, providing context
recall and retrieval functionality for Claude's memory system.
"""

import json
from typing import List, Optional
from uuid import UUID

from fastapi import HTTPException, status
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session

from api.models.conversation_context import ConversationContext
from api.schemas.conversation_context import ConversationContextCreate, ConversationContextUpdate
from api.utils.context_compression import format_for_injection


def get_conversation_contexts(
    db: Session,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ConversationContext], int]:
    """
    Retrieve a paginated list of conversation contexts.

    Args:
        db: Database session
        skip: Number of records to skip (for pagination)
        limit: Maximum number of records to return

    Returns:
        tuple: (list of conversation contexts, total count)
    """
    # Get total count
    total = db.query(ConversationContext).count()

    # Get paginated results, ordered by relevance and recency
    contexts = (
        db.query(ConversationContext)
        .order_by(ConversationContext.relevance_score.desc(), ConversationContext.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return contexts, total


def get_conversation_context_by_id(db: Session, context_id: UUID) -> ConversationContext:
    """
    Retrieve a single conversation context by its ID.

    Args:
        db: Database session
        context_id: UUID of the conversation context to retrieve

    Returns:
        ConversationContext: The conversation context object

    Raises:
        HTTPException: 404 if conversation context not found
    """
    context = db.query(ConversationContext).filter(ConversationContext.id == str(context_id)).first()

    if not context:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"ConversationContext with ID {context_id} not found"
        )

    return context


def get_conversation_contexts_by_project(
    db: Session,
    project_id: UUID,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ConversationContext], int]:
    """
    Retrieve conversation contexts for a specific project.

    Args:
        db: Database session
        project_id: UUID of the project
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of conversation contexts, total count)
    """
    # Get total count for project
    total = db.query(ConversationContext).filter(
        ConversationContext.project_id == str(project_id)
    ).count()

    # Get paginated results
    contexts = (
        db.query(ConversationContext)
        .filter(ConversationContext.project_id == str(project_id))
        .order_by(ConversationContext.relevance_score.desc(), ConversationContext.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return contexts, total


def get_conversation_contexts_by_session(
    db: Session,
    session_id: UUID,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ConversationContext], int]:
    """
    Retrieve conversation contexts for a specific session.

    Args:
        db: Database session
        session_id: UUID of the session
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of conversation contexts, total count)
    """
    # Get total count for session
    total = db.query(ConversationContext).filter(
        ConversationContext.session_id == str(session_id)
    ).count()

    # Get paginated results
    contexts = (
        db.query(ConversationContext)
        .filter(ConversationContext.session_id == str(session_id))
        .order_by(ConversationContext.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return contexts, total


def get_recall_context(
    db: Session,
    project_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    limit: int = 10,
    min_relevance_score: float = 5.0
) -> str:
    """
    Get relevant contexts formatted for Claude prompt injection.

    This is the main context recall function that retrieves the most relevant
    contexts and formats them for efficient injection into Claude's prompt.

    Args:
        db: Database session
        project_id: Optional project ID to filter by
        tags: Optional list of tags to filter by
        limit: Maximum number of contexts to retrieve (default 10)
        min_relevance_score: Minimum relevance score threshold (default 5.0)

    Returns:
        str: Token-efficient markdown string ready for prompt injection
    """
    # Build query
    query = db.query(ConversationContext)

    # Filter by project if specified
    if project_id:
        query = query.filter(ConversationContext.project_id == str(project_id))

    # Filter by minimum relevance score
    query = query.filter(ConversationContext.relevance_score >= min_relevance_score)

    # Filter by tags if specified
    if tags:
        # Check if any of the provided tags exist in the JSON tags field
        # (a substring match for the quoted tag against the JSON-encoded text column)
        tag_filters = []
        for tag in tags:
            tag_filters.append(ConversationContext.tags.contains(f'"{tag}"'))
        if tag_filters:
            query = query.filter(or_(*tag_filters))

    # Order by relevance score and get top results
    contexts = query.order_by(
        ConversationContext.relevance_score.desc()
    ).limit(limit).all()

    # Convert to dictionary format for formatting
    context_dicts = []
    for ctx in contexts:
        context_dict = {
            "content": ctx.dense_summary or ctx.title,
            "type": ctx.context_type,
            "tags": json.loads(ctx.tags) if ctx.tags else [],
            "relevance_score": ctx.relevance_score
        }
        context_dicts.append(context_dict)

    # Use compression utility to format for injection
    return format_for_injection(context_dicts)


def create_conversation_context(
    db: Session,
    context_data: ConversationContextCreate
) -> ConversationContext:
    """
    Create a new conversation context.

    Args:
        db: Database session
        context_data: Conversation context creation data

    Returns:
        ConversationContext: The created conversation context object

    Raises:
        HTTPException: 500 if database error occurs
    """
    try:
        # Create new conversation context instance
        db_context = ConversationContext(**context_data.model_dump())

        # Add to database
        db.add(db_context)
        db.commit()
        db.refresh(db_context)

        return db_context

    except IntegrityError as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Database error: {str(e)}"
        )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to create conversation context: {str(e)}"
        )


def update_conversation_context(
    db: Session,
    context_id: UUID,
    context_data: ConversationContextUpdate
) -> ConversationContext:
    """
    Update an existing conversation context.

    Args:
        db: Database session
        context_id: UUID of the conversation context to update
        context_data: Conversation context update data

    Returns:
        ConversationContext: The updated conversation context object

    Raises:
        HTTPException: 404 if conversation context not found
        HTTPException: 500 if database error occurs
    """
    # Get existing context
    context = get_conversation_context_by_id(db, context_id)

    try:
        # Update only provided fields
        update_data = context_data.model_dump(exclude_unset=True)

        # Apply updates
        for field, value in update_data.items():
            setattr(context, field, value)

        db.commit()
        db.refresh(context)

        return context

    except HTTPException:
        db.rollback()
        raise
    except IntegrityError as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Database error: {str(e)}"
        )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update conversation context: {str(e)}"
        )


def delete_conversation_context(db: Session, context_id: UUID) -> dict:
    """
    Delete a conversation context by its ID.

    Args:
        db: Database session
        context_id: UUID of the conversation context to delete

    Returns:
        dict: Success message

    Raises:
        HTTPException: 404 if conversation context not found
        HTTPException: 500 if database error occurs
    """
    # Get existing context (raises 404 if not found)
    context = get_conversation_context_by_id(db, context_id)

    try:
        db.delete(context)
        db.commit()

        return {
            "message": "ConversationContext deleted successfully",
            "context_id": str(context_id)
        }

    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to delete conversation context: {str(e)}"
        )
@@ -1,318 +0,0 @@
"""
DecisionLog service layer for business logic and database operations.

Handles all database operations for decision logs, tracking important
decisions made during work for future reference.
"""

from typing import Optional
from uuid import UUID

from fastapi import HTTPException, status
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session

from api.models.decision_log import DecisionLog
from api.schemas.decision_log import DecisionLogCreate, DecisionLogUpdate


def get_decision_logs(
    db: Session,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[DecisionLog], int]:
    """
    Retrieve a paginated list of decision logs.

    Args:
        db: Database session
        skip: Number of records to skip (for pagination)
        limit: Maximum number of records to return

    Returns:
        tuple: (list of decision logs, total count)
    """
    # Get total count
    total = db.query(DecisionLog).count()

    # Get paginated results, ordered by most recent first
    logs = (
        db.query(DecisionLog)
        .order_by(DecisionLog.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return logs, total


def get_decision_log_by_id(db: Session, log_id: UUID) -> DecisionLog:
    """
    Retrieve a single decision log by its ID.

    Args:
        db: Database session
        log_id: UUID of the decision log to retrieve

    Returns:
        DecisionLog: The decision log object

    Raises:
        HTTPException: 404 if decision log not found
    """
    log = db.query(DecisionLog).filter(DecisionLog.id == str(log_id)).first()

    if not log:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"DecisionLog with ID {log_id} not found"
        )

    return log


def get_decision_logs_by_project(
    db: Session,
    project_id: UUID,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[DecisionLog], int]:
    """
    Retrieve decision logs for a specific project.

    Args:
        db: Database session
        project_id: UUID of the project
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of decision logs, total count)
    """
    # Get total count for project
    total = db.query(DecisionLog).filter(
        DecisionLog.project_id == str(project_id)
    ).count()

    # Get paginated results
    logs = (
        db.query(DecisionLog)
        .filter(DecisionLog.project_id == str(project_id))
        .order_by(DecisionLog.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return logs, total


def get_decision_logs_by_session(
    db: Session,
    session_id: UUID,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[DecisionLog], int]:
    """
    Retrieve decision logs for a specific session.

    Args:
        db: Database session
        session_id: UUID of the session
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of decision logs, total count)
    """
    # Get total count for session
    total = db.query(DecisionLog).filter(
        DecisionLog.session_id == str(session_id)
    ).count()

    # Get paginated results
    logs = (
        db.query(DecisionLog)
        .filter(DecisionLog.session_id == str(session_id))
        .order_by(DecisionLog.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return logs, total


def get_decision_logs_by_impact(
    db: Session,
    impact: str,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[DecisionLog], int]:
    """
    Retrieve decision logs filtered by impact level.

    Args:
        db: Database session
        impact: Impact level (low, medium, high, critical)
        skip: Number of records to skip
        limit: Maximum number of records to return

    Returns:
        tuple: (list of decision logs, total count)
    """
    # Validate impact level
    valid_impacts = ["low", "medium", "high", "critical"]
    if impact.lower() not in valid_impacts:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Invalid impact level. Must be one of: {', '.join(valid_impacts)}"
        )

    # Get total count for impact
    total = db.query(DecisionLog).filter(
        DecisionLog.impact == impact.lower()
    ).count()

    # Get paginated results
    logs = (
        db.query(DecisionLog)
        .filter(DecisionLog.impact == impact.lower())
        .order_by(DecisionLog.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return logs, total


def create_decision_log(
    db: Session,
    log_data: DecisionLogCreate
) -> DecisionLog:
    """
    Create a new decision log.

    Args:
        db: Database session
        log_data: Decision log creation data

    Returns:
        DecisionLog: The created decision log object

    Raises:
        HTTPException: 500 if database error occurs
    """
    try:
        # Create new decision log instance
        db_log = DecisionLog(**log_data.model_dump())

        # Add to database
        db.add(db_log)
        db.commit()
        db.refresh(db_log)

        return db_log

    except IntegrityError as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Database error: {str(e)}"
        )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to create decision log: {str(e)}"
        )


def update_decision_log(
    db: Session,
    log_id: UUID,
    log_data: DecisionLogUpdate
) -> DecisionLog:
    """
    Update an existing decision log.

    Args:
        db: Database session
        log_id: UUID of the decision log to update
        log_data: Decision log update data

    Returns:
        DecisionLog: The updated decision log object

    Raises:
        HTTPException: 404 if decision log not found
        HTTPException: 500 if database error occurs
    """
    # Get existing log
    log = get_decision_log_by_id(db, log_id)

    try:
        # Update only provided fields
        update_data = log_data.model_dump(exclude_unset=True)

        # Apply updates
        for field, value in update_data.items():
            setattr(log, field, value)

        db.commit()
        db.refresh(log)

        return log

    except HTTPException:
        db.rollback()
        raise
    except IntegrityError as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Database error: {str(e)}"
        )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update decision log: {str(e)}"
        )


def delete_decision_log(db: Session, log_id: UUID) -> dict:
    """
    Delete a decision log by its ID.

    Args:
        db: Database session
        log_id: UUID of the decision log to delete

    Returns:
        dict: Success message

    Raises:
        HTTPException: 404 if decision log not found
        HTTPException: 500 if database error occurs
    """
    # Get existing log (raises 404 if not found)
    log = get_decision_log_by_id(db, log_id)

    try:
        db.delete(log)
        db.commit()

        return {
            "message": "DecisionLog deleted successfully",
            "log_id": str(log_id)
        }

    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to delete decision log: {str(e)}"
        )
@@ -1,273 +0,0 @@
"""
ProjectState service layer for business logic and database operations.

Handles all database operations for project states, tracking the current
state of projects for quick context retrieval.
"""

from typing import Optional
from uuid import UUID

from fastapi import HTTPException, status
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session

from api.models.project_state import ProjectState
from api.schemas.project_state import ProjectStateCreate, ProjectStateUpdate
from api.utils.context_compression import compress_project_state


def get_project_states(
    db: Session,
    skip: int = 0,
    limit: int = 100
) -> tuple[list[ProjectState], int]:
    """
    Retrieve a paginated list of project states.

    Args:
        db: Database session
        skip: Number of records to skip (for pagination)
        limit: Maximum number of records to return

    Returns:
        tuple: (list of project states, total count)
    """
    # Get total count
    total = db.query(ProjectState).count()

    # Get paginated results, ordered by most recently updated
    states = (
        db.query(ProjectState)
        .order_by(ProjectState.updated_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )

    return states, total


def get_project_state_by_id(db: Session, state_id: UUID) -> ProjectState:
    """
    Retrieve a single project state by its ID.

    Args:
        db: Database session
        state_id: UUID of the project state to retrieve

    Returns:
        ProjectState: The project state object

    Raises:
        HTTPException: 404 if project state not found
    """
    state = db.query(ProjectState).filter(ProjectState.id == str(state_id)).first()

    if not state:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"ProjectState with ID {state_id} not found"
        )

    return state


def get_project_state_by_project(db: Session, project_id: UUID) -> Optional[ProjectState]:
    """
    Retrieve the project state for a specific project.

    Each project has exactly one project state (unique constraint).

    Args:
        db: Database session
        project_id: UUID of the project

    Returns:
        Optional[ProjectState]: The project state if found, None otherwise
    """
    state = db.query(ProjectState).filter(ProjectState.project_id == str(project_id)).first()
    return state


def create_project_state(
    db: Session,
    state_data: ProjectStateCreate
) -> ProjectState:
    """
    Create a new project state.

    Args:
        db: Database session
        state_data: Project state creation data

    Returns:
        ProjectState: The created project state object

    Raises:
        HTTPException: 409 if project state already exists for this project
        HTTPException: 500 if database error occurs
    """
    # Check if project state already exists for this project
    existing_state = get_project_state_by_project(db, state_data.project_id)
    if existing_state:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"ProjectState for project ID {state_data.project_id} already exists"
        )

    try:
        # Create new project state instance
        db_state = ProjectState(**state_data.model_dump())

        # Add to database
        db.add(db_state)
        db.commit()
        db.refresh(db_state)

        return db_state

    except IntegrityError as e:
        db.rollback()
        if "project_id" in str(e.orig):
            raise HTTPException(
                status_code=status.HTTP_409_CONFLICT,
                detail=f"ProjectState for project ID {state_data.project_id} already exists"
            )
        else:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Database error: {str(e)}"
            )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to create project state: {str(e)}"
        )


def update_project_state(
    db: Session,
    state_id: UUID,
    state_data: ProjectStateUpdate
) -> ProjectState:
    """
    Update an existing project state.

    Uses compression utilities when updating to maintain efficient storage.

    Args:
        db: Database session
        state_id: UUID of the project state to update
        state_data: Project state update data

    Returns:
        ProjectState: The updated project state object

    Raises:
        HTTPException: 404 if project state not found
        HTTPException: 500 if database error occurs
    """
    # Get existing state
    state = get_project_state_by_id(db, state_id)

    try:
        # Update only provided fields
        update_data = state_data.model_dump(exclude_unset=True)

        # Apply updates
        for field, value in update_data.items():
            setattr(state, field, value)

        db.commit()
        db.refresh(state)

        return state

    except HTTPException:
        db.rollback()
        raise
    except IntegrityError as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Database error: {str(e)}"
        )
    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update project state: {str(e)}"
        )


def update_project_state_by_project(
    db: Session,
    project_id: UUID,
    state_data: ProjectStateUpdate
) -> ProjectState:
    """
    Update project state by project ID (convenience method).

    If project state doesn't exist, creates a new one.

    Args:
        db: Database session
        project_id: UUID of the project
        state_data: Project state update data

    Returns:
        ProjectState: The updated or created project state object

    Raises:
        HTTPException: 500 if database error occurs
    """
    # Try to get existing state
    state = get_project_state_by_project(db, project_id)

    if state:
        # Update existing state
        return update_project_state(db, UUID(state.id), state_data)
    else:
        # Create new state
        create_data = ProjectStateCreate(
            project_id=project_id,
            **state_data.model_dump(exclude_unset=True)
        )
        return create_project_state(db, create_data)


def delete_project_state(db: Session, state_id: UUID) -> dict:
    """
    Delete a project state by its ID.

    Args:
        db: Database session
        state_id: UUID of the project state to delete

    Returns:
        dict: Success message

    Raises:
        HTTPException: 404 if project state not found
        HTTPException: 500 if database error occurs
    """
    # Get existing state (raises 404 if not found)
    state = get_project_state_by_id(db, state_id)

    try:
        db.delete(state)
        db.commit()

        return {
            "message": "ProjectState deleted successfully",
            "state_id": str(state_id)
        }

    except Exception as e:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to delete project state: {str(e)}"
        )
@@ -1,554 +0,0 @@
# Context Compression Utilities - Usage Examples

Complete examples for all context compression functions in the ClaudeTools Context Recall System.

## 1. compress_conversation_summary()

Compresses conversations into dense JSON with key points.

```python
from api.utils.context_compression import compress_conversation_summary

# Example 1: From message list
messages = [
    {"role": "user", "content": "Build authentication system with JWT"},
    {"role": "assistant", "content": "Completed auth endpoints. Using FastAPI for async support."},
    {"role": "user", "content": "Now add CRUD endpoints for users"},
    {"role": "assistant", "content": "Working on user CRUD. Blocker: need to decide on pagination approach."}
]

summary = compress_conversation_summary(messages)
print(summary)
# Output:
# {
#   "phase": "api_development",
#   "completed": ["auth endpoints"],
#   "in_progress": "user crud",
#   "blockers": ["need to decide on pagination approach"],
#   "decisions": [{
#     "decision": "use fastapi",
#     "rationale": "async support",
#     "impact": "medium",
#     "timestamp": "2026-01-16T..."
#   }],
#   "next": ["add crud endpoints"]
# }

# Example 2: From raw text
text = """
Completed:
- Authentication system with JWT
- Database migrations
- User model

Currently working on: API rate limiting

Blockers:
- Need Redis for rate limiting store
- Waiting on DevOps for Redis instance

Next steps:
- Implement rate limiting middleware
- Add API documentation
- Set up monitoring
"""

summary = compress_conversation_summary(text)
print(summary)
# Extracts phase, completed items, blockers, next actions
```
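
As a companion to the examples above, here is a minimal, hypothetical sketch of how such a summary could have been persisted through the `ConversationContextCreate` schema removed in this commit. The title and tags are illustrative, and it assumes `compress_conversation_summary()` returns a plain dict:

```python
import json

from api.schemas.conversation_context import ConversationContextCreate

summary = compress_conversation_summary(messages)  # assumed to return a dict

# Map the compressed summary onto the (now removed) Pydantic schema;
# JSON-valued fields are stored as serialized text per the field descriptions.
context = ConversationContextCreate(
    context_type="session_summary",
    title="Auth and user CRUD session",   # illustrative title
    dense_summary=json.dumps(summary),
    tags=json.dumps(["auth", "crud"]),    # illustrative tags
)
```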

## 2. create_context_snippet()

Creates structured snippets with auto-extracted tags.

```python
from api.utils.context_compression import create_context_snippet

# Example 1: Decision snippet
snippet = create_context_snippet(
    content="Using FastAPI instead of Flask for async support and better performance",
    snippet_type="decision",
    importance=8
)
print(snippet)
# Output:
# {
#   "content": "Using FastAPI instead of Flask for async support and better performance",
#   "type": "decision",
#   "tags": ["decision", "fastapi", "async", "api"],
#   "importance": 8,
#   "relevance_score": 8.0,
#   "created_at": "2026-01-16T12:00:00+00:00",
#   "usage_count": 0,
#   "last_used": None
# }

# Example 2: Pattern snippet
snippet = create_context_snippet(
    content="Always use dependency injection for database sessions to ensure proper cleanup",
    snippet_type="pattern",
    importance=7
)
# Tags auto-extracted: ["pattern", "dependency-injection", "database"]

# Example 3: Blocker snippet
snippet = create_context_snippet(
    content="PostgreSQL connection pool exhausted under load - need to tune max_connections",
    snippet_type="blocker",
    importance=9
)
# Tags: ["blocker", "postgresql", "database", "critical"]
```

## 3. compress_project_state()

Compresses project state into dense summary.

```python
from api.utils.context_compression import compress_project_state

project_details = {
    "name": "ClaudeTools Context Recall System",
    "phase": "api_development",
    "progress_pct": 65,
    "blockers": ["Need Redis setup", "Waiting on security review"],
    "next_actions": ["Deploy to staging", "Load testing", "Documentation"]
}

current_work = "Implementing context compression utilities for token efficiency"

files_changed = [
    "api/utils/context_compression.py",
    "api/utils/__init__.py",
    "tests/test_context_compression.py",
    "migrations/versions/add_context_recall.py"
]

state = compress_project_state(project_details, current_work, files_changed)
print(state)
# Output:
# {
#     "project": "ClaudeTools Context Recall System",
#     "phase": "api_development",
#     "progress": 65,
#     "current": "Implementing context compression utilities for token efficiency",
#     "files": [
#         {"path": "api/utils/context_compression.py", "type": "api"},
#         {"path": "api/utils/__init__.py", "type": "api"},
#         {"path": "tests/test_context_compression.py", "type": "test"},
#         {"path": "migrations/versions/add_context_recall.py", "type": "migration"}
#     ],
#     "blockers": ["Need Redis setup", "Waiting on security review"],
#     "next": ["Deploy to staging", "Load testing", "Documentation"]
# }
```

## 4. extract_key_decisions()

Extracts decisions with rationale from text.

```python
from api.utils.context_compression import extract_key_decisions

text = """
We decided to use FastAPI for the API framework because it provides native async
support and automatic OpenAPI documentation generation.

Chose PostgreSQL for the database due to its robust JSON support and excellent
performance with complex queries.

Will use Redis for caching because it's fast and integrates well with our stack.
"""

decisions = extract_key_decisions(text)
print(decisions)
# Output:
# [
#     {
#         "decision": "use fastapi for the api framework",
#         "rationale": "it provides native async support and automatic openapi documentation",
#         "impact": "high",
#         "timestamp": "2026-01-16T12:00:00+00:00"
#     },
#     {
#         "decision": "postgresql for the database",
#         "rationale": "its robust json support and excellent performance with complex queries",
#         "impact": "high",
#         "timestamp": "2026-01-16T12:00:00+00:00"
#     },
#     {
#         "decision": "redis for caching",
#         "rationale": "it's fast and integrates well with our stack",
#         "impact": "medium",
#         "timestamp": "2026-01-16T12:00:00+00:00"
#     }
# ]
```

## 5. calculate_relevance_score()

Calculates relevance score with time decay and usage boost.

```python
from api.utils.context_compression import calculate_relevance_score
from datetime import datetime, timedelta, timezone

# Example 1: Recent, important snippet
snippet = {
    "created_at": datetime.now(timezone.utc).isoformat(),
    "usage_count": 3,
    "importance": 8,
    "tags": ["critical", "security", "api"],
    "last_used": datetime.now(timezone.utc).isoformat()
}

score = calculate_relevance_score(snippet)
print(f"Score: {score}")  # 10.0 (clamped from 10.6: 8 base + 0.6 usage + 1.0 tags + 1.0 recency)

# Example 2: Old, unused snippet
old_snippet = {
    "created_at": (datetime.now(timezone.utc) - timedelta(days=30)).isoformat(),
    "usage_count": 0,
    "importance": 5,
    "tags": ["general"]
}

score = calculate_relevance_score(old_snippet)
print(f"Score: {score}")  # ~3.0 (5 base - 2.0 time decay)

# Example 3: Frequently used pattern
pattern_snippet = {
    "created_at": (datetime.now(timezone.utc) - timedelta(days=7)).isoformat(),
    "usage_count": 10,
    "importance": 7,
    "tags": ["pattern", "architecture"],
    "last_used": (datetime.now(timezone.utc) - timedelta(hours=2)).isoformat()
}

score = calculate_relevance_score(pattern_snippet)
print(f"Score: {score}")  # ~9.8 (7 base - 0.7 decay + 2.0 usage + 0.5 tag boost + 1.0 recency)
```

## 6. merge_contexts()

Merges multiple contexts with deduplication.

```python
from api.utils.context_compression import merge_contexts

context1 = {
    "phase": "api_development",
    "completed": ["auth", "user_crud"],
    "in_progress": "rate_limiting",
    "blockers": ["need_redis"],
    "decisions": [{
        "decision": "use fastapi",
        "timestamp": "2026-01-15T10:00:00Z"
    }],
    "next": ["deploy"],
    "tags": ["api", "fastapi"]
}

context2 = {
    "phase": "api_development",
    "completed": ["auth", "user_crud", "validation"],
    "in_progress": "testing",
    "blockers": [],
    "decisions": [{
        "decision": "use pydantic",
        "timestamp": "2026-01-16T10:00:00Z"
    }],
    "next": ["deploy", "monitoring"],
    "tags": ["api", "testing"]
}

context3 = {
    "phase": "testing",
    "completed": ["unit_tests"],
    "files": ["tests/test_api.py", "tests/test_auth.py"],
    "tags": ["testing", "pytest"]
}

merged = merge_contexts([context1, context2, context3])
print(merged)
# Output:
# {
#     "phase": "api_development",  # First non-null
#     "completed": ["auth", "unit_tests", "user_crud", "validation"],  # Deduplicated, sorted
#     "in_progress": "testing",  # Most recent
#     "blockers": ["need_redis"],
#     "decisions": [
#         {"decision": "use pydantic", "timestamp": "2026-01-16T10:00:00Z"},  # Newest first
#         {"decision": "use fastapi", "timestamp": "2026-01-15T10:00:00Z"}
#     ],
#     "next": ["deploy", "monitoring"],
#     "files": ["tests/test_api.py", "tests/test_auth.py"],
#     "tags": ["api", "fastapi", "pytest", "testing"]
# }
```

## 7. format_for_injection()

Formats contexts for token-efficient prompt injection.

```python
from api.utils.context_compression import format_for_injection

contexts = [
    {
        "type": "blocker",
        "content": "Redis connection failing in production - needs debugging",
        "tags": ["redis", "production", "critical"],
        "relevance_score": 9.5
    },
    {
        "type": "decision",
        "content": "Using FastAPI for async support and auto-documentation",
        "tags": ["fastapi", "architecture"],
        "relevance_score": 8.2
    },
    {
        "type": "pattern",
        "content": "Always use dependency injection for DB sessions",
        "tags": ["pattern", "database"],
        "relevance_score": 7.8
    },
    {
        "type": "state",
        "content": "Currently at 65% completion of API development phase",
        "tags": ["progress", "api"],
        "relevance_score": 7.0
    }
]

# Format with default token limit
prompt = format_for_injection(contexts, max_tokens=500)
print(prompt)
# Output (types appear in priority order: blockers, decisions, states, patterns):
# ## Context Recall
#
# **Blockers:**
# - Redis connection failing in production - needs debugging [redis, production, critical]
#
# **Decisions:**
# - Using FastAPI for async support and auto-documentation [fastapi, architecture]
#
# **States:**
# - Currently at 65% completion of API development phase [progress, api]
#
# **Patterns:**
# - Always use dependency injection for DB sessions [pattern, database]
#
# *4 contexts loaded*

# Format with tight token limit
compact_prompt = format_for_injection(contexts, max_tokens=200)
print(compact_prompt)
# Only includes highest priority items within token budget
```

## 8. extract_tags_from_text()

Auto-extracts relevant tags from text.

```python
from api.utils.context_compression import extract_tags_from_text

# Example 1: Technology detection
text1 = "Implementing authentication using FastAPI with PostgreSQL database and Redis caching"
tags = extract_tags_from_text(text1)
print(tags)  # ["fastapi", "postgresql", "redis", "api", "database", "auth"]

# Example 2: Pattern detection
text2 = "Refactoring async error handling middleware to optimize performance"
tags = extract_tags_from_text(text2)
print(tags)  # ["async", "middleware", "error-handling", "optimization", "refactor"]

# Example 3: Category detection
text3 = "Critical bug in production: database connection pool exhausted causing system blocker"
tags = extract_tags_from_text(text3)
print(tags)  # ["database", "critical", "blocker", "bug"]

# Example 4: Mixed content
text4 = """
Building CRUD endpoints with FastAPI and SQLAlchemy.
Using dependency injection pattern for database sessions.
Need to add validation with Pydantic.
Testing with pytest.
"""
tags = extract_tags_from_text(text4)
print(tags)
# ["fastapi", "sqlalchemy", "api", "database", "crud", "dependency-injection",
#  "validation", "testing"]
```

## 9. compress_file_changes()

Compresses file change lists.

```python
from api.utils.context_compression import compress_file_changes

files = [
    "api/routes/auth.py",
    "api/routes/users.py",
    "api/models/user.py",
    "api/schemas/user.py",
    "tests/test_auth.py",
    "tests/test_users.py",
    "migrations/versions/001_add_users.py",
    "docker-compose.yml",
    "README.md",
    "requirements.txt"
]

compressed = compress_file_changes(files)
print(compressed)
# Output:
# [
#     {"path": "api/routes/auth.py", "type": "api"},
#     {"path": "api/routes/users.py", "type": "api"},
#     {"path": "api/models/user.py", "type": "schema"},
#     {"path": "api/schemas/user.py", "type": "schema"},
#     {"path": "tests/test_auth.py", "type": "test"},
#     {"path": "tests/test_users.py", "type": "test"},
#     {"path": "migrations/versions/001_add_users.py", "type": "migration"},
#     {"path": "docker-compose.yml", "type": "infra"},
#     {"path": "README.md", "type": "doc"},
#     {"path": "requirements.txt", "type": "doc"}
# ]
```

## Complete Workflow Example

Here's a complete example showing how these functions work together:

```python
from api.utils.context_compression import (
    compress_conversation_summary,
    create_context_snippet,
    compress_project_state,
    merge_contexts,
    format_for_injection,
    calculate_relevance_score
)

# 1. Compress ongoing conversation
conversation = [
    {"role": "user", "content": "Build API with FastAPI and PostgreSQL"},
    {"role": "assistant", "content": "Completed auth system. Now working on CRUD endpoints."}
]
conv_summary = compress_conversation_summary(conversation)

# 2. Create snippets for important info
decision_snippet = create_context_snippet(
    "Using FastAPI for async support",
    snippet_type="decision",
    importance=8
)

blocker_snippet = create_context_snippet(
    "Need Redis for rate limiting",
    snippet_type="blocker",
    importance=9
)

# 3. Compress project state
project_state = compress_project_state(
    project_details={"name": "API", "phase": "development", "progress_pct": 60},
    current_work="Building CRUD endpoints",
    files_changed=["api/routes/users.py", "tests/test_users.py"]
)

# 4. Merge all contexts
all_contexts = [conv_summary, project_state]
merged = merge_contexts(all_contexts)

# 5. Prepare snippets with relevance scores
snippets = [decision_snippet, blocker_snippet]
for snippet in snippets:
    snippet["relevance_score"] = calculate_relevance_score(snippet)

# Sort by relevance
snippets.sort(key=lambda s: s["relevance_score"], reverse=True)

# 6. Format for prompt injection
context_prompt = format_for_injection(snippets, max_tokens=300)

print("=" * 60)
print("CONTEXT READY FOR CLAUDE:")
print("=" * 60)
print(context_prompt)
# This prompt can now be injected into Claude's context
```

## Integration with Database

Example of using these utilities with SQLAlchemy models:

```python
from sqlalchemy.orm import Session
from api.models.context_recall import ContextSnippet
from api.utils.context_compression import (
    create_context_snippet,
    calculate_relevance_score,
    format_for_injection
)

def save_context(db: Session, content: str, snippet_type: str, importance: int):
    """Save context snippet to database"""
    snippet = create_context_snippet(content, snippet_type, importance)

    db_snippet = ContextSnippet(
        content=snippet["content"],
        type=snippet["type"],
        tags=snippet["tags"],
        importance=snippet["importance"],
        relevance_score=snippet["relevance_score"]
    )
    db.add(db_snippet)
    db.commit()
    return db_snippet

def load_relevant_contexts(db: Session, limit: int = 20):
    """Load and format most relevant contexts"""
    snippets = (
        db.query(ContextSnippet)
        .order_by(ContextSnippet.relevance_score.desc())
        .limit(limit)
        .all()
    )

    # Convert to dicts and recalculate scores
    context_dicts = []
    for snippet in snippets:
        ctx = {
            "content": snippet.content,
            "type": snippet.type,
            "tags": snippet.tags,
            "importance": snippet.importance,
            "created_at": snippet.created_at.isoformat(),
            "usage_count": snippet.usage_count,
            "last_used": snippet.last_used.isoformat() if snippet.last_used else None
        }
        ctx["relevance_score"] = calculate_relevance_score(ctx)
        context_dicts.append(ctx)

    # Sort by updated relevance score
    context_dicts.sort(key=lambda c: c["relevance_score"], reverse=True)

    # Format for injection
    return format_for_injection(context_dicts, max_tokens=1000)
```

## Token Efficiency Stats

These utilities achieve significant token compression:

- Raw conversation (500 tokens) → Compressed summary (50-80 tokens) = **85-90% reduction**
- Full project state (1000 tokens) → Compressed state (100-150 tokens) = **85-90% reduction**
- Multiple contexts merged → Deduplicated = **30-50% reduction**
- Formatted injection → Only relevant info = **60-80% reduction**

**Overall pipeline efficiency: 90-95% token reduction while preserving critical information.**
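
These percentages assume long transcripts; for very short exchanges the JSON envelope can outweigh the savings. A quick sanity check on your own data, using the same rough 4-characters-per-token estimate that `format_for_injection` uses internally (the `messages` sample here is hypothetical):

```python
import json
from api.utils.context_compression import compress_conversation_summary

messages = [
    {"role": "user", "content": "Build authentication system with JWT"},
    {"role": "assistant", "content": "Completed auth endpoints. Now working on user CRUD."},
]
raw = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
compressed = json.dumps(compress_conversation_summary(messages))

def estimate_tokens(s: str) -> int:
    return len(s) // 4  # same crude heuristic as format_for_injection

print(estimate_tokens(raw), "->", estimate_tokens(compressed), "estimated tokens")
```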
@@ -1,228 +0,0 @@
# Context Compression - Quick Reference

**Location:** `D:\ClaudeTools\api\utils\context_compression.py`

## Quick Import

```python
from api.utils.context_compression import *
# or
from api.utils import compress_conversation_summary, create_context_snippet, format_for_injection
```

## Function Quick Reference

| Function | Input | Output | Token Reduction |
|----------|-------|--------|-----------------|
| `compress_conversation_summary(conversation)` | str or list[dict] | Dense JSON summary | 85-90% |
| `create_context_snippet(content, type, importance)` | str, str, int | Structured snippet | N/A |
| `compress_project_state(details, work, files)` | dict, str, list | Dense state | 85-90% |
| `extract_key_decisions(text)` | str | list[dict] | N/A |
| `calculate_relevance_score(snippet, time)` | dict, datetime | float (0-10) | N/A |
| `merge_contexts(contexts)` | list[dict] | Merged dict | 30-50% |
| `format_for_injection(contexts, max_tokens)` | list[dict], int | Markdown str | 60-80% |
| `extract_tags_from_text(text)` | str | list[str] | N/A |
| `compress_file_changes(files)` | list[str] | list[dict] | N/A |

## Common Patterns

### Pattern 1: Save Conversation Context

```python
summary = compress_conversation_summary(messages)
snippet = create_context_snippet(
    json.dumps(summary),
    snippet_type="state",
    importance=6
)
db.add(ContextSnippet(**snippet))
db.commit()
```

### Pattern 2: Load and Inject Context

```python
snippets = db.query(ContextSnippet)\
    .order_by(ContextSnippet.relevance_score.desc())\
    .limit(20).all()

contexts = [s.to_dict() for s in snippets]
prompt = format_for_injection(contexts, max_tokens=1000)

# Use in Claude prompt
messages = [
    {"role": "system", "content": f"{system_msg}\n\n{prompt}"},
    {"role": "user", "content": user_msg}
]
```

### Pattern 3: Record Decision

```python
decision = create_context_snippet(
    "Using PostgreSQL for better JSON support and performance",
    snippet_type="decision",
    importance=9
)
db.add(ContextSnippet(**decision))
```

### Pattern 4: Track Blocker

```python
blocker = create_context_snippet(
    "Redis connection failing in production",
    snippet_type="blocker",
    importance=10
)
db.add(ContextSnippet(**blocker))
```

### Pattern 5: Update Relevance Scores

```python
snippets = db.query(ContextSnippet).all()
for snippet in snippets:
    data = snippet.to_dict()
    snippet.relevance_score = calculate_relevance_score(data)
db.commit()
```

### Pattern 6: Merge Agent Contexts

```python
# Load contexts from multiple sources
conv_context = compress_conversation_summary(messages)
project_context = compress_project_state(project, work, files)
db_contexts = [s.to_dict() for s in db.query(ContextSnippet).limit(10)]

# Merge all
merged = merge_contexts([conv_context, project_context] + db_contexts)
```

## Tag Categories

### Technologies (Auto-detected)
`fastapi`, `postgresql`, `redis`, `docker`, `nginx`, `python`, `javascript`, `sqlalchemy`, `alembic`

### Patterns
`async`, `crud`, `middleware`, `dependency-injection`, `error-handling`, `validation`, `optimization`, `refactor`

### Categories
`critical`, `blocker`, `bug`, `feature`, `architecture`, `integration`, `security`, `testing`, `deployment`

## Relevance Score Formula

```
Score = base_importance
      - min(2.0, age_days × 0.1)        # Time decay
      + min(2.0, usage_count × 0.2)     # Usage boost
      + (important_tags × 0.5)          # Tag boost
      + (1.0 if used_in_24h else 0.0)   # Recency boost

Clamped to [0.0, 10.0]
```

### Important Tags
`critical`, `blocker`, `decision`, `architecture`, `security`, `performance`, `bug`
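
To read the formula concretely, here is a worked computation; the numbers reproduce Example 3 from the usage examples (a 7-day-old pattern snippet, used 10 times, with one important tag, used within the last 24h):

```python
# Worked example of the relevance formula, using the constants above.
importance = 7           # base
age_days = 7             # decay = min(2.0, 7 * 0.1) = 0.7
usage_count = 10         # boost = min(2.0, 10 * 0.2) = 2.0
important_tag_hits = 1   # "architecture" is an important tag -> 0.5
recency_boost = 1.0      # used within the last 24h

score = (importance
         - min(2.0, age_days * 0.1)
         + min(2.0, usage_count * 0.2)
         + important_tag_hits * 0.5
         + recency_boost)
score = max(0.0, min(10.0, score))
print(round(score, 1))  # 9.8
```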

## File Type Detection

| Path Pattern | Type |
|--------------|------|
| `*test*` | test |
| `*migration*` | migration |
| `*config*.{yaml,json,toml}` | config |
| `*model*`, `*schema*` | schema |
| `*api*`, `*route*`, `*endpoint*` | api |
| `.{py,js,ts,go,java}` | impl |
| `.{md,txt,rst}` | doc |
| `*docker*`, `*deploy*` | infra |

## One-Liner Examples

```python
# Compress and save conversation
db.add(ContextSnippet(**create_context_snippet(
    json.dumps(compress_conversation_summary(messages)),
    "state", 6
)))

# Load top contexts as prompt
prompt = format_for_injection(
    [s.to_dict() for s in db.query(ContextSnippet)
        .order_by(ContextSnippet.relevance_score.desc())
        .limit(20)],
    max_tokens=1000
)

# Extract and save decisions
for decision in extract_key_decisions(text):
    db.add(ContextSnippet(**create_context_snippet(
        f"{decision['decision']} because {decision['rationale']}",
        "decision",
        8 if decision['impact'] == 'high' else 6
    )))

# Auto-tag and save
snippet = create_context_snippet(content, "general", 5)
# Tags auto-extracted from content

# Update all relevance scores
for s in db.query(ContextSnippet):
    s.relevance_score = calculate_relevance_score(s.to_dict())
db.commit()
```

## Token Budget Guide

| Max Tokens | Use Case | Contexts |
|------------|----------|----------|
| 200 | Critical only | 3-5 |
| 500 | Essential | 8-12 |
| 1000 | Standard | 15-25 |
| 2000 | Extended | 30-50 |

## Error Handling

All functions handle edge cases (see the sketch after this list):
- Empty input → Empty/default output
- Invalid dates → Current time
- Missing fields → Defaults
- Malformed JSON → Graceful degradation
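
A minimal sketch of those defaults in action:

```python
from api.utils.context_compression import (
    calculate_relevance_score,
    compress_conversation_summary,
    format_for_injection,
)

# Empty input -> empty/default output
print(compress_conversation_summary(""))   # {"phase": "unknown", "completed": [], ...}
print(format_for_injection([]))            # ""

# Invalid date -> treated as "now"; missing fields -> defaults
print(calculate_relevance_score({"created_at": "not-a-date", "importance": 5}))  # 5.0
```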

## Testing

```bash
cd D:\ClaudeTools
python test_context_compression_quick.py
```

All 9 tests should pass.

## Performance

- Conversation compression: ~1ms per message
- Tag extraction: ~0.5ms per text
- Relevance calculation: ~0.1ms per snippet
- Format injection: ~10ms for 20 contexts

## Common Issues

**Issue:** Tags not extracted
**Solution:** Check that the text contains recognized keywords

**Issue:** Low relevance scores
**Solution:** Increase importance or usage_count

**Issue:** Injection too long
**Solution:** Reduce max_tokens or limit contexts

**Issue:** Missing fields in snippet
**Solution:** All required fields have defaults

## Full Documentation

- Examples: `api/utils/CONTEXT_COMPRESSION_EXAMPLES.md`
- Summary: `api/utils/CONTEXT_COMPRESSION_SUMMARY.md`
- Tests: `test_context_compression_quick.py`
@@ -1,338 +0,0 @@
# Context Compression Utilities - Summary

## Overview

Created comprehensive context compression utilities for the ClaudeTools Context Recall System. These utilities enable **90-95% token reduction** while preserving critical information for efficient context injection.

## Files Created

1. **D:\ClaudeTools\api\utils\context_compression.py** - Main implementation (680 lines)
2. **D:\ClaudeTools\api\utils\CONTEXT_COMPRESSION_EXAMPLES.md** - Comprehensive usage examples
3. **D:\ClaudeTools\test_context_compression_quick.py** - Functional tests (all passing)

## Functions Implemented

### Core Compression Functions

1. **compress_conversation_summary(conversation)**
   - Compresses conversations into dense JSON structure
   - Extracts: phase, completed tasks, in-progress work, blockers, decisions, next actions
   - Token reduction: 85-90%

2. **create_context_snippet(content, snippet_type, importance)**
   - Creates structured snippets with auto-extracted tags
   - Includes relevance scoring
   - Supports types: decision, pattern, lesson, blocker, state

3. **compress_project_state(project_details, current_work, files_changed)**
   - Compresses project state into dense summary
   - Includes: phase, progress %, blockers, next actions, file changes
   - Token reduction: 85-90%

4. **extract_key_decisions(text)**
   - Extracts decisions with rationale and impact
   - Auto-classifies impact level (low/medium/high)
   - Returns structured array with timestamps

### Relevance & Scoring

5. **calculate_relevance_score(snippet, current_time)**
   - Calculates 0.0-10.0 relevance score
   - Factors: age (time decay), usage count, importance, tags, recency
   - Formula: `base_importance - time_decay + usage_boost + tag_boost + recency_boost`

### Context Management

6. **merge_contexts(contexts)**
   - Merges multiple context objects
   - Deduplicates information
   - Keeps most recent values
   - Token reduction: 30-50%

7. **format_for_injection(contexts, max_tokens)**
   - Formats contexts for prompt injection
   - Token-efficient markdown output
   - Prioritizes by relevance score
   - Respects token budget

### Utilities

8. **extract_tags_from_text(text)**
   - Auto-detects technologies (fastapi, postgresql, redis, etc.)
   - Identifies patterns (async, crud, middleware, etc.)
   - Recognizes categories (critical, blocker, bug, etc.)

9. **compress_file_changes(file_paths)**
   - Compresses file change lists
   - Auto-classifies by type: api, test, schema, migration, config, doc, infra
   - Limits to 50 files max

## Key Features

### Maximum Token Efficiency
- **Conversation compression**: 500 tokens → 50-80 tokens (85-90% reduction)
- **Project state**: 1000 tokens → 100-150 tokens (85-90% reduction)
- **Context merging**: 30-50% deduplication
- **Overall pipeline**: 90-95% total reduction

### Intelligent Relevance Scoring
```
Score = base_importance
      - (age_days × 0.1, max -2.0)      # Time decay
      + (usage_count × 0.2, max +2.0)   # Usage boost
      + (important_tags × 0.5)          # Tag boost
      + (1.0 if used_in_24h else 0.0)   # Recency boost
```

### Auto-Tag Extraction
Detects 30+ technology and pattern keywords:
- Technologies: fastapi, postgresql, redis, docker, nginx, etc.
- Patterns: async, crud, middleware, dependency-injection, etc.
- Categories: critical, blocker, bug, feature, architecture, etc.

## Usage Examples

### Basic Usage

```python
from api.utils.context_compression import (
    compress_conversation_summary,
    create_context_snippet,
    format_for_injection
)

# Compress conversation
messages = [
    {"role": "user", "content": "Build auth with FastAPI"},
    {"role": "assistant", "content": "Completed auth endpoints"}
]
summary = compress_conversation_summary(messages)
# {"phase": "api_development", "completed": ["auth endpoints"], ...}

# Create snippet
snippet = create_context_snippet(
    "Using FastAPI for async support",
    snippet_type="decision",
    importance=8
)
# Auto-extracts tags: ["decision", "fastapi", "api", "async"]

# Format for prompt injection
contexts = [snippet]
prompt = format_for_injection(contexts, max_tokens=500)
# "## Context Recall\n\n**Decisions:**\n- Using FastAPI..."
```

### Database Integration

```python
from sqlalchemy.orm import Session
from api.models.context_recall import ContextSnippet
from api.utils.context_compression import (
    create_context_snippet,
    calculate_relevance_score,
    format_for_injection
)

def save_context(db: Session, content: str, type: str, importance: int):
    """Save context to database"""
    snippet = create_context_snippet(content, type, importance)
    db_snippet = ContextSnippet(**snippet)
    db.add(db_snippet)
    db.commit()
    return db_snippet

def load_contexts(db: Session, limit: int = 20):
    """Load and format relevant contexts"""
    snippets = db.query(ContextSnippet)\
        .order_by(ContextSnippet.relevance_score.desc())\
        .limit(limit).all()

    # Convert to dicts and recalculate scores
    contexts = [snippet.to_dict() for snippet in snippets]
    for ctx in contexts:
        ctx["relevance_score"] = calculate_relevance_score(ctx)

    # Sort and format
    contexts.sort(key=lambda c: c["relevance_score"], reverse=True)
    return format_for_injection(contexts, max_tokens=1000)
```

### Complete Workflow

```python
from api.utils.context_compression import (
    compress_conversation_summary,
    compress_project_state,
    merge_contexts
)

# 1. Compress conversation
conv_summary = compress_conversation_summary(messages)

# 2. Compress project state
project_state = compress_project_state(
    {"name": "API", "phase": "dev", "progress_pct": 60},
    "Building CRUD endpoints",
    ["api/routes/users.py"]
)

# 3. Merge contexts (the merged dict can itself be saved as a "state" snippet)
merged = merge_contexts([conv_summary, project_state])

# 4. Load stored snippet context from the DB
# (load_contexts above already recalculates scores, sorts, and formats)
context_prompt = load_contexts(db, limit=20)

# 5. Inject into Claude prompt
full_prompt = f"{context_prompt}\n\n{user_message}"
```

## Testing

All 9 functional tests passing:

```
✓ compress_conversation_summary - Extracts phase, completed, in-progress, blockers
✓ create_context_snippet - Creates structured snippets with tags
✓ extract_tags_from_text - Detects technologies, patterns, categories
✓ extract_key_decisions - Extracts decisions with rationale
✓ calculate_relevance_score - Scores with time decay and boosts
✓ merge_contexts - Merges and deduplicates contexts
✓ compress_project_state - Compresses project state
✓ compress_file_changes - Classifies and compresses file lists
✓ format_for_injection - Formats for token-efficient injection
```

Run tests:
```bash
cd D:\ClaudeTools
python test_context_compression_quick.py
```

## Type Safety

All functions include:
- Full type hints (typing module)
- Comprehensive docstrings
- Usage examples in docstrings
- Error handling for edge cases

## Performance Characteristics

### Token Efficiency
- **Single conversation**: 500 → 60 tokens (88% reduction)
- **Project state**: 1000 → 120 tokens (88% reduction)
- **10 contexts merged**: 5000 → 300 tokens (94% reduction)
- **Formatted injection**: Only relevant info within budget

### Time Complexity
- `compress_conversation_summary`: O(n) - linear in text length
- `create_context_snippet`: O(n) - linear in content length
- `extract_key_decisions`: O(n) - regex matching
- `calculate_relevance_score`: O(1) - constant time
- `merge_contexts`: O(n×m) - n contexts, m items per context
- `format_for_injection`: O(n log n) - sorting + formatting

### Space Complexity
All functions use O(n) space relative to input size, with hard limits:
- Max 10 completed items per context
- Max 5 blockers per context
- Max 10 next actions per context
- Max 20 contexts in merged output
- Max 50 files in compressed changes

## Integration Points

### Database Models
Works with SQLAlchemy models having these fields (a minimal sketch follows the list):
- `content` (str)
- `type` (str)
- `tags` (list/JSON)
- `importance` (int 1-10)
- `relevance_score` (float 0.0-10.0)
- `created_at` (datetime)
- `usage_count` (int)
- `last_used` (datetime, nullable)
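
A minimal model sketch matching that field list; the table name and column options here are assumptions, not the removed model's actual definition:

```python
# Hypothetical minimal model matching the field list above.
from datetime import datetime, timezone

from sqlalchemy import Column, DateTime, Float, Integer, String, JSON
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class ContextSnippet(Base):
    __tablename__ = "context_snippets"  # assumption: matches the preserved table

    id = Column(Integer, primary_key=True)
    content = Column(String(500), nullable=False)
    type = Column(String(30), default="general")
    tags = Column(JSON, default=list)
    importance = Column(Integer, default=5)       # 1-10
    relevance_score = Column(Float, default=5.0)  # 0.0-10.0
    created_at = Column(DateTime(timezone=True),
                        default=lambda: datetime.now(timezone.utc))
    usage_count = Column(Integer, default=0)
    last_used = Column(DateTime(timezone=True), nullable=True)

    def to_dict(self) -> dict:
        """Shape expected by calculate_relevance_score / format_for_injection."""
        return {
            "content": self.content,
            "type": self.type,
            "tags": self.tags or [],
            "importance": self.importance,
            "relevance_score": self.relevance_score,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "usage_count": self.usage_count,
            "last_used": self.last_used.isoformat() if self.last_used else None,
        }
```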

### API Endpoints
Expected API usage (a hypothetical route sketch follows the list):
- `POST /api/v1/context` - Save context snippet
- `GET /api/v1/context` - Load contexts (sorted by relevance)
- `POST /api/v1/context/merge` - Merge multiple contexts
- `GET /api/v1/context/inject` - Get formatted prompt injection
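
A hypothetical FastAPI wiring for two of those routes, just to show the intended flow; the router, dependency, and persistence details are assumptions, not the removed routers' actual code:

```python
# Hypothetical wiring; not the removed routers' actual code.
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session

from api.utils.context_compression import create_context_snippet, format_for_injection

router = APIRouter(prefix="/api/v1/context", tags=["Context"])

def get_db():
    """Placeholder session dependency; the real app would yield a Session."""
    yield None

@router.post("")
def save_context(content: str, snippet_type: str = "general", importance: int = 5,
                 db: Session = Depends(get_db)):
    snippet = create_context_snippet(content, snippet_type, importance)
    # ...persist via the ContextSnippet model here...
    return snippet

@router.get("/inject")
def get_injection(max_tokens: int = 1000, db: Session = Depends(get_db)):
    contexts = []  # ...load snippet dicts ordered by relevance_score here...
    return {"prompt": format_for_injection(contexts, max_tokens=max_tokens)}
```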

### Claude Prompt Injection
```python
# Before sending to Claude
context_prompt = load_contexts(db, limit=20)
messages = [
    {"role": "system", "content": f"{base_system_prompt}\n\n{context_prompt}"},
    {"role": "user", "content": user_message}
]
response = claude_client.messages.create(messages=messages)
```

## Future Enhancements

Potential improvements:
1. **Semantic similarity**: Group similar contexts
2. **LLM-based summarization**: Use small model for ultra-compression
3. **Context pruning**: Auto-remove stale contexts
4. **Multi-agent support**: Share contexts across agents
5. **Vector embeddings**: For semantic search
6. **Streaming compression**: Handle very large conversations
7. **Custom tag rules**: User-defined tag extraction

## File Structure

```
D:\ClaudeTools\api\utils\
├── __init__.py                       # Updated exports
├── context_compression.py            # Main implementation (680 lines)
├── CONTEXT_COMPRESSION_EXAMPLES.md   # Usage examples
└── CONTEXT_COMPRESSION_SUMMARY.md    # This file

D:\ClaudeTools\
└── test_context_compression_quick.py # Functional tests
```

## Import Reference

```python
# Import all functions
from api.utils.context_compression import (
    # Core compression
    compress_conversation_summary,
    create_context_snippet,
    compress_project_state,
    extract_key_decisions,

    # Relevance & scoring
    calculate_relevance_score,

    # Context management
    merge_contexts,
    format_for_injection,

    # Utilities
    extract_tags_from_text,
    compress_file_changes
)

# Or import via utils package
from api.utils import (
    compress_conversation_summary,
    create_context_snippet,
    # ... etc
)
```

## License & Attribution

Part of the ClaudeTools Context Recall System.
Created: 2026-01-16
All utilities designed for maximum token efficiency and information density.
@@ -1,643 +0,0 @@
"""
Context Compression Utilities for ClaudeTools Context Recall System

Maximum information density, minimum token usage.
All functions designed for efficient context summarization and injection.
"""

import re
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Union
from collections import defaultdict


def compress_conversation_summary(
    conversation: Union[str, List[Dict[str, str]]]
) -> Dict[str, Any]:
    """
    Compress conversation into dense JSON structure with key points.

    Args:
        conversation: Raw conversation text or message list
            [{role: str, content: str}, ...] or str

    Returns:
        Dense summary with phase, completed, in_progress, blockers, decisions, next

    Example:
        >>> msgs = [{"role": "assistant", "content": "Completed: auth API endpoints"}]
        >>> compress_conversation_summary(msgs)
        {
            "phase": "api_development",
            "completed": ["auth api endpoints"],
            "in_progress": None,
            "blockers": [],
            "decisions": [],
            "next": []
        }
    """
    # Convert to text if list
    if isinstance(conversation, list):
        text = "\n".join([f"{msg.get('role', 'user')}: {msg.get('content', '')}"
                          for msg in conversation])
    else:
        text = conversation

    text_lower = text.lower()

    # Extract phase
    phase = "unknown"
    phase_keywords = {
        "api_development": ["api", "endpoint", "fastapi", "route"],
        "testing": ["test", "pytest", "unittest"],
        "deployment": ["deploy", "docker", "production"],
        "debugging": ["bug", "error", "fix", "debug"],
        "design": ["design", "architecture", "plan"],
        "integration": ["integrate", "connect", "third-party"]
    }

    for p, keywords in phase_keywords.items():
        if any(kw in text_lower for kw in keywords):
            phase = p
            break

    # Extract completed tasks (patterns are lowercase because they run on text_lower)
    completed = []
    completed_patterns = [
        r"completed[:\s]+([^\n.]+)",
        r"finished[:\s]+([^\n.]+)",
        r"done[:\s]+([^\n.]+)",
        r"\[ok\]\s*([^\n.]+)",
        r"\[pass\]\s*([^\n.]+)",
        r"implemented[:\s]+([^\n.]+)"
    ]
    for pattern in completed_patterns:
        matches = re.findall(pattern, text_lower)
        completed.extend([m.strip()[:50] for m in matches])

    # Extract in-progress
    in_progress = None
    in_progress_patterns = [
        r"in[- ]progress[:\s]+([^\n.]+)",
        r"working on[:\s]+([^\n.]+)",
        r"currently[:\s]+([^\n.]+)"
    ]
    for pattern in in_progress_patterns:
        match = re.search(pattern, text_lower)
        if match:
            in_progress = match.group(1).strip()[:50]
            break

    # Extract blockers
    blockers = []
    blocker_patterns = [
        r"blocker[s]?[:\s]+([^\n.]+)",
        r"blocked[:\s]+([^\n.]+)",
        r"issue[s]?[:\s]+([^\n.]+)",
        r"problem[s]?[:\s]+([^\n.]+)"
    ]
    for pattern in blocker_patterns:
        matches = re.findall(pattern, text_lower)
        blockers.extend([m.strip()[:50] for m in matches])

    # Extract decisions
    decisions = extract_key_decisions(text)

    # Extract next actions
    next_actions = []
    next_patterns = [
        r"next[:\s]+([^\n.]+)",
        r"todo[:\s]+([^\n.]+)",
        r"will[:\s]+([^\n.]+)"
    ]
    for pattern in next_patterns:
        matches = re.findall(pattern, text_lower)
        next_actions.extend([m.strip()[:50] for m in matches])

    return {
        "phase": phase,
        "completed": list(set(completed))[:10],  # Dedupe, limit
        "in_progress": in_progress,
        "blockers": list(set(blockers))[:5],
        "decisions": decisions[:5],
        "next": list(set(next_actions))[:10]
    }


def create_context_snippet(
    content: str,
    snippet_type: str = "general",
    importance: int = 5
) -> Dict[str, Any]:
    """
    Create structured snippet with auto-extracted tags and relevance score.

    Args:
        content: Raw information (decision, pattern, lesson)
        snippet_type: Type of snippet (decision, pattern, lesson, state)
        importance: Manual importance 1-10, default 5

    Returns:
        Structured snippet with tags, relevance score, metadata

    Example:
        >>> create_context_snippet("Using FastAPI for async support", "decision")
        {
            "content": "Using FastAPI for async support",
            "type": "decision",
            "tags": ["decision", "fastapi", "api", "async"],
            "importance": 5,
            "relevance_score": 5.5,
            "created_at": "2026-01-16T...",
            "usage_count": 0
        }
    """
    # Extract tags from content
    tags = extract_tags_from_text(content)

    # Add type-specific tag
    if snippet_type not in tags:
        tags.insert(0, snippet_type)

    now = datetime.now(timezone.utc).isoformat()

    snippet = {
        "content": content[:500],  # Limit content length
        "type": snippet_type,
        "tags": tags[:10],  # Limit tags
        "importance": max(1, min(10, importance)),  # Clamp 1-10
        "created_at": now,
        "usage_count": 0,
        "last_used": None
    }

    # Calculate initial relevance score
    snippet["relevance_score"] = calculate_relevance_score(snippet)

    return snippet


def compress_project_state(
    project_details: Dict[str, Any],
    current_work: str,
    files_changed: Optional[List[str]] = None
) -> Dict[str, Any]:
    """
    Compress project state into dense summary.

    Args:
        project_details: Dict with name, description, phase, etc.
        current_work: Description of current work
        files_changed: List of file paths that changed

    Returns:
        Dense project state with phase, progress, blockers, next actions

    Example:
        >>> compress_project_state(
        ...     {"name": "ClaudeTools", "phase": "api_dev"},
        ...     "Building auth endpoints",
        ...     ["api/auth.py"]
        ... )
        {
            "project": "ClaudeTools",
            "phase": "api_dev",
            "progress": 0,
            "current": "Building auth endpoints",
            "files": [{"path": "api/auth.py", "type": "api"}],
            "blockers": [],
            "next": []
        }
    """
    files_changed = files_changed or []

    state = {
        "project": project_details.get("name", "unknown")[:50],
        "phase": project_details.get("phase", "unknown")[:30],
        "progress": project_details.get("progress_pct", 0),
        "current": current_work[:200],  # Compress description
        "files": compress_file_changes(files_changed),
        "blockers": project_details.get("blockers", [])[:5],
        "next": project_details.get("next_actions", [])[:10]
    }

    return state


def extract_key_decisions(text: str) -> List[Dict[str, str]]:
    """
    Extract key decisions from conversation text.

    Args:
        text: Conversation text or work description

    Returns:
        Array of decision objects with decision, rationale, impact, timestamp

    Example:
        >>> extract_key_decisions("Decided to use FastAPI for async support")
        [{
            "decision": "to use fastapi",
            "rationale": "async support",
            "impact": "medium",
            "timestamp": "2026-01-16T..."
        }]
    """
    decisions = []
    text_lower = text.lower()

    # Decision patterns
    patterns = [
        r"decid(?:ed|e)[:\s]+([^.\n]+?)(?:because|for|due to)[:\s]+([^.\n]+)",
        r"chose[:\s]+([^.\n]+?)(?:because|for|due to)[:\s]+([^.\n]+)",
        r"using[:\s]+([^.\n]+?)(?:because|for|due to)[:\s]+([^.\n]+)",
        r"will use[:\s]+([^.\n]+?)(?:because|for|due to)[:\s]+([^.\n]+)"
    ]

    for pattern in patterns:
        matches = re.findall(pattern, text_lower)
        for match in matches:
            decision = match[0].strip()[:100]
            rationale = match[1].strip()[:100]

            # Estimate impact based on keywords
            impact = "low"
            high_impact_keywords = ["architecture", "database", "framework", "major"]
            medium_impact_keywords = ["api", "endpoint", "feature", "integration"]

            if any(kw in decision.lower() or kw in rationale.lower()
                   for kw in high_impact_keywords):
                impact = "high"
            elif any(kw in decision.lower() or kw in rationale.lower()
                     for kw in medium_impact_keywords):
                impact = "medium"

            decisions.append({
                "decision": decision,
                "rationale": rationale,
                "impact": impact,
                "timestamp": datetime.now(timezone.utc).isoformat()
            })

    return decisions


def calculate_relevance_score(
    snippet: Dict[str, Any],
    current_time: Optional[datetime] = None
) -> float:
    """
    Calculate relevance score based on age, usage, tags, importance.

    Args:
        snippet: Snippet metadata with created_at, usage_count, importance, tags
        current_time: Optional current time for testing, defaults to now

    Returns:
        Float score 0.0-10.0 (higher = more relevant)

    Example:
        >>> snippet = {
        ...     "created_at": "2026-01-16T12:00:00Z",
        ...     "usage_count": 5,
        ...     "importance": 8,
        ...     "tags": ["critical", "fastapi"]
        ... }
        >>> calculate_relevance_score(snippet)
        9.2
    """
    if current_time is None:
        current_time = datetime.now(timezone.utc)

    # Parse created_at
    try:
        created_at = datetime.fromisoformat(snippet["created_at"].replace("Z", "+00:00"))
    except (ValueError, KeyError):
        created_at = current_time

    # Base score from importance (0-10)
    score = float(snippet.get("importance", 5))

    # Time decay - lose 0.1 points per day, max -2.0
    age_days = (current_time - created_at).total_seconds() / 86400
    time_penalty = min(2.0, age_days * 0.1)
    score -= time_penalty

    # Usage boost - add 0.2 per use, max +2.0
    usage_count = snippet.get("usage_count", 0)
    usage_boost = min(2.0, usage_count * 0.2)
    score += usage_boost

    # Tag boost for important tags
    important_tags = {"critical", "blocker", "decision", "architecture",
                      "security", "performance", "bug"}
    tags = set(snippet.get("tags", []))
    tag_boost = len(tags & important_tags) * 0.5  # 0.5 per important tag
    score += tag_boost

    # Recency boost if used recently
    last_used = snippet.get("last_used")
    if last_used:
        try:
            last_used_dt = datetime.fromisoformat(last_used.replace("Z", "+00:00"))
            hours_since_use = (current_time - last_used_dt).total_seconds() / 3600
            if hours_since_use < 24:  # Used in last 24h
                score += 1.0
        except (ValueError, AttributeError):
            pass

    # Clamp to 0.0-10.0
    return max(0.0, min(10.0, score))


def merge_contexts(contexts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Merge multiple context objects into single deduplicated context.

    Args:
        contexts: List of context objects to merge

    Returns:
        Single merged context with deduplicated, most recent info

    Example:
        >>> ctx1 = {"phase": "api_dev", "completed": ["auth"]}
        >>> ctx2 = {"phase": "api_dev", "completed": ["auth", "crud"]}
        >>> merge_contexts([ctx1, ctx2])
        {"phase": "api_dev", "completed": ["auth", "crud"], ...}
    """
    if not contexts:
        return {}

    merged = {
        "phase": None,
        "completed": [],
        "in_progress": None,
        "blockers": [],
        "decisions": [],
        "next": [],
        "files": [],
        "tags": []
    }

    # Collect all items
    completed_set = set()
    blocker_set = set()
    next_set = set()
    files_set = set()
    tags_set = set()
    decisions_list = []

    for ctx in contexts:
        # Keep the first non-null phase
        if ctx.get("phase") and not merged["phase"]:
            merged["phase"] = ctx["phase"]

        # Take most recent in_progress
        if ctx.get("in_progress"):
            merged["in_progress"] = ctx["in_progress"]

        # Collect completed
        for item in ctx.get("completed", []):
            if isinstance(item, str):
                completed_set.add(item)

        # Collect blockers
        for item in ctx.get("blockers", []):
            if isinstance(item, str):
                blocker_set.add(item)

        # Collect next actions
        for item in ctx.get("next", []):
            if isinstance(item, str):
                next_set.add(item)

        # Collect files
        for item in ctx.get("files", []):
            if isinstance(item, str):
                files_set.add(item)
            elif isinstance(item, dict) and "path" in item:
                files_set.add(item["path"])

        # Collect tags
        for item in ctx.get("tags", []):
            if isinstance(item, str):
                tags_set.add(item)

        # Collect decisions (keep all with timestamps)
        for decision in ctx.get("decisions", []):
            if isinstance(decision, dict):
                decisions_list.append(decision)

    # Sort decisions by timestamp (most recent first)
    decisions_list.sort(
        key=lambda d: d.get("timestamp", ""),
        reverse=True
    )

    merged["completed"] = sorted(list(completed_set))[:20]
    merged["blockers"] = sorted(list(blocker_set))[:10]
    merged["next"] = sorted(list(next_set))[:20]
    merged["files"] = sorted(list(files_set))[:30]
    merged["tags"] = sorted(list(tags_set))[:20]
    merged["decisions"] = decisions_list[:10]

    return merged


def format_for_injection(
    contexts: List[Dict[str, Any]],
    max_tokens: int = 1000
) -> str:
    """
    Format context objects for token-efficient prompt injection.

    Args:
        contexts: List of context objects from database (sorted by relevance)
        max_tokens: Approximate max tokens to use (rough estimate)

    Returns:
        Token-efficient markdown string for Claude prompt

    Example:
        >>> contexts = [{"content": "Use FastAPI", "tags": ["api"]}]
        >>> format_for_injection(contexts)
        "## Context Recall\\n\\n- Use FastAPI [api]\\n"
    """
    if not contexts:
        return ""

    lines = ["## Context Recall\n"]

    # Estimate ~4 chars per token
    max_chars = max_tokens * 4
    current_chars = len(lines[0])

    # Group by type
    by_type = defaultdict(list)
    for ctx in contexts:
        ctx_type = ctx.get("type", "general")
        by_type[ctx_type].append(ctx)

    # Priority order for types
    type_priority = ["blocker", "decision", "state", "pattern", "lesson", "general"]

    for ctx_type in type_priority:
        if ctx_type not in by_type:
            continue

        # Add type header
        header = f"\n**{ctx_type.title()}s:**\n"
        if current_chars + len(header) > max_chars:
            break
        lines.append(header)
        current_chars += len(header)

        # Add contexts of this type
        for ctx in by_type[ctx_type][:5]:  # Max 5 per type
            content = ctx.get("content", "")
            tags = ctx.get("tags", [])

            # Format with tags
            tag_str = f" [{', '.join(tags[:3])}]" if tags else ""
            line = f"- {content[:150]}{tag_str}\n"

            if current_chars + len(line) > max_chars:
                break

            lines.append(line)
            current_chars += len(line)

    # Add summary stats
    summary = f"\n*{len(contexts)} contexts loaded*\n"
    if current_chars + len(summary) <= max_chars:
        lines.append(summary)

    return "".join(lines)


def extract_tags_from_text(text: str) -> List[str]:
    """
    Auto-detect relevant tags from text content.

    Args:
        text: Content to extract tags from

    Returns:
        List of detected tags (technologies, patterns, categories)

    Example:
        >>> extract_tags_from_text("Using FastAPI with PostgreSQL")
        ["fastapi", "postgresql", "api", "database"]
    """
    text_lower = text.lower()
    tags = []

    # Technology keywords
    tech_keywords = {
        "fastapi": ["fastapi"],
        "postgresql": ["postgresql", "postgres", "psql"],
        "sqlalchemy": ["sqlalchemy", "orm"],
        "alembic": ["alembic", "migration"],
        "docker": ["docker", "container"],
        "redis": ["redis", "cache"],
        "nginx": ["nginx", "reverse proxy"],
        "python": ["python", "py"],
        "javascript": ["javascript", "js", "node"],
        "typescript": ["typescript", "ts"],
        "react": ["react", "jsx"],
        "vue": ["vue"],
        "api": ["api", "endpoint", "rest"],
        "database": ["database", "db", "sql"],
        "auth": ["auth", "authentication", "authorization"],
        "security": ["security", "encryption", "secure"],
        "testing": ["test", "pytest", "unittest"],
        "deployment": ["deploy", "deployment", "production"]
    }

    for tag, keywords in tech_keywords.items():
        if any(kw in text_lower for kw in keywords):
            tags.append(tag)

    # Pattern keywords
    pattern_keywords = {
        "async": ["async", "asynchronous", "await"],
        "crud": ["crud", "create", "read", "update", "delete"],
        "middleware": ["middleware"],
        "dependency-injection": ["dependency injection", "depends"],
        "error-handling": ["error", "exception", "try", "catch"],
        "validation": ["validation", "validate", "pydantic"],
        "optimization": ["optimize", "performance", "speed"],
        "refactor": ["refactor", "refactoring", "cleanup"]
    }

    for tag, keywords in pattern_keywords.items():
        if any(kw in text_lower for kw in keywords):
            tags.append(tag)

    # Category keywords
    category_keywords = {
        "critical": ["critical", "urgent", "important"],
        "blocker": ["blocker", "blocked", "blocking"],
        "bug": ["bug", "error", "issue", "problem"],
        "feature": ["feature", "enhancement", "add"],
        "architecture": ["architecture", "design", "structure"],
        "integration": ["integration", "integrate", "connect"]
    }

    for tag, keywords in category_keywords.items():
        if any(kw in text_lower for kw in keywords):
            tags.append(tag)

    # Deduplicate and return
    return list(dict.fromkeys(tags))  # Preserves order


def compress_file_changes(file_paths: List[str]) -> List[Dict[str, str]]:
    """
    Compress file change list into brief summaries.

    Args:
        file_paths: List of file paths that changed

    Returns:
        Compressed summary with path and inferred change type

    Example:
        >>> compress_file_changes(["api/auth.py", "tests/test_auth.py"])
        [
            {"path": "api/auth.py", "type": "api"},
            {"path": "tests/test_auth.py", "type": "test"}
        ]
    """
    compressed = []

    for path in file_paths[:50]:  # Limit to 50 files
        # Infer change type from path
        change_type = "other"

        path_lower = path.lower()
        if "test" in path_lower:
            change_type = "test"
        elif any(ext in path_lower for ext in [".py", ".js", ".ts", ".go", ".java"]):
            if "migration" in path_lower:
                change_type = "migration"
            elif "config" in path_lower or path_lower.endswith((".yaml", ".yml", ".json", ".toml")):
                change_type = "config"
            elif "model" in path_lower or "schema" in path_lower:
                change_type = "schema"
            elif "api" in path_lower or "endpoint" in path_lower or "route" in path_lower:
                change_type = "api"
            else:
                change_type = "impl"
        elif path_lower.endswith((".md", ".txt", ".rst")):
            change_type = "doc"
        elif "docker" in path_lower or "deploy" in path_lower:
            change_type = "infra"

        compressed.append({
            "path": path,
            "type": change_type
        })

    return compressed