Completely removed the database context recall system while preserving database tables for safety. This major cleanup removes 80+ files and 16,831 lines of code.

What was removed:
- API layer: 4 routers (conversation-contexts, context-snippets, project-states, decision-logs) with 35+ endpoints
- Database models: 5 models (ConversationContext, ContextSnippet, DecisionLog, ProjectState, ContextTag)
- Services: 4 service layers with business logic
- Schemas: 4 Pydantic schema files
- Claude Code hooks: 13 hook files (user-prompt-submit, task-complete, sync-contexts, periodic saves)
- Scripts: 15+ scripts (import, migration, testing, tombstone checking)
- Tests: 5 test files (context recall, compression, diagnostics)
- Documentation: 30+ markdown files (guides, architecture, quick starts)
- Utilities: context compression, conversation parsing

Files modified:
- api/main.py: Removed router registrations
- api/models/__init__.py: Removed model imports
- api/schemas/__init__.py: Removed schema imports
- api/services/__init__.py: Removed service imports
- .claude/claude.md: Completely rewritten without context references

Database tables preserved:
- conversation_contexts, context_snippets, context_tags, project_states, decision_logs (5 orphaned tables remain for safety)
- Migration created but NOT applied: 20260118_172743_remove_context_system.py
- Tables can be dropped later when confirmed not needed

New files added:
- CONTEXT_SYSTEM_REMOVAL_SUMMARY.md: Detailed removal report
- CONTEXT_SYSTEM_REMOVAL_COMPLETE.md: Final status
- CONTEXT_EXPORT_RESULTS.md: Export attempt results
- scripts/export-tombstoned-contexts.py: Export tool for future use
- migrations/versions/20260118_172743_remove_context_system.py

Impact:
- Reduced from 130 to 95 API endpoints
- Reduced from 43 to 38 active database tables
- Removed 16,831 lines of code
- System fully operational without context recall

Reason for removal:
- System was not actively used (no tombstoned contexts found)
- Reduces codebase complexity
- Focuses on core MSP work tracking functionality
- Database preserved for safety (can rollback if needed)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
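For reference, dropping the five preserved tables later would be a small Alembic migration. The sketch below is illustrative only, assuming straightforward foreign keys from the child tables to conversation_contexts; it is not the contents of the actual migrations/versions/20260118_172743_remove_context_system.py, and revision identifiers are omitted.

    # Hypothetical sketch of a drop-table migration (do not apply until the
    # orphaned context tables are confirmed unused).
    from alembic import op

    def upgrade() -> None:
        # Drop assumed child tables before their parent to satisfy foreign keys.
        op.drop_table("context_snippets")
        op.drop_table("context_tags")
        op.drop_table("decision_logs")
        op.drop_table("project_states")
        op.drop_table("conversation_contexts")

    def downgrade() -> None:
        # Recreating the dropped tables is out of scope for this sketch.
        raise NotImplementedError("Restore the context tables from a backup instead.")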
scripts/export-tombstoned-contexts.py (412 lines, 12 KiB, Python)
"""
|
|
Export Tombstoned Contexts Before Removal
|
|
|
|
This script exports all conversation contexts referenced by tombstone files
|
|
and any additional contexts in the database to markdown files before the
|
|
context system is removed from ClaudeTools.
|
|
|
|
Features:
|
|
- Finds all *.tombstone.json files
|
|
- Extracts context_ids from tombstones
|
|
- Retrieves contexts from database via API
|
|
- Exports to markdown files organized by project/date
|
|
- Handles cases where no tombstones or contexts exist
|
|
|
|
Usage:
|
|
# Export all tombstoned contexts
|
|
python scripts/export-tombstoned-contexts.py
|
|
|
|
# Specify custom output directory
|
|
python scripts/export-tombstoned-contexts.py --output exported-contexts
|
|
|
|
# Include all database contexts (not just tombstoned ones)
|
|
python scripts/export-tombstoned-contexts.py --export-all
|
|
"""
|
|
|
|
import argparse
|
|
import json
|
|
import sys
|
|
from datetime import datetime
|
|
from pathlib import Path
|
|
from typing import Dict, List, Optional, Any
|
|
|
|
import requests
|
|
from dotenv import load_dotenv
|
|
import os
|
|
|
|
|
|
# Constants
|
|
DEFAULT_API_URL = "http://172.16.3.30:8001"
|
|
DEFAULT_OUTPUT_DIR = Path("D:/ClaudeTools/exported-contexts")
|
|
IMPORTED_CONVERSATIONS_DIR = Path("D:/ClaudeTools/imported-conversations")
|
|
|
|
# Load environment variables
|
|
load_dotenv()
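# Authentication inputs (used by get_jwt_token below): set JWT_TOKEN directly,
# or provide API_USER_EMAIL / API_USER_PASSWORD via the environment or a .env
# file loaded by load_dotenv(); otherwise the hard-coded defaults are used.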


def print_status(message: str, status: str = "INFO") -> None:
    """Print formatted status message."""
    markers = {
        "INFO": "[INFO]",
        "SUCCESS": "[OK]",
        "WARNING": "[WARNING]",
        "ERROR": "[ERROR]"
    }
    print(f"{markers.get(status, '[INFO]')} {message}")


def get_jwt_token(api_url: str) -> Optional[str]:
    """
    Get JWT token from environment or API.

    Args:
        api_url: Base URL for API

    Returns:
        JWT token or None if failed
    """
    token = os.getenv("JWT_TOKEN")
    if token:
        return token

    email = os.getenv("API_USER_EMAIL", "admin@claudetools.local")
    password = os.getenv("API_USER_PASSWORD", "claudetools123")

    try:
        response = requests.post(
            f"{api_url}/api/auth/token",
            data={"username": email, "password": password}
        )
        response.raise_for_status()
        return response.json()["access_token"]
    except Exception as e:
        print_status(f"Failed to get JWT token: {e}", "ERROR")
        return None


def find_tombstone_files(base_dir: Path) -> List[Path]:
    """Find all tombstone files."""
    if not base_dir.exists():
        return []
    return sorted(base_dir.rglob("*.tombstone.json"))
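

# Assumed tombstone layout (inferred from the fields read below): each
# *.tombstone.json file is expected to contain at least a "context_ids" list,
# e.g. {"context_ids": ["<uuid-1>", "<uuid-2>"]}; any other keys are ignored.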


def extract_context_ids_from_tombstones(tombstone_files: List[Path]) -> List[str]:
    """
    Extract all context IDs from tombstone files.

    Args:
        tombstone_files: List of tombstone file paths

    Returns:
        List of unique context IDs
    """
    context_ids = set()

    for tombstone_path in tombstone_files:
        try:
            with open(tombstone_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            ids = data.get("context_ids", [])
            context_ids.update(ids)
        except Exception as e:
            print_status(f"Failed to read {tombstone_path.name}: {e}", "WARNING")

    return list(context_ids)


def fetch_context_from_api(
    context_id: str,
    api_url: str,
    jwt_token: str
) -> Optional[Dict[str, Any]]:
    """
    Fetch a single context from the API.

    Args:
        context_id: Context UUID
        api_url: API base URL
        jwt_token: JWT authentication token

    Returns:
        Context data dict or None if failed
    """
    try:
        headers = {"Authorization": f"Bearer {jwt_token}"}
        response = requests.get(
            f"{api_url}/api/conversation-contexts/{context_id}",
            headers=headers
        )

        if response.status_code == 200:
            return response.json()
        elif response.status_code == 404:
            print_status(f"Context {context_id} not found in database", "WARNING")
        else:
            print_status(
                f"Failed to fetch context {context_id}: HTTP {response.status_code}",
                "WARNING"
            )

    except Exception as e:
        print_status(f"Error fetching context {context_id}: {e}", "WARNING")

    return None
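

# The list endpoint's response shape is not guaranteed here: fetch_all_contexts
# accepts either a bare JSON array or an {"items": [...]} envelope, and pages
# through results with offset/limit until a short (or empty) batch is returned.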


def fetch_all_contexts(api_url: str, jwt_token: str) -> List[Dict[str, Any]]:
    """
    Fetch all contexts from the API.

    Args:
        api_url: API base URL
        jwt_token: JWT authentication token

    Returns:
        List of context data dicts
    """
    contexts = []
    headers = {"Authorization": f"Bearer {jwt_token}"}

    try:
        # Fetch paginated results
        offset = 0
        limit = 100

        while True:
            response = requests.get(
                f"{api_url}/api/conversation-contexts",
                headers=headers,
                params={"offset": offset, "limit": limit}
            )

            if response.status_code != 200:
                print_status(f"Failed to fetch contexts: HTTP {response.status_code}", "ERROR")
                break

            data = response.json()

            # Handle different response formats
            if isinstance(data, list):
                batch = data
            elif isinstance(data, dict) and "items" in data:
                batch = data["items"]
            else:
                batch = []

            if not batch:
                break

            contexts.extend(batch)
            offset += len(batch)

            # Check if we've fetched all
            if len(batch) < limit:
                break

    except Exception as e:
        print_status(f"Error fetching all contexts: {e}", "ERROR")

    return contexts
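

# Output layout produced below: <output>/<YYYY-MM>/<first-8-chars-of-id>_<sanitized-title>.md;
# contexts whose created_at cannot be parsed are written under <output>/undated.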


def export_context_to_markdown(
    context: Dict[str, Any],
    output_dir: Path
) -> Optional[Path]:
    """
    Export a single context to a markdown file.

    Args:
        context: Context data dict
        output_dir: Output directory

    Returns:
        Path to exported file or None if failed
    """
    try:
        # Extract context data
        context_id = context.get("id", "unknown")
        title = context.get("title", "Untitled")
        context_type = context.get("context_type", "unknown")
        created_at = context.get("created_at", "unknown")

        # Parse date for organization
        try:
            dt = datetime.fromisoformat(created_at.replace("Z", "+00:00"))
            date_dir = output_dir / dt.strftime("%Y-%m")
        except (ValueError, AttributeError, TypeError):
            date_dir = output_dir / "undated"

        date_dir.mkdir(parents=True, exist_ok=True)

        # Create safe filename
        safe_title = "".join(c if c.isalnum() or c in (' ', '-', '_') else '_' for c in title)
        safe_title = safe_title[:50]  # Limit length
        filename = f"{context_id[:8]}_{safe_title}.md"
        output_path = date_dir / filename

        # Build markdown content
        markdown = f"""# {title}

**Type:** {context_type}
**Created:** {created_at}
**Context ID:** {context_id}

---

## Summary

{context.get('dense_summary', 'No summary available')}

---

## Key Decisions

{context.get('key_decisions', 'No key decisions recorded')}

---

## Current State

{context.get('current_state', 'No current state recorded')}

---

## Tags

{context.get('tags', 'No tags')}

---

## Metadata

- **Session ID:** {context.get('session_id', 'N/A')}
- **Project ID:** {context.get('project_id', 'N/A')}
- **Machine ID:** {context.get('machine_id', 'N/A')}
- **Relevance Score:** {context.get('relevance_score', 'N/A')}

---

*Exported on {datetime.now().isoformat()}*
"""

        # Write to file
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(markdown)

        return output_path

    except Exception as e:
        print_status(f"Failed to export context {context.get('id', 'unknown')}: {e}", "ERROR")
        return None
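

# End-to-end flow in main(): authenticate, locate tombstone files, resolve
# context IDs, fetch each context from the API (or all of them with
# --export-all), write one markdown file per context, then print a summary.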


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(
        description="Export tombstoned contexts before removal"
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=DEFAULT_OUTPUT_DIR,
        help=f"Output directory (default: {DEFAULT_OUTPUT_DIR})"
    )
    parser.add_argument(
        "--api-url",
        default=DEFAULT_API_URL,
        help=f"API base URL (default: {DEFAULT_API_URL})"
    )
    parser.add_argument(
        "--export-all",
        action="store_true",
        help="Export ALL database contexts, not just tombstoned ones"
    )

    args = parser.parse_args()

    print_status("=" * 80, "INFO")
    print_status("ClaudeTools Context Export Tool", "INFO")
    print_status("=" * 80, "INFO")
    print_status(f"Output directory: {args.output}", "INFO")
    print_status(f"Export all contexts: {'YES' if args.export_all else 'NO'}", "INFO")
    print_status("=" * 80, "INFO")

    # Create output directory
    args.output.mkdir(parents=True, exist_ok=True)

    # Get JWT token
    print_status("\nAuthenticating with API...", "INFO")
    jwt_token = get_jwt_token(args.api_url)
    if not jwt_token:
        print_status("Cannot proceed without API access", "ERROR")
        sys.exit(1)

    print_status("Authentication successful", "SUCCESS")

    # Find tombstone files
    print_status("\nSearching for tombstone files...", "INFO")
    tombstone_files = find_tombstone_files(IMPORTED_CONVERSATIONS_DIR)
    print_status(f"Found {len(tombstone_files)} tombstone files", "INFO")

    # Extract context IDs from tombstones
    context_ids = []
    if tombstone_files:
        print_status("\nExtracting context IDs from tombstones...", "INFO")
        context_ids = extract_context_ids_from_tombstones(tombstone_files)
        print_status(f"Found {len(context_ids)} unique context IDs in tombstones", "INFO")

    # Fetch contexts
    contexts = []

    if args.export_all:
        print_status("\nFetching ALL contexts from database...", "INFO")
        contexts = fetch_all_contexts(args.api_url, jwt_token)
        print_status(f"Retrieved {len(contexts)} total contexts", "INFO")
    elif context_ids:
        print_status("\nFetching tombstoned contexts from database...", "INFO")
        for i, context_id in enumerate(context_ids, 1):
            print_status(f"Fetching context {i}/{len(context_ids)}: {context_id}", "INFO")
            context = fetch_context_from_api(context_id, args.api_url, jwt_token)
            if context:
                contexts.append(context)
        print_status(f"Successfully retrieved {len(contexts)} contexts", "INFO")
    else:
        print_status("\nNo tombstone files found and --export-all not specified", "WARNING")
        print_status("Attempting to fetch all database contexts anyway...", "INFO")
        contexts = fetch_all_contexts(args.api_url, jwt_token)
        if contexts:
            print_status(f"Retrieved {len(contexts)} contexts from database", "INFO")

    # Export contexts to markdown
    if not contexts:
        print_status("\nNo contexts to export", "WARNING")
        print_status("This is normal if the context system was never used", "INFO")
        return

    print_status(f"\nExporting {len(contexts)} contexts to markdown...", "INFO")
    exported_count = 0

    for i, context in enumerate(contexts, 1):
        print_status(f"Exporting {i}/{len(contexts)}: {context.get('title', 'Untitled')}", "INFO")
        output_path = export_context_to_markdown(context, args.output)
        if output_path:
            exported_count += 1

    # Summary
    print_status("\n" + "=" * 80, "INFO")
    print_status("EXPORT SUMMARY", "INFO")
    print_status("=" * 80, "INFO")
    print_status(f"Tombstone files found: {len(tombstone_files)}", "INFO")
    print_status(f"Contexts retrieved: {len(contexts)}", "INFO")
    print_status(f"Contexts exported: {exported_count}", "SUCCESS")
    print_status(f"Output directory: {args.output}", "INFO")
    print_status("=" * 80, "INFO")

    if exported_count > 0:
        print_status(f"\n[SUCCESS] Exported {exported_count} contexts to {args.output}", "SUCCESS")
    else:
        print_status("\n[WARNING] No contexts were exported", "WARNING")


if __name__ == "__main__":
    main()