claudetools/extract_license_plate.py
fee9cc01ac  Mike Swanson  2026-02-09 20:24:03 -07:00
sync: Auto-sync from ACG-M-L5090 at 2026-02-09

Synced files:
- ai-misconceptions-reading-list.md (radio show research)
- ai-misconceptions-radio-segments.md (distilled radio segments)
- extract_license_plate.py
- review_best_plates.py

Machine: ACG-M-L5090
Timestamp: 2026-02-09

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>


"""
Extract and enhance license plate from Tesla dash cam video
Target: Pickup truck at 25-30 seconds
"""
import cv2
import numpy as np
from pathlib import Path
from PIL import Image, ImageEnhance, ImageFilter
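
# Third-party dependencies (assumed to be installed in the environment this
# script runs in): opencv-python provides cv2, Pillow provides PIL, plus numpy.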


def extract_frames_from_range(video_path, start_time, end_time, fps=10):
    """Extract frames from specific time range at given fps"""
    cap = cv2.VideoCapture(str(video_path))
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    frames = []
    timestamps = []

    # Calculate frame numbers for the time range
    start_frame = int(start_time * video_fps)
    end_frame = int(end_time * video_fps)
    frame_interval = int(video_fps / fps)

    print(f"[INFO] Video FPS: {video_fps}")
    print(f"[INFO] Extracting frames {start_frame} to {end_frame} every {frame_interval} frames")

    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    current_frame = start_frame

    while current_frame <= end_frame:
        ret, frame = cap.read()
        if not ret:
            break

        if (current_frame - start_frame) % frame_interval == 0:
            timestamp = current_frame / video_fps
            frames.append(frame)
            timestamps.append(timestamp)
            print(f"[OK] Extracted frame at {timestamp:.2f}s (frame {current_frame})")

        current_frame += 1

    cap.release()
    return frames, timestamps
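

# Optional sanity check (a sketch, not part of the original workflow): this
# assumed helper reports the clip length so a requested start_time/end_time
# window can be validated before extraction. Frame counts from container
# metadata are not always exact, so treat the result as approximate.
def clip_duration_seconds(video_path):
    """Return the approximate clip length in seconds (0.0 if the clip won't open)."""
    cap = cv2.VideoCapture(str(video_path))
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        return frame_count / fps if fps else 0.0
    finally:
        cap.release()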


def detect_license_plates(frame):
    """Detect potential license plate regions via edge detection and contour heuristics"""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Edge detection + contour filtering
    edges = cv2.Canny(gray, 50, 200)
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    plate_candidates = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        aspect_ratio = w / float(h) if h > 0 else 0
        area = w * h

        # License plate characteristics: aspect ratio ~2-5, reasonable size
        if 1.5 < aspect_ratio < 6 and 1000 < area < 50000:
            plate_candidates.append({
                'bbox': (x, y, w, h),
                'aspect_ratio': aspect_ratio,
                'area': area,
                'score': area * aspect_ratio  # Simple scoring
            })

    # Sort by score and return top candidates
    plate_candidates.sort(key=lambda x: x['score'], reverse=True)
    return plate_candidates[:10]  # Return top 10 candidates
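

# Alternative detector (a sketch, not called from main()): opencv-python ships a
# Haar cascade trained on license plates, which can be used to cross-check the
# contour heuristic above. Assumes the bundled cv2.data haarcascade files are
# present in the installed OpenCV package.
def detect_plates_cascade(frame):
    cascade_path = cv2.data.haarcascades + "haarcascade_russian_plate_number.xml"
    cascade = cv2.CascadeClassifier(cascade_path)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detectMultiScale returns (x, y, w, h) boxes, or an empty tuple if nothing is found
    return cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)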


def enhance_license_plate(plate_img, upscale_factor=6):
    """Apply multiple enhancement techniques to license plate image"""
    enhanced_versions = []

    # Convert to PIL for some operations
    plate_pil = Image.fromarray(cv2.cvtColor(plate_img, cv2.COLOR_BGR2RGB))

    # 1. Upscale first
    new_size = (plate_pil.width * upscale_factor, plate_pil.height * upscale_factor)
    upscaled = plate_pil.resize(new_size, Image.Resampling.LANCZOS)
    enhanced_versions.append(("upscaled", upscaled))

    # 2. Sharpen heavily
    sharpened = upscaled.filter(ImageFilter.SHARPEN)
    sharpened = sharpened.filter(ImageFilter.SHARPEN)
    enhanced_versions.append(("sharpened", sharpened))

    # 3. High contrast
    contrast = ImageEnhance.Contrast(sharpened)
    high_contrast = contrast.enhance(2.5)
    enhanced_versions.append(("high_contrast", high_contrast))

    # 4. Brightness adjustment
    brightness = ImageEnhance.Brightness(high_contrast)
    bright = brightness.enhance(1.3)
    enhanced_versions.append(("bright_contrast", bright))

    # 5. Adaptive thresholding (OpenCV)
    gray_cv = cv2.cvtColor(np.array(upscaled), cv2.COLOR_RGB2GRAY)
    adaptive = cv2.adaptiveThreshold(gray_cv, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY, 11, 2)
    enhanced_versions.append(("adaptive_thresh", Image.fromarray(adaptive)))

    # 6. Bilateral filter + sharpen
    bilateral = cv2.bilateralFilter(np.array(upscaled), 9, 75, 75)
    bilateral_pil = Image.fromarray(bilateral)
    bilateral_sharp = bilateral_pil.filter(ImageFilter.SHARPEN)
    enhanced_versions.append(("bilateral_sharp", bilateral_sharp))

    # 7. Unsharp mask
    unsharp = upscaled.filter(ImageFilter.UnsharpMask(radius=2, percent=200, threshold=3))
    enhanced_versions.append(("unsharp_mask", unsharp))

    # 8. Extreme sharpening
    extreme_sharp = sharpened.filter(ImageFilter.SHARPEN)
    extreme_sharp = extreme_sharp.filter(ImageFilter.UnsharpMask(radius=3, percent=250, threshold=2))
    enhanced_versions.append(("extreme_sharp", extreme_sharp))

    return enhanced_versions
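

# One more enhancement worth trying (a sketch, not wired into enhance_license_plate):
# CLAHE (contrast-limited adaptive histogram equalization) often pulls more detail
# out of dim, low-contrast dash-cam crops than a global contrast boost.
def enhance_with_clahe(plate_img, upscale_factor=6):
    gray = cv2.cvtColor(plate_img, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, None, fx=upscale_factor, fy=upscale_factor,
                      interpolation=cv2.INTER_LANCZOS4)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    return Image.fromarray(clahe.apply(gray))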


def main():
    video_path = Path("E:/TeslaCam/SavedClips/2026-02-03_19-48-23/2026-02-03_19-42-36-front.mp4")
    output_dir = Path("D:/Scratchpad/pickup_truck_25-30s")
    output_dir.mkdir(parents=True, exist_ok=True)

    print(f"[INFO] Processing video: {video_path}")
    print(f"[INFO] Output directory: {output_dir}")

    # Extract frames from 25-30 second range at 10 fps
    start_time = 25.0
    end_time = 30.0
    target_fps = 10

    frames, timestamps = extract_frames_from_range(video_path, start_time, end_time, target_fps)
    print(f"[OK] Extracted {len(frames)} frames")

    # Process each frame
    all_plates = []
    for idx, (frame, timestamp) in enumerate(zip(frames, timestamps)):
        frame_name = f"frame_{timestamp:.2f}s"

        # Save original frame
        frame_path = output_dir / f"{frame_name}_original.jpg"
        cv2.imwrite(str(frame_path), frame)

        # Detect license plates
        plate_candidates = detect_license_plates(frame)
        print(f"[INFO] Frame {timestamp:.2f}s: Found {len(plate_candidates)} plate candidates")

        # Process each candidate
        for plate_idx, candidate in enumerate(plate_candidates[:5]):  # Top 5 candidates
            x, y, w, h = candidate['bbox']

            # Extract plate region with some padding
            padding = 10
            x1 = max(0, x - padding)
            y1 = max(0, y - padding)
            x2 = min(frame.shape[1], x + w + padding)
            y2 = min(frame.shape[0], y + h + padding)

            plate_crop = frame[y1:y2, x1:x2]
            if plate_crop.size == 0:
                continue

            # Draw bounding box on original frame
            frame_with_box = frame.copy()
            cv2.rectangle(frame_with_box, (x, y), (x+w, y+h), (0, 255, 0), 2)
            cv2.putText(frame_with_box, f"Candidate {plate_idx+1}", (x, y-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Save frame with detection box
            detection_path = output_dir / f"{frame_name}_detection_{plate_idx+1}.jpg"
            cv2.imwrite(str(detection_path), frame_with_box)

            # Save raw crop
            crop_path = output_dir / f"{frame_name}_plate_{plate_idx+1}_raw.jpg"
            cv2.imwrite(str(crop_path), plate_crop)

            # Enhance plate
            enhanced_versions = enhance_license_plate(plate_crop, upscale_factor=6)
            for enhance_name, enhanced_img in enhanced_versions:
                enhance_path = output_dir / f"{frame_name}_plate_{plate_idx+1}_{enhance_name}.jpg"
                enhanced_img.save(str(enhance_path))

            all_plates.append({
                'timestamp': timestamp,
                'candidate_idx': plate_idx,
                'bbox': (x, y, w, h),
                'aspect_ratio': candidate['aspect_ratio'],
                'area': candidate['area']
            })

            print(f"[OK] Saved candidate {plate_idx+1} from {timestamp:.2f}s (AR: {candidate['aspect_ratio']:.2f}, Area: {candidate['area']})")

    # Create summary
    summary_path = output_dir / "summary.txt"
    with open(summary_path, 'w') as f:
        f.write("License Plate Extraction Summary\n")
        f.write("=" * 60 + "\n\n")
        f.write(f"Video: {video_path}\n")
        f.write(f"Time Range: {start_time}-{end_time} seconds\n")
        f.write(f"Frames Extracted: {len(frames)}\n")
        f.write(f"Total Plate Candidates: {len(all_plates)}\n\n")
        f.write("Candidates by Frame:\n")
        f.write("-" * 60 + "\n")
        for plate in all_plates:
            f.write(f"Time: {plate['timestamp']:.2f}s | ")
            f.write(f"Candidate #{plate['candidate_idx']+1} | ")
            f.write(f"Aspect Ratio: {plate['aspect_ratio']:.2f} | ")
            f.write(f"Area: {plate['area']}\n")
        f.write("\n" + "=" * 60 + "\n")
        f.write("Enhancement Techniques Applied:\n")
        f.write("- Upscaled 6x (LANCZOS)\n")
        f.write("- Heavy sharpening\n")
        f.write("- High contrast boost\n")
        f.write("- Brightness adjustment\n")
        f.write("- Adaptive thresholding\n")
        f.write("- Bilateral filtering\n")
        f.write("- Unsharp masking\n")
        f.write("- Extreme sharpening\n")

    print("\n[SUCCESS] Processing complete!")
    print(f"[INFO] Output directory: {output_dir}")
    print(f"[INFO] Total plate candidates processed: {len(all_plates)}")
    print(f"[INFO] Summary saved to: {summary_path}")


if __name__ == "__main__":
    main()
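

# Usage note: run `python extract_license_plate.py` on the machine with the
# TeslaCam drive mounted. The clip path, output directory, and the 25-30s time
# window are hardcoded in main(); edit start_time/end_time there to target a
# different segment, or point video_path at another saved clip.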