# test_final/api/drawing.py
# k22056537
# feat: sync integration updates across app and ML pipeline
# eb4abb8
"""Server-side face mesh and HUD drawing for WebRTC/WS video frames."""
from __future__ import annotations
import cv2
import numpy as np
from mediapipe.tasks.python.vision import FaceLandmarksConnections
from models.face_mesh import FaceMeshDetector
# HUD font for all text overlays.
_FONT = cv2.FONT_HERSHEY_SIMPLEX
# Color constants in OpenCV's BGR channel order.
_CYAN = (255, 255, 0)
_GREEN = (0, 255, 0)
_MAGENTA = (255, 0, 255)
_ORANGE = (0, 165, 255)
_RED = (0, 0, 255)
_WHITE = (255, 255, 255)
_LIGHT_GREEN = (144, 238, 144)
# MediaPipe connection lists flattened into (start, end) landmark-index pairs.
_TESSELATION_CONNS = [(c.start, c.end) for c in FaceLandmarksConnections.FACE_LANDMARKS_TESSELATION]
_CONTOUR_CONNS = [(c.start, c.end) for c in FaceLandmarksConnections.FACE_LANDMARKS_CONTOURS]
# MediaPipe face-mesh landmark indices for individual facial features.
_LEFT_EYEBROW = [70, 63, 105, 66, 107, 55, 65, 52, 53, 46]
_RIGHT_EYEBROW = [300, 293, 334, 296, 336, 285, 295, 282, 283, 276]
_NOSE_BRIDGE = [6, 197, 195, 5, 4, 1, 19, 94, 2]
# Lip rings: first index repeated at the end so the polyline closes.
_LIPS_OUTER = [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 409, 270, 269, 267, 0, 37, 39, 40, 185, 61]
_LIPS_INNER = [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308, 415, 310, 311, 312, 13, 82, 81, 80, 191, 78]
# Landmarks drawn as dots — presumably the 6 points used per eye for the EAR
# (eye aspect ratio) computation elsewhere; TODO confirm against the detector.
_LEFT_EAR_POINTS = [33, 160, 158, 133, 153, 145]
_RIGHT_EAR_POINTS = [362, 385, 387, 263, 373, 380]
def _lm_px(lm: np.ndarray, idx: int, w: int, h: int) -> tuple[int, int]:
return (int(lm[idx, 0] * w), int(lm[idx, 1] * h))
def _draw_polyline(
    frame: np.ndarray, lm: np.ndarray, indices: list[int], w: int, h: int, color: tuple, thickness: int
) -> None:
    """Draw an open anti-aliased polyline through the given landmark indices on *frame*."""
    pts = [_lm_px(lm, i, w, h) for i in indices]
    # Connect each point to its successor (pairwise over the point list).
    for p0, p1 in zip(pts, pts[1:]):
        cv2.line(frame, p0, p1, color, thickness, cv2.LINE_AA)
def _draw_tesselation(frame: np.ndarray, lm: np.ndarray, w: int, h: int) -> None:
    """Blend the dense tessellation wireframe onto *frame* at 30% opacity."""
    overlay = frame.copy()
    for s, e in _TESSELATION_CONNS:
        cv2.line(overlay, _lm_px(lm, s, w, h), _lm_px(lm, e, w, h), (200, 200, 200), 1, cv2.LINE_AA)
    # Blend in-place: frame = 0.3*overlay + 0.7*frame.
    cv2.addWeighted(overlay, 0.3, frame, 0.7, 0, frame)


def _draw_iris_and_gaze(
    frame: np.ndarray, lm: np.ndarray, w: int, h: int, iris_idx, eye_inner: int, eye_outer: int
) -> None:
    """Draw one iris circle, its center dot, and an exaggerated gaze ray.

    *eye_inner*/*eye_outer* are the landmark indices of the eye corners; the gaze
    ray is the iris-center offset from the corner midpoint, scaled 3x.
    """
    iris_pts = np.array([_lm_px(lm, i, w, h) for i in iris_idx], dtype=np.int32)
    # First iris landmark is treated as the center, the next four as rim points.
    center = iris_pts[0]
    if len(iris_pts) >= 5:
        radii = [np.linalg.norm(iris_pts[j] - center) for j in range(1, 5)]
        radius = max(int(np.mean(radii)), 2)  # clamp so the circle stays visible
        cv2.circle(frame, tuple(center), radius, _MAGENTA, 2, cv2.LINE_AA)
    cv2.circle(frame, tuple(center), 2, _WHITE, -1, cv2.LINE_AA)
    eye_cx = int((lm[eye_inner, 0] + lm[eye_outer, 0]) / 2.0 * w)
    eye_cy = int((lm[eye_inner, 1] + lm[eye_outer, 1]) / 2.0 * h)
    dx, dy = center[0] - eye_cx, center[1] - eye_cy
    cv2.line(
        frame,
        tuple(center),
        (int(center[0] + dx * 3), int(center[1] + dy * 3)),
        _RED,
        1,
        cv2.LINE_AA,
    )


def draw_face_mesh(frame: np.ndarray, lm: np.ndarray, w: int, h: int) -> None:
    """Draw tessellation, contours, eyebrows, nose, lips, eyes, irises, gaze lines on frame.

    Args:
        frame: BGR image to draw on (modified in place).
        lm: array of normalized landmarks; lm[i, 0]/lm[i, 1] are x/y in [0, 1].
        w: frame width in pixels.
        h: frame height in pixels.
    """
    _draw_tesselation(frame, lm, w, h)
    # Face contour on top of the blended tessellation.
    for s, e in _CONTOUR_CONNS:
        cv2.line(frame, _lm_px(lm, s, w, h), _lm_px(lm, e, w, h), _CYAN, 1, cv2.LINE_AA)
    _draw_polyline(frame, lm, _LEFT_EYEBROW, w, h, _LIGHT_GREEN, 2)
    _draw_polyline(frame, lm, _RIGHT_EYEBROW, w, h, _LIGHT_GREEN, 2)
    _draw_polyline(frame, lm, _NOSE_BRIDGE, w, h, _ORANGE, 1)
    _draw_polyline(frame, lm, _LIPS_OUTER, w, h, _MAGENTA, 1)
    _draw_polyline(frame, lm, _LIPS_INNER, w, h, (200, 0, 200), 1)
    # Closed eye outlines (same drawing for both eyes).
    for eye_indices in (FaceMeshDetector.LEFT_EYE_INDICES, FaceMeshDetector.RIGHT_EYE_INDICES):
        pts = np.array([_lm_px(lm, i, w, h) for i in eye_indices], dtype=np.int32)
        cv2.polylines(frame, [pts], True, _GREEN, 2, cv2.LINE_AA)
    # Per-eye landmark dots.
    for idx in _LEFT_EAR_POINTS + _RIGHT_EAR_POINTS:
        cv2.circle(frame, _lm_px(lm, idx, w, h), 3, (0, 255, 255), -1, cv2.LINE_AA)
    # Irises and gaze rays (eye-corner landmark indices per eye).
    for iris_idx, inner, outer in (
        (FaceMeshDetector.LEFT_IRIS_INDICES, 133, 33),
        (FaceMeshDetector.RIGHT_IRIS_INDICES, 362, 263),
    ):
        _draw_iris_and_gaze(frame, lm, w, h, iris_idx, inner, outer)
def draw_hud(frame: np.ndarray, result: dict, model_name: str) -> None:
    """Draw status bar and detail overlay (FOCUSED/NOT FOCUSED, conf, s_face, s_eye, MAR, yawn)."""
    h, w = frame.shape[:2]
    if result["is_focused"]:
        status, color = "FOCUSED", _GREEN
    else:
        status, color = "NOT FOCUSED", _RED
    # Black bar across the top holds the status line and model name.
    cv2.rectangle(frame, (0, 0), (w, 55), (0, 0, 0), -1)
    cv2.putText(frame, status, (10, 28), _FONT, 0.8, color, 2, cv2.LINE_AA)
    cv2.putText(frame, model_name.upper(), (w - 150, 28), _FONT, 0.45, _WHITE, 1, cv2.LINE_AA)
    # Confidence: prefer the MLP probability, fall back to the raw score.
    conf = result.get("mlp_prob", result.get("raw_score", 0.0))
    sf = result.get("s_face", 0)
    se = result.get("s_eye", 0)
    detail = f"conf:{conf:.2f} S_face:{sf:.2f} S_eye:{se:.2f}"
    if result.get("mar") is not None:
        detail += f" MAR:{result['mar']:.2f}"
    cv2.putText(frame, detail, (10, 48), _FONT, 0.4, _WHITE, 1, cv2.LINE_AA)
    if result.get("yaw") is not None:
        pose = f"yaw:{result['yaw']:+.0f} pitch:{result['pitch']:+.0f} roll:{result['roll']:+.0f}"
        cv2.putText(frame, pose, (w - 280, 48), _FONT, 0.4, (180, 180, 180), 1, cv2.LINE_AA)
    if result.get("is_yawning"):
        cv2.putText(frame, "YAWN", (10, 75), _FONT, 0.7, _ORANGE, 2, cv2.LINE_AA)
def get_tesselation_connections() -> list[tuple[int, int]]:
    """Return tessellation edge pairs for client-side face mesh (cached by client)."""
    # Hand out a copy so callers cannot mutate the module-level list.
    return _TESSELATION_CONNS.copy()