Skip to content

Python Examples

Complete Python examples for integrating Moveris API (v2).

In plain terms

These examples show how to send video frames from Python to the Moveris API. Use them for server-side liveness checks (e.g., processing uploaded videos) or backend proxies that receive frames from a frontend.

Moveris API (v2)

These examples use Moveris API (v2) at https://api.moveris.com

Model selection

Examples use the v1 flow (the model name is passed in the request body). For v2 model resolution, send the X-Model-Version: latest header and include frame_count in the request body instead. See Model Versioning & Frames.

REST API Example

import base64
import uuid
import cv2
import requests

API_KEY = "sk-your-api-key"
BASE_URL = "https://api.moveris.com"

def check_liveness(video_path: str, model: str = "mixed-10-v2") -> dict:
    """Run a liveness check on a video via the Moveris fast-check endpoint.

    Args:
        video_path: Path to the video file to analyze.
        model: Model identifier; determines the required frame count
            (e.g. 10 for mixed-10-v2, 30 for mixed-30-v2).

    Returns:
        The ``data`` object from the API response (verdict, score, ...).

    Raises:
        ValueError: If the video cannot be opened or yields fewer frames
            than the selected model requires.
        RuntimeError: If the API reports a failure.
    """
    # Frame count must match the model's expectation.
    frame_count = 10 if "mixed-10" in model else (30 if "mixed-30" in model else 10)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Cannot open video: {video_path}")

    frames = []
    try:
        for i in range(frame_count):
            ret, frame = cap.read()
            if not ret:
                break

            frame = cv2.resize(frame, (640, 480))
            _, png_data = cv2.imencode('.png', frame)
            pixels = base64.b64encode(png_data.tobytes()).decode('utf-8')

            frames.append({
                'index': i,
                'timestamp_ms': i * 100,  # assumes ~10 fps frame spacing
                'pixels': pixels
            })
    finally:
        # Always release the native capture handle, even if encoding fails.
        cap.release()

    # The API expects exactly frame_count frames for the chosen model;
    # fail fast locally with a clearer message than the server would give.
    if len(frames) < frame_count:
        raise ValueError(
            f"Video yielded only {len(frames)} frames; "
            f"model {model!r} requires {frame_count}"
        )

    response = requests.post(
        f'{BASE_URL}/api/v1/fast-check',
        headers={
            'Content-Type': 'application/json',
            'X-API-Key': API_KEY,
            # v2 flow (optional): 'X-Model-Version': 'latest'
        },
        json={
            'session_id': str(uuid.uuid4()),
            'source': 'live',
            # v1 flow:
            'model': model,
            # v2 flow (alternative):
            # 'frame_count': frame_count,
            'frames': frames
        },
        timeout=30,  # avoid hanging indefinitely on network stalls
    )

    body = response.json()
    if not body.get("success", False):
        raise RuntimeError(body.get("message", "Request failed"))
    return body["data"]

# Usage — performs a real network call to the Moveris API (needs a valid API key
# in API_KEY and a readable video file at this path).
result = check_liveness("video.mp4")
print(f"Verdict: {result['verdict']}, Score: {result['score']}")

With Crops (Faster)

Dependencies

  • opencv-python
  • requests
  • mediapipe
import base64
import uuid
import cv2
import requests
import mediapipe as mp

API_KEY = "sk-your-api-key"
BASE_URL = "https://api.moveris.com"

def check_liveness_with_crops(video_path: str) -> dict:
    """Run a liveness check using pre-cropped face regions (MediaPipe).

    Sending tight face crops instead of full frames shrinks the payload and
    the server-side work, which is why this path is faster.

    Args:
        video_path: Path to the video file to analyze.

    Returns:
        The ``data`` object from the API response (verdict, score, ...).

    Raises:
        ValueError: If the video cannot be opened or no faces are detected.
        RuntimeError: If the API reports a failure.
    """
    frame_count = 10  # for mixed-10-v2; use getModels() for dynamic selection

    # Initialize MediaPipe Face Detection.
    mp_face_detection = mp.solutions.face_detection
    face_detection = mp_face_detection.FaceDetection(
        model_selection=0,  # short-range model
        min_detection_confidence=0.5
    )

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        face_detection.close()
        raise ValueError(f"Cannot open video: {video_path}")

    crops = []
    try:
        for i in range(frame_count):
            ret, frame = cap.read()
            if not ret:
                break

            # MediaPipe expects RGB; OpenCV decodes frames as BGR.
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = face_detection.process(rgb_frame)
            if not results.detections:
                continue  # no face in this frame; skip it

            # Convert the relative bounding box to pixel coordinates.
            bbox = results.detections[0].location_data.relative_bounding_box
            h, w = frame.shape[:2]
            x = int(bbox.xmin * w)
            y = int(bbox.ymin * h)
            face_w = int(bbox.width * w)
            face_h = int(bbox.height * h)

            # Expand to a 3x square around the face center to keep context,
            # clamped to the frame boundaries.
            center_x = x + face_w // 2
            center_y = y + face_h // 2
            crop_size = max(face_w, face_h) * 3

            x1 = max(0, int(center_x - crop_size // 2))
            y1 = max(0, int(center_y - crop_size // 2))
            x2 = min(w, int(center_x + crop_size // 2))
            y2 = min(h, int(center_y + crop_size // 2))

            # Crop and resize to the 224x224 input the endpoint expects.
            crop = cv2.resize(frame[y1:y2, x1:x2], (224, 224))
            _, png_data = cv2.imencode('.png', crop)

            crops.append({
                'index': i,
                'pixels': base64.b64encode(png_data.tobytes()).decode('utf-8')
            })
    finally:
        # Release native resources even if decoding/detection raises.
        cap.release()
        face_detection.close()

    if not crops:
        raise ValueError("No faces detected in the video")

    response = requests.post(
        f'{BASE_URL}/api/v1/fast-check-crops',
        headers={
            'Content-Type': 'application/json',
            'X-API-Key': API_KEY,
            # v2 flow (optional): 'X-Model-Version': 'latest'
        },
        json={
            'session_id': str(uuid.uuid4()),
            'source': 'live',
            # v1 flow:
            'model': 'mixed-10-v2',
            # v2 flow (alternative):
            # 'frame_count': frame_count,
            'crops': crops,
            # 'bg_segmentation': True,  # Optional: set if you applied background segmentation to crops
        },
        timeout=30,  # avoid hanging indefinitely on network stalls
    )

    body = response.json()
    if not body.get("success", False):
        raise RuntimeError(body.get("message", "Request failed"))
    return body["data"]

Installation

pip install opencv-python requests mediapipe