trackers is a unified library offering clean-room re-implementations of leading multi-object tracking algorithms. Its modular design lets you easily swap trackers and pair them with object detectors from libraries such as inference, ultralytics, or transformers.

| Tracker   | Paper | MOTA | Year | Status | Colab |
|-----------|-------|------|------|--------|-------|
| SORT      | arXiv | 74.6 | 2016 | ✅     | colab |
| DeepSORT  | arXiv | 75.4 | 2017 | ✅     | colab |
| ByteTrack | arXiv | 77.8 | 2021 | 🚧     | 🚧    |
| OC-SORT   | arXiv | 75.9 | 2022 | 🚧     | 🚧    |
| BoT-SORT  | arXiv | 77.8 | 2022 | 🚧     | 🚧    |

Installation

You can install trackers in a Python>=3.9 environment.

Basic Installation

Install the core package with pip, Poetry, or uv:

pip install trackers
poetry add trackers
uv pip install trackers
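
To verify the install, you can import the package and construct a tracker. This is only a smoke test; SORTTracker is the same class used in the Quickstart below.

# minimal smoke test: the import should succeed and a tracker should construct
from trackers import SORTTracker

tracker = SORTTracker()
print(type(tracker).__name__)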

Hardware Acceleration

Optional extras install a build matched to your hardware (CPU-only, CUDA, or ROCm):

pip install "trackers[cpu]"
pip install "trackers[cu118]"
pip install "trackers[cu124]"
pip install "trackers[cu126]"
pip install "trackers[rocm61]"
pip install "trackers[rocm624]"
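
If you installed a CUDA or ROCm extra, a quick check with standard PyTorch APIs (not part of trackers) confirms the accelerator is visible:

import torch

# reports whether PyTorch can see a CUDA (or ROCm/HIP) device
print(torch.cuda.is_available())
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))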

Quickstart

Thanks to its modular design, trackers lets you combine object detectors from different libraries with the tracker of your choice. Here's how to use SORTTracker with various detectors:

Inference

import supervision as sv
from trackers import SORTTracker
from inference import get_model

tracker = SORTTracker()
model = get_model(model_id="yolov11m-640")
annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)

def callback(frame, _):  # the second argument is the frame index, unused here
    result = model.infer(frame)[0]
    detections = sv.Detections.from_inference(result)
    detections = tracker.update(detections)  # assigns persistent tracker IDs
    return annotator.annotate(frame, detections, labels=detections.tracker_id)

sv.process_video(
    source_path="input.mp4",
    target_path="output.mp4",
    callback=callback,
)

RF-DETR

import supervision as sv
from trackers import SORTTracker
from rfdetr import RFDETRBase

tracker = SORTTracker()
model = RFDETRBase()
annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)

def callback(frame, _):
    detections = model.predict(frame)
    detections = tracker.update(detections)
    return annotator.annotate(frame, detections, labels=detections.tracker_id)

sv.process_video(
    source_path="input.mp4",
    target_path="output.mp4",
    callback=callback,
)

Ultralytics

import supervision as sv
from trackers import SORTTracker
from ultralytics import YOLO

tracker = SORTTracker()
model = YOLO("yolo11m.pt")
annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)

def callback(frame, _):
    result = model(frame)[0]
    detections = sv.Detections.from_ultralytics(result)
    detections = tracker.update(detections)
    return annotator.annotate(frame, detections, labels=detections.tracker_id)

sv.process_video(
    source_path="input.mp4",
    target_path="output.mp4",
    callback=callback,
)

Transformers

import torch
import supervision as sv
from trackers import SORTTracker
from transformers import RTDetrV2ForObjectDetection, RTDetrImageProcessor

tracker = SORTTracker()
processor = RTDetrImageProcessor.from_pretrained("PekingU/rtdetr_v2_r18vd")
model = RTDetrV2ForObjectDetection.from_pretrained("PekingU/rtdetr_v2_r18vd")
annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)

def callback(frame, _):
    inputs = processor(images=frame, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    h, w, _ = frame.shape
    results = processor.post_process_object_detection(
        outputs,
        target_sizes=torch.tensor([(h, w)]),
        threshold=0.5
    )[0]

    detections = sv.Detections.from_transformers(
        transformers_results=results,
        id2label=model.config.id2label
    )

    detections = tracker.update(detections)
    return annotator.annotate(frame, detections, labels=detections.tracker_id)

sv.process_video(
    source_path="input.mp4",
    target_path="output.mp4",
    callback=callback,
)
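
sv.process_video reads and writes the video for you. If you prefer to drive the loop yourself, the same pattern applies frame by frame. Below is a minimal sketch using OpenCV with the Ultralytics detector from above; the file and window names are placeholders.

import cv2
import supervision as sv
from trackers import SORTTracker
from ultralytics import YOLO

tracker = SORTTracker()
model = YOLO("yolo11m.pt")
annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)

cap = cv2.VideoCapture("input.mp4")
while True:
    ok, frame = cap.read()
    if not ok:
        break
    result = model(frame)[0]
    detections = sv.Detections.from_ultralytics(result)
    detections = tracker.update(detections)  # call once per frame to keep IDs consistent
    annotated = annotator.annotate(frame, detections, labels=detections.tracker_id)
    cv2.imshow("trackers", annotated)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()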
