from ultralytics import YOLO

# Load an official or custom model
model = YOLO("yolo11n.pt")  # Load an official Detect model
model = YOLO("yolo11n-seg.pt")  # Load an official Segment model
model = YOLO("yolo11n-pose.pt")  # Load an official Pose model
model = YOLO("path/to/best.pt")  # Load a custom trained model

# Perform tracking with the model
results = model.track("https://youtu.be/LNwODJXcvt4", show=True)  # Tracking with default tracker
results = model.track("https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml")  # with ByteTrack
# Perform tracking with various models using the command line interface
yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4"       # Official Detect model
yolo track model=yolo11n-seg.pt source="https://youtu.be/LNwODJXcvt4"   # Official Segment model
yolo track model=yolo11n-pose.pt source="https://youtu.be/LNwODJXcvt4"  # Official Pose model
yolo track model=path/to/best.pt source="https://youtu.be/LNwODJXcvt4"  # Custom trained model

# Track using ByteTrack tracker
yolo track model=path/to/best.pt tracker="bytetrack.yaml"
from ultralytics import YOLO

# Load the model and run the tracker with a custom configuration file
model = YOLO("yolo11n.pt")
results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="custom_tracker.yaml")
# Load the model and run the tracker with a custom configuration file using the command line interface
yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml'
启用 Re-Identification (ReID)
默认情况下,ReID 处于关闭状态,以最大限度地减少性能开销。启用它很简单,只需在跟踪器配置中设置 with_reid: True 即可。您还可以自定义用于 ReID 的 model,以便根据用例权衡准确性和速度:
from torch import nn

from ultralytics import YOLO

# Load the classification model
model = YOLO("yolo11n-cls.pt")

# Add average pooling layer so the classifier head emits a flat embedding vector
head = model.model.model[-1]
pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(start_dim=1))
pool.f, pool.i = head.f, head.i  # preserve the replaced head's graph metadata
model.model.model[-1] = pool

# Export to TensorRT
model.export(format="engine", half=True, dynamic=True, batch=32)
import cv2

from ultralytics import YOLO

# Load the YOLO11 model
model = YOLO("yolo11n.pt")

# Open the video file
video_path = "path/to/video.mp4"
cap = cv2.VideoCapture(video_path)

# Loop through the video frames
while cap.isOpened():
    # Read a frame from the video
    success, frame = cap.read()

    if success:
        # Run YOLO11 tracking on the frame, persisting tracks between frames
        results = model.track(frame, persist=True)

        # Visualize the results on the frame
        annotated_frame = results[0].plot()

        # Display the annotated frame
        cv2.imshow("YOLO11 Tracking", annotated_frame)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        # Break the loop if the end of the video is reached
        break

# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()
from collections import defaultdict

import cv2
import numpy as np

from ultralytics import YOLO

# Load the YOLO11 model
model = YOLO("yolo11n.pt")

# Open the video file
video_path = "path/to/video.mp4"
cap = cv2.VideoCapture(video_path)

# Store the track history (track_id -> list of (x, y) center points)
track_history = defaultdict(list)

# Loop through the video frames
while cap.isOpened():
    # Read a frame from the video
    success, frame = cap.read()

    if success:
        # Run YOLO11 tracking on the frame, persisting tracks between frames
        result = model.track(frame, persist=True)[0]

        # Get the boxes and track IDs (only when the tracker produced IDs)
        if result.boxes and result.boxes.is_track:
            boxes = result.boxes.xywh.cpu()
            track_ids = result.boxes.id.int().cpu().tolist()

            # Visualize the result on the frame
            frame = result.plot()

            # Plot the tracks
            for box, track_id in zip(boxes, track_ids):
                x, y, w, h = box
                track = track_history[track_id]
                track.append((float(x), float(y)))  # x, y center point
                if len(track) > 30:  # retain the last 30 center points per track
                    track.pop(0)

                # Draw the tracking lines
                points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
                cv2.polylines(frame, [points], isClosed=False, color=(230, 230, 230), thickness=10)

        # Display the annotated frame
        cv2.imshow("YOLO11 Tracking", frame)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        # Break the loop if the end of the video is reached
        break

# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()
import threading

import cv2

from ultralytics import YOLO

# Define model names and video sources
MODEL_NAMES = ["yolo11n.pt", "yolo11n-seg.pt"]
SOURCES = ["path/to/video.mp4", "0"]  # local video, 0 for webcam


def run_tracker_in_thread(model_name, filename):
    """
    Run YOLO tracker in its own thread for concurrent processing.

    Args:
        model_name (str): The YOLO11 model name or path to load.
        filename (str): The path to the video file or the identifier for the webcam/external camera source.
    """
    model = YOLO(model_name)
    results = model.track(filename, save=True, stream=True)
    for r in results:
        pass  # consume the stream generator so tracking runs to completion


# Create and start tracker threads using a for loop
tracker_threads = []
for video_file, model_name in zip(SOURCES, MODEL_NAMES):
    thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file), daemon=True)
    tracker_threads.append(thread)
    thread.start()

# Wait for all tracker threads to finish
for thread in tracker_threads:
    thread.join()

# Clean up and close windows
cv2.destroyAllWindows()
import threading

import cv2

from ultralytics import YOLO

# Define model names and video sources
MODEL_NAMES = ["yolo11n.pt", "yolo11n-seg.pt"]
SOURCES = ["path/to/video.mp4", "0"]  # local video, 0 for webcam


def run_tracker_in_thread(model_name, filename):
    """
    Run YOLO tracker in its own thread for concurrent processing.

    Args:
        model_name (str): The YOLO11 model name or path to load.
        filename (str): The path to the video file or the identifier for the webcam/external camera source.
    """
    model = YOLO(model_name)
    results = model.track(filename, save=True, stream=True)
    for r in results:
        pass  # consume the stream generator so tracking runs to completion


# Create and start tracker threads using a for loop
tracker_threads = []
for video_file, model_name in zip(SOURCES, MODEL_NAMES):
    thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file), daemon=True)
    tracker_threads.append(thread)
    thread.start()

# Wait for all tracker threads to finish
for thread in tracker_threads:
    thread.join()

# Clean up and close windows
cv2.destroyAllWindows()