Compare commits

...

31 Commits

Author SHA1 Message Date
s_kiani b79d689fc4 made code more clean and added getting thickness from client 2 months ago
s_kiani cd57a45562 running watchdogs instead of app.py 2 months ago
s_kiani 54cc90691a changed default fps from 30 to 25 2 months ago
s_kiani e9668fbc55 added setting thickness from client 2 months ago
s_kiani eadb5f36e8 added watchdogs.py 2 months ago
s_kiani 80f4a47441 🎥 Refactor streaming logic to separate tracking and non-tracking frame updates 2 months ago
s_kiani bc4ac21f36 drawing frames debugged 2 months ago
s_kiani 25e3346736 added continuous streaming of the video sources. 3 months ago
s_kiani bea0241c32 added comment 3 months ago
s_kiani 522c3ddb55 changed gst pipeline and converted server class from singleton to normal class 3 months ago
mht f228d0f30d added dynamic size to stream 3 months ago
mht 2e112c3a69 cleaning code by adding classes 3 months ago
mht e7823c55ba Merge remote-tracking branch 'origin/add-capture-card' into add-capture-card 3 months ago
mht acfa58f665 added auto ip finding and changed gst pipeline 3 months ago
mht 759e29d564 added video streaming through rtsp 3 months ago
mht bd13a03b61 Initial commit 3 months ago
mht 48e06828ea added capture card 3 months ago
mht a1e43b8e37 added capture card 3 months ago
s_kiani f18154a7e6 added vision_service.cpython-37m-x86_64-linux-gnu.so 3 months ago
s_kiani 60674e7b85 added torch.cuda.empty_cache() 3 months ago
s_kiani 3e77f014dc handled emitting null bboxes 4 months ago
s_kiani c5fcd089b3 changed search_area_scale from 5 to 7 for tracking small objects near corners 4 months ago
s_kiani 691a281bb0 made project thread safe by moving detector and tracker objects out of the core object; they are shared using pointers 4 months ago
s_kiani c13e231bec added torch.cuda.empty_cache() 4 months ago
s_kiani 1bae5c72fd edited newFrame signal emission 4 months ago
s_kiani baf86a4132 handled null bboxes and added debug prints 4 months ago
s_kiani 72340ecdcf handled null bboxes 4 months ago
s_kiani abba3fadf1 added heartbeat 4 months ago
s_kiani 04e253a33b Merge remote-tracking branch 'origin/client-connection' into client-connection 4 months ago
s_kiani 59369df540 Add connection logic to a separate thread for independent client handling 4 months ago
s_kiani 7324e6d278 removed qtimer and added connection_thread 4 months ago
  1. Oxygen-Sys-Warning.wav (BIN)
  2. app.py (171)
  3. conection.py (114)
  4. config.yaml (2)
  5. core.py (189)
  6. cvStreamer.py (52)
  7. detector/demo.py (2)
  8. gpuMonitor.py (47)
  9. message_queue/proto/SetCameraMessage.proto (1)
  10. message_queue/proto/SetCameraMessage_pb2.py (11)
  11. run.sh (2)
  12. server.py (140)
  13. shared_manager.py (1)
  14. tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py (1)
  15. tracker/ltr/models/backbone/resnet.py (2)
  16. tracker/ltr/models/bbreg/atom_iou_net.py (1)
  17. tracker/ltr/models/layers/distance.py (2)
  18. tracker/ltr/models/target_classifier/features.py (2)
  19. tracker/pytracking/features/augmentation.py (6)
  20. tracker/pytracking/features/preprocessing.py (7)
  21. tracker/pytracking/libs/dcf.py (1)
  22. tracker/pytracking/tracker/dimp/dimp.py (3)
  23. tracker/pytracking/utils/params.py (2)
  24. utils.py (72)
  25. video_streamer/vision_service.cpython-37m-x86_64-linux-gnu.so (BIN)
  26. watchdogs.py (136)

BIN
Oxygen-Sys-Warning.wav

171
app.py

@ -4,182 +4,84 @@ import sys
from datetime import datetime, timedelta
from time import sleep
from typing import List
import sys
import os
import utils
from detector import Detector
# Add the proto directory to the Python path
proto_dir = os.path.join(os.path.dirname(__file__), 'message_queue', 'proto')
if proto_dir not in sys.path:
sys.path.append(proto_dir)
# Debug: Print the updated Python path
print("Updated Python Path:", sys.path)
import requests
from PyQt5.QtCore import QCoreApplication, Qt, pyqtSlot, QThread, QTimer
import numpy as np
from icecream import ic
# print("Updated Python Path:", sys.path)
from PyQt5.QtCore import QCoreApplication, Qt
import conection
from conection import ConnectionThread
import shared_manager
from utils import manager_callback, handle_camera_status, findUsbCams
from configs import ConfigManager
from core import Core
from tracker import Tracker
from message_queue.Bridge import Bridge
from message_queue.Manager import Manager
from message_queue.proto.ImageMessage_pb2 import ImageMessage, TrackMode
from message_queue.proto.Message_pb2 import Message
from message_queue.proto.Point_pb2 import Point
from message_queue.proto.TrackCommandMessage_pb2 import TrackCommand
from message_queue.proto.TrackCoordsMessage_pb2 import TrackCoordsMessage
from message_queue.proto.enums_pb2 import MessageType
from message_queue.proto.ConnectStatus_pb2 import ConnectStatus
from video_streamer.gst_video_streamer import GstVideoStreamer
import cv2
import os
import time # Ensure time module is imported
config_manager = ConfigManager('config.yaml')
rtsp_links = config_manager.configs['rtsp_links'].get()
debug = config_manager.configs['debug'].get()
def handle_camera_status(status: int):
m = Message()
m.msgType = MessageType.MESSAGE_TYPE_CAMERA_CONNECTION_STATUS
m.cam_status.status = status
manager.send_message(m.SerializeToString())
# Helper class to track client connection status
class ConnectionTracker:
def __init__(self):
self.last_message_time = time.time() # Use time.time() to get the current time
self.timeout = 15 # Timeout in seconds (adjust as needed)
def update_last_message_time(self):
self.last_message_time = time.time() # Update with the current time
def is_client_connected(self):
return (time.time() - self.last_message_time) < self.timeout # Use time.time()
# Create an instance of ConnectionTracker
connection_tracker = ConnectionTracker()
def check_client_connection():
if not connection_tracker.is_client_connected():
print("Client disconnected. Searching for another client...")
cl_ip, _ = start_discovery_service(12345)
ic(cl_ip)
# Reinitialize the Manager with the new client address
import time  # Ensure time module is imported
# Helper class to track client connection status
class ConnectionTracker:
def __init__(self):
self.last_message_time = time.time() # Use time.time() to get the current time
self.timeout = 15 # Timeout in seconds (adjust as needed)
def update_last_message_time(self):
self.last_message_time = time.time() # Update with the current time
def is_client_connected(self):
return (time.time() - self.last_message_time) < self.timeout # Use time.time()
# Create an instance of ConnectionTracker
connection_tracker = ConnectionTracker()
def check_client_connection():
if not connection_tracker.is_client_connected():
print("Client disconnected. Searching for another client...")
cl_ip, _ = start_discovery_service(12345)
print(cl_ip)
# Reinitialize the Manager with the new client address
global manager
manager = Manager(f"tcp://{cl_ip}:5558", f"tcp://{cl_ip}:5557")
manager.start(manager_callback) # Restart the Manager and re-register the callback
connection_tracker.update_last_message_time() # Reset the timer after reconnecting
if __name__ == '__main__':
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
app = QCoreApplication(sys.argv)
videoStreamers: List[GstVideoStreamer] = []
streamerThreads = []
for idx, rtsp_link in enumerate(rtsp_links):
videoStreamer = GstVideoStreamer(rtsp_link, [1920, 1080, 3], str(idx), fps=15)
videoStreamer.cameraStatus.connect(handle_camera_status)
print(f'{videoStreamer.id} connected')
videoStreamers.append(videoStreamer)
core = Core(videoStreamers)
def manager_callback(msg_str):
msg = Message()
msg.ParseFromString(msg_str)
connection_tracker.update_last_message_time() # Update the last message time
if msg.msgType == MessageType.MESSAGE_TYPE_TOGGLE_TRACK:
if msg.track_cmd.command == TrackCommand.TRACK_COMMAND_STOP:
core.stop_track()
elif msg.track_cmd.command == TrackCommand.TRACK_COMMAND_START:
core.start_track(msg.track_cmd.x, msg.track_cmd.y, msg.track_cmd.w, msg.track_cmd.h)
elif msg.track_cmd.command == TrackCommand.TRACK_COMMAND_START_DETECT:
core.start_detect(msg.track_cmd.x, msg.track_cmd.y, msg.track_cmd.w, msg.track_cmd.h)
elif msg.msgType == MessageType.MESSAGE_TYPE_TRACK_SETTINGS:
core.set_thickness(msg.track_settings.thickness)
elif msg.msgType == MessageType.MESSAGE_TYPE_SWITCH_CAMERA:
core.set_source(msg.cam_switch.primaryCamType)
elif msg.msgType == MessageType.MESSAGE_TYPE_SET_CAMERA:
ip = msg.cam_set.ip
videoStreamers = []
for src in range(2):
rtsp_link = f'rtsp://admin:Abc.12345@{ip}:554/ch{src}/stream0'
# ic(rtsp_link)
for idx, rtsp_link in enumerate(rtsp_links):
videoStreamer = GstVideoStreamer(rtsp_link, [1920, 1080, 3], str(idx), fps=15)
videoStreamer.cameraStatus.connect(handle_camera_status)
print(f'{videoStreamer.id} connected')
videoStreamers.append(videoStreamer)
core.set_video_sources(videoStreamers)
import socket
tracker = Tracker()
detector = Detector(classes=[0, 2, 5, 7])
def start_discovery_service(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', port))
print(f"Discovery service listening on port {port}...")
sources = findUsbCams()
if len(sources) >= 2:
core = Core(sources,tracker,detector)
else:
core = Core(videoStreamers, tracker, detector)
while True:
data, addr = sock.recvfrom(1024)
if data.decode() == "DISCOVER_SERVER":
print(f"Received discovery request from {addr}")
sock.sendto(b"SERVER_RESPONSE", addr)
break
return addr
utils.core = core
cl_ip, _ = start_discovery_service(12345)
cl_ip, _ = conection.start_discovery_service(12345)
print(cl_ip)
manager = Manager(f"tcp://{cl_ip}:5558", f"tcp://{cl_ip}:5557")
manager.start(manager_callback)
shared_manager.manager = Manager(f"tcp://{cl_ip}:5558", f"tcp://{cl_ip}:5557")
shared_manager.manager.start(manager_callback)
def gotNewFrame(bboxes, id_, isDetection, ctime):
#print(f"Got new frame, bboxes : {bboxes} Id: {id} Is detection {isDetection}")
m = Message()
m.msgType = MessageType.MESSAGE_TYPE_IMAGE
m.image.timestamp = int(ctime.value)
for bbox in bboxes:
# Skip if bbox is None, doesn't have exactly 4 elements, or contains None values
if bbox is None or len(bbox) != 4 or not all(element is not None for element in bbox):
continue
# Add the bounding box to the image
box = m.image.boxes.add()
box.x = bbox[0]
box.y = bbox[1]
box.w = bbox[2]
box.h = bbox[3]
box.x, box.y, box.w, box.h = bbox
m.image.camType = id_
if isDetection:
m.image.trackMode = TrackMode.TRACK_MODE_DETECT
else:
m.image.trackMode = TrackMode.TRACK_MODE_TRACK
manager.send_message(m.SerializeToString())
shared_manager.manager.send_message(m.SerializeToString())
def gotCoords(id_, coord, successful):
m = Message()
@ -188,17 +90,18 @@ if __name__ == '__main__':
m.track_coords.center.x = coord[0]
m.track_coords.center.y = coord[1]
m.track_coords.isLost = not successful
manager.send_message(m.SerializeToString())
shared_manager.manager.send_message(m.SerializeToString())
core.newFrame.connect(gotNewFrame)
core.coordsUpdated.connect(gotCoords)
# Set up the QTimer to check client connection every 10 seconds
timer = QTimer()
timer.timeout.connect(check_client_connection)
timer.start(10000) # 10 seconds
# Start the connection thread
connection_thread = ConnectionThread(debug,core)
connection_thread.start()
try:
app.exec_()
except KeyboardInterrupt:
connection_thread.stop()
connection_thread.join()
sys.exit(0)

114
conection.py

@ -0,0 +1,114 @@
# Helper class to track client connection status
import threading
import time
import socket
from message_queue.Manager import Manager
import shared_manager
from utils import manager_callback
def start_discovery_service(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', port))
print(f"Discovery service listening on port {port}...")
while True:
data, addr = sock.recvfrom(1024)
if data.decode() == "DISCOVER_SERVER":
print(f"Received discovery request from {addr}")
sock.sendto(b"SERVER_RESPONSE", addr)
break
sock.close()
return addr
class ConnectionTracker:
def __init__(self):
self.last_message_time = time.time()
self.timeout = 15 # 15-second timeout
self.last_client_ip = None # Track last connected client IP
def update_last_message_time(self, client_ip=None):
self.last_message_time = time.time()
if client_ip:
self.last_client_ip = client_ip # Update last known client
def is_client_active(self):
"""Check if the last client is still active within the last 15 seconds."""
return (time.time() - self.last_message_time) < self.timeout
# Create an instance of ConnectionTracker
connection_tracker = ConnectionTracker()
class ConnectionThread(threading.Thread):
def __init__(self,debug = False,core = None):
super().__init__()
self.running = True
self.__debug = debug
self.__core = core
def run(self):
while self.running:
time.sleep(0.5)
if connection_tracker.last_client_ip:
if self.__debug:
print(f"Checking if last client {connection_tracker.last_client_ip} is still available...")
if self.query_last_client(connection_tracker.last_client_ip):
connection_tracker.update_last_message_time()
if self.__debug:
print(f"Last client {connection_tracker.last_client_ip} responded. Continuing...")
continue # Skip discovering a new client
if not connection_tracker.is_client_active():
print("Client inactive for 15 seconds. Searching for another client...")
cl_ip, _ = start_discovery_service(12345)
print(f"New client found: {cl_ip}")
# Reinitialize the Manager with the new client address
shared_manager.manager = Manager(f"tcp://{cl_ip}:5558", f"tcp://{cl_ip}:5557")
shared_manager.manager.start(manager_callback)
connection_tracker.update_last_message_time(cl_ip) # Update with new client
def query_last_client(self, client_ip):
"""Send a heartbeat packet to the last known client IP on port 29170 and wait for a response."""
if self.__debug:
print(f"Sending heartbeat to {client_ip}:29170") # Debugging
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5) # 5-second timeout
heartbeat_port = 29170 # New port for heartbeat
try:
sock.sendto(b"HEARTBEAT", (client_ip, heartbeat_port)) # Send heartbeat message
data, addr = sock.recvfrom(1024) # Wait for response
if self.__debug:
print(f"Received response from {addr}: {data.decode()}") # Debugging
if data.decode() == "HEARTBEAT_ACK":
connection_tracker.update_last_message_time() # Update the last message time
if self.__debug:
print(f"Client {client_ip} is still responding.")
return True
except socket.timeout:
print(f"Client {client_ip} did not respond. Marking as inactive.")
finally:
sock.close()
return False # Client did not respond
def stop(self):
self.running = False
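The discovery and heartbeat logic above defines a small UDP protocol: the server listens on port 12345 for the string "DISCOVER_SERVER" and answers "SERVER_RESPONSE", and ConnectionThread later probes the last client with "HEARTBEAT" on port 29170, expecting "HEARTBEAT_ACK" within 5 seconds before falling back to rediscovery. The client side is not part of this repository; the sketch below only illustrates what a counterpart would need to do, assuming it reaches the server by UDP broadcast and uses the same ports and message strings.

    import socket

    DISCOVERY_PORT = 12345   # port start_discovery_service() listens on
    HEARTBEAT_PORT = 29170   # port ConnectionThread probes with "HEARTBEAT"

    def discover_server(timeout=5.0):
        """Ask for the tracker server and return its (ip, port) if it answers."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.settimeout(timeout)
        try:
            sock.sendto(b"DISCOVER_SERVER", ("255.255.255.255", DISCOVERY_PORT))
            data, addr = sock.recvfrom(1024)
            return addr if data == b"SERVER_RESPONSE" else None
        except socket.timeout:
            return None
        finally:
            sock.close()

    def answer_heartbeats():
        """Reply to heartbeat probes so ConnectionThread keeps this client marked active."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(("0.0.0.0", HEARTBEAT_PORT))
        while True:
            data, addr = sock.recvfrom(1024)
            if data == b"HEARTBEAT":
                sock.sendto(b"HEARTBEAT_ACK", addr)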

2
config.yaml

@ -6,4 +6,4 @@ rtsp_links: [
thickness: 2
debug: false
debug: False

189
core.py

@ -1,34 +1,44 @@
#import os
#os.environ['YOLO_VERBOSE'] = "false"
import datetime
import threading
import pygame
import cv2
import numpy as np
import torch
from threading import Event, Thread
from typing import List
import numpy as np
from PyQt5.QtCore import QThread, pyqtSlot, pyqtSignal, QUrl, QDir, pyqtProperty
#from icecream import ic
from matplotlib import pyplot as plt
from detector import Detector
from detector.utils import get_bbox_by_point
from tracker import Tracker
from video_streamer.videostreamer import VideoStreamer
from time import sleep
import time
from PyQt5.QtCore import QObject, pyqtSignal
import ctypes
from ctypes import c_int64
from server import run_server, RTSPServer, get_local_ip
showTrack = False
class Core(QThread):
newFrame = pyqtSignal(object, int, bool, ctypes.c_int64)
coordsUpdated = pyqtSignal(int, object, bool)
def __init__(self, video_sources: List[VideoStreamer], parent=None):
def __init__(self, video_sources, tracker=None, detector=None, parent=None):
super(QThread, self).__init__(parent)
self.__detector = Detector(classes=[0, 2, 5, 7])
self.__tracker = Tracker()
self.__detector = detector
self.__tracker = tracker
self.__tracker_roi = None
self.__tracker__succ = False
self.__rtspserver_0 = RTSPServer(get_local_ip(), 41231,"/stream")
threading.Thread(target=run_server,args=[self.__rtspserver_0], daemon=True).start()
self.__rtspserver_1 = RTSPServer(get_local_ip(), 41232,"/stream")
threading.Thread(target=run_server,args=[self.__rtspserver_1], daemon=True).start()
self.__video_sources = video_sources
self.__processing_source = video_sources[0]
@ -44,7 +54,17 @@ class Core(QThread):
self.__tracking_thread = None
self.__processing_id = 0
# ic()
self.__frame = None # Frame property for Pygame
# Start the continuous streaming thread
self.__is_streaming = True
self.__streaming_thread = Thread(target=self.__stream)
self.__streaming_thread.start()
@pyqtProperty(np.ndarray)
def frame(self):
return self.__frame
def set_thickness(self, thickness: int):
self.__thickness = thickness
@ -53,17 +73,49 @@ class Core(QThread):
self.__processing_source = self.__video_sources[source_id]
self.__processing_id = source_id
def set_video_sources(self, video_sources: List[VideoStreamer]):
def set_video_sources(self, video_sources):
if len(video_sources) >= 2:
self.__video_sources = video_sources
self.set_source(0)
def __stream(self):
"""Continuous streaming of the video source."""
while self.__is_streaming:
try:
frame_0 = self.__video_sources[0].get_frame()
frame_1 = self.__video_sources[1].get_frame()
if self.__is_tracking:
# Only update the non-tracking source
if self.__processing_id == 0:
if frame_1 is not None:
self.__rtspserver_1.update_frame(frame_1)
elif self.__processing_id == 1:
if frame_0 is not None:
self.__rtspserver_0.update_frame(frame_0)
else:
# Update both sources if not tracking
if frame_0 is not None:
self.__rtspserver_0.update_frame(frame_0)
if frame_1 is not None:
self.__rtspserver_1.update_frame(frame_1)
sleep(1/25)
except Exception as e:
print(e)
sleep(0.1)
def __detection(self):
while self.__is_detecting:
try:
torch.cuda.set_per_process_memory_fraction()
source = self.__processing_source
roi = self.__detection_roi
frame = source.get_frame()
cropped_frame = frame[roi[1]:roi[3], roi[0]:roi[2]]
with torch.no_grad():
results = self.__detector.predict(cropped_frame)
global_bboxes = list()
for result in results:
@ -71,10 +123,8 @@ class Core(QThread):
bbox = result[1:]
bbox[:2] += roi[:2]
global_bboxes.append(bbox)
# color = (0, 0, 255) if cls == 0 else (80, 127, 255)
# self.__draw_bbox(frame, bbox, color)
self.newFrame.emit(global_bboxes, self.__processing_id, True)
self.newFrame.emit(global_bboxes, self.__processing_id, True, c_int64(int(time.time() * 1e3)))
self.__detection_bboxes = np.array(global_bboxes)
self.__detection_frame = frame.copy()
sleep(0.03)
@ -82,21 +132,105 @@ class Core(QThread):
print(e)
sleep(0.1)
def __tracking(self):
source = self.__processing_source
track_color = (0, 255, 0) #green
lost_color = (255, 0, 0) #blue
if showTrack:
pygame.init()
# Get actual screen resolution
info = pygame.display.Info()
screen_width, screen_height = info.current_w, info.current_h
screen = pygame.display.set_mode((screen_width, screen_height), pygame.FULLSCREEN)
pygame.display.set_caption('Tracking Frame')
clock = pygame.time.Clock() # Add a clock to control frame rate
while self.__is_tracking:
if showTrack:
for event in pygame.event.get(): # Prevent freezing by handling events
if event.type == pygame.QUIT:
pygame.quit()
return
ctime = c_int64(int(time.time() * 1000)) # Convert to c_int64
frame = source.get_frame()
# print(f"intial frame size :{frame.shape}")
bbox, success = self.__tracker.update(frame)
center = None
self.__tracker_roi = bbox
self.__tracker__succ = success
if frame is not None:
if self.__is_tracking and self.__tracker is not None and self.__tracker_roi is not None :
x, y, w, h = map(int, self.__tracker_roi)
box_color = track_color if self.__tracker__succ else lost_color
cv2.rectangle(frame, (x, y), (x + w, y + h), box_color, self.__thickness)
if self.__processing_id == 1:
self.__rtspserver_1.update_frame(frame)
elif self.__processing_id == 0:
self.__rtspserver_0.update_frame(frame)
if bbox is not None:
center = bbox[:2] + bbox[2:] // 2
self.coordsUpdated.emit(self.__processing_id, center, success)
self.newFrame.emit([bbox], self.__processing_id, False, ctime)
sleep(0.01)
else:
self.newFrame.emit([bbox], self.__processing_id, False, ctime)
sleep(0.05)
x, y, w, h = map(int, bbox)
box_color = (0, 255, 0) if success else (255, 0, 0)
cv2.rectangle(frame, (x, y), (x + w, y + h), box_color, 2)
if showTrack:
# Convert OpenCV frame (BGR) to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 2.25
font_color = (255, 255, 0)
thickness = 6
position = (50, 450) # Bottom-left corner of the text in the image
now = datetime.datetime.now()
time_string = now.strftime("%H:%M:%S.%f")[:-3]
# 4. Use cv2.putText() to write time on the image
cv2.putText(frame, time_string, position, font, font_scale, font_color, thickness, cv2.LINE_AA)
cv2.putText(frame, f"{ctime}", (50, 380), font, font_scale, (255,255,255), thickness, cv2.LINE_AA)
# print(ctime)
frame = cv2.flip(frame, 1) # Flip horizontally
# Resize frame while maintaining aspect ratio
frame_height, frame_width, _ = frame.shape
aspect_ratio = frame_width / frame_height
if aspect_ratio > (screen_width / screen_height): # Wider than screen
new_width = screen_width
new_height = int(screen_width / aspect_ratio)
else: # Taller than screen
new_height = screen_height
new_width = int(screen_height * aspect_ratio)
resized_frame = cv2.resize(frame, (new_width, new_height))
# Convert to Pygame surface without unnecessary rotation
frame_surface = pygame.surfarray.make_surface(resized_frame)
# Optional: If rotation is needed, use pygame.transform.rotate()
frame_surface = pygame.transform.rotate(frame_surface, -90) # Example rotation
# Center the frame
x_offset = (screen_width - new_width) // 2
y_offset = (screen_height - new_height) // 2
screen.fill((0, 0, 0)) # Clear screen
screen.blit(frame_surface, (x_offset, y_offset))
pygame.display.flip()
clock.tick(30) # Limit FPS to prevent excessive CPU usage
def start_detect(self, x: int, y: int, w: int, h: int):
self.__detection_roi = [x, y, x + w, y + h]
@ -110,12 +244,13 @@ class Core(QThread):
def stop_detection(self):
self.__is_detecting = False
if self.__detection_thread is not None:
if self.__detection_thread is not None and self.__detection_thread.is_alive():
self.__detection_thread.join()
self.__detection_thread = None
def start_track(self, x: int, y: int, w: int = 0, h: int = 0):
print(f"start tracking: {x}, {y}, {w}, {h}")
try:
self.__is_detecting = False
self.__is_tracking = False
@ -140,21 +275,23 @@ class Core(QThread):
if self.__tracking_thread is not None:
self.__tracking_thread.join()
self.stop_track()
self.__is_tracking = True
self.__tracking_thread = Thread(target=self.__tracking)
self.__tracking_thread.start()
sleep(0.03)
def stop_track(self):
if showTrack:
pygame.quit()
print("stop tracking")
self.stop_detection()
self.__tracker.stop()
self.__is_tracking = False
if self.__tracking_thread is not None:
self.__tracking_thread.join()
self.__tracking_thread = None
self.__tracker_roi = None
def __draw_bbox(self, img: np.ndarray, bbox, color):
thickness = self.__thickness
# cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2] + bbox[0], bbox[3] + bbox[1]),
# color, thickness)
cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2] + bbox[0], bbox[3] + bbox[1]),
color, thickness)
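The new Core(video_sources, tracker=None, detector=None) signature pairs with the thread-safety commit: the detector and tracker models are built once outside Core and passed in, so recreating or re-sourcing Core reuses the same objects instead of reloading weights. As shown in the app.py diff above, the call site looks roughly like this (videoStreamers being the GstVideoStreamer list built earlier in app.py):

    from detector import Detector
    from tracker import Tracker
    from core import Core
    from utils import findUsbCams

    # Build the heavy model objects once; Core only keeps references to them.
    tracker = Tracker()
    detector = Detector(classes=[0, 2, 5, 7])   # COCO classes: person, car, bus, truck

    sources = findUsbCams()                     # prefer USB/capture-card cameras when two are found
    if len(sources) >= 2:
        core = Core(sources, tracker, detector)
    else:
        core = Core(videoStreamers, tracker, detector)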

52
cvStreamer.py

@ -0,0 +1,52 @@
import cv2
from pywt.version import release
class cvStreamer():
def __init__(self, idx):
self.cap = cv2.VideoCapture(idx)
self.idx = idx
def isOpened(self):
isOpen = self.cap.isOpened()
if not isOpen:
self.release()
else:
print(f"usb cam open at {self.idx}")
return isOpen
def release(self):
self.cap.release()
def get_frame(self):
_, frame = self.cap.read()
# Get the original dimensions of the frame
height, width = frame.shape[:2]
# Define the maximum dimensions
max_width = 1920
max_height = 1080
# Calculate the aspect ratio
aspect_ratio = width / height
# Resize the frame if it exceeds the maximum dimensions
if width > max_width or height > max_height:
if aspect_ratio > 1: # Landscape orientation
new_width = max_width
new_height = int(new_width / aspect_ratio)
else: # Portrait orientation
new_height = max_height
new_width = int(new_height * aspect_ratio)
# Resize the frame
frame = cv2.resize(frame, (new_width, new_height))
return frame
def __del__(self):
self.release()

2
detector/demo.py

@ -7,7 +7,7 @@ from utils import get_bbox_by_point
if __name__ == '__main__':
detector = Detector(classes=[0, 2, 5, 7])
cap = cv2.VideoCapture(1)
cap = cv2.VideoCapture(0)
display_name = 'detector'
cv2.namedWindow(display_name, cv2.WINDOW_NORMAL)
cv2.setWindowProperty(display_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

47
gpuMonitor.py

@ -0,0 +1,47 @@
import pynvml
import time
from colorama import Fore, Style, init
import os
# Initialize colorama
init(autoreset=True)
def monitor_gpu_ram_usage(interval=2, threshold_gb=2):
pynvml.nvmlInit()
# Initialize NVML
try:
device_count = pynvml.nvmlDeviceGetCount()
print(f"Found {device_count} GPU(s).")
while True:
for i in range(device_count):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
info = pynvml.nvmlDeviceGetMemoryInfo(handle)
print(f"GPU {i}:")
print(f" Total RAM: {info.total / 1024 ** 2:.2f} MB")
if(info.used / 1024 ** 2 >= 2.5 * 1024 ):
print(Fore.RED + f" Used RAM: {info.used / 1024 ** 2:.2f} MB")
os.system("aplay /home/rog/repos/Tracker/NE-Smart-Tracker/Oxygen-Sys-Warning.wav")
else:
print(f" Used RAM: {info.used / 1024 ** 2:.2f} MB")
print(f" Free RAM: {info.free / 1024 ** 2:.2f} MB")
print(Fore.GREEN + "-" * 30)
print(Fore.GREEN)
time.sleep(interval) # Wait for the specified interval before checking again
except KeyboardInterrupt:
print("Monitoring stopped by user.")
finally:
# Shutdown NVML
pynvml.nvmlShutdown()
if __name__ == "__main__":
monitor_gpu_ram_usage(interval=2, threshold_gb=2) # Check every 2 seconds, threshold is 2 GB

1
message_queue/proto/SetCameraMessage.proto

@ -3,4 +3,5 @@ syntax = "proto3";
message SetCameraMessage {
string ip = 1;
int32 port = 2;
int32 cameraSource = 3;
}
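The new cameraSource field lets the client pick the video source: manager_callback in utils.py treats 1 as the network RTSP cameras and anything else as the USB capture card. A minimal sketch of composing that command, assuming the generated _pb2 modules are used the same way as elsewhere in the repo (IP and port values are placeholders):

    from message_queue.proto.Message_pb2 import Message
    from message_queue.proto.enums_pb2 import MessageType

    m = Message()
    m.msgType = MessageType.MESSAGE_TYPE_SET_CAMERA
    m.cam_set.ip = "192.168.1.64"    # placeholder camera/NVR address
    m.cam_set.port = 554             # placeholder RTSP port
    m.cam_set.cameraSource = 1       # 1 = network RTSP cameras, otherwise USB capture card
    payload = m.SerializeToString()  # bytes that manager_callback receives and parses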

11
message_queue/proto/SetCameraMessage_pb2.py

@ -19,7 +19,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x16SetCameraMessage.proto\",\n\x10SetCameraMessage\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x62\x06proto3'
serialized_pb=b'\n\x16SetCameraMessage.proto\"B\n\x10SetCameraMessage\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x12\x14\n\x0c\x63\x61meraSource\x18\x03 \x01(\x05\x62\x06proto3'
)
@ -47,6 +47,13 @@ _SETCAMERAMESSAGE = _descriptor.Descriptor(
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cameraSource', full_name='SetCameraMessage.cameraSource', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
@ -60,7 +67,7 @@ _SETCAMERAMESSAGE = _descriptor.Descriptor(
oneofs=[
],
serialized_start=26,
serialized_end=70,
serialized_end=92,
)
DESCRIPTOR.message_types_by_name['SetCameraMessage'] = _SETCAMERAMESSAGE

2
run.sh

@ -21,4 +21,4 @@ eval "$(conda shell.bash hook)"
conda activate /opt/core/core/tracker_human_car
#python -c "from tracker.pytracking.evaluation.environment import create_default_local_file; create_default_local_file()"
#python -c "from tracker.ltr.admin.environment import create_default_local_file; create_default_local_file()"
python app.py
python watchdogs.py

140
server.py

@ -0,0 +1,140 @@
import gi
import threading
import numpy as np
import socket
import cv2
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib
def get_local_ip():
"""Retrieve the local IP address of the machine."""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
except Exception as e:
return str(e)
class VideoStream(GstRtspServer.RTSPMediaFactory):
def __init__(self, fps, width, height):
super(VideoStream, self).__init__()
self.fps = fps
self.width = width
self.height = height
self.frame = np.zeros((height, width, 3), dtype=np.uint8) # Initial black frame
self.lock = threading.Lock()
def update_frame(self, frame):
"""Externally updates the current frame."""
with self.lock:
self.frame = cv2.resize(frame, (self.width, self.height))
def on_need_data(self, src, length):
"""Provides frames to the pipeline when requested."""
with self.lock:
frame_rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
data = frame_rgb.tobytes()
buf = Gst.Buffer.new_allocate(None, len(data), None)
buf.fill(0, data)
buf.duration = Gst.SECOND // self.fps
buf.pts = buf.dts = Gst.CLOCK_TIME_NONE
src.emit("push-buffer", buf)
def do_create_element(self, url):
"""Creates the GStreamer pipeline for RTSP streaming."""
pipeline_str = (
"appsrc name=source is-live=true format=GST_FORMAT_TIME "
"caps=video/x-raw,format=RGB,width={},height={},framerate={}/1 "
"! videoconvert ! video/x-raw,format=I420 "
"! x264enc tune=zerolatency bitrate=500 speed-preset=ultrafast "
"! h264parse ! rtph264pay config-interval=1 name=pay0 pt=96"
).format(self.width, self.height, self.fps)
pipeline = Gst.parse_launch(pipeline_str)
src = pipeline.get_by_name("source")
src.connect("need-data", self.on_need_data)
return pipeline
class RTSPServer:
def __init__(self, ip="0.0.0.0", port=8554, mount_point="/stream"):
Gst.init(None)
self.server = GstRtspServer.RTSPServer()
self.server.set_address(ip)
self.server.set_service(str(port))
self.mount_point = mount_point
self.video_stream = None
self.current_fps = None
self.current_width = None
self.current_height = None
self.lock = threading.Lock()
self.server.attach(None)
def create_stream(self, fps, width, height):
"""Create a new RTSP stream with the given resolution and FPS."""
with self.lock:
# Check if we need to recreate the stream
if (self.video_stream and
self.current_fps == fps and
self.current_width == width and
self.current_height == height):
return # No need to recreate
# Remove old stream if exists
if self.video_stream:
print("Recreating RTSP stream for new resolution/fps...")
self.server.get_mount_points().remove_factory(self.mount_point)
# Create a new stream with the updated settings
self.video_stream = VideoStream(fps, width, height)
self.server.get_mount_points().add_factory(self.mount_point, self.video_stream)
self.current_fps = fps
self.current_width = width
self.current_height = height
print(f"New RTSP stream created: {width}x{height} at {fps}fps")
def update_frame(self, frame):
"""Push frames to the current streaming channel."""
if frame is None or frame.size == 0:
return
height, width, _ = frame.shape
fps = 25 # Default FPS
# Ensure we have a valid stream for the given resolution
self.create_stream(fps, width, height)
# Update the current stream with the new frame
if self.video_stream:
self.video_stream.update_frame(frame)
# Global RTSP server instance
# rtsp_server = RTSPServer(get_local_ip(), 41231)
def run_server(server):
"""Start the RTSP server loop."""
print(f"RTSP Server running at rtsp://{server.server.get_address()}:{server.server.get_service()}/stream")
loop = GLib.MainLoop()
loop.run()
# def stream_webcam():
# cap = cv2.VideoCapture("/home/mht/Downloads/bcd2890d71caaf0e095b95c9b525973f61186656-360p.mp4") # Open webcam
# while cap.isOpened():
# ret, frame = cap.read()
# if ret:
# rtsp_server.update_frame(frame) # Send frame to RTSP server
# if __name__ == "__main__":
# # Start RTSP server in a separate thread
# threading.Thread(target=run_server, daemon=True).start()
#
# # Stream webcam frames
# stream_webcam()
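Core attaches two of these servers on ports 41231 and 41232, both mounted at /stream, and pushes frames into them from its streaming and tracking loops. A quick way to check the output is to open the stream with OpenCV or any RTSP-capable player; a sketch, with the server address substituted for the one run_server prints at startup:

    import cv2

    # Replace SERVER_IP with the address printed by run_server, e.g. rtsp://192.168.1.20:41231/stream
    cap = cv2.VideoCapture("rtsp://SERVER_IP:41231/stream")
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        cv2.imshow("primary stream", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()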

1
shared_manager.py

@ -0,0 +1 @@
manager = "nothing"

1
tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py

@ -12,6 +12,7 @@
import torch
import torch.autograd as ag
__all__ = ['prroi_pool2d']

2
tracker/ltr/models/backbone/resnet.py

@ -6,6 +6,7 @@ from torchvision.models.resnet import model_urls
from .base import Backbone
class Bottleneck(nn.Module):
expansion = 4
@ -22,6 +23,7 @@ class Bottleneck(nn.Module):
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x

1
tracker/ltr/models/bbreg/atom_iou_net.py

@ -2,6 +2,7 @@ import torch.nn as nn
import torch
from ltr.models.layers.blocks import LinearBlock
from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D
torch.cuda.empty_cache()
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):

2
tracker/ltr/models/layers/distance.py

@ -14,12 +14,14 @@ class DistanceMap(nn.Module):
super().__init__()
self.num_bins = num_bins
self.bin_displacement = bin_displacement
torch.cuda.empty_cache()
def forward(self, center, output_sz):
"""Create the distance map.
args:
center: Torch tensor with (y,x) center position. Dims (batch, 2)
output_sz: Size of output distance map. 2-dimensional tuple."""
torch.cuda.empty_cache()
center = center.view(-1,2)

2
tracker/ltr/models/target_classifier/features.py

@ -4,9 +4,11 @@ from ltr.models.layers.normalization import InstanceL2Norm
def residual_bottleneck(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
interp_cat=False, final_relu=False, final_pool=False, input_dim=None, final_stride=1):
"""Construct a network block based on the Bottleneck block used in ResNet."""
if out_dim is None:
out_dim = feature_dim
if input_dim is None:

6
tracker/pytracking/features/augmentation.py

@ -6,16 +6,18 @@ import cv2 as cv
import random
from pytracking.features.preprocessing import numpy_to_torch, torch_to_numpy
class Transform:
"""Base data augmentation transform class."""
def __init__(self, output_sz = None, shift = None):
self.output_sz = output_sz
self.shift = (0,0) if shift is None else shift
torch.cuda.empty_cache()
def crop_to_output(self, image):
torch.cuda.empty_cache()
if isinstance(image, torch.Tensor):
imsz = image.shape[2:]
if self.output_sz is None:
@ -67,6 +69,7 @@ class Rotate(Transform):
super().__init__(output_sz, shift)
self.angle = math.pi * angle/180
def __call__(self, image, is_mask=False):
if isinstance(image, torch.Tensor):
return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))
@ -90,6 +93,7 @@ class Blur(Transform):
self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()
self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()
def __call__(self, image, is_mask=False):
if isinstance(image, torch.Tensor):
sz = image.shape[2:]

7
tracker/pytracking/features/preprocessing.py

@ -4,10 +4,12 @@ import numpy as np
def numpy_to_torch(a: np.ndarray):
torch.cuda.empty_cache()
return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)
def torch_to_numpy(a: torch.Tensor):
torch.cuda.empty_cache()
return a.squeeze(0).permute(1,2,0).numpy()
@ -20,7 +22,7 @@ def sample_patch_transformed(im, pos, scale, image_sz, transforms, is_mask=False
image_sz: Size to resize the image samples to before extraction.
transforms: A set of image transforms to apply.
"""
torch.cuda.empty_cache()
# Get image patche
im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz, is_mask=is_mask)
@ -39,6 +41,7 @@ def sample_patch_multiscale(im, pos, scales, image_sz, mode: str='replicate', ma
mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'
max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode
"""
torch.cuda.empty_cache()
if isinstance(scales, (int, float)):
scales = [scales]
@ -62,7 +65,7 @@ def sample_patch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, o
mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'
max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode
"""
torch.cuda.empty_cache()
# if mode not in ['replicate', 'inside']:
# raise ValueError('Unknown border mode \'{}\'.'.format(mode))

1
tracker/pytracking/libs/dcf.py

@ -3,6 +3,7 @@ import torch
def max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):
"""Computes maximum and argmax in the last two dimensions."""
torch.cuda.empty_cache()
max_val_row, argmax_row = torch.max(a, dim=-2)
max_val, argmax_col = torch.max(max_val_row, dim=-1)

3
tracker/pytracking/tracker/dimp/dimp.py

@ -65,6 +65,7 @@ class DiMP():
def track(self, image) -> dict:
torch.cuda.empty_cache()
# Convert image
im = numpy_to_torch(image)
@ -213,7 +214,7 @@ class DiMP():
# Compute augmentation size
aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)
ic(self.params.get('augmentation_expansion_factor', None))
aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()

2
tracker/pytracking/utils/params.py

@ -5,7 +5,7 @@ class TrackerParams:
"""Class for tracker parameters."""
image_sample_size = 18 * 16
search_area_scale = 5
search_area_scale = 7
# Learning parameters
sample_memory_size = 50

72
utils.py

@ -0,0 +1,72 @@
import os
import sys
proto_dir = os.path.join(os.path.dirname(__file__), 'message_queue', 'proto')
if proto_dir not in sys.path:
sys.path.append(proto_dir)
import shared_manager
from cvStreamer import cvStreamer
from message_queue.proto.Message_pb2 import Message
from message_queue.proto.TrackCommandMessage_pb2 import TrackCommand
from message_queue.proto.enums_pb2 import MessageType
from video_streamer.gst_video_streamer import GstVideoStreamer
def findUsbCams():
cv_cameras = []
for i in range(50):
cap = cvStreamer(i)
if cap.isOpened():
cv_cameras.append(cap)
return cv_cameras
core = "a place holder for object"
def manager_callback(msg_str):
msg = Message()
msg.ParseFromString(msg_str)
if msg.msgType == MessageType.MESSAGE_TYPE_TOGGLE_TRACK:
if msg.track_cmd.command == TrackCommand.TRACK_COMMAND_STOP:
core.stop_track()
elif msg.track_cmd.command == TrackCommand.TRACK_COMMAND_START:
core.start_track(msg.track_cmd.x, msg.track_cmd.y, msg.track_cmd.w, msg.track_cmd.h)
elif msg.track_cmd.command == TrackCommand.TRACK_COMMAND_START_DETECT:
core.start_detect(msg.track_cmd.x, msg.track_cmd.y, msg.track_cmd.w, msg.track_cmd.h)
elif msg.msgType == MessageType.MESSAGE_TYPE_TRACK_SETTINGS:
core.set_thickness(msg.track_settings.thickness)
print(f"thickness: {msg.track_settings.thickness}")
elif msg.msgType == MessageType.MESSAGE_TYPE_SWITCH_CAMERA:
print(f"switch camera detected ,primary value is : {msg.cam_switch.primaryCamType}")
print(f"switch camera detected ,secondary value is : {msg.cam_switch.secondaryCamType}")
core.set_source(msg.cam_switch.primaryCamType)
elif msg.msgType == MessageType.MESSAGE_TYPE_SET_CAMERA:
if msg.cam_set.cameraSource == 1:
print(f"set camera source to network")
ip = msg.cam_set.ip
videoStreamers = []
for src in range(2):
rtsp_link = f'rtsp://admin:Abc.12345@{ip}:554/ch{src}/stream0'
# ic(rtsp_link)
videoStreamer = GstVideoStreamer(rtsp_link, [1920, 1080, 3], str(src), fps=15)
videoStreamer.cameraStatus.connect(handle_camera_status)
print(f'{videoStreamer.id} connected')
videoStreamers.append(videoStreamer)
core.set_video_sources(videoStreamers)
# videoStreamers = []
# for idx, rtsp_link in enumerate(rtsp_links):
# videoStreamer = GstVideoStreamer(rtsp_link, [1920, 1080, 3], str(idx), fps=15)
# videoStreamer.cameraStatus.connect(handle_camera_status)
# print(f'{videoStreamer.id} connected')
# videoStreamers.append(videoStreamer)
# core.set_video_sources(videoStreamers)
else:
sources = findUsbCams()
if len(sources) >= 2:
print("set camera source to capture card")
core.set_video_sources(findUsbCams())
def handle_camera_status(status: int):
m = Message()
m.msgType = MessageType.MESSAGE_TYPE_CAMERA_CONNECTION_STATUS
m.cam_status.status = status
shared_manager.manager.send_message(m.SerializeToString())

BIN
video_streamer/vision_service.cpython-37m-x86_64-linux-gnu.so

136
watchdogs.py

@ -0,0 +1,136 @@
import subprocess
import time
import re
import psutil
import torch
import sys
from threading import Thread, Event
from queue import Queue, Empty
from colorama import Fore, Style, init
import os
def get_gpu_memory_usage():
"""Get current GPU memory usage percentage"""
if torch.cuda.is_available():
device = torch.cuda.current_device()
total_mem = torch.cuda.get_device_properties(device).total_memory
allocated_mem = torch.cuda.memory_allocated(device)
return (allocated_mem / total_mem) * 100
return 0
def check_for_cuda_errors(log_queue):
"""Check application output for CUDA-related errors"""
cuda_error_patterns = [
r"CUDA error",
r"out of memory",
r"cudaError",
r"RuntimeError: CUDA",
r"CUDA runtime error",
r"CUDA out of memory",
r"CUDA kernel failed"
]
try:
while True:
line = log_queue.get_nowait()
for pattern in cuda_error_patterns:
if re.search(pattern, line, re.IGNORECASE):
return True
except Empty:
pass
return False
def read_output(pipe, log_queue, print_event):
"""Read output from subprocess and distribute it"""
try:
for line in iter(pipe.readline, ''):
log_queue.put(line)
if print_event.is_set():
print(line, end='', flush=True)
except:
pass
def run_application(command, max_gpu_usage=90, check_interval=10):
"""Run application with watchdog functionality and live output"""
print_event = Event()
print_event.set() # Enable printing by default
while True:
print(f"\n{'=' * 40}")
print(f"Starting application: {' '.join(command)}")
print(f"{'=' * 40}\n")
log_queue = Queue()
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
universal_newlines=True
)
output_thread = Thread(
target=read_output,
args=(process.stdout, log_queue, print_event)
)
output_thread.daemon = True
output_thread.start()
try:
while True:
if process.poll() is not None:
print("\nApplication exited with code:", process.returncode)
break
gpu_usage = get_gpu_memory_usage()
if gpu_usage > max_gpu_usage:
print(f"\nGPU memory usage exceeded threshold ({gpu_usage:.1f}%)")
break
if check_for_cuda_errors(log_queue):
print("\nCUDA error detected in application output")
break
time.sleep(check_interval)
print("\nWaiting 1.5 seconds before restart...")
print(Fore.RED + f"{30 * '-'}")
print(Fore.RED + "RESTARTING...")
print(Fore.RED + f"{30 * '-'}")
print(Fore.WHITE)
os.system("aplay /home/rog/repos/Tracker/NE-Smart-Tracker/Oxygen-Sys-Warning.wav")
# Clean up
try:
if process.poll() is None:
parent = psutil.Process(process.pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
except psutil.NoSuchProcess:
pass
if torch.cuda.is_available():
torch.cuda.empty_cache()
time.sleep(1.5)
except KeyboardInterrupt:
print("\nStopping watchdog...")
print_event.clear()
output_thread.join()
try:
process.kill()
except:
pass
break
if __name__ == "__main__":
# Configure these parameters
APP_COMMAND = ["python", "app.py"] # Your application command
MAX_GPU_USAGE = 90 # Percentage threshold for GPU memory usage
CHECK_INTERVAL = 0.5 # Seconds between checks
run_application(APP_COMMAND, MAX_GPU_USAGE, CHECK_INTERVAL)