
added torch.cuda.empty_cache()

client-connection
s_kiani 1 month ago
commit 60674e7b85
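
For context on the change itself: torch.cuda.empty_cache() releases the unoccupied blocks held by PyTorch's caching allocator back to the driver, so that memory shows up as free in nvidia-smi and in NVML-based tools such as the gpuMonitor.py script added below. It does not free tensors that are still referenced. A minimal standalone sketch of the effect (illustrative only, not part of this commit):

    import torch

    if torch.cuda.is_available():
        x = torch.empty(1024, 1024, 256, device="cuda")  # ~1 GiB of float32
        print(torch.cuda.memory_allocated())  # bytes held by live tensors
        print(torch.cuda.memory_reserved())   # bytes cached by the allocator

        del x  # frees the tensor, but the allocator keeps the blocks cached
        print(torch.cuda.memory_reserved())   # still ~1 GiB: reserved, not released

        torch.cuda.empty_cache()  # hand the cached blocks back to the driver
        print(torch.cuda.memory_reserved())   # drops; nvidia-smi/NVML now see the memory as free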
11 changed files (changed-line counts in parentheses):
  1. Oxygen-Sys-Warning.wav (BIN)
  2. core.py (180)
  3. gpuMonitor.py (47)
  4. tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py (1)
  5. tracker/ltr/models/backbone/resnet.py (2)
  6. tracker/ltr/models/bbreg/atom_iou_net.py (1)
  7. tracker/ltr/models/layers/distance.py (2)
  8. tracker/ltr/models/target_classifier/features.py (2)
  9. tracker/pytracking/features/augmentation.py (6)
  10. tracker/pytracking/features/preprocessing.py (7)
  11. tracker/pytracking/libs/dcf.py (1)

Oxygen-Sys-Warning.wav (BIN)

core.py (180)

@@ -1,31 +1,31 @@
 #import os
 #os.environ['YOLO_VERBOSE'] = "false"
-from threading import Event, Thread
-from typing import List
+import datetime
+import pygame
 import cv2
 import numpy as np
 import torch
+from threading import Event, Thread
+from typing import List
 from PyQt5.QtCore import QThread, pyqtSlot, pyqtSignal, QUrl, QDir, pyqtProperty
 #from icecream import ic
 from matplotlib import pyplot as plt
 from detector import Detector
 from detector.utils import get_bbox_by_point
 from tracker import Tracker
 from video_streamer.videostreamer import VideoStreamer
 from time import sleep
 import time
 from PyQt5.QtCore import QObject, pyqtSignal
 import ctypes
 from ctypes import c_int64

+showTrack = False

 class Core(QThread):
-    newFrame = pyqtSignal(object, int, bool,ctypes.c_int64)
+    newFrame = pyqtSignal(object, int, bool, ctypes.c_int64)
     coordsUpdated = pyqtSignal(int, object, bool)

-    def __init__(self, video_sources: List[VideoStreamer], tracker = None, detector = None , parent=None):
+    def __init__(self, video_sources: List[VideoStreamer], tracker=None, detector=None, parent=None):
         super(QThread, self).__init__(parent)
         self.__detector = detector
@@ -45,7 +45,12 @@ class Core(QThread):
         self.__tracking_thread = None
         self.__processing_id = 0
         # ic()
+        self.__frame = None  # Frame property for Pygame
+
+    @pyqtProperty(np.ndarray)
+    def frame(self):
+        return self.__frame

     def set_thickness(self, thickness: int):
         self.__thickness = thickness
@@ -73,10 +78,8 @@ class Core(QThread):
                     bbox = result[1:]
                     bbox[:2] += roi[:2]
                     global_bboxes.append(bbox)
-                    # color = (0, 0, 255) if cls == 0 else (80, 127, 255)
-                    # self.__draw_bbox(frame, bbox, color)
-                self.newFrame.emit(global_bboxes, self.__processing_id, True, c_int64(int(time.time()*1e3)))
+                self.newFrame.emit(global_bboxes, self.__processing_id, True, c_int64(int(time.time() * 1e3)))
                self.__detection_bboxes = np.array(global_bboxes)
                self.__detection_frame = frame.copy()
                sleep(0.03)
@@ -84,22 +87,94 @@ class Core(QThread):
                print(e)
                sleep(0.1)

    def __tracking(self):
        source = self.__processing_source
+        if showTrack:
+            pygame.init()
+            # Get actual screen resolution
+            info = pygame.display.Info()
+            screen_width, screen_height = info.current_w, info.current_h
+            screen = pygame.display.set_mode((screen_width, screen_height), pygame.FULLSCREEN)
+            pygame.display.set_caption('Tracking Frame')
+            clock = pygame.time.Clock()  # Add a clock to control frame rate
        while self.__is_tracking:
+            if showTrack:
+                for event in pygame.event.get():  # Prevent freezing by handling events
+                    if event.type == pygame.QUIT:
+                        pygame.quit()
+                        return
+            ctime = c_int64(int(time.time() * 1000))  # Convert to c_int64
            frame = source.get_frame()
            bbox, success = self.__tracker.update(frame)
            center = None
            if bbox is not None:
                center = bbox[:2] + bbox[2:] // 2
                self.coordsUpdated.emit(self.__processing_id, center, success)
+                self.newFrame.emit([bbox], self.__processing_id, False, ctime)
                sleep(0.01)
            else:
+                # self.newFrame.emit([bbox], self.__processing_id, False, ctime)
                print("null bbox")
                sleep(0.05)
+            if showTrack and bbox is not None:  # guard: bbox can be None after a failed update
+                x, y, w, h = map(int, bbox)
+                box_color = (0, 255, 0) if success else (255, 0, 0)
+                cv2.rectangle(frame, (x, y), (x + w, y + h), box_color, 2)
+                # Convert OpenCV frame (BGR) to RGB
+                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                font = cv2.FONT_HERSHEY_SIMPLEX
+                font_scale = 2.25
+                font_color = (255, 255, 0)
+                thickness = 6
+                position = (50, 450)  # Bottom-left corner of the text in the image
+                now = datetime.datetime.now()
+                time_string = now.strftime("%H:%M:%S.%f")[:-3]
+                # Use cv2.putText() to write the time on the image
+                cv2.putText(frame, time_string, position, font, font_scale, font_color, thickness, cv2.LINE_AA)
+                cv2.putText(frame, f"{ctime}", (50, 380), font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)
+                # print(ctime)
+                frame = cv2.flip(frame, 1)  # Flip horizontally
+                # Resize frame while maintaining aspect ratio
+                frame_height, frame_width, _ = frame.shape
+                aspect_ratio = frame_width / frame_height
+                if aspect_ratio > (screen_width / screen_height):  # Wider than screen
+                    new_width = screen_width
+                    new_height = int(screen_width / aspect_ratio)
+                else:  # Taller than screen
+                    new_height = screen_height
+                    new_width = int(screen_height * aspect_ratio)
+                resized_frame = cv2.resize(frame, (new_width, new_height))
+                # Convert to a Pygame surface without unnecessary rotation
+                frame_surface = pygame.surfarray.make_surface(resized_frame)
+                # Optional: if rotation is needed, use pygame.transform.rotate()
+                frame_surface = pygame.transform.rotate(frame_surface, -90)  # Example rotation
+                # Center the frame
+                x_offset = (screen_width - new_width) // 2
+                y_offset = (screen_height - new_height) // 2
+                screen.fill((0, 0, 0))  # Clear screen
+                screen.blit(frame_surface, (x_offset, y_offset))
+                pygame.display.flip()
+                clock.tick(30)  # Limit FPS to prevent excessive CPU usage

    def start_detect(self, x: int, y: int, w: int, h: int):
        self.__detection_roi = [x, y, x + w, y + h]
@@ -113,54 +188,53 @@ class Core(QThread):
    def stop_detection(self):
        self.__is_detecting = False
-        if self.__detection_thread is not None:
+        if self.__detection_thread is not None and self.__detection_thread.is_alive():
            self.__detection_thread.join()
            self.__detection_thread = None

    def start_track(self, x: int, y: int, w: int = 0, h: int = 0):
        print(f"start tracking: {x}, {y}, {w}, {h}")
        try:
            self.__is_detecting = False
            self.__is_tracking = False
            bbox = None
            if w == 0:
                if len(self.__detection_bboxes):
                    bbox = get_bbox_by_point(self.__detection_bboxes, np.array([x, y]))
                    frame = self.__detection_frame
            else:
                bbox = np.array([x, y, w, h])
                frame = self.__processing_source.get_frame()

            self.__tracker.stop()
            if bbox is not None:
                self.__tracker.init(frame, bbox)
            else:
                return
        except Exception as e:
            print(e)
            return

        if self.__tracking_thread is not None:
            self.__tracking_thread.join()
        self.stop_track()
        self.__is_tracking = True
        self.__tracking_thread = Thread(target=self.__tracking)
        self.__tracking_thread.start()
        sleep(0.03)

    def stop_track(self):
+        if showTrack:
+            pygame.quit()
        print("stop tracking")
        self.stop_detection()
        self.__tracker.stop()
        self.__is_tracking = False
        if self.__tracking_thread is not None:
            self.__tracking_thread.join()
            self.__tracking_thread = None

    def __draw_bbox(self, img: np.ndarray, bbox, color):
        thickness = self.__thickness
-        # cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2] + bbox[0], bbox[3] + bbox[1]),
-        #               color, thickness)
+        cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2] + bbox[0], bbox[3] + bbox[1]),
+                      color, thickness)
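
For illustration, a minimal sketch of consuming the newFrame signal declared above; the on_new_frame slot is hypothetical, and the signal carries the bbox list, the processing id, a detection flag, and a millisecond timestamp wrapped in ctypes.c_int64:

    import time

    # Hypothetical receiver for Core.newFrame(object, int, bool, c_int64).
    def on_new_frame(bboxes, processing_id, is_detection, ts_ms):
        latency_ms = int(time.time() * 1e3) - ts_ms.value  # c_int64 exposes the integer as .value
        kind = "detection" if is_detection else "track"
        print(f"{kind} frame from source {processing_id}: {len(bboxes)} box(es), {latency_ms} ms old")

    # core = Core(video_sources, tracker, detector)
    # core.newFrame.connect(on_new_frame)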

gpuMonitor.py (47)

@@ -0,0 +1,47 @@
import pynvml
import time
from colorama import Fore, Style, init
import os

# Initialize colorama
init(autoreset=True)

def monitor_gpu_ram_usage(interval=2, threshold_gb=2):
    # Initialize NVML
    pynvml.nvmlInit()
    try:
        device_count = pynvml.nvmlDeviceGetCount()
        print(f"Found {device_count} GPU(s).")
        while True:
            for i in range(device_count):
                handle = pynvml.nvmlDeviceGetHandleByIndex(i)
                info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                print(f"GPU {i}:")
                print(f"  Total RAM: {info.total / 1024 ** 2:.2f} MB")
                if info.used / 1024 ** 2 >= threshold_gb * 1024:  # honor the threshold_gb parameter
                    print(Fore.RED + f"  Used RAM: {info.used / 1024 ** 2:.2f} MB")
                    os.system("aplay /home/rog/repos/Tracker/NE-Smart-Tracker/Oxygen-Sys-Warning.wav")
                else:
                    print(f"  Used RAM: {info.used / 1024 ** 2:.2f} MB")
                print(f"  Free RAM: {info.free / 1024 ** 2:.2f} MB")
                print(Fore.GREEN + "-" * 30)
            time.sleep(interval)  # Wait for the specified interval before checking again
    except KeyboardInterrupt:
        print("Monitoring stopped by user.")
    finally:
        # Shutdown NVML
        pynvml.nvmlShutdown()

if __name__ == "__main__":
    monitor_gpu_ram_usage(interval=2, threshold_gb=2)  # Check every 2 seconds, threshold is 2 GB
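
The monitor polls device-wide memory through NVML, so it sees allocations from every process, including the blocks PyTorch keeps cached until torch.cuda.empty_cache() is called. If GPU load is also of interest, the same handle can report it; a small sketch using the standard pynvml call (an optional extension, not part of this commit):

    import pynvml

    pynvml.nvmlInit()
    try:
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        util = pynvml.nvmlDeviceGetUtilizationRates(handle)  # percentages sampled by the driver
        mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
        print(f"GPU util: {util.gpu}%  memory util: {util.memory}%  used: {mem.used / 1024 ** 2:.0f} MB")
    finally:
        pynvml.nvmlShutdown()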

tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py (1)

@@ -12,6 +12,7 @@
 import torch
 import torch.autograd as ag

 __all__ = ['prroi_pool2d']

tracker/ltr/models/backbone/resnet.py (2)

@@ -6,6 +6,7 @@ from torchvision.models.resnet import model_urls
 from .base import Backbone

 class Bottleneck(nn.Module):
     expansion = 4
@@ -22,6 +23,7 @@ class Bottleneck(nn.Module):
         self.downsample = downsample
         self.stride = stride

     def forward(self, x):
         residual = x

tracker/ltr/models/bbreg/atom_iou_net.py (1)

@@ -2,6 +2,7 @@ import torch.nn as nn
 import torch
 from ltr.models.layers.blocks import LinearBlock
 from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D
+torch.cuda.empty_cache()

 def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):

tracker/ltr/models/layers/distance.py (2)

@@ -14,12 +14,14 @@ class DistanceMap(nn.Module):
         super().__init__()
         self.num_bins = num_bins
         self.bin_displacement = bin_displacement
+        torch.cuda.empty_cache()

     def forward(self, center, output_sz):
         """Create the distance map.
         args:
             center: Torch tensor with (y,x) center position. Dims (batch, 2)
             output_sz: Size of output distance map. 2-dimensional tuple."""
+        torch.cuda.empty_cache()
         center = center.view(-1,2)

tracker/ltr/models/target_classifier/features.py (2)

@@ -4,9 +4,11 @@ from ltr.models.layers.normalization import InstanceL2Norm

 def residual_bottleneck(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
                         interp_cat=False, final_relu=False, final_pool=False, input_dim=None, final_stride=1):
     """Construct a network block based on the Bottleneck block used in ResNet."""
     if out_dim is None:
         out_dim = feature_dim
     if input_dim is None:

tracker/pytracking/features/augmentation.py (6)

@@ -6,16 +6,18 @@ import cv2 as cv
 import random
 from pytracking.features.preprocessing import numpy_to_torch, torch_to_numpy

 class Transform:
     """Base data augmentation transform class."""
     def __init__(self, output_sz = None, shift = None):
         self.output_sz = output_sz
         self.shift = (0,0) if shift is None else shift
+        torch.cuda.empty_cache()

     def crop_to_output(self, image):
+        torch.cuda.empty_cache()
         if isinstance(image, torch.Tensor):
             imsz = image.shape[2:]
             if self.output_sz is None:
@@ -67,6 +69,7 @@ class Rotate(Transform):
         super().__init__(output_sz, shift)
         self.angle = math.pi * angle/180

     def __call__(self, image, is_mask=False):
         if isinstance(image, torch.Tensor):
             return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))
@@ -90,6 +93,7 @@ class Blur(Transform):
         self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()
         self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()

     def __call__(self, image, is_mask=False):
         if isinstance(image, torch.Tensor):
             sz = image.shape[2:]

tracker/pytracking/features/preprocessing.py (7)

@@ -4,10 +4,12 @@ import numpy as np

 def numpy_to_torch(a: np.ndarray):
+    torch.cuda.empty_cache()
     return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)

 def torch_to_numpy(a: torch.Tensor):
+    torch.cuda.empty_cache()
     return a.squeeze(0).permute(1,2,0).numpy()
@@ -20,7 +22,7 @@ def sample_patch_transformed(im, pos, scale, image_sz, transforms, is_mask=False
         image_sz: Size to resize the image samples to before extraction.
         transforms: A set of image transforms to apply.
     """
+    torch.cuda.empty_cache()
     # Get image patch
     im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz, is_mask=is_mask)
@@ -39,6 +41,7 @@ def sample_patch_multiscale(im, pos, scales, image_sz, mode: str='replicate', ma
         mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'
         max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode
     """
+    torch.cuda.empty_cache()
     if isinstance(scales, (int, float)):
         scales = [scales]
@@ -62,7 +65,7 @@ def sample_patch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, o
         mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'
         max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode
     """
+    torch.cuda.empty_cache()
     # if mode not in ['replicate', 'inside']:
     #     raise ValueError('Unknown border mode \'{}\'.'.format(mode))

tracker/pytracking/libs/dcf.py (1)

@@ -3,6 +3,7 @@ import torch

 def max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):
     """Computes maximum and argmax in the last two dimensions."""
+    torch.cuda.empty_cache()
     max_val_row, argmax_row = torch.max(a, dim=-2)
     max_val, argmax_col = torch.max(max_val_row, dim=-1)
