Browse Source

Feat: Achieved perfect Conv1, good BN1/ReLU1/MaxPool similarity. README updated.

resnet
mht 2 months ago
parent
commit
e2f46802bf
  1. 100
      CMakeLists.txt
  2. 17
      README.md
  3. BIN
      bin/tracking_demo
  4. 154
      build.sh
  5. 212
      build/CMakeCache.txt
  6. 22
      build/CMakeFiles/3.22.1/CMakeCUDACompiler.cmake
  7. BIN
      build/CMakeFiles/3.22.1/CMakeDetermineCompilerABI_CUDA.bin
  8. 2
      build/CMakeFiles/3.22.1/CMakeSystem.cmake
  9. BIN
      build/CMakeFiles/3.22.1/CompilerIdCUDA/a.out
  10. 4246
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cpp1.ii
  11. 4120
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cpp4.ii
  12. 6
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cudafe1.c
  13. 7546
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cudafe1.cpp
  14. 4
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cudafe1.gpu
  15. BIN
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.fatbin
  16. 38
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.fatbin.c
  17. BIN
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.o
  18. 6
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.ptx
  19. BIN
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.sm_52.cubin
  20. BIN
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.fatbin
  21. 57
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.fatbin.c
  22. BIN
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.o
  23. BIN
      build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.sm_52.cubin
  24. 1156
      build/CMakeFiles/CMakeOutput.log
  25. 44
      build/CMakeFiles/Makefile.cmake
  26. 62
      build/CMakeFiles/Makefile2
  27. 3
      build/CMakeFiles/TargetDirectories.txt
  28. 2
      build/CMakeFiles/bb_regressor.dir/DependInfo.cmake
  29. 26
      build/CMakeFiles/bb_regressor.dir/build.make
  30. BIN
      build/CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o
  31. 5157
      build/CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o.d
  32. 6
      build/CMakeFiles/bb_regressor.dir/cmake_clean.cmake
  33. 5367
      build/CMakeFiles/bb_regressor.dir/compiler_depend.internal
  34. 15247
      build/CMakeFiles/bb_regressor.dir/compiler_depend.make
  35. 11
      build/CMakeFiles/bb_regressor.dir/flags.make
  36. 2
      build/CMakeFiles/bb_regressor.dir/link.txt
  37. BIN
      build/CMakeFiles/classifier.dir/cimp/classifier/classifier.cpp.o
  38. 5088
      build/CMakeFiles/classifier.dir/cimp/classifier/classifier.cpp.o.d
  39. 5123
      build/CMakeFiles/classifier.dir/compiler_depend.internal
  40. 14799
      build/CMakeFiles/classifier.dir/compiler_depend.make
  41. 4
      build/CMakeFiles/classifier.dir/flags.make
  42. 1
      build/CMakeFiles/tracking_demo.dir/DependInfo.cmake
  43. 32
      build/CMakeFiles/tracking_demo.dir/build.make
  44. BIN
      build/CMakeFiles/tracking_demo.dir/cimp/demo.cpp.o
  45. 4910
      build/CMakeFiles/tracking_demo.dir/cimp/demo.cpp.o.d
  46. 2148
      build/CMakeFiles/tracking_demo.dir/compiler_depend.make
  47. 4
      build/CMakeFiles/tracking_demo.dir/flags.make
  48. 2
      build/CMakeFiles/tracking_demo.dir/link.txt
  49. 111
      build/Makefile
  50. 40
      build/cmake_install.cmake
  51. 3
      build/install_manifest.txt
  52. BIN
      build/libbb_regressor.a
  53. BIN
      build/libclassifier.a
  54. BIN
      build/tracking_demo
  55. 84
      cimp/bb_regressor/bb_regressor.cpp
  56. 2
      cimp/bb_regressor/bb_regressor.h
  57. 34
      cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu
  58. 287
      cimp/resnet/resnet.cpp
  59. 82
      cimp/resnet/resnet.h
  60. 156
      cmake-build-debug/CMakeCache.txt
  61. 13
      cmake-build-debug/CMakeFiles/clion-Debug-log.txt
  62. BIN
      cmake-build-debug/CMakeFiles/clion-environment.txt
  63. 178
      export_resnet_individual_tensors.py
  64. 205
      export_resnet_raw.py
  65. BIN
      exported_weights/backbone_regenerated/bn1_bias.pt
  66. BIN
      exported_weights/backbone_regenerated/bn1_num_batches_tracked.pt
  67. BIN
      exported_weights/backbone_regenerated/bn1_running_mean.pt
  68. BIN
      exported_weights/backbone_regenerated/bn1_running_var.pt
  69. BIN
      exported_weights/backbone_regenerated/bn1_weight.pt
  70. BIN
      exported_weights/backbone_regenerated/conv1_weight.pt
  71. BIN
      exported_weights/backbone_regenerated/fc_bias.pt
  72. BIN
      exported_weights/backbone_regenerated/fc_weight.pt
  73. BIN
      exported_weights/backbone_regenerated/layer1_0_bn1_bias.pt
  74. BIN
      exported_weights/backbone_regenerated/layer1_0_bn1_num_batches_tracked.pt
  75. BIN
      exported_weights/backbone_regenerated/layer1_0_bn1_running_mean.pt
  76. BIN
      exported_weights/backbone_regenerated/layer1_0_bn1_running_var.pt
  77. BIN
      exported_weights/backbone_regenerated/layer1_0_bn1_weight.pt
  78. BIN
      exported_weights/backbone_regenerated/layer1_0_bn2_bias.pt
  79. BIN
      exported_weights/backbone_regenerated/layer1_0_bn2_num_batches_tracked.pt
  80. BIN
      exported_weights/backbone_regenerated/layer1_0_bn2_running_mean.pt
  81. BIN
      exported_weights/backbone_regenerated/layer1_0_bn2_running_var.pt
  82. BIN
      exported_weights/backbone_regenerated/layer1_0_bn2_weight.pt
  83. BIN
      exported_weights/backbone_regenerated/layer1_0_bn3_bias.pt
  84. BIN
      exported_weights/backbone_regenerated/layer1_0_bn3_num_batches_tracked.pt
  85. BIN
      exported_weights/backbone_regenerated/layer1_0_bn3_running_mean.pt
  86. BIN
      exported_weights/backbone_regenerated/layer1_0_bn3_running_var.pt
  87. BIN
      exported_weights/backbone_regenerated/layer1_0_bn3_weight.pt
  88. BIN
      exported_weights/backbone_regenerated/layer1_0_conv1_weight.pt
  89. BIN
      exported_weights/backbone_regenerated/layer1_0_conv2_weight.pt
  90. BIN
      exported_weights/backbone_regenerated/layer1_0_conv3_weight.pt
  91. BIN
      exported_weights/backbone_regenerated/layer1_0_downsample_0_weight.pt
  92. BIN
      exported_weights/backbone_regenerated/layer1_0_downsample_1_bias.pt
  93. BIN
      exported_weights/backbone_regenerated/layer1_0_downsample_1_num_batches_tracked.pt
  94. BIN
      exported_weights/backbone_regenerated/layer1_0_downsample_1_running_mean.pt
  95. BIN
      exported_weights/backbone_regenerated/layer1_0_downsample_1_running_var.pt
  96. BIN
      exported_weights/backbone_regenerated/layer1_0_downsample_1_weight.pt
  97. BIN
      exported_weights/backbone_regenerated/layer1_1_bn1_bias.pt
  98. BIN
      exported_weights/backbone_regenerated/layer1_1_bn1_num_batches_tracked.pt
  99. BIN
      exported_weights/backbone_regenerated/layer1_1_bn1_running_mean.pt
  100. BIN
      exported_weights/backbone_regenerated/layer1_1_bn1_running_var.pt

100
CMakeLists.txt

@ -1,16 +1,23 @@
cmake_minimum_required(VERSION 3.18)
project(cpp_tracker LANGUAGES CXX)
project(ImageSimilarityTracker LANGUAGES CXX CUDA)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_STANDARD_REQUIRED True)
# Look for existing LibTorch installation (for systems with PyTorch already installed)
list(APPEND CMAKE_PREFIX_PATH "/usr/local/libtorch" "$ENV{HOME}/libtorch")
# --- vcpkg integration ---
# If vcpkg.json is present in the root, and CMAKE_TOOLCHAIN_FILE is set (by build.sh or environment)
# to point to vcpkg.cmake, dependencies listed in vcpkg.json (like opencv4) will be automatically
# found and linked by CMake. We don't need explicit find_package(OpenCV) here anymore if vcpkg handles it.
# Find dependencies
# Find LibTorch (should be set by build.sh via Torch_DIR or CMAKE_PREFIX_PATH)
find_package(Torch REQUIRED)
# OpenCV should be provided by vcpkg if vcpkg.json lists it and toolchain is used.
# Remove explicit find_package for OpenCV as vcpkg will provide it.
# find_package(OpenCV REQUIRED)
message(STATUS "Found LibTorch: ${TORCH_LIBRARIES}")
# message(STATUS "Found OpenCV: ${OpenCV_LIBS}") # OpenCV_LIBS will be available if vcpkg succeeds
# Always use CUDA implementation (no CPU fallback)
message(STATUS "Building with CUDA support")
@ -18,16 +25,21 @@ message(STATUS "Building with CUDA support")
# Define source files for the libraries
set(BB_REGRESSOR_SOURCES
cimp/bb_regressor/bb_regressor.cpp
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu
)
set(CLASSIFIER_SOURCES
cimp/classifier/classifier.cpp
)
set(RESNET_SOURCES
cimp/resnet/resnet.cpp
)
# Create static libraries
add_library(bb_regressor STATIC ${BB_REGRESSOR_SOURCES})
add_library(classifier STATIC ${CLASSIFIER_SOURCES})
add_library(resnet STATIC ${RESNET_SOURCES})
# Set include directories
target_include_directories(bb_regressor PUBLIC
@ -36,34 +48,92 @@ target_include_directories(bb_regressor PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src
)
target_include_directories(classifier PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/cimp)
target_include_directories(resnet PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/cimp)
# Link with LibTorch
target_link_libraries(bb_regressor PRIVATE ${TORCH_LIBRARIES})
target_link_libraries(classifier PRIVATE ${TORCH_LIBRARIES})
target_link_libraries(resnet PRIVATE ${TORCH_LIBRARIES})
# Create the demo executable
add_executable(tracking_demo cimp/demo.cpp)
# Link the demo with the libraries
target_link_libraries(tracking_demo PRIVATE bb_regressor classifier ${TORCH_LIBRARIES})
target_link_libraries(tracking_demo PRIVATE bb_regressor classifier resnet ${TORCH_LIBRARIES})
# Create the test models executable
add_executable(test_models test/test_models.cpp)
# Link the test with the libraries
target_link_libraries(test_models PRIVATE bb_regressor classifier ${TORCH_LIBRARIES})
# Link the test_models with the libraries
target_link_libraries(test_models PRIVATE bb_regressor classifier resnet ${TORCH_LIBRARIES})
# Create the test sample generator executable (without dependencies on our libraries)
add_executable(generate_test_samples test/generate_test_samples.cpp)
# Link the test sample generator only with LibTorch
target_link_libraries(generate_test_samples PRIVATE ${TORCH_LIBRARIES})
# add_executable(generate_test_samples ${CMAKE_CURRENT_SOURCE_DIR}/test/generate_test_samples.cpp) # COMMENTED OUT
# target_link_libraries(generate_test_samples PRIVATE ${TORCH_LIBRARIES}) # COMMENTED OUT
# Copy the executable to the binary directory
install(TARGETS tracking_demo DESTINATION bin)
install(TARGETS test_models DESTINATION bin)
install(TARGETS generate_test_samples DESTINATION bin)
# install(TARGETS generate_test_samples DESTINATION bin) # COMMENTED OUT
# Print some info during the build
message(STATUS "LibTorch found at: ${TORCH_INCLUDE_DIRS}")
message(STATUS "Using CUDA-enabled build")
message(STATUS "Using CUDA-enabled build")
# --- Installation ---
# Define where to install the executables and libraries
set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install) # Install locally within the build directory
install(TARGETS test_models # generate_test_samples # COMMENTED OUT
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
)
install(TARGETS resnet classifier bb_regressor
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
)
# Install PyTorch and OpenCV shared libraries that our targets link against.
if(TORCH_LIBRARIES)
if(WIN32)
set(LIBTORCH_DLL_DIR "${Torch_DIR}/../lib")
else()
set(LIBTORCH_DLL_DIR "${Torch_DIR}/../lib") # Usually $HOME/libtorch_version/libtorch/lib
endif()
message(STATUS "Attempting to install Torch shared libs from: ${LIBTORCH_DLL_DIR}")
install(DIRECTORY ${LIBTORCH_DLL_DIR}/
DESTINATION lib
USE_SOURCE_PERMISSIONS
OPTIONAL
FILES_MATCHING PATTERN "*.so*" PATTERN "*.dylib*" PATTERN "*.dll"
PATTERN "c10*.dll" PATTERN "torch_cpu*.dll" PATTERN "torch_cuda*.dll" PATTERN "torch.dll"
PATTERN "cudnn*.dll" # Add cuDNN if it's part of LibTorch distribution
)
else()
message(WARNING "TORCH_LIBRARIES not set, cannot install PyTorch shared libraries.")
endif()
# If using vcpkg, it usually handles its own dependencies' deployment or they are statically linked.
# If OpenCV shared libraries need to be deployed and are not handled by vcpkg install component or not part of LibTorch distribution:
# You might need to explicitly find OpenCV shared library paths and install them if vcpkg doesn't put them in a common place with Torch.
# Example (very dependent on how OpenCV is found/provided):
# if(OpenCV_FOUND AND OpenCV_SHARED_LIBS_DIR) # Assume OpenCV_SHARED_LIBS_DIR is set if needed
# install(DIRECTORY ${OpenCV_SHARED_LIBS_DIR}/
# DESTINATION lib
# FILES_MATCHING PATTERN "*.so*" PATTERN "*.dylib*" PATTERN "*.dll"
# USE_SOURCE_PERMISSIONS
# OPTIONAL)
# endif()
# Enable CTest
enable_testing()
# Define a simple test that runs the test_models executable
# The actual test logic (passing arguments, checking output) is in run_tests.sh
add_test(
NAME RunCppModelTests
COMMAND test_models dummy_arg1 dummy_arg2 dummy_arg3 # Dummy args, real ones from run_tests.sh
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin
)

17
README.md

@ -13,8 +13,8 @@ The project consists of two main components:
- CMake (3.18 or higher)
- C++17 compatible compiler
- LibTorch (PyTorch C++ API)
- **CUDA (required)** - This implementation requires CUDA and does not support CPU-only execution
- LibTorch (PyTorch C++ API) - Currently targeting **1.8.0+cu111**.
- **CUDA (required)** - This implementation requires CUDA. The `build.sh` script targets CUDA **11.1** for compatibility with LibTorch 1.8.0+cu111. If CUDA 11.1 is not found at `/usr/local/cuda-11.1`, the script will warn and use a fallback (e.g., `/usr/local/cuda-11.8` or an existing `CUDA_HOME`), which might lead to compilation or runtime issues if there's a significant mismatch.
## Building the Project
@ -95,4 +95,15 @@ python demo.py
## License
This project is licensed under the MIT License - see the LICENSE file for details.
This project is licensed under the MIT License - see the LICENSE file for details.
## C++ Implementation Details
The C++ version in the `cimp/` directory aims to replicate the core components of the Python tracker.
Key C++ modules:
* **BBRegressor**: Bounding Box Regressor (based on AtomIoUNet).
* **Classifier**: Target Classifier (based on DiMP DIMPNet classifier part).
* **ResNet**: ResNet-50 backbone for feature extraction.
* **Demo Application**: A simple executable in `bin/tracking_demo` to test components.
* **Test Models**: An executable in `bin/test_models` for running comparisons and tests.

BIN
bin/tracking_demo

154
build.sh

@ -1,82 +1,120 @@
#!/bin/bash
export PATH=/usr/local/cuda-11.5/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-11.5/lib64:$LD_LIBRARY_PATH
# Target LibTorch 1.8.0+cu111 to match Python environment
LIBTORCH_VERSION="1.8.0"
LIBTORCH_CUDA_VERSION_SHORT="cu111" # e.g., cu111, cu117, cu118
TARGET_CUDA_VERSION_FOR_LIBTORCH="11.1" # e.g., 11.1, 11.7, 11.8
# --- CUDA Setup ---
# Try to find the exact CUDA version LibTorch was built against
PROPOSED_CUDA_HOME="/usr/local/cuda-${TARGET_CUDA_VERSION_FOR_LIBTORCH}"
if [ -d "${PROPOSED_CUDA_HOME}" ] && [ -x "${PROPOSED_CUDA_HOME}/bin/nvcc" ]; then
echo "Found targeted CUDA version for LibTorch: ${TARGET_CUDA_VERSION_FOR_LIBTORCH} at ${PROPOSED_CUDA_HOME}"
export CUDA_HOME="${PROPOSED_CUDA_HOME}"
else
echo "Warning: Targeted CUDA version ${TARGET_CUDA_VERSION_FOR_LIBTORCH} for LibTorch not found at ${PROPOSED_CUDA_HOME}."
# Fallback to user's specified CUDA_HOME or a default, and warn about potential mismatch
if [ -z "$CUDA_HOME" ]; then # If CUDA_HOME is not already set in the environment
export CUDA_HOME=/usr/local/cuda-11.8 # Default fallback
echo "Warning: Using fallback CUDA_HOME: $CUDA_HOME. This might mismatch LibTorch's CUDA version ${TARGET_CUDA_VERSION_FOR_LIBTORCH}."
else
echo "Warning: Using externally set CUDA_HOME: $CUDA_HOME. Ensure this is compatible with LibTorch for CUDA ${TARGET_CUDA_VERSION_FOR_LIBTORCH}."
fi
# We proceed, but there's a higher chance of issues if nvcc version doesn't align with LibTorch's CUDA build.
fi
export PATH=${CUDA_HOME}/bin:${PATH}
export LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
export CMAKE_CUDA_COMPILER=${CUDA_HOME}/bin/nvcc
# Exit on error
set -e
# Print info
# --- vcpkg setup ---
# Remove explicit VCPKG_INSTALLATION_ROOT.
# The CMAKE_TOOLCHAIN_FILE will be set in the cmake command if vcpkg is part of the project.
# It's often good practice to have vcpkg as a submodule or managed by CMake FetchContent.
echo "Building C++ Tracker"
echo "Using CUDA_HOME: $CUDA_HOME"
echo "Using PATH: $PATH"
echo "Using LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
# Set CUDA environment if needed
CUDA_AVAILABLE=0
if [ -z "$CUDA_HOME" ]; then
if [ -d "/usr/local/cuda" ]; then
export CUDA_HOME=/usr/local/cuda
CUDA_AVAILABLE=1
elif [ -d "/usr/lib/cuda" ]; then
export CUDA_HOME=/usr/lib/cuda
CUDA_AVAILABLE=1
fi
# Verify CUDA version from specified CUDA_HOME
NVCC_PATH="${CUDA_HOME}/bin/nvcc"
if ! [ -x "${NVCC_PATH}" ]; then
echo "Error: NVCC not found at specified path: ${NVCC_PATH}" >&2
echo "Please ensure CUDA_HOME is set correctly and NVCC is executable." >&2
exit 1
fi
DETECTED_CUDA_RUNTIME_VERSION=$(${NVCC_PATH} --version | grep "release" | awk '{print $6}' | cut -c2- | cut -d'.' -f1-2)
echo "Detected CUDA Runtime version (from ${NVCC_PATH}): $DETECTED_CUDA_RUNTIME_VERSION"
# Add CUDA to path if available
if [ $CUDA_AVAILABLE -eq 1 ]; then
echo "CUDA found at $CUDA_HOME"
export PATH=$CUDA_HOME/bin:$PATH
export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH
# Target LibTorch 1.8.0+cu111 to match Python environment
LIBTORCH_VARIANT="cxx11-abi-shared-with-deps" # For shared libraries with dependencies
LIBTORCH_DIR_NAME="libtorch_${LIBTORCH_VERSION}_${LIBTORCH_CUDA_VERSION_SHORT}"
LIBTORCH_INSTALL_PATH="$HOME/${LIBTORCH_DIR_NAME}/libtorch"
LIBTORCH_ZIP_URL="https://download.pytorch.org/libtorch/${LIBTORCH_CUDA_VERSION_SHORT}/libtorch-${LIBTORCH_VARIANT}-${LIBTORCH_VERSION}%2B${LIBTORCH_CUDA_VERSION_SHORT}.zip"
EXPECTED_LIBTORCH_CMAKE_CONFIG="${LIBTORCH_INSTALL_PATH}/share/cmake/Torch/TorchConfig.cmake"
# Determine CUDA version
CUDA_VERSION=$(nvcc --version | grep "release" | awk '{print $6}' | cut -c2- | cut -d'.' -f1-2)
echo "Detected CUDA version: $CUDA_VERSION"
else
echo "CUDA not found. The project requires CUDA to build."
echo "Please install CUDA and try again."
exit 1
fi
echo "Targeting LibTorch ${LIBTORCH_VERSION}+${LIBTORCH_CUDA_VERSION_SHORT}"
# Download and extract LibTorch with appropriate CUDA support if not already present
LIBTORCH_DIR="$HOME/libtorch"
if [ ! -d "$LIBTORCH_DIR" ]; then
echo "Downloading LibTorch..."
# Use a compatible version based on detected CUDA
echo "Downloading CUDA-enabled LibTorch"
if [[ "$CUDA_VERSION" == "11.5" || "$CUDA_VERSION" == "11.6" || "$CUDA_VERSION" == "11.7" ]]; then
LIBTORCH_URL="https://download.pytorch.org/libtorch/cu116/libtorch-cxx11-abi-shared-with-deps-1.13.0%2Bcu116.zip"
elif [[ "$CUDA_VERSION" == "11.3" || "$CUDA_VERSION" == "11.4" ]]; then
LIBTORCH_URL="https://download.pytorch.org/libtorch/cu113/libtorch-cxx11-abi-shared-with-deps-1.12.1%2Bcu113.zip"
if [ ! -f "$EXPECTED_LIBTORCH_CMAKE_CONFIG" ]; then
echo "LibTorch ${LIBTORCH_VERSION}+${LIBTORCH_CUDA_VERSION_SHORT} not found at ${LIBTORCH_INSTALL_PATH}."
TMP_DIR=$(mktemp -d)
echo "Downloading LibTorch ${LIBTORCH_VERSION}+${LIBTORCH_CUDA_VERSION_SHORT}..."
echo "Using LibTorch URL: ${LIBTORCH_ZIP_URL}"
wget -q -O "${TMP_DIR}/libtorch.zip" "${LIBTORCH_ZIP_URL}"
echo "Extracting LibTorch to $HOME/${LIBTORCH_DIR_NAME}..."
mkdir -p "$HOME/${LIBTORCH_DIR_NAME}"
unzip -q "${TMP_DIR}/libtorch.zip" -d "$HOME/${LIBTORCH_DIR_NAME}"
rm -rf "${TMP_DIR}"
if [ -f "$EXPECTED_LIBTORCH_CMAKE_CONFIG" ]; then
echo "LibTorch ${LIBTORCH_VERSION}+${LIBTORCH_CUDA_VERSION_SHORT} extracted to ${LIBTORCH_INSTALL_PATH}"
else
LIBTORCH_URL="https://download.pytorch.org/libtorch/cu118/libtorch-cxx11-abi-shared-with-deps-2.0.0%2Bcu118.zip"
echo "Error: LibTorch extraction failed or TorchConfig.cmake not found at expected location: $EXPECTED_LIBTORCH_CMAKE_CONFIG"
exit 1
fi
wget $LIBTORCH_URL -O libtorch.zip
echo "Extracting LibTorch..."
mkdir -p $HOME
unzip -q libtorch.zip -d $HOME
rm libtorch.zip
echo "LibTorch extracted to $LIBTORCH_DIR"
else
echo "Using existing LibTorch installation at $LIBTORCH_DIR"
echo "Found existing LibTorch ${LIBTORCH_VERSION}+${LIBTORCH_CUDA_VERSION_SHORT} at ${LIBTORCH_INSTALL_PATH}"
fi
# Create build directory
mkdir -p build
cd build
# Set Torch_DIR for CMake
export Torch_DIR="${LIBTORCH_INSTALL_PATH}/share/cmake/Torch"
# Build directory
BUILD_DIR="build"
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
# Create local bin directory
mkdir -p ../bin
# Relative path to vcpkg.cmake from the build directory if vcpkg is a submodule in the project root
# This assumes vcpkg is at ../vcpkg (relative to build dir) or ./vcpkg (relative to project root)
VCPKG_TOOLCHAIN_FILE_PROJECT_SUBMODULE="../vcpkg/scripts/buildsystems/vcpkg.cmake"
VCPKG_TOOLCHAIN_FILE_BUILD_SUBDIR="vcpkg/scripts/buildsystems/vcpkg.cmake" # if vcpkg is cloned into build dir by CMake
CMAKE_TOOLCHAIN_ARG=""
if [ -f "$VCPKG_TOOLCHAIN_FILE_PROJECT_SUBMODULE" ]; then
echo "Using vcpkg toolchain: $VCPKG_TOOLCHAIN_FILE_PROJECT_SUBMODULE (relative to build dir)"
CMAKE_TOOLCHAIN_ARG="-DCMAKE_TOOLCHAIN_FILE=$VCPKG_TOOLCHAIN_FILE_PROJECT_SUBMODULE"
elif [ -f "$VCPKG_TOOLCHAIN_FILE_BUILD_SUBDIR" ]; then # this case is less common for user setup
echo "Using vcpkg toolchain: $VCPKG_TOOLCHAIN_FILE_BUILD_SUBDIR (relative to build dir)"
CMAKE_TOOLCHAIN_ARG="-DCMAKE_TOOLCHAIN_FILE=$VCPKG_TOOLCHAIN_FILE_BUILD_SUBDIR"
else
echo "vcpkg.cmake not found at common project submodule paths. CMake will try to find it via vcpkg.json if vcpkg is globally installed and configured."
# If vcpkg is globally installed and CMAKE_TOOLCHAIN_FILE is set in user's environment, CMake might pick it up.
# Or, if CMake has native vcpkg integration via CMAKE_PROJECT_TOP_LEVEL_INCLUDES with vcpkg.cmake.
fi
# Configure with CMake - always use CUDA, never use CPU_ONLY
echo "Configuring with CMake..."
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=.. -DCPU_ONLY=OFF
# Pass the CMAKE_TOOLCHAIN_FILE to cmake.
# The Torch_DIR is already exported.
cmake .. -DCMAKE_BUILD_TYPE=Release ${CMAKE_TOOLCHAIN_ARG}
# Build the project
echo "Building the project..."
cmake --build . --config Release -j $(nproc)
echo "Building..."
cmake --build . --config Release -- -j$(nproc)
# Install to local directory
echo "Installing to local bin directory..."
cmake --install . --config Release
echo "Build complete."
cd ..
echo "Build complete! Executable is in bin/"

212
build/CMakeCache.txt

@ -15,7 +15,7 @@
########################
//Path to a library.
C10_CUDA_LIBRARY:FILEPATH=/home/mht/libtorch/lib/libc10_cuda.so
C10_CUDA_LIBRARY:FILEPATH=/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10_cuda.so
//Path to a program.
CMAKE_ADDR2LINE:FILEPATH=/usr/bin/addr2line
@ -34,7 +34,7 @@ CMAKE_COLOR_MAKEFILE:BOOL=ON
CMAKE_CUDA_ARCHITECTURES:STRING=52
//CUDA compiler
CMAKE_CUDA_COMPILER:FILEPATH=/usr/bin/nvcc
CMAKE_CUDA_COMPILER:FILEPATH=/usr/local/cuda-11.8/bin/nvcc
//Flags used by the CUDA compiler during all build types.
CMAKE_CUDA_FLAGS:STRING=
@ -99,7 +99,7 @@ CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO:STRING=
CMAKE_EXPORT_COMPILE_COMMANDS:BOOL=
//Install path prefix, prepended onto install directories.
CMAKE_INSTALL_PREFIX:PATH=/media/mht/ADATA/repos/cpp_tracker
CMAKE_INSTALL_PREFIX:PATH=/usr/local
//Path to a program.
CMAKE_LINKER:FILEPATH=/usr/bin/ld
@ -143,7 +143,7 @@ CMAKE_PROJECT_DESCRIPTION:STATIC=
CMAKE_PROJECT_HOMEPAGE_URL:STATIC=
//Value Computed by CMake
CMAKE_PROJECT_NAME:STATIC=cpp_tracker
CMAKE_PROJECT_NAME:STATIC=ImageSimilarityTracker
//Path to a program.
CMAKE_RANLIB:FILEPATH=/usr/bin/ranlib
@ -201,15 +201,15 @@ CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//Path to a program.
CMAKE_STRIP:FILEPATH=/usr/bin/strip
//No help, variable specified on the command line.
CMAKE_TOOLCHAIN_FILE:UNINITIALIZED=../vcpkg/scripts/buildsystems/vcpkg.cmake
//If this value is on, makefiles will be generated without the
// .SILENT directive, and all commands will be echoed to the console
// during the make. This is useful for debugging only. With Visual
// Studio IDE projects all commands are done without /nologo.
CMAKE_VERBOSE_MAKEFILE:BOOL=FALSE
//No help, variable specified on the command line.
CPU_ONLY:UNINITIALIZED=OFF
//Compile device code in 64 bit mode
CUDA_64_BIT_DEVICE_CODE:BOOL=ON
@ -224,10 +224,10 @@ CUDA_BUILD_CUBIN:BOOL=OFF
CUDA_BUILD_EMULATION:BOOL=OFF
//"cudart" library
CUDA_CUDART_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcudart.so
CUDA_CUDART_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcudart.so
//Path to a library.
CUDA_CUDA_LIB:FILEPATH=/usr/lib/x86_64-linux-gnu/libcuda.so
CUDA_CUDA_LIB:FILEPATH=/usr/local/cuda-11.8/lib64/stubs/libcuda.so
//"cuda" library (older versions only).
CUDA_CUDA_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcuda.so
@ -243,7 +243,7 @@ CUDA_HOST_COMPILATION_CPP:BOOL=ON
CUDA_HOST_COMPILER:FILEPATH=
//Path to a program.
CUDA_NVCC_EXECUTABLE:FILEPATH=/usr/bin/nvcc
CUDA_NVCC_EXECUTABLE:FILEPATH=/usr/local/cuda-11.8/bin/nvcc
//Semi-colon delimit multiple arguments. during all build types.
CUDA_NVCC_FLAGS:STRING=
@ -262,7 +262,7 @@ CUDA_NVCC_FLAGS_RELEASE:STRING=
CUDA_NVCC_FLAGS_RELWITHDEBINFO:STRING=
//Path to a library.
CUDA_NVRTC_LIB:FILEPATH=/usr/lib/x86_64-linux-gnu/libnvrtc.so
CUDA_NVRTC_LIB:FILEPATH=/usr/local/cuda-11.8/lib64/libnvrtc.so
//Propagate C/CXX_FLAGS and friends to the host compiler via -Xcompile
CUDA_PROPAGATE_HOST_FLAGS:BOOL=ON
@ -278,10 +278,10 @@ CUDA_SDK_ROOT_DIR:PATH=CUDA_SDK_ROOT_DIR-NOTFOUND
CUDA_SEPARABLE_COMPILATION:BOOL=OFF
//Path to a file.
CUDA_TOOLKIT_INCLUDE:PATH=/usr/include
CUDA_TOOLKIT_INCLUDE:PATH=/usr/local/cuda-11.8/include
//Toolkit location.
CUDA_TOOLKIT_ROOT_DIR:PATH=/usr
CUDA_TOOLKIT_ROOT_DIR:PATH=/usr/local/cuda-11.8
//Print out the commands run while compiling the CUDA source file.
// With the Makefile generator this defaults to VERBOSE variable
@ -290,76 +290,100 @@ CUDA_TOOLKIT_ROOT_DIR:PATH=/usr
CUDA_VERBOSE_BUILD:BOOL=OFF
//Version of CUDA as computed from nvcc.
CUDA_VERSION:STRING=11.5
//"cublasLt" library
CUDA_cublasLt_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcublasLt.so
CUDA_VERSION:STRING=11.8
//"cublas" library
CUDA_cublas_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcublas.so
CUDA_cublas_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcublas.so
//"cudadevrt" library
CUDA_cudadevrt_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcudadevrt.a
CUDA_cudadevrt_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcudadevrt.a
//static CUDA runtime library
CUDA_cudart_static_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcudart_static.a
CUDA_cudart_static_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcudart_static.a
//"cufft" library
CUDA_cufft_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcufft.so
CUDA_cufft_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcufft.so
//"cupti" library
CUDA_cupti_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcupti.so
CUDA_cupti_LIBRARY:FILEPATH=CUDA_cupti_LIBRARY-NOTFOUND
//"curand" library
CUDA_curand_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcurand.so
CUDA_curand_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcurand.so
//"cusolver" library
CUDA_cusolver_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcusolver.so
CUDA_cusolver_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcusolver.so
//"cusparse" library
CUDA_cusparse_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcusparse.so
CUDA_cusparse_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libcusparse.so
//"nppc" library
CUDA_nppc_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppc.so
CUDA_nppc_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppc.so
//"nppial" library
CUDA_nppial_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppial.so
CUDA_nppial_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppial.so
//"nppicc" library
CUDA_nppicc_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppicc.so
CUDA_nppicc_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppicc.so
//"nppicom" library
CUDA_nppicom_LIBRARY:FILEPATH=CUDA_nppicom_LIBRARY-NOTFOUND
//"nppidei" library
CUDA_nppidei_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppidei.so
CUDA_nppidei_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppidei.so
//"nppif" library
CUDA_nppif_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppif.so
CUDA_nppif_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppif.so
//"nppig" library
CUDA_nppig_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppig.so
CUDA_nppig_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppig.so
//"nppim" library
CUDA_nppim_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppim.so
CUDA_nppim_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppim.so
//"nppist" library
CUDA_nppist_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppist.so
CUDA_nppist_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppist.so
//"nppisu" library
CUDA_nppisu_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppisu.so
CUDA_nppisu_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppisu.so
//"nppitc" library
CUDA_nppitc_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnppitc.so
CUDA_nppitc_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnppitc.so
//"npps" library
CUDA_npps_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libnpps.so
CUDA_npps_LIBRARY:FILEPATH=/usr/local/cuda-11.8/lib64/libnpps.so
//Folder containing NVIDIA cuDNN header files
CUDNN_INCLUDE_DIR:PATH=
//Path to a file.
CUDNN_INCLUDE_PATH:PATH=/usr/include
//Path to the cudnn library file (e.g., libcudnn.so)
CUDNN_LIBRARY:PATH=
//Path to a library.
CUDNN_LIBRARY_PATH:FILEPATH=/usr/lib/x86_64-linux-gnu/libcudnn.so
//Folder containing NVIDIA cuDNN
CUDNN_ROOT:PATH=
//Look for static CUDNN
CUDNN_STATIC:BOOL=OFF
//The directory containing a CMake configuration file for Caffe2.
Caffe2_DIR:PATH=/home/mht/libtorch/share/cmake/Caffe2
Caffe2_DIR:PATH=/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2
//Value Computed by CMake
ImageSimilarityTracker_BINARY_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker/build
//Value Computed by CMake
ImageSimilarityTracker_IS_TOP_LEVEL:STATIC=ON
//Value Computed by CMake
ImageSimilarityTracker_SOURCE_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker
//Path to a library.
LIBNVTOOLSEXT:FILEPATH=/usr/lib/x86_64-linux-gnu/libnvToolsExt.so
LIBNVTOOLSEXT:FILEPATH=/usr/local/cuda-11.8/lib64/libnvToolsExt.so
//The directory containing a CMake configuration file for MKLDNN.
MKLDNN_DIR:PATH=MKLDNN_DIR-NOTFOUND
@ -368,13 +392,78 @@ MKLDNN_DIR:PATH=MKLDNN_DIR-NOTFOUND
MKL_DIR:PATH=MKL_DIR-NOTFOUND
//Path to a library.
TORCH_LIBRARY:FILEPATH=/home/mht/libtorch/lib/libtorch.so
TORCH_LIBRARY:FILEPATH=/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch.so
//The directory containing a CMake configuration file for Torch.
Torch_DIR:PATH=/home/mht/libtorch/share/cmake/Torch
Torch_DIR:PATH=/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Torch
//Automatically copy dependencies into the output directory for
// executables.
VCPKG_APPLOCAL_DEPS:BOOL=ON
//Additional options to bootstrap vcpkg
VCPKG_BOOTSTRAP_OPTIONS:STRING=
//The directory which contains the installed libraries for each
// triplet
VCPKG_INSTALLED_DIR:PATH=/media/mht/ADATA/repos/cpp_tracker/build/vcpkg_installed
//Additional install options to pass to vcpkg
VCPKG_INSTALL_OPTIONS:STRING=
//The path to the vcpkg manifest directory.
VCPKG_MANIFEST_DIR:PATH=/media/mht/ADATA/repos/cpp_tracker
//Install the dependencies listed in your manifest:
//\n If this is off, you will have to manually install your dependencies.
//\n See https://github.com/microsoft/vcpkg/tree/master/docs/specifications/manifests.md
// for more info.
//\n
VCPKG_MANIFEST_INSTALL:BOOL=ON
//Use manifest mode, as opposed to classic mode.
VCPKG_MANIFEST_MODE:BOOL=ON
//Overlay ports to use for vcpkg install in manifest mode
VCPKG_OVERLAY_PORTS:STRING=
//Overlay triplets to use for vcpkg install in manifest mode
VCPKG_OVERLAY_TRIPLETS:STRING=
//Appends the vcpkg paths to CMAKE_PREFIX_PATH, CMAKE_LIBRARY_PATH
// and CMAKE_FIND_ROOT_PATH so that vcpkg libraries/packages are
// found after toolchain/system libraries/packages.
VCPKG_PREFER_SYSTEM_LIBS:BOOL=OFF
//Enable the setup of CMAKE_PROGRAM_PATH to vcpkg paths
VCPKG_SETUP_CMAKE_PROGRAM_PATH:BOOL=ON
//Vcpkg target triplet (ex. x86-windows)
VCPKG_TARGET_TRIPLET:STRING=x64-linux
//Trace calls to find_package()
VCPKG_TRACE_FIND_PACKAGE:BOOL=OFF
//Enables messages from the VCPKG toolchain for debugging purposes.
VCPKG_VERBOSE:BOOL=OFF
//(experimental) Automatically copy dependencies into the install
// target directory for executables. Requires CMake 3.14.
X_VCPKG_APPLOCAL_DEPS_INSTALL:BOOL=OFF
//(experimental) Add USES_TERMINAL to VCPKG_APPLOCAL_DEPS to force
// serialization.
X_VCPKG_APPLOCAL_DEPS_SERIALIZED:BOOL=OFF
//Path to a program.
Z_VCPKG_CL:FILEPATH=Z_VCPKG_CL-NOTFOUND
//The directory which contains the installed libraries for each
// triplet
_VCPKG_INSTALLED_DIR:PATH=/media/mht/ADATA/repos/cpp_tracker/build/vcpkg_installed
//Path to a library.
c10_LIBRARY:FILEPATH=/home/mht/libtorch/lib/libc10.so
c10_LIBRARY:FILEPATH=/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10.so
//Value Computed by CMake
cpp_tracker_BINARY_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker/build
@ -385,9 +474,6 @@ cpp_tracker_IS_TOP_LEVEL:STATIC=ON
//Value Computed by CMake
cpp_tracker_SOURCE_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker
//Path to a library.
kineto_LIBRARY:FILEPATH=/home/mht/libtorch/lib/libkineto.a
########################
# INTERNAL cache entries
@ -467,6 +553,10 @@ CMAKE_GENERATOR_INSTANCE:INTERNAL=
CMAKE_GENERATOR_PLATFORM:INTERNAL=
//Name of generator toolset.
CMAKE_GENERATOR_TOOLSET:INTERNAL=
//Test CMAKE_HAVE_LIBC_PTHREAD
CMAKE_HAVE_LIBC_PTHREAD:INTERNAL=1
//Have include pthread.h
CMAKE_HAVE_PTHREAD_H:INTERNAL=1
//Source directory with the top level CMakeLists.txt file for this
// project
CMAKE_HOME_DIRECTORY:INTERNAL=/media/mht/ADATA/repos/cpp_tracker
@ -528,6 +618,8 @@ CMAKE_STATIC_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STRIP
CMAKE_STRIP-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_TOOLCHAIN_FILE
CMAKE_TOOLCHAIN_FILE-ADVANCED:INTERNAL=1
//uname command
CMAKE_UNAME:INTERNAL=/usr/bin/uname
//ADVANCED property for variable: CMAKE_VERBOSE_MAKEFILE
@ -575,18 +667,16 @@ CUDA_SEPARABLE_COMPILATION-ADVANCED:INTERNAL=1
CUDA_TOOLKIT_INCLUDE-ADVANCED:INTERNAL=1
//This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was
// set successfully.
CUDA_TOOLKIT_ROOT_DIR_INTERNAL:INTERNAL=/usr
CUDA_TOOLKIT_ROOT_DIR_INTERNAL:INTERNAL=/usr/local/cuda-11.8
//This is the value of the last time CUDA_TOOLKIT_TARGET_DIR was
// set successfully.
CUDA_TOOLKIT_TARGET_DIR_INTERNAL:INTERNAL=/usr
CUDA_TOOLKIT_TARGET_DIR_INTERNAL:INTERNAL=/usr/local/cuda-11.8
//Use the static version of the CUDA runtime library if available
CUDA_USE_STATIC_CUDA_RUNTIME:INTERNAL=OFF
//ADVANCED property for variable: CUDA_VERBOSE_BUILD
CUDA_VERBOSE_BUILD-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDA_VERSION
CUDA_VERSION-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDA_cublasLt_LIBRARY
CUDA_cublasLt_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDA_cublas_LIBRARY
CUDA_cublas_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDA_cudadevrt_LIBRARY
@ -604,7 +694,7 @@ CUDA_cusolver_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDA_cusparse_LIBRARY
CUDA_cusparse_LIBRARY-ADVANCED:INTERNAL=1
//Location of make2cmake.cmake
CUDA_make2cmake:INTERNAL=/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake
CUDA_make2cmake:INTERNAL=/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake
//ADVANCED property for variable: CUDA_nppc_LIBRARY
CUDA_nppc_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDA_nppial_LIBRARY
@ -630,13 +720,29 @@ CUDA_nppitc_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDA_npps_LIBRARY
CUDA_npps_LIBRARY-ADVANCED:INTERNAL=1
//Location of parse_cubin.cmake
CUDA_parse_cubin:INTERNAL=/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake
CUDA_parse_cubin:INTERNAL=/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake
//Location of run_nvcc.cmake
CUDA_run_nvcc:INTERNAL=/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake
CUDA_run_nvcc:INTERNAL=/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake
//ADVANCED property for variable: CUDNN_INCLUDE_DIR
CUDNN_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDNN_LIBRARY
CUDNN_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CUDNN_ROOT
CUDNN_ROOT-ADVANCED:INTERNAL=1
//Details about finding CUDA
FIND_PACKAGE_MESSAGE_DETAILS_CUDA:INTERNAL=[/usr][/usr/bin/nvcc][/usr/include][/usr/lib/x86_64-linux-gnu/libcudart.so][v11.5()]
FIND_PACKAGE_MESSAGE_DETAILS_CUDA:INTERNAL=[/usr/local/cuda-11.8][/usr/local/cuda-11.8/bin/nvcc][/usr/local/cuda-11.8/include][/usr/local/cuda-11.8/lib64/libcudart.so][v11.8()]
//Details about finding CUDNN
FIND_PACKAGE_MESSAGE_DETAILS_CUDNN:INTERNAL=[/usr/lib/x86_64-linux-gnu/libcudnn.so][/usr/include][v()]
//Details about finding Threads
FIND_PACKAGE_MESSAGE_DETAILS_Threads:INTERNAL=[TRUE][v()]
//Details about finding Torch
FIND_PACKAGE_MESSAGE_DETAILS_Torch:INTERNAL=[/home/mht/libtorch/lib/libtorch.so][/home/mht/libtorch/include;/home/mht/libtorch/include/torch/csrc/api/include][v()]
FIND_PACKAGE_MESSAGE_DETAILS_Torch:INTERNAL=[/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch.so][/home/mht/libtorch_1.8.0_cu111/libtorch/include;/home/mht/libtorch_1.8.0_cu111/libtorch/include/torch/csrc/api/include][v()]
//ADVANCED property for variable: VCPKG_VERBOSE
VCPKG_VERBOSE-ADVANCED:INTERNAL=1
//Making sure VCPKG_MANIFEST_MODE doesn't change
Z_VCPKG_CHECK_MANIFEST_MODE:INTERNAL=ON
//Vcpkg root directory
Z_VCPKG_ROOT_DIR:INTERNAL=/media/mht/ADATA/repos/cpp_tracker/vcpkg
//Result of TRY_COMPILE
compile_result:INTERNAL=TRUE
//Result of TRY_RUN

22
build/CMakeFiles/3.22.1/CMakeCUDACompiler.cmake

@ -1,10 +1,10 @@
set(CMAKE_CUDA_COMPILER "/usr/bin/nvcc")
set(CMAKE_CUDA_COMPILER "/usr/local/cuda-11.8/bin/nvcc")
set(CMAKE_CUDA_HOST_COMPILER "")
set(CMAKE_CUDA_HOST_LINK_LAUNCHER "/usr/lib/nvidia-cuda-toolkit/bin/g++")
set(CMAKE_CUDA_HOST_LINK_LAUNCHER "/usr/bin/g++")
set(CMAKE_CUDA_COMPILER_ID "NVIDIA")
set(CMAKE_CUDA_COMPILER_VERSION "11.5.119")
set(CMAKE_CUDA_DEVICE_LINKER "/usr/bin/nvlink")
set(CMAKE_CUDA_FATBINARY "/usr/bin/fatbinary")
set(CMAKE_CUDA_COMPILER_VERSION "11.8.89")
set(CMAKE_CUDA_DEVICE_LINKER "/usr/local/cuda-11.8/bin/nvlink")
set(CMAKE_CUDA_FATBINARY "/usr/local/cuda-11.8/bin/fatbinary")
set(CMAKE_CUDA_STANDARD_COMPUTED_DEFAULT "17")
set(CMAKE_CUDA_EXTENSIONS_COMPUTED_DEFAULT "ON")
set(CMAKE_CUDA_COMPILE_FEATURES "cuda_std_03;cuda_std_11;cuda_std_14;cuda_std_17")
@ -48,19 +48,19 @@ if(CMAKE_CUDA_LIBRARY_ARCHITECTURE)
set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu")
endif()
set(CMAKE_CUDA_COMPILER_TOOLKIT_ROOT "/usr")
set(CMAKE_CUDA_COMPILER_TOOLKIT_LIBRARY_ROOT "/usr/lib/nvidia-cuda-toolkit")
set(CMAKE_CUDA_COMPILER_LIBRARY_ROOT "/usr/lib/cuda")
set(CMAKE_CUDA_COMPILER_TOOLKIT_ROOT "/usr/local/cuda-11.8")
set(CMAKE_CUDA_COMPILER_TOOLKIT_LIBRARY_ROOT "/usr/local/cuda-11.8")
set(CMAKE_CUDA_COMPILER_LIBRARY_ROOT "/usr/local/cuda-11.8")
set(CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES "")
set(CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES "/usr/local/cuda-11.8/targets/x86_64-linux/include")
set(CMAKE_CUDA_HOST_IMPLICIT_LINK_LIBRARIES "")
set(CMAKE_CUDA_HOST_IMPLICIT_LINK_DIRECTORIES "/usr/lib/x86_64-linux-gnu/stubs;/usr/lib/x86_64-linux-gnu")
set(CMAKE_CUDA_HOST_IMPLICIT_LINK_DIRECTORIES "/usr/local/cuda-11.8/targets/x86_64-linux/lib/stubs;/usr/local/cuda-11.8/targets/x86_64-linux/lib")
set(CMAKE_CUDA_HOST_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "")
set(CMAKE_CUDA_IMPLICIT_INCLUDE_DIRECTORIES "/usr/include/c++/11;/usr/include/x86_64-linux-gnu/c++/11;/usr/include/c++/11/backward;/usr/lib/gcc/x86_64-linux-gnu/11/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include")
set(CMAKE_CUDA_IMPLICIT_LINK_LIBRARIES "stdc++;m;gcc_s;gcc;c;gcc_s;gcc")
set(CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES "/usr/lib/x86_64-linux-gnu/stubs;/usr/lib/x86_64-linux-gnu;/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib;/lib/x86_64-linux-gnu;/lib")
set(CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES "/usr/local/cuda-11.8/targets/x86_64-linux/lib/stubs;/usr/local/cuda-11.8/targets/x86_64-linux/lib;/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib")
set(CMAKE_CUDA_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "")
set(CMAKE_CUDA_RUNTIME_LIBRARY_DEFAULT "STATIC")

BIN
build/CMakeFiles/3.22.1/CMakeDetermineCompilerABI_CUDA.bin

2
build/CMakeFiles/3.22.1/CMakeSystem.cmake

@ -3,7 +3,7 @@ set(CMAKE_HOST_SYSTEM_NAME "Linux")
set(CMAKE_HOST_SYSTEM_VERSION "6.8.0-59-generic")
set(CMAKE_HOST_SYSTEM_PROCESSOR "x86_64")
include("/media/mht/ADATA/repos/cpp_tracker/vcpkg/scripts/buildsystems/vcpkg.cmake")
set(CMAKE_SYSTEM "Linux-6.8.0-59-generic")
set(CMAKE_SYSTEM_NAME "Linux")

BIN
build/CMakeFiles/3.22.1/CompilerIdCUDA/a.out

4246
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cpp1.ii
File diff suppressed because it is too large
View File

4120
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cpp4.ii
File diff suppressed because it is too large
View File

6
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cudafe1.c

@ -796,12 +796,14 @@ extern __attribute__((__weak__)) /* COMDAT group: _ZNSt14numeric_limitsIeE15tin
extern __attribute__((__weak__)) /* COMDAT group: _ZNSt14numeric_limitsIeE11round_styleE */ const enum _ZSt17float_round_style _ZNSt14numeric_limitsIeE11round_styleE __attribute__((visibility("default")));
# 83 "/usr/include/c++/11/bits/stl_pair.h" 3
extern __attribute__((__weak__)) /* COMDAT group: _ZSt19piecewise_construct */ const struct _ZSt21piecewise_construct_t _ZSt19piecewise_construct __attribute__((visibility("default")));
# 356 "/usr/include/c++/11/utility" 3
extern __attribute__((__weak__)) /* COMDAT group: _ZSt8in_place */ const struct _ZSt10in_place_t _ZSt8in_place __attribute__((visibility("default")));
# 64 "CMakeCUDACompilerId.cu"
const char *info_compiler = ((const char *)"INFO:compiler[NVIDIA]");
const char *info_simulate = ((const char *)"INFO:simulate[GNU]");
# 336 "CMakeCUDACompilerId.cu"
static const char info_version[50] = {((char)73),((char)78),((char)70),((char)79),((char)58),((char)99),((char)111),((char)109),((char)112),((char)105),((char)108),((char)101),((char)114),((char)95),((char)118),((char)101),((char)114),((char)115),((char)105),((char)111),((char)110),((char)91),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)49),((char)49),((char)46),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)53),((char)46),((char)48),((char)48),((char)48),((char)48),((char)48),((char)49),((char)49),((char)57),((char)93),((char)0)};
static const char info_version[50] = {((char)73),((char)78),((char)70),((char)79),((char)58),((char)99),((char)111),((char)109),((char)112),((char)105),((char)108),((char)101),((char)114),((char)95),((char)118),((char)101),((char)114),((char)115),((char)105),((char)111),((char)110),((char)91),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)49),((char)49),((char)46),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)56),((char)46),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)56),((char)57),((char)93),((char)0)};
# 365 "CMakeCUDACompilerId.cu"
static const char info_simulate_version[41] = {((char)73),((char)78),((char)70),((char)79),((char)58),((char)115),((char)105),((char)109),((char)117),((char)108),((char)97),((char)116),((char)101),((char)95),((char)118),((char)101),((char)114),((char)115),((char)105),((char)111),((char)110),((char)91),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)49),((char)49),((char)46),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)48),((char)52),((char)93),((char)0)};
# 385 "CMakeCUDACompilerId.cu"
@ -1593,3 +1595,5 @@ const char *info_language_extensions_default = ((const char *)"INFO:extensions_d
__attribute__((__weak__)) /* COMDAT group: _ZNSt14numeric_limitsIeE11round_styleE */ const enum _ZSt17float_round_style _ZNSt14numeric_limitsIeE11round_styleE __attribute__((visibility("default"))) = _ZSt16round_to_nearest;
# 83 "/usr/include/c++/11/bits/stl_pair.h" 3
__attribute__((__weak__)) /* COMDAT group: _ZSt19piecewise_construct */ const struct _ZSt21piecewise_construct_t _ZSt19piecewise_construct __attribute__((visibility("default"))) = {};
# 356 "/usr/include/c++/11/utility" 3
__attribute__((__weak__)) /* COMDAT group: _ZSt8in_place */ const struct _ZSt10in_place_t _ZSt8in_place __attribute__((visibility("default"))) = {};

7546
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cudafe1.cpp
File diff suppressed because it is too large
View File

4
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.cudafe1.gpu

@ -65,6 +65,8 @@ struct _ZSt14numeric_limitsIfE;
struct _ZSt14numeric_limitsIdE;
# 1818 "/usr/include/c++/11/limits" 3
struct _ZSt14numeric_limitsIeE;
# 352 "/usr/include/c++/11/utility" 3
struct _ZSt10in_place_t;
# 209 "/usr/lib/gcc/x86_64-linux-gnu/11/include/stddef.h" 3
typedef unsigned long size_t;
#include "crt/device_runtime.h"
@ -116,6 +118,8 @@ struct _ZSt14numeric_limitsIfE {};
struct _ZSt14numeric_limitsIdE {};
# 1818 "/usr/include/c++/11/limits" 3
struct _ZSt14numeric_limitsIeE {};
# 352 "/usr/include/c++/11/utility" 3
struct _ZSt10in_place_t {};
#if !defined(__CUDABE__)
#endif
#if !defined(__CUDABE__)

BIN
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.fatbin

38
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.fatbin.c

@ -7,11 +7,11 @@ asm(
".section .nv_fatbin, \"a\"\n"
".align 8\n"
"fatbinData:\n"
".quad 0x00100001ba55ed50,0x00000000000003d0,0x0000004001010002,0x0000000000000310\n"
".quad 0x00100001ba55ed50,0x0000000000000448,0x0000004001010002,0x0000000000000388\n"
".quad 0x0000000000000000,0x0000003400010007,0x0000000000000000,0x0000000000000011\n"
".quad 0x0000000000000000,0x0000000000000000,0x33010102464c457f,0x0000000000000007\n"
".quad 0x0000007300be0002,0x0000000000000000,0x0000000000000000,0x00000000000001d0\n"
".quad 0x0000004000340534,0x0001000500400000,0x7472747368732e00,0x747274732e006261\n"
".quad 0x0000007600be0002,0x0000000000000000,0x0000000000000318,0x00000000000001d8\n"
".quad 0x0038004000340534,0x0001000500400002,0x7472747368732e00,0x747274732e006261\n"
".quad 0x746d79732e006261,0x746d79732e006261,0x78646e68735f6261,0x666e692e766e2e00\n"
".quad 0x65722e766e2e006f,0x6e6f697463612e6c,0x72747368732e0000,0x7274732e00626174\n"
".quad 0x6d79732e00626174,0x6d79732e00626174,0x646e68735f626174,0x6e692e766e2e0078\n"
@ -24,25 +24,29 @@ asm(
".quad 0x0000000008280001,0x0000000008300001,0x0000000008380001,0x0000000008000002\n"
".quad 0x0000000008080002,0x0000000008100002,0x0000000008180002,0x0000000008200002\n"
".quad 0x0000000008280002,0x0000000008300002,0x0000000008380002,0x0000002c14000000\n"
".quad 0x000000000c000003,0x0000000000000000,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000000,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000000,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000300000001,0x0000000000000000,0x0000000000000000,0x0000000000000040\n"
".quad 0x0000000000000041,0x0000000000000000,0x0000000000000001,0x0000000000000000\n"
".quad 0x000000030000000b,0x0000000000000000,0x0000000000000000,0x0000000000000081\n"
".quad 0x0000000000000041,0x0000000000000000,0x0000000000000001,0x0000000000000000\n"
".quad 0x0000000200000013,0x0000000000000000,0x0000000000000000,0x00000000000000c8\n"
".quad 0x0000000000000030,0x0000000200000002,0x0000000000000008,0x0000000000000018\n"
".quad 0x7000000b00000032,0x0000000000000000,0x0000000000000000,0x00000000000000f8\n"
".quad 0x00000000000000d8,0x0000000000000000,0x0000000000000008,0x0000000000000008\n"
".quad 0x0000004801010001,0x0000000000000038,0x0000004000000036,0x0000003400070005\n"
".quad 0x0000000000000000,0x0000000000002011,0x0000000000000000,0x0000000000000038\n"
".quad 0x0000000000000000,0x762e21f000010a13,0x37206e6f69737265,0x677261742e0a352e\n"
".quad 0x32355f6d73207465,0x7365726464612e0a,0x3620657a69735f73,0x0000000a0a0a0a34\n"
".quad 0x0000000000000000,0x0000000300000001,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000040,0x0000000000000041,0x0000000000000000,0x0000000000000001\n"
".quad 0x0000000000000000,0x000000030000000b,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000081,0x0000000000000041,0x0000000000000000,0x0000000000000001\n"
".quad 0x0000000000000000,0x0000000200000013,0x0000000000000000,0x0000000000000000\n"
".quad 0x00000000000000c8,0x0000000000000030,0x0000000200000002,0x0000000000000008\n"
".quad 0x0000000000000018,0x7000000b00000032,0x0000000000000000,0x0000000000000000\n"
".quad 0x00000000000000f8,0x00000000000000e0,0x0000000000000000,0x0000000000000008\n"
".quad 0x0000000000000008,0x0000000500000006,0x0000000000000318,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000070,0x0000000000000070,0x0000000000000008\n"
".quad 0x0000000500000001,0x0000000000000318,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000070,0x0000000000000070,0x0000000000000008,0x0000004801010001\n"
".quad 0x0000000000000038,0x0000004000000036,0x0000003400070008,0x0000000000000000\n"
".quad 0x0000000000002011,0x0000000000000000,0x0000000000000038,0x0000000000000000\n"
".quad 0x762e21f000010a13,0x37206e6f69737265,0x677261742e0a382e,0x32355f6d73207465\n"
".quad 0x7365726464612e0a, 0x3620657a69735f73, 0x0000000a0a0a0a34\n"
".text\n");
#ifdef __cplusplus
extern "C" {
#endif
extern const unsigned long long fatbinData[124];
extern const unsigned long long fatbinData[139];
#ifdef __cplusplus
}
#endif

BIN
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.o

6
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.ptx

@ -1,12 +1,12 @@
//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-30672275
// Cuda compilation tools, release 11.5, V11.5.119
// Compiler Build ID: CL-31833905
// Cuda compilation tools, release 11.8, V11.8.89
// Based on NVVM 7.0.1
//
.version 7.5
.version 7.8
.target sm_52
.address_size 64

BIN
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/CMakeCUDACompilerId.sm_52.cubin

BIN
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.fatbin

57
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.fatbin.c

@ -7,38 +7,47 @@ asm(
".section .nv_fatbin, \"a\"\n"
".align 8\n"
"fatbinData:\n"
".quad 0x00100001ba55ed50,0x0000000000000350,0x0000004001010002,0x0000000000000310\n"
".quad 0x00100001ba55ed50,0x0000000000000470,0x0000004001010002,0x0000000000000430\n"
".quad 0x0000000000000000,0x0000003400010007,0x0000000000000000,0x0000000000000011\n"
".quad 0x0000000000000000,0x0000000000000000,0x33010102464c457f,0x0000000000000007\n"
".quad 0x0000007300be0002,0x0000000000000000,0x0000000000000000,0x00000000000001d0\n"
".quad 0x0000004000340534,0x0001000500400000,0x7472747368732e00,0x747274732e006261\n"
".quad 0x0000007600be0002,0x0000000000000000,0x00000000000003c0,0x0000000000000240\n"
".quad 0x0038004000340534,0x0001000600400002,0x7472747368732e00,0x747274732e006261\n"
".quad 0x746d79732e006261,0x746d79732e006261,0x78646e68735f6261,0x666e692e766e2e00\n"
".quad 0x65722e766e2e006f,0x6e6f697463612e6c,0x72747368732e0000,0x7274732e00626174\n"
".quad 0x6d79732e00626174,0x6d79732e00626174,0x646e68735f626174,0x6e692e766e2e0078\n"
".quad 0x722e766e2e006f66,0x6f697463612e6c65,0x000000000000006e,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000000,0x0004000300000032,0x0000000000000000\n"
".quad 0x0000000000000000,0x000000000000004b,0x222f0a1008020200,0x0000000008000000\n"
".quad 0x0000000008080000,0x0000000008100000,0x0000000008180000,0x0000000008200000\n"
".quad 0x0000000008280000,0x0000000008300000,0x0000000008380000,0x0000000008000001\n"
".quad 0x0000000008080001,0x0000000008100001,0x0000000008180001,0x0000000008200001\n"
".quad 0x0000000008280001,0x0000000008300001,0x0000000008380001,0x0000000008000002\n"
".quad 0x0000000008080002,0x0000000008100002,0x0000000008180002,0x0000000008200002\n"
".quad 0x0000000008280002,0x0000000008300002,0x0000000008380002,0x0000002c14000000\n"
".quad 0x61632e766e2e006f,0x0068706172676c6c,0x746f72702e766e2e,0x6e2e00657079746f\n"
".quad 0x63612e6c65722e76,0x732e00006e6f6974,0x0062617472747368,0x006261747274732e\n"
".quad 0x006261746d79732e,0x5f6261746d79732e,0x6e2e0078646e6873,0x2e006f666e692e76\n"
".quad 0x676c6c61632e766e,0x766e2e0068706172,0x79746f746f72702e,0x722e766e2e006570\n"
".quad 0x6f697463612e6c65,0x000000000000006e,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000000,0x0004000300000032,0x0000000000000000,0x0000000000000000\n"
".quad 0x000500030000004e,0x0000000000000000,0x0000000000000000,0xffffffff00000000\n"
".quad 0xfffffffe00000000,0xfffffffd00000000,0x000000000000004b,0x222f0a1008020200\n"
".quad 0x0000000008000000,0x0000000008080000,0x0000000008100000,0x0000000008180000\n"
".quad 0x0000000008200000,0x0000000008280000,0x0000000008300000,0x0000000008380000\n"
".quad 0x0000000008000001,0x0000000008080001,0x0000000008100001,0x0000000008180001\n"
".quad 0x0000000008200001,0x0000000008280001,0x0000000008300001,0x0000000008380001\n"
".quad 0x0000000008000002,0x0000000008080002,0x0000000008100002,0x0000000008180002\n"
".quad 0x0000000008200002,0x0000000008280002,0x0000000008300002,0x0000000008380002\n"
".quad 0x0000002c14000000,0x000000000c000003,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000000,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000000,0x0000000000000000,0x0000000000000000\n"
".quad 0x0000000300000001,0x0000000000000000,0x0000000000000000,0x0000000000000040\n"
".quad 0x0000000000000041,0x0000000000000000,0x0000000000000001,0x0000000000000000\n"
".quad 0x000000030000000b,0x0000000000000000,0x0000000000000000,0x0000000000000081\n"
".quad 0x0000000000000041,0x0000000000000000,0x0000000000000001,0x0000000000000000\n"
".quad 0x0000000200000013,0x0000000000000000,0x0000000000000000,0x00000000000000c8\n"
".quad 0x0000000000000030,0x0000000200000002,0x0000000000000008,0x0000000000000018\n"
".quad 0x7000000b00000032,0x0000000000000000,0x0000000000000000,0x00000000000000f8\n"
".quad 0x00000000000000d8,0x0000000000000000,0x0000000000000008,0x0000000000000008\n"
".quad 0x0000000000000000,0x0000000000000000,0x0000000300000001,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000040,0x000000000000005d,0x0000000000000000\n"
".quad 0x0000000000000001,0x0000000000000000,0x000000030000000b,0x0000000000000000\n"
".quad 0x0000000000000000,0x000000000000009d,0x000000000000005d,0x0000000000000000\n"
".quad 0x0000000000000001,0x0000000000000000,0x0000000200000013,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000100,0x0000000000000048,0x0000000300000002\n"
".quad 0x0000000000000008,0x0000000000000018,0x7000000100000032,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000148,0x0000000000000018,0x0000000000000003\n"
".quad 0x0000000000000004,0x0000000000000008,0x7000000b0000004e,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000160,0x00000000000000e0,0x0000000000000000\n"
".quad 0x0000000000000008,0x0000000000000008,0x0000000500000006,0x00000000000003c0\n"
".quad 0x0000000000000000,0x0000000000000000,0x0000000000000070,0x0000000000000070\n"
".quad 0x0000000000000008,0x0000000500000001,0x00000000000003c0,0x0000000000000000\n"
".quad 0x0000000000000000,0x0000000000000070,0x0000000000000070,0x0000000000000008\n"
".text\n");
#ifdef __cplusplus
extern "C" {
#endif
extern const unsigned long long fatbinData[108];
extern const unsigned long long fatbinData[144];
#ifdef __cplusplus
}
#endif

BIN
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.o

BIN
build/CMakeFiles/3.22.1/CompilerIdCUDA/tmp/a_dlink.sm_52.cubin

1156
build/CMakeFiles/CMakeOutput.log
File diff suppressed because it is too large
View File

44
build/CMakeFiles/Makefile.cmake

@ -7,42 +7,52 @@ set(CMAKE_DEPENDS_GENERATOR "Unix Makefiles")
# The top level Makefile was generated from the following files:
set(CMAKE_MAKEFILE_DEPENDS
"CMakeCache.txt"
"/home/mht/libtorch/share/cmake/Caffe2/Caffe2Config.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Caffe2ConfigVersion.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Caffe2Targets-release.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Caffe2Targets.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDA.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageMessage.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/public/cuda.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/public/mkl.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/public/mkldnn.cmake"
"/home/mht/libtorch/share/cmake/Caffe2/public/utils.cmake"
"/home/mht/libtorch/share/cmake/Torch/TorchConfig.cmake"
"/home/mht/libtorch/share/cmake/Torch/TorchConfigVersion.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Caffe2Config.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Caffe2ConfigVersion.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Caffe2Targets-release.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Caffe2Targets.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDA.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDNN.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageMessage.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/public/cuda.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/public/mkl.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/public/mkldnn.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/public/threads.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Caffe2/public/utils.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Torch/TorchConfig.cmake"
"/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Torch/TorchConfigVersion.cmake"
"../CMakeLists.txt"
"CMakeFiles/3.22.1/CMakeCUDACompiler.cmake"
"CMakeFiles/3.22.1/CMakeCXXCompiler.cmake"
"CMakeFiles/3.22.1/CMakeSystem.cmake"
"detect_cuda_compute_capabilities.cu"
"detect_cuda_version.cc"
"../vcpkg.json"
"../vcpkg/scripts/buildsystems/vcpkg.cmake"
"/usr/share/cmake-3.22/Modules/CMakeCUDAInformation.cmake"
"/usr/share/cmake-3.22/Modules/CMakeCXXInformation.cmake"
"/usr/share/cmake-3.22/Modules/CMakeCommonLanguageInclude.cmake"
"/usr/share/cmake-3.22/Modules/CMakeDependentOption.cmake"
"/usr/share/cmake-3.22/Modules/CMakeGenericSystem.cmake"
"/usr/share/cmake-3.22/Modules/CMakeInitializeConfigs.cmake"
"/usr/share/cmake-3.22/Modules/CMakeLanguageInformation.cmake"
"/usr/share/cmake-3.22/Modules/CMakeSystemSpecificInformation.cmake"
"/usr/share/cmake-3.22/Modules/CMakeSystemSpecificInitialize.cmake"
"/usr/share/cmake-3.22/Modules/CheckCXXSourceCompiles.cmake"
"/usr/share/cmake-3.22/Modules/CheckIncludeFileCXX.cmake"
"/usr/share/cmake-3.22/Modules/CheckLibraryExists.cmake"
"/usr/share/cmake-3.22/Modules/Compiler/CMakeCommonCompilerMacros.cmake"
"/usr/share/cmake-3.22/Modules/Compiler/GNU-CXX.cmake"
"/usr/share/cmake-3.22/Modules/Compiler/GNU.cmake"
"/usr/share/cmake-3.22/Modules/Compiler/NVIDIA-CUDA.cmake"
"/usr/share/cmake-3.22/Modules/FindPackageHandleStandardArgs.cmake"
"/usr/share/cmake-3.22/Modules/FindPackageMessage.cmake"
"/usr/share/cmake-3.22/Modules/FindThreads.cmake"
"/usr/share/cmake-3.22/Modules/Internal/CheckSourceCompiles.cmake"
"/usr/share/cmake-3.22/Modules/Platform/Linux-GNU-CXX.cmake"
"/usr/share/cmake-3.22/Modules/Platform/Linux-GNU.cmake"
"/usr/share/cmake-3.22/Modules/Platform/Linux.cmake"
@ -64,7 +74,7 @@ set(CMAKE_MAKEFILE_PRODUCTS
set(CMAKE_DEPEND_INFO_FILES
"CMakeFiles/bb_regressor.dir/DependInfo.cmake"
"CMakeFiles/classifier.dir/DependInfo.cmake"
"CMakeFiles/resnet.dir/DependInfo.cmake"
"CMakeFiles/tracking_demo.dir/DependInfo.cmake"
"CMakeFiles/test_models.dir/DependInfo.cmake"
"CMakeFiles/generate_test_samples.dir/DependInfo.cmake"
)

62
build/CMakeFiles/Makefile2

@ -65,9 +65,9 @@ CMAKE_BINARY_DIR = /media/mht/ADATA/repos/cpp_tracker/build
# The main recursive "all" target.
all: CMakeFiles/bb_regressor.dir/all
all: CMakeFiles/classifier.dir/all
all: CMakeFiles/resnet.dir/all
all: CMakeFiles/tracking_demo.dir/all
all: CMakeFiles/test_models.dir/all
all: CMakeFiles/generate_test_samples.dir/all
.PHONY : all
# The main recursive "preinstall" target.
@ -77,9 +77,9 @@ preinstall:
# The main recursive "clean" target.
clean: CMakeFiles/bb_regressor.dir/clean
clean: CMakeFiles/classifier.dir/clean
clean: CMakeFiles/resnet.dir/clean
clean: CMakeFiles/tracking_demo.dir/clean
clean: CMakeFiles/test_models.dir/clean
clean: CMakeFiles/generate_test_samples.dir/clean
.PHONY : clean
#=============================================================================
@ -134,11 +134,38 @@ CMakeFiles/classifier.dir/clean:
$(MAKE) $(MAKESILENT) -f CMakeFiles/classifier.dir/build.make CMakeFiles/classifier.dir/clean
.PHONY : CMakeFiles/classifier.dir/clean
#=============================================================================
# Target rules for target CMakeFiles/resnet.dir
# All Build rule for target.
CMakeFiles/resnet.dir/all:
$(MAKE) $(MAKESILENT) -f CMakeFiles/resnet.dir/build.make CMakeFiles/resnet.dir/depend
$(MAKE) $(MAKESILENT) -f CMakeFiles/resnet.dir/build.make CMakeFiles/resnet.dir/build
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles --progress-num=6,7 "Built target resnet"
.PHONY : CMakeFiles/resnet.dir/all
# Build rule for subdir invocation for target.
CMakeFiles/resnet.dir/rule: cmake_check_build_system
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 2
$(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 CMakeFiles/resnet.dir/all
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 0
.PHONY : CMakeFiles/resnet.dir/rule
# Convenience name for target.
resnet: CMakeFiles/resnet.dir/rule
.PHONY : resnet
# clean rule for target.
CMakeFiles/resnet.dir/clean:
$(MAKE) $(MAKESILENT) -f CMakeFiles/resnet.dir/build.make CMakeFiles/resnet.dir/clean
.PHONY : CMakeFiles/resnet.dir/clean
#=============================================================================
# Target rules for target CMakeFiles/tracking_demo.dir
# All Build rule for target.
CMakeFiles/tracking_demo.dir/all: CMakeFiles/bb_regressor.dir/all
CMakeFiles/tracking_demo.dir/all: CMakeFiles/resnet.dir/all
CMakeFiles/tracking_demo.dir/all: CMakeFiles/classifier.dir/all
$(MAKE) $(MAKESILENT) -f CMakeFiles/tracking_demo.dir/build.make CMakeFiles/tracking_demo.dir/depend
$(MAKE) $(MAKESILENT) -f CMakeFiles/tracking_demo.dir/build.make CMakeFiles/tracking_demo.dir/build
@ -147,7 +174,7 @@ CMakeFiles/tracking_demo.dir/all: CMakeFiles/classifier.dir/all
# Build rule for subdir invocation for target.
CMakeFiles/tracking_demo.dir/rule: cmake_check_build_system
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 7
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 9
$(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 CMakeFiles/tracking_demo.dir/all
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 0
.PHONY : CMakeFiles/tracking_demo.dir/rule
@ -166,6 +193,7 @@ CMakeFiles/tracking_demo.dir/clean:
# All Build rule for target.
CMakeFiles/test_models.dir/all: CMakeFiles/bb_regressor.dir/all
CMakeFiles/test_models.dir/all: CMakeFiles/resnet.dir/all
CMakeFiles/test_models.dir/all: CMakeFiles/classifier.dir/all
$(MAKE) $(MAKESILENT) -f CMakeFiles/test_models.dir/build.make CMakeFiles/test_models.dir/depend
$(MAKE) $(MAKESILENT) -f CMakeFiles/test_models.dir/build.make CMakeFiles/test_models.dir/build
@ -174,7 +202,7 @@ CMakeFiles/test_models.dir/all: CMakeFiles/classifier.dir/all
# Build rule for subdir invocation for target.
CMakeFiles/test_models.dir/rule: cmake_check_build_system
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 7
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 9
$(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 CMakeFiles/test_models.dir/all
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 0
.PHONY : CMakeFiles/test_models.dir/rule
@ -188,32 +216,6 @@ CMakeFiles/test_models.dir/clean:
$(MAKE) $(MAKESILENT) -f CMakeFiles/test_models.dir/build.make CMakeFiles/test_models.dir/clean
.PHONY : CMakeFiles/test_models.dir/clean
#=============================================================================
# Target rules for target CMakeFiles/generate_test_samples.dir
# All Build rule for target.
CMakeFiles/generate_test_samples.dir/all:
$(MAKE) $(MAKESILENT) -f CMakeFiles/generate_test_samples.dir/build.make CMakeFiles/generate_test_samples.dir/depend
$(MAKE) $(MAKESILENT) -f CMakeFiles/generate_test_samples.dir/build.make CMakeFiles/generate_test_samples.dir/build
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles --progress-num=6,7 "Built target generate_test_samples"
.PHONY : CMakeFiles/generate_test_samples.dir/all
# Build rule for subdir invocation for target.
CMakeFiles/generate_test_samples.dir/rule: cmake_check_build_system
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 2
$(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 CMakeFiles/generate_test_samples.dir/all
$(CMAKE_COMMAND) -E cmake_progress_start /media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles 0
.PHONY : CMakeFiles/generate_test_samples.dir/rule
# Convenience name for target.
generate_test_samples: CMakeFiles/generate_test_samples.dir/rule
.PHONY : generate_test_samples
# clean rule for target.
CMakeFiles/generate_test_samples.dir/clean:
$(MAKE) $(MAKESILENT) -f CMakeFiles/generate_test_samples.dir/build.make CMakeFiles/generate_test_samples.dir/clean
.PHONY : CMakeFiles/generate_test_samples.dir/clean
#=============================================================================
# Special targets to cleanup operation of make.

3
build/CMakeFiles/TargetDirectories.txt

@ -1,8 +1,9 @@
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/bb_regressor.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/classifier.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/resnet.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/tracking_demo.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/test_models.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/generate_test_samples.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/test.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/edit_cache.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/rebuild_cache.dir
/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/list_install_components.dir

2
build/CMakeFiles/bb_regressor.dir/DependInfo.cmake

@ -8,8 +8,8 @@ set(CMAKE_DEPENDS_LANGUAGES
# The set of dependency files which are needed:
set(CMAKE_DEPENDS_DEPENDENCY_FILES
"/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu" "CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o" "gcc" "CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o.d"
"/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/bb_regressor.cpp" "CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o" "gcc" "CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o.d"
"/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp" "CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o" "gcc" "CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o.d"
)
# Targets to which this target links.

26
build/CMakeFiles/bb_regressor.dir/build.make

@ -83,30 +83,30 @@ CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.s: cmake_force
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.s"
/usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/bb_regressor.cpp -o CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.s
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o: CMakeFiles/bb_regressor.dir/flags.make
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o: ../cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o: CMakeFiles/bb_regressor.dir/compiler_depend.ts
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Building CXX object CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o"
/usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -MD -MT CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o -MF CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o.d -o CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o -c /media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o: CMakeFiles/bb_regressor.dir/flags.make
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o: ../cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o: CMakeFiles/bb_regressor.dir/compiler_depend.ts
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Building CUDA object CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o"
/usr/local/cuda-11.8/bin/nvcc -forward-unknown-to-host-compiler $(CUDA_DEFINES) $(CUDA_INCLUDES) $(CUDA_FLAGS) -MD -MT CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o -MF CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o.d -x cu -c /media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu -o CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.i: cmake_force
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.i"
/usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp > CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.i
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.i: cmake_force
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CUDA source to CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.i"
$(CMAKE_COMMAND) -E cmake_unimplemented_variable CMAKE_CUDA_CREATE_PREPROCESSED_SOURCE
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.s: cmake_force
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.s"
/usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp -o CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.s
CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.s: cmake_force
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CUDA source to assembly CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.s"
$(CMAKE_COMMAND) -E cmake_unimplemented_variable CMAKE_CUDA_CREATE_ASSEMBLY_SOURCE
# Object files for target bb_regressor
bb_regressor_OBJECTS = \
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o" \
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o"
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o"
# External object files for target bb_regressor
bb_regressor_EXTERNAL_OBJECTS =
libbb_regressor.a: CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o
libbb_regressor.a: CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o
libbb_regressor.a: CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o
libbb_regressor.a: CMakeFiles/bb_regressor.dir/build.make
libbb_regressor.a: CMakeFiles/bb_regressor.dir/link.txt
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_3) "Linking CXX static library libbb_regressor.a"

BIN
build/CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o

5157
build/CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o.d
File diff suppressed because it is too large
View File

6
build/CMakeFiles/bb_regressor.dir/cmake_clean.cmake

@ -1,13 +1,13 @@
file(REMOVE_RECURSE
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o"
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o.d"
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o"
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o.d"
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o"
"CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o.d"
"libbb_regressor.a"
"libbb_regressor.pdb"
)
# Per-language clean rules from dependency scanning.
foreach(lang CXX)
foreach(lang CUDA CXX)
include(CMakeFiles/bb_regressor.dir/cmake_clean_${lang}.cmake OPTIONAL)
endforeach()

5367
build/CMakeFiles/bb_regressor.dir/compiler_depend.internal
File diff suppressed because it is too large
View File

15247
build/CMakeFiles/bb_regressor.dir/compiler_depend.make
File diff suppressed because it is too large
View File

11
build/CMakeFiles/bb_regressor.dir/flags.make

@ -1,10 +1,17 @@
# CMAKE generated file: DO NOT EDIT!
# Generated by "Unix Makefiles" Generator, CMake Version 3.22
# compile CUDA with /usr/local/cuda-11.8/bin/nvcc
# compile CXX with /usr/bin/c++
CXX_DEFINES = -DUSE_C10D_GLOO -DUSE_C10D_NCCL -DUSE_DISTRIBUTED -DUSE_RPC -DUSE_TENSORPIPE
CUDA_DEFINES =
CXX_INCLUDES = -I/media/mht/ADATA/repos/cpp_tracker/cimp -I/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling -I/media/mht/ADATA/repos/cpp_tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src -isystem /home/mht/libtorch/include -isystem /home/mht/libtorch/include/torch/csrc/api/include
CUDA_INCLUDES = -I/media/mht/ADATA/repos/cpp_tracker/cimp -I/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling -I/media/mht/ADATA/repos/cpp_tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src -isystem=/home/mht/libtorch_1.8.0_cu111/libtorch/include -isystem=/home/mht/libtorch_1.8.0_cu111/libtorch/include/torch/csrc/api/include -isystem=/usr/local/cuda-11.8/include
CUDA_FLAGS = -O3 -DNDEBUG --generate-code=arch=compute_52,code=[compute_52,sm_52] -D_GLIBCXX_USE_CXX11_ABI=1 -std=c++17
CXX_DEFINES =
CXX_INCLUDES = -I/media/mht/ADATA/repos/cpp_tracker/cimp -I/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling -I/media/mht/ADATA/repos/cpp_tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src -isystem /home/mht/libtorch_1.8.0_cu111/libtorch/include -isystem /home/mht/libtorch_1.8.0_cu111/libtorch/include/torch/csrc/api/include -isystem /usr/local/cuda-11.8/include
CXX_FLAGS = -O3 -DNDEBUG -D_GLIBCXX_USE_CXX11_ABI=1 -std=gnu++17

2
build/CMakeFiles/bb_regressor.dir/link.txt

@ -1,2 +1,2 @@
/usr/bin/ar qc libbb_regressor.a CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o
/usr/bin/ar qc libbb_regressor.a CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.o CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o
/usr/bin/ranlib libbb_regressor.a

BIN
build/CMakeFiles/classifier.dir/cimp/classifier/classifier.cpp.o

5088
build/CMakeFiles/classifier.dir/cimp/classifier/classifier.cpp.o.d
File diff suppressed because it is too large
View File

5123
build/CMakeFiles/classifier.dir/compiler_depend.internal
File diff suppressed because it is too large
View File

14799
build/CMakeFiles/classifier.dir/compiler_depend.make
File diff suppressed because it is too large
View File

4
build/CMakeFiles/classifier.dir/flags.make

@ -2,9 +2,9 @@
# Generated by "Unix Makefiles" Generator, CMake Version 3.22
# compile CXX with /usr/bin/c++
CXX_DEFINES = -DUSE_C10D_GLOO -DUSE_C10D_NCCL -DUSE_DISTRIBUTED -DUSE_RPC -DUSE_TENSORPIPE
CXX_DEFINES =
CXX_INCLUDES = -I/media/mht/ADATA/repos/cpp_tracker/cimp -isystem /home/mht/libtorch/include -isystem /home/mht/libtorch/include/torch/csrc/api/include
CXX_INCLUDES = -I/media/mht/ADATA/repos/cpp_tracker/cimp -isystem /home/mht/libtorch_1.8.0_cu111/libtorch/include -isystem /home/mht/libtorch_1.8.0_cu111/libtorch/include/torch/csrc/api/include -isystem /usr/local/cuda-11.8/include
CXX_FLAGS = -O3 -DNDEBUG -D_GLIBCXX_USE_CXX11_ABI=1 -std=gnu++17

1
build/CMakeFiles/tracking_demo.dir/DependInfo.cmake

@ -15,6 +15,7 @@ set(CMAKE_DEPENDS_DEPENDENCY_FILES
set(CMAKE_TARGET_LINKED_INFO_FILES
"/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/bb_regressor.dir/DependInfo.cmake"
"/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/classifier.dir/DependInfo.cmake"
"/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles/resnet.dir/DependInfo.cmake"
)
# Fortran module output directory.

32
build/CMakeFiles/tracking_demo.dir/build.make

@ -94,22 +94,22 @@ tracking_demo: CMakeFiles/tracking_demo.dir/cimp/demo.cpp.o
tracking_demo: CMakeFiles/tracking_demo.dir/build.make
tracking_demo: libbb_regressor.a
tracking_demo: libclassifier.a
tracking_demo: /home/mht/libtorch/lib/libtorch.so
tracking_demo: /home/mht/libtorch/lib/libc10.so
tracking_demo: /home/mht/libtorch/lib/libkineto.a
tracking_demo: /usr/lib/x86_64-linux-gnu/libcuda.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libnvrtc.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libnvToolsExt.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libcudart.so
tracking_demo: /home/mht/libtorch/lib/libc10_cuda.so
tracking_demo: /home/mht/libtorch/lib/libc10_cuda.so
tracking_demo: /home/mht/libtorch/lib/libc10.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libcufft.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libcurand.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libcublas.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libcublasLt.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libnvToolsExt.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libcudart.so
tracking_demo: libresnet.a
tracking_demo: /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch.so
tracking_demo: /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10.so
tracking_demo: /usr/local/cuda-11.8/lib64/stubs/libcuda.so
tracking_demo: /usr/local/cuda-11.8/lib64/libnvrtc.so
tracking_demo: /usr/local/cuda-11.8/lib64/libnvToolsExt.so
tracking_demo: /usr/local/cuda-11.8/lib64/libcudart.so
tracking_demo: /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10_cuda.so
tracking_demo: /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10_cuda.so
tracking_demo: /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10.so
tracking_demo: /usr/local/cuda-11.8/lib64/libcufft.so
tracking_demo: /usr/local/cuda-11.8/lib64/libcurand.so
tracking_demo: /usr/local/cuda-11.8/lib64/libcublas.so
tracking_demo: /usr/lib/x86_64-linux-gnu/libcudnn.so
tracking_demo: /usr/local/cuda-11.8/lib64/libnvToolsExt.so
tracking_demo: /usr/local/cuda-11.8/lib64/libcudart.so
tracking_demo: CMakeFiles/tracking_demo.dir/link.txt
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=/media/mht/ADATA/repos/cpp_tracker/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Linking CXX executable tracking_demo"
$(CMAKE_COMMAND) -E cmake_link_script CMakeFiles/tracking_demo.dir/link.txt --verbose=$(VERBOSE)

BIN
build/CMakeFiles/tracking_demo.dir/cimp/demo.cpp.o

4910
build/CMakeFiles/tracking_demo.dir/cimp/demo.cpp.o.d
File diff suppressed because it is too large
View File

2148
build/CMakeFiles/tracking_demo.dir/compiler_depend.make
File diff suppressed because it is too large
View File

4
build/CMakeFiles/tracking_demo.dir/flags.make

@ -2,9 +2,9 @@
# Generated by "Unix Makefiles" Generator, CMake Version 3.22
# compile CXX with /usr/bin/c++
CXX_DEFINES = -DUSE_C10D_GLOO -DUSE_C10D_NCCL -DUSE_DISTRIBUTED -DUSE_RPC -DUSE_TENSORPIPE
CXX_DEFINES =
CXX_INCLUDES = -I/media/mht/ADATA/repos/cpp_tracker/cimp -I/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling -I/media/mht/ADATA/repos/cpp_tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src -isystem /home/mht/libtorch/include -isystem /home/mht/libtorch/include/torch/csrc/api/include
CXX_INCLUDES = -I/media/mht/ADATA/repos/cpp_tracker/cimp -I/media/mht/ADATA/repos/cpp_tracker/cimp/bb_regressor/prroi_pooling -I/media/mht/ADATA/repos/cpp_tracker/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src -isystem /home/mht/libtorch_1.8.0_cu111/libtorch/include -isystem /home/mht/libtorch_1.8.0_cu111/libtorch/include/torch/csrc/api/include -isystem /usr/local/cuda-11.8/include
CXX_FLAGS = -O3 -DNDEBUG -D_GLIBCXX_USE_CXX11_ABI=1 -std=gnu++17

2
build/CMakeFiles/tracking_demo.dir/link.txt

@ -1 +1 @@
/usr/bin/c++ -O3 -DNDEBUG CMakeFiles/tracking_demo.dir/cimp/demo.cpp.o -o tracking_demo -Wl,-rpath,/home/mht/libtorch/lib: libbb_regressor.a libclassifier.a /home/mht/libtorch/lib/libtorch.so /home/mht/libtorch/lib/libc10.so /home/mht/libtorch/lib/libkineto.a /usr/lib/x86_64-linux-gnu/libcuda.so /usr/lib/x86_64-linux-gnu/libnvrtc.so /usr/lib/x86_64-linux-gnu/libnvToolsExt.so /usr/lib/x86_64-linux-gnu/libcudart.so /home/mht/libtorch/lib/libc10_cuda.so -Wl,--no-as-needed,"/home/mht/libtorch/lib/libtorch_cpu.so" -Wl,--as-needed -Wl,--no-as-needed,"/home/mht/libtorch/lib/libtorch_cuda.so" -Wl,--as-needed /home/mht/libtorch/lib/libc10_cuda.so /home/mht/libtorch/lib/libc10.so /usr/lib/x86_64-linux-gnu/libcufft.so /usr/lib/x86_64-linux-gnu/libcurand.so /usr/lib/x86_64-linux-gnu/libcublas.so /usr/lib/x86_64-linux-gnu/libcublasLt.so -Wl,--no-as-needed,"/home/mht/libtorch/lib/libtorch.so" -Wl,--as-needed /usr/lib/x86_64-linux-gnu/libnvToolsExt.so /usr/lib/x86_64-linux-gnu/libcudart.so
/usr/bin/c++ -O3 -DNDEBUG CMakeFiles/tracking_demo.dir/cimp/demo.cpp.o -o tracking_demo -L/usr/local/cuda-11.8/targets/x86_64-linux/lib/stubs -L/usr/local/cuda-11.8/targets/x86_64-linux/lib -Wl,-rpath,/home/mht/libtorch_1.8.0_cu111/libtorch/lib:/usr/local/cuda-11.8/lib64/stubs:/usr/local/cuda-11.8/lib64: libbb_regressor.a libclassifier.a libresnet.a /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch.so /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10.so /usr/local/cuda-11.8/lib64/stubs/libcuda.so /usr/local/cuda-11.8/lib64/libnvrtc.so /usr/local/cuda-11.8/lib64/libnvToolsExt.so /usr/local/cuda-11.8/lib64/libcudart.so /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10_cuda.so -Wl,--no-as-needed,"/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch_cuda.so" -Wl,--as-needed -Wl,--no-as-needed,"/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch_cuda_cu.so" -Wl,--as-needed -Wl,--no-as-needed,"/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch_cpu.so" -Wl,--as-needed /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10_cuda.so /home/mht/libtorch_1.8.0_cu111/libtorch/lib/libc10.so /usr/local/cuda-11.8/lib64/libcufft.so /usr/local/cuda-11.8/lib64/libcurand.so /usr/local/cuda-11.8/lib64/libcublas.so /usr/lib/x86_64-linux-gnu/libcudnn.so -Wl,--no-as-needed,"/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch_cuda_cpp.so" -Wl,--as-needed -Wl,--no-as-needed,"/home/mht/libtorch_1.8.0_cu111/libtorch/lib/libtorch.so" -Wl,--as-needed /usr/local/cuda-11.8/lib64/libnvToolsExt.so /usr/local/cuda-11.8/lib64/libcudart.so -lcudadevrt -lcudart_static -lrt -lpthread -ldl

111
build/Makefile

@ -65,6 +65,16 @@ CMAKE_BINARY_DIR = /media/mht/ADATA/repos/cpp_tracker/build
#=============================================================================
# Targets provided globally by CMake.
# Special rule for the target test
test:
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running tests..."
/usr/bin/ctest --force-new-ctest-process $(ARGS)
.PHONY : test
# Special rule for the target test
test/fast: test
.PHONY : test/fast
# Special rule for the target edit_cache
edit_cache:
@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "No interactive CMake dialog available..."
@ -187,6 +197,19 @@ classifier/fast:
$(MAKE) $(MAKESILENT) -f CMakeFiles/classifier.dir/build.make CMakeFiles/classifier.dir/build
.PHONY : classifier/fast
#=============================================================================
# Target rules for targets named resnet
# Build rule for target.
resnet: cmake_check_build_system
$(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 resnet
.PHONY : resnet
# fast build rule for target.
resnet/fast:
$(MAKE) $(MAKESILENT) -f CMakeFiles/resnet.dir/build.make CMakeFiles/resnet.dir/build
.PHONY : resnet/fast
#=============================================================================
# Target rules for targets named tracking_demo
@ -213,19 +236,6 @@ test_models/fast:
$(MAKE) $(MAKESILENT) -f CMakeFiles/test_models.dir/build.make CMakeFiles/test_models.dir/build
.PHONY : test_models/fast
#=============================================================================
# Target rules for targets named generate_test_samples
# Build rule for target.
generate_test_samples: cmake_check_build_system
$(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 generate_test_samples
.PHONY : generate_test_samples
# fast build rule for target.
generate_test_samples/fast:
$(MAKE) $(MAKESILENT) -f CMakeFiles/generate_test_samples.dir/build.make CMakeFiles/generate_test_samples.dir/build
.PHONY : generate_test_samples/fast
cimp/bb_regressor/bb_regressor.o: cimp/bb_regressor/bb_regressor.cpp.o
.PHONY : cimp/bb_regressor/bb_regressor.o
@ -250,29 +260,29 @@ cimp/bb_regressor/bb_regressor.cpp.s:
$(MAKE) $(MAKESILENT) -f CMakeFiles/bb_regressor.dir/build.make CMakeFiles/bb_regressor.dir/cimp/bb_regressor/bb_regressor.cpp.s
.PHONY : cimp/bb_regressor/bb_regressor.cpp.s
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.o: cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.o
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.o: cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.o
# target to build an object file
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o:
$(MAKE) $(MAKESILENT) -f CMakeFiles/bb_regressor.dir/build.make CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.o
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o:
$(MAKE) $(MAKESILENT) -f CMakeFiles/bb_regressor.dir/build.make CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.o
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.i: cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.i
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.i
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.i: cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.i
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.i
# target to preprocess a source file
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.i:
$(MAKE) $(MAKESILENT) -f CMakeFiles/bb_regressor.dir/build.make CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.i
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.i
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.i:
$(MAKE) $(MAKESILENT) -f CMakeFiles/bb_regressor.dir/build.make CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.i
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.i
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.s: cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.s
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.s
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.s: cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.s
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.s
# target to generate assembly for a file
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.s:
$(MAKE) $(MAKESILENT) -f CMakeFiles/bb_regressor.dir/build.make CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.s
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.cpp.s
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.s:
$(MAKE) $(MAKESILENT) -f CMakeFiles/bb_regressor.dir/build.make CMakeFiles/bb_regressor.dir/cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.s
.PHONY : cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu.s
cimp/classifier/classifier.o: cimp/classifier/classifier.cpp.o
.PHONY : cimp/classifier/classifier.o
@ -322,29 +332,29 @@ cimp/demo.cpp.s:
$(MAKE) $(MAKESILENT) -f CMakeFiles/tracking_demo.dir/build.make CMakeFiles/tracking_demo.dir/cimp/demo.cpp.s
.PHONY : cimp/demo.cpp.s
test/generate_test_samples.o: test/generate_test_samples.cpp.o
.PHONY : test/generate_test_samples.o
cimp/resnet/resnet.o: cimp/resnet/resnet.cpp.o
.PHONY : cimp/resnet/resnet.o
# target to build an object file
test/generate_test_samples.cpp.o:
$(MAKE) $(MAKESILENT) -f CMakeFiles/generate_test_samples.dir/build.make CMakeFiles/generate_test_samples.dir/test/generate_test_samples.cpp.o
.PHONY : test/generate_test_samples.cpp.o
cimp/resnet/resnet.cpp.o:
$(MAKE) $(MAKESILENT) -f CMakeFiles/resnet.dir/build.make CMakeFiles/resnet.dir/cimp/resnet/resnet.cpp.o
.PHONY : cimp/resnet/resnet.cpp.o
test/generate_test_samples.i: test/generate_test_samples.cpp.i
.PHONY : test/generate_test_samples.i
cimp/resnet/resnet.i: cimp/resnet/resnet.cpp.i
.PHONY : cimp/resnet/resnet.i
# target to preprocess a source file
test/generate_test_samples.cpp.i:
$(MAKE) $(MAKESILENT) -f CMakeFiles/generate_test_samples.dir/build.make CMakeFiles/generate_test_samples.dir/test/generate_test_samples.cpp.i
.PHONY : test/generate_test_samples.cpp.i
cimp/resnet/resnet.cpp.i:
$(MAKE) $(MAKESILENT) -f CMakeFiles/resnet.dir/build.make CMakeFiles/resnet.dir/cimp/resnet/resnet.cpp.i
.PHONY : cimp/resnet/resnet.cpp.i
test/generate_test_samples.s: test/generate_test_samples.cpp.s
.PHONY : test/generate_test_samples.s
cimp/resnet/resnet.s: cimp/resnet/resnet.cpp.s
.PHONY : cimp/resnet/resnet.s
# target to generate assembly for a file
test/generate_test_samples.cpp.s:
$(MAKE) $(MAKESILENT) -f CMakeFiles/generate_test_samples.dir/build.make CMakeFiles/generate_test_samples.dir/test/generate_test_samples.cpp.s
.PHONY : test/generate_test_samples.cpp.s
cimp/resnet/resnet.cpp.s:
$(MAKE) $(MAKESILENT) -f CMakeFiles/resnet.dir/build.make CMakeFiles/resnet.dir/cimp/resnet/resnet.cpp.s
.PHONY : cimp/resnet/resnet.cpp.s
test/test_models.o: test/test_models.cpp.o
.PHONY : test/test_models.o
@ -382,26 +392,27 @@ help:
@echo "... install/strip"
@echo "... list_install_components"
@echo "... rebuild_cache"
@echo "... test"
@echo "... bb_regressor"
@echo "... classifier"
@echo "... generate_test_samples"
@echo "... resnet"
@echo "... test_models"
@echo "... tracking_demo"
@echo "... cimp/bb_regressor/bb_regressor.o"
@echo "... cimp/bb_regressor/bb_regressor.i"
@echo "... cimp/bb_regressor/bb_regressor.s"
@echo "... cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.o"
@echo "... cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.i"
@echo "... cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu.s"
@echo "... cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.o"
@echo "... cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.i"
@echo "... cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.s"
@echo "... cimp/classifier/classifier.o"
@echo "... cimp/classifier/classifier.i"
@echo "... cimp/classifier/classifier.s"
@echo "... cimp/demo.o"
@echo "... cimp/demo.i"
@echo "... cimp/demo.s"
@echo "... test/generate_test_samples.o"
@echo "... test/generate_test_samples.i"
@echo "... test/generate_test_samples.s"
@echo "... cimp/resnet/resnet.o"
@echo "... cimp/resnet/resnet.i"
@echo "... cimp/resnet/resnet.s"
@echo "... test/test_models.o"
@echo "... test/test_models.i"
@echo "... test/test_models.s"

40
build/cmake_install.cmake

@ -2,7 +2,7 @@
# Set the install prefix
if(NOT DEFINED CMAKE_INSTALL_PREFIX)
set(CMAKE_INSTALL_PREFIX "/media/mht/ADATA/repos/cpp_tracker")
set(CMAKE_INSTALL_PREFIX "/media/mht/ADATA/repos/cpp_tracker/build/install")
endif()
string(REGEX REPLACE "/$" "" CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
@ -54,7 +54,7 @@ if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_
NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/tracking_demo")
file(RPATH_CHANGE
FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/tracking_demo"
OLD_RPATH "/home/mht/libtorch/lib:"
OLD_RPATH "/home/mht/libtorch_1.8.0_cu111/libtorch/lib:/usr/local/cuda-11.8/lib64/stubs:/usr/local/cuda-11.8/lib64:"
NEW_RPATH "")
if(CMAKE_INSTALL_DO_STRIP)
execute_process(COMMAND "/usr/bin/strip" "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/tracking_demo")
@ -74,7 +74,7 @@ if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_
NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models")
file(RPATH_CHANGE
FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models"
OLD_RPATH "/home/mht/libtorch/lib:"
OLD_RPATH "/home/mht/libtorch_1.8.0_cu111/libtorch/lib:/usr/local/cuda-11.8/lib64/stubs:/usr/local/cuda-11.8/lib64:"
NEW_RPATH "")
if(CMAKE_INSTALL_DO_STRIP)
execute_process(COMMAND "/usr/bin/strip" "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models")
@ -83,25 +83,41 @@ if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_
endif()
if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_COMPONENT)
if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/generate_test_samples" AND
NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/generate_test_samples")
if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models" AND
NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models")
file(RPATH_CHECK
FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/generate_test_samples"
FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models"
RPATH "")
endif()
file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/bin" TYPE EXECUTABLE FILES "/media/mht/ADATA/repos/cpp_tracker/build/generate_test_samples")
if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/generate_test_samples" AND
NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/generate_test_samples")
file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/bin" TYPE EXECUTABLE FILES "/media/mht/ADATA/repos/cpp_tracker/build/test_models")
if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models" AND
NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models")
file(RPATH_CHANGE
FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/generate_test_samples"
OLD_RPATH "/home/mht/libtorch/lib:"
FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models"
OLD_RPATH "/home/mht/libtorch_1.8.0_cu111/libtorch/lib:/usr/local/cuda-11.8/lib64/stubs:/usr/local/cuda-11.8/lib64:"
NEW_RPATH "")
if(CMAKE_INSTALL_DO_STRIP)
execute_process(COMMAND "/usr/bin/strip" "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/generate_test_samples")
execute_process(COMMAND "/usr/bin/strip" "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/test_models")
endif()
endif()
endif()
if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_COMPONENT)
file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib" TYPE STATIC_LIBRARY FILES "/media/mht/ADATA/repos/cpp_tracker/build/libresnet.a")
endif()
if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_COMPONENT)
file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib" TYPE STATIC_LIBRARY FILES "/media/mht/ADATA/repos/cpp_tracker/build/libclassifier.a")
endif()
if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_COMPONENT)
file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib" TYPE STATIC_LIBRARY FILES "/media/mht/ADATA/repos/cpp_tracker/build/libbb_regressor.a")
endif()
if("x${CMAKE_INSTALL_COMPONENT}x" STREQUAL "xUnspecifiedx" OR NOT CMAKE_INSTALL_COMPONENT)
file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib" TYPE DIRECTORY OPTIONAL FILES "/home/mht/libtorch_1.8.0_cu111/libtorch/share/cmake/Torch/../lib/" USE_SOURCE_PERMISSIONS FILES_MATCHING REGEX "/[^/]*\\.so[^/]*$" REGEX "/[^/]*\\.dylib[^/]*$" REGEX "/[^/]*\\.dll$" REGEX "/c10[^/]*\\.dll$" REGEX "/torch\\_cpu[^/]*\\.dll$" REGEX "/torch\\_cuda[^/]*\\.dll$" REGEX "/torch\\.dll$" REGEX "/cudnn[^/]*\\.dll$")
endif()
if(CMAKE_INSTALL_COMPONENT)
set(CMAKE_INSTALL_MANIFEST "install_manifest_${CMAKE_INSTALL_COMPONENT}.txt")
else()

3
build/install_manifest.txt

@ -1,3 +1,2 @@
/media/mht/ADATA/repos/cpp_tracker/bin/tracking_demo
/media/mht/ADATA/repos/cpp_tracker/bin/test_models
/media/mht/ADATA/repos/cpp_tracker/bin/generate_test_samples
/media/mht/ADATA/repos/cpp_tracker/bin/test_models

BIN
build/libbb_regressor.a

BIN
build/libclassifier.a

BIN
build/tracking_demo

84
cimp/bb_regressor/bb_regressor.cpp

@ -34,33 +34,24 @@ torch::Tensor PrRoIPool2D::forward(torch::Tensor feat, torch::Tensor rois) {
if (!feat.is_cuda() || !rois.is_cuda()) {
throw std::runtime_error("PrRoIPool2D requires CUDA tensors - CPU mode is not supported");
}
feat = feat.contiguous(); // Ensure contiguous
rois = rois.contiguous(); // Ensure contiguous
// Print ROI values for debugging
std::cout << " ROI values: " << std::endl;
for (int i = 0; i < std::min(num_rois, 3); i++) {
std::cout << " ROI " << i << ": [";
for (int j = 0; j < rois.size(1); j++) {
std::cout << rois[i][j].item<float>();
if (j < rois.size(1) - 1) std::cout << ", ";
}
std::cout << "]" << std::endl;
}
// Create output tensor on the same device
// Create output tensor on the same device (CUDA)
auto output = torch::zeros({num_rois, channels, pooled_height_, pooled_width_},
feat.options());
feat.options()); // feat.options() will be CUDA
// Copy tensors to CPU for the C implementation
auto feat_cpu = feat.to(torch::kCPU).contiguous();
auto rois_cpu = rois.to(torch::kCPU).contiguous();
auto output_cpu = output.to(torch::kCPU).contiguous();
// DO NOT Copy tensors to CPU. Pass GPU pointers directly.
// auto feat_cpu = feat.to(torch::kCPU).contiguous();
// auto rois_cpu = rois.to(torch::kCPU).contiguous();
// auto output_cpu = output.to(torch::kCPU).contiguous();
// Call the C wrapper function
std::cout << " Calling prroi_pooling_forward_cuda..." << std::endl;
// Call the C wrapper function with GPU data pointers
std::cout << " Calling prroi_pooling_forward_cuda with GPU data..." << std::endl;
prroi_pooling_forward_cuda(
feat_cpu.data_ptr<float>(),
static_cast<float*>(rois_cpu.data_ptr()),
static_cast<float*>(output_cpu.data_ptr()),
feat.data_ptr<float>(),
rois.data_ptr<float>(), // Assuming rois is already float, otherwise needs care
output.data_ptr<float>(),
channels,
feat.size(2),
feat.size(3),
@ -71,8 +62,8 @@ torch::Tensor PrRoIPool2D::forward(torch::Tensor feat, torch::Tensor rois) {
);
std::cout << " prroi_pooling_forward_cuda completed" << std::endl;
// Copy result back to GPU
output.copy_(output_cpu);
// No need to copy result back to GPU, output is already on GPU and was modified in-place.
// output.copy_(output_cpu);
return output;
}
@ -85,8 +76,8 @@ LinearBlock::LinearBlock(int in_planes, int out_planes, int input_sz, bool bias,
use_bn = batch_norm;
if (use_bn) {
// Important: use BatchNorm2d to match Python implementation
bn = register_module("bn", torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(out_planes)));
// Use BatchNorm1d
bn = register_module("bn", torch::nn::BatchNorm1d(torch::nn::BatchNorm1dOptions(out_planes)));
}
use_relu = relu;
@ -96,46 +87,25 @@ LinearBlock::LinearBlock(int in_planes, int out_planes, int input_sz, bool bias,
}
torch::Tensor LinearBlock::forward(torch::Tensor x) {
// Store original dtype for later
auto original_dtype = x.dtype();
// Reshape input for linear layer: x.reshape(x.shape[0], -1)
x = x.reshape({x.size(0), -1});
// Use double precision for higher accuracy
auto x_double = x.to(torch::kFloat64);
// Reshape exactly as in Python: x.reshape(x.shape[0], -1)
x_double = x_double.reshape({x_double.size(0), -1}).contiguous();
// Convert back to original precision for the linear operation
auto x_float = x_double.to(original_dtype);
x_float = linear->forward(x_float);
// Back to double precision for further operations
x_double = x_float.to(torch::kFloat64);
x = linear->forward(x);
if (use_bn) {
// This is crucial: reshape to 4D tensor for BatchNorm2d exactly as in Python
// In Python: x = self.bn(x.reshape(x.shape[0], x.shape[1], 1, 1))
x_double = x_double.reshape({x_double.size(0), x_double.size(1), 1, 1}).contiguous();
// Apply batch norm (convert to float32 for the operation)
x_float = x_double.to(original_dtype);
x_float = bn->forward(x_float);
x_double = x_float.to(torch::kFloat64);
// BatchNorm1d expects input of (N, C) or (N, C, L). Here x is (N, C).
x = bn->forward(x);
}
// Apply ReLU if needed
if (use_relu) {
// Apply ReLU in float32 precision
x_float = x_double.to(original_dtype);
x_float = relu_->forward(x_float);
x_double = x_float.to(torch::kFloat64);
x = relu_->forward(x);
}
// Final reshape to 2D tensor, exactly matching Python's behavior
x_double = x_double.reshape({x_double.size(0), -1}).contiguous();
// Ensure output is 2D (batch_size, features)
// This might be redundant if x is already in the correct shape after relu/bn.
x = x.reshape({x.size(0), -1});
// Return tensor in original precision
return x_double.to(original_dtype);
return x;
}
// Create convolutional block

2
cimp/bb_regressor/bb_regressor.h

@ -38,7 +38,7 @@ public:
// Public members for direct access to weights
torch::nn::Linear linear{nullptr};
torch::nn::BatchNorm2d bn{nullptr};
torch::nn::BatchNorm1d bn{nullptr};
torch::nn::ReLU relu_{nullptr};
bool use_bn;
bool use_relu;

34
cimp/bb_regressor/prroi_pooling/prroi_pooling_gpu_impl.cu

@ -180,6 +180,40 @@ extern "C" void PrRoIPoolingForwardGpu(
checkCudaErrors(cudaGetLastError());
}
// Add the C-API function that bb_regressor.cpp expects
extern "C" int prroi_pooling_forward_cuda(
    const float *features_data,
    const float *rois_data,
    float *output_data,
    int channels,
    int height,
    int width,
    int num_rois,
    int pooled_height,
    int pooled_width,
    float spatial_scale
) {
    // C-API shim expected by bb_regressor.cpp: forwards straight to the
    // CUDA launcher on the default stream.
    const cudaStream_t stream = 0;

    // Total number of elements in the pooled output tensor.
    const int top_count = num_rois * channels * pooled_height * pooled_width;

    PrRoIPoolingForwardGpu(stream,
                           features_data,   // bottom_data
                           rois_data,       // bottom_rois
                           output_data,     // top_data
                           channels,
                           height,
                           width,
                           pooled_height,
                           pooled_width,
                           spatial_scale,
                           top_count);

    // The declared int return is an error code; 0 means success. CUDA errors
    // are checked inside PrRoIPoolingForwardGpu via checkCudaErrors.
    return 0;
}
// Simplified dummy implementations of backward passes
extern "C" void PrRoIPoolingBackwardGpu(
cudaStream_t stream,

287
cimp/resnet/resnet.cpp

@ -0,0 +1,287 @@
#include "resnet.h"

#include <algorithm>  // For std::replace, std::find
#include <filesystem> // For std::filesystem::path
#include <fstream>    // Added for std::ifstream
#include <iostream>   // For std::cout, std::cerr (previously pulled in transitively)
#include <iterator>   // For std::istreambuf_iterator
#include <optional>   // ensure this is included
#include <stdexcept>  // For std::runtime_error
#include <vector>     // Added for std::vector

#include <torch/script.h> // For torch::jit::load and torch::jit::Module
namespace cimp {
namespace resnet {
namespace fs = std::filesystem; // Moved fs namespace alias here
// Helper function to load a tensor by its parameter name (e.g., "conv1.weight")
// Assumes .pt files are named like "conv1.weight.pt", "layer1.0.bn1.running_mean.pt"
torch::Tensor load_named_tensor(const std::string& base_weights_dir, const std::string& param_name_original, const torch::Device& device) {
    const fs::path dir(base_weights_dir);
    fs::path weight_path = dir / (param_name_original + ".pt");

    if (!fs::exists(weight_path)) {
        // Fallback: some exporters flatten dotted parameter names with underscores,
        // e.g. "layer1.0.bn1.weight" -> "layer1_0_bn1_weight.pt".
        std::string underscored = param_name_original;
        std::replace(underscored.begin(), underscored.end(), '.', '_');
        const fs::path alt_path = dir / (underscored + ".pt");

        if (!fs::exists(alt_path)) {
            throw std::runtime_error("Weight file not found (tried direct and underscore versions): " +
                                     (dir / (param_name_original + ".pt")).string() +
                                     " and " + alt_path.string());
        }
        std::cout << "INFO: Using underscore-named file for C++ loading: " << alt_path.string() << std::endl;
        weight_path = alt_path;
    }

    std::cout << "Attempting direct torch::pickle_load for tensor: " << weight_path.string() << std::endl;
    try {
        // Slurp the whole file into memory; torch::pickle_load works on a byte buffer.
        std::ifstream in(weight_path.string(), std::ios::binary);
        if (!in) {
            throw std::runtime_error("Failed to open file: " + weight_path.string());
        }
        std::vector<char> bytes((std::istreambuf_iterator<char>(in)),
                                std::istreambuf_iterator<char>());
        in.close();

        const c10::IValue value = torch::pickle_load(bytes);
        return value.toTensor().to(device);
    } catch (const c10::Error& e) {
        std::cerr << "CRITICAL ERROR: torch::pickle_load FAILED for '" << weight_path.string() << "'. Error: " << e.what() << std::endl;
        throw;
    }
}
// --- BottleneckImpl Method Definitions ---
// Constructor implementation for BottleneckImpl
// Signature must match resnet.h:
// BottleneckImpl(int64_t inplanes, int64_t planes, const std::string& weights_dir_prefix, int64_t stride = 1,
// std::optional<torch::nn::Sequential> downsample_module_opt = std::nullopt, int64_t expansion_factor_arg = 4);
// Builds one ResNet-50 bottleneck block (1x1 -> 3x3 -> 1x1 convs, each followed
// by batch norm) and loads its weights from individual .pt files found under
// base_weights_dir, with names formed as block_param_prefix + "<layer>.<param>"
// (e.g. "layer1.0.conv1.weight").
//
// FIX: running_mean / running_var are now assigned to the BatchNorm module
// members directly. The previous code wrote into the OrderedDict returned by
// named_buffers(), but that dict is returned *by value* (a copy), so the
// module's actual running statistics were never updated and inference used the
// freshly-initialized defaults.
BottleneckImpl::BottleneckImpl(const std::string& base_weights_dir,
                               const std::string& block_param_prefix,
                               int64_t inplanes, int64_t planes,
                               const torch::Device& device,
                               int64_t stride_param, std::optional<torch::nn::Sequential> downsample_module_opt,
                               int64_t expansion_factor_arg)
    : expansion_factor(expansion_factor_arg), stride_member(stride_param) {
    // conv1: 1x1 reduction, inplanes -> planes
    conv1 = torch::nn::Conv2d(torch::nn::Conv2dOptions(inplanes, planes, 1).bias(false));
    bn1 = torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(planes));
    conv1->weight = load_named_tensor(base_weights_dir, block_param_prefix + "conv1.weight", device);
    bn1->weight = load_named_tensor(base_weights_dir, block_param_prefix + "bn1.weight", device);
    bn1->bias = load_named_tensor(base_weights_dir, block_param_prefix + "bn1.bias", device);
    bn1->running_mean = load_named_tensor(base_weights_dir, block_param_prefix + "bn1.running_mean", device);
    bn1->running_var = load_named_tensor(base_weights_dir, block_param_prefix + "bn1.running_var", device);
    register_module("conv1", conv1);
    register_module("bn1", bn1);

    // conv2: 3x3, carries this block's stride
    conv2 = torch::nn::Conv2d(torch::nn::Conv2dOptions(planes, planes, 3).stride(stride_member).padding(1).bias(false));
    bn2 = torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(planes));
    conv2->weight = load_named_tensor(base_weights_dir, block_param_prefix + "conv2.weight", device);
    bn2->weight = load_named_tensor(base_weights_dir, block_param_prefix + "bn2.weight", device);
    bn2->bias = load_named_tensor(base_weights_dir, block_param_prefix + "bn2.bias", device);
    bn2->running_mean = load_named_tensor(base_weights_dir, block_param_prefix + "bn2.running_mean", device);
    bn2->running_var = load_named_tensor(base_weights_dir, block_param_prefix + "bn2.running_var", device);
    register_module("conv2", conv2);
    register_module("bn2", bn2);

    // conv3: 1x1 expansion, planes -> planes * expansion_factor
    conv3 = torch::nn::Conv2d(torch::nn::Conv2dOptions(planes, planes * expansion_factor, 1).bias(false));
    bn3 = torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(planes * expansion_factor));
    conv3->weight = load_named_tensor(base_weights_dir, block_param_prefix + "conv3.weight", device);
    bn3->weight = load_named_tensor(base_weights_dir, block_param_prefix + "bn3.weight", device);
    bn3->bias = load_named_tensor(base_weights_dir, block_param_prefix + "bn3.bias", device);
    bn3->running_mean = load_named_tensor(base_weights_dir, block_param_prefix + "bn3.running_mean", device);
    bn3->running_var = load_named_tensor(base_weights_dir, block_param_prefix + "bn3.running_var", device);
    register_module("conv3", conv3);
    register_module("bn3", bn3);

    relu = torch::nn::ReLU(torch::nn::ReLUOptions(true));
    register_module("relu", relu);

    if (downsample_module_opt.has_value()) {
        this->projection_shortcut = downsample_module_opt.value(); // Assign the passed Sequential module
        // Weights for the submodules of projection_shortcut (conv & bn) are loaded by _make_layer
        // before this module is passed. Here, we just register it.
        register_module("projection_shortcut", this->projection_shortcut);
    } else {
        this->projection_shortcut = nullptr;
    }
}
// Forward method implementation for BottleneckImpl
// Standard bottleneck forward pass: three conv/BN stages with ReLU between the
// first two, then a residual add (projecting the shortcut if configured) and a
// final ReLU.
torch::Tensor BottleneckImpl::forward(torch::Tensor x) {
    // Keep the block input for the residual connection.
    torch::Tensor residual = x;

    // Main path: 1x1 -> BN -> ReLU, 3x3 -> BN -> ReLU, 1x1 -> BN.
    torch::Tensor out = relu->forward(bn1->forward(conv1->forward(x)));
    out = relu->forward(bn2->forward(conv2->forward(out)));
    out = bn3->forward(conv3->forward(out));

    // Project the shortcut when shapes differ (first block of a stage).
    if (this->projection_shortcut) {
        residual = this->projection_shortcut->forward(residual);
    }

    out += residual;
    return relu->forward(out);
}
// --- ResNetImpl Method Definitions ---
// Builds the ResNet stem (conv1 -> bn1 -> relu -> maxpool) and the four
// bottleneck stages, loading every parameter from .pt files under
// base_weights_dir_path. output_layers_param selects which intermediate
// activations forward() will return.
//
// FIX: bn1's running statistics are now assigned to the module members
// directly. The previous code wrote into the OrderedDict returned by
// named_buffers(), which is returned by value (a copy), so running_mean /
// running_var kept their freshly-initialized values instead of the exported
// statistics.
ResNetImpl::ResNetImpl(const std::string& base_weights_dir_path,
                       const std::vector<int64_t>& layers_dims,
                       const std::vector<std::string>& output_layers_param,
                       const torch::Device& device)
    : _output_layers(output_layers_param), _base_weights_dir(base_weights_dir_path) {
    // Stem: 7x7 stride-2 conv -> BN -> in-place ReLU -> 3x3 stride-2 max-pool.
    conv1 = torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 64, 7).stride(2).padding(3).bias(false));
    bn1 = torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(64));
    this->conv1->weight = load_named_tensor(this->_base_weights_dir, "conv1.weight", device);
    this->bn1->weight = load_named_tensor(this->_base_weights_dir, "bn1.weight", device);
    this->bn1->bias = load_named_tensor(this->_base_weights_dir, "bn1.bias", device);
    this->bn1->running_mean = load_named_tensor(this->_base_weights_dir, "bn1.running_mean", device);
    this->bn1->running_var = load_named_tensor(this->_base_weights_dir, "bn1.running_var", device);
    register_module("conv1", conv1);
    register_module("bn1", bn1);

    relu = torch::nn::ReLU(torch::nn::ReLUOptions().inplace(true));
    maxpool = torch::nn::MaxPool2d(torch::nn::MaxPool2dOptions(3).stride(2).padding(1));
    register_module("relu", relu);
    register_module("maxpool", maxpool);

    // Four stages; the first block of layers 2-4 downsamples with stride 2.
    layer1 = _make_layer(64, layers_dims[0], "layer1.", device);
    layer2 = _make_layer(128, layers_dims[1], "layer2.", device, 2);
    layer3 = _make_layer(256, layers_dims[2], "layer3.", device, 2);
    layer4 = _make_layer(512, layers_dims[3], "layer4.", device, 2);
    register_module("layer1", layer1);
    register_module("layer2", layer2);
    register_module("layer3", layer3);
    register_module("layer4", layer4);
}
// Builds one ResNet stage as a Sequential of Bottleneck blocks, loading the
// downsample (projection) weights here so the pre-built Sequential can be
// handed to the first block's constructor.
//
// FIX: the downsample BN's running statistics are now assigned to the module
// members directly; the previous write into named_buffers() mutated a copied
// OrderedDict and left the module's buffers untouched.
torch::nn::Sequential ResNetImpl::_make_layer(int64_t planes_for_block, int64_t num_blocks,
                                              const std::string& layer_param_prefix,
                                              const torch::Device& device,
                                              int64_t stride_for_first_block) {
    torch::nn::Sequential layer_sequential;
    std::optional<torch::nn::Sequential> downsample_module_for_block_opt = std::nullopt;

    // A projection shortcut (1x1 conv + BN) is needed whenever the first block
    // changes the spatial resolution or the channel count.
    if (stride_for_first_block != 1 || this->inplanes != planes_for_block * ResNetImpl::expansion) {
        torch::nn::Sequential ds_seq;
        auto conv_down = torch::nn::Conv2d(torch::nn::Conv2dOptions(this->inplanes, planes_for_block * ResNetImpl::expansion, 1).stride(stride_for_first_block).bias(false));
        auto bn_down = torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(planes_for_block * ResNetImpl::expansion));
        std::string ds_block_prefix = layer_param_prefix + "0.downsample.";
        conv_down->weight = load_named_tensor(this->_base_weights_dir, ds_block_prefix + "0.weight", device);
        bn_down->weight = load_named_tensor(this->_base_weights_dir, ds_block_prefix + "1.weight", device);
        bn_down->bias = load_named_tensor(this->_base_weights_dir, ds_block_prefix + "1.bias", device);
        bn_down->running_mean = load_named_tensor(this->_base_weights_dir, ds_block_prefix + "1.running_mean", device);
        bn_down->running_var = load_named_tensor(this->_base_weights_dir, ds_block_prefix + "1.running_var", device);
        ds_seq->push_back(conv_down);
        ds_seq->push_back(bn_down);
        downsample_module_for_block_opt = ds_seq;
    }

    // First block of the stage carries the stride and the optional projection.
    std::string first_block_param_prefix = layer_param_prefix + "0.";
    layer_sequential->push_back(Bottleneck(this->_base_weights_dir, first_block_param_prefix,
                                           this->inplanes, planes_for_block, device,
                                           stride_for_first_block,
                                           downsample_module_for_block_opt, ResNetImpl::expansion));
    this->inplanes = planes_for_block * ResNetImpl::expansion;

    // Remaining blocks: stride 1, no projection shortcut.
    for (int64_t i = 1; i < num_blocks; ++i) {
        std::string current_block_param_prefix = layer_param_prefix + std::to_string(i) + ".";
        layer_sequential->push_back(Bottleneck(this->_base_weights_dir, current_block_param_prefix,
                                               this->inplanes, planes_for_block, device,
                                               1,
                                               std::nullopt, ResNetImpl::expansion));
    }
    return layer_sequential;
}
// Runs the backbone and returns a map of the intermediate activations whose
// names appear in _output_layers (e.g. "conv1_output", "layer1", "features").
// Layers not requested are still executed; only the map contents differ.
std::map<std::string, torch::Tensor> ResNetImpl::forward(torch::Tensor x) {
    std::map<std::string, torch::Tensor> outputs;
    // True when the caller asked for this named activation to be captured.
    auto should_output = [&](const std::string& layer_name) {
        return std::find(_output_layers.begin(), _output_layers.end(), layer_name) != _output_layers.end();
    };
    // Original GPU path for conv1
    x = conv1->forward(x);
    if (should_output("conv1_output")) outputs["conv1_output"] = x;
    // .clone() before bn1/relu/maxpool: relu is configured inplace(true), so
    // cloning protects the activations already stored in `outputs` from being
    // overwritten by later in-place ops.
    x = bn1->forward(x.clone());
    if (should_output("bn1_output")) outputs["bn1_output"] = x;
    x = relu->forward(x.clone());
    if (should_output("relu1_output")) outputs["relu1_output"] = x;
    torch::Tensor x_pre_layer1 = maxpool->forward(x.clone());
    if (should_output("maxpool_output")) outputs["maxpool_output"] = x_pre_layer1;
    // Pass x_pre_layer1 to layer1
    torch::Tensor x_after_layer1 = layer1->forward(x_pre_layer1.clone()); // Use .clone() if layer1 might modify input inplace, good practice
    if (should_output("layer1")) outputs["layer1"] = x_after_layer1;
    // Debug tap: optionally re-run layer1.0's projection shortcut alone on the
    // pre-layer1 activation so its output can be compared against Python.
    if (should_output("layer1_0_shortcut_output")) {
        if (layer1 && !layer1->is_empty()) {
            try {
                // Get the first module (Bottleneck) from layer1 Sequential container
                std::shared_ptr<torch::nn::Module> first_block_module_ptr = layer1->ptr(0);
                // Attempt to dynamically cast to Bottleneck type
                auto bottleneck_module_holder = std::dynamic_pointer_cast<cimp::resnet::BottleneckImpl>(first_block_module_ptr);
                if (bottleneck_module_holder) { // Check if cast was successful
                    // Accessing projection_shortcut directly from BottleneckImpl
                    if (bottleneck_module_holder->projection_shortcut) {
                        torch::Tensor shortcut_out = bottleneck_module_holder->projection_shortcut->forward(x_pre_layer1.clone());
                        outputs["layer1_0_shortcut_output"] = shortcut_out;
                    } else {
                        // std::cout << "DEBUG: layer1.0 projection_shortcut is null." << std::endl;
                    }
                } else {
                    // std::cerr << "ERROR: Failed to cast first block of layer1 to BottleneckImpl." << std::endl;
                }
            } catch (const std::exception& e) {
                // Swallow: the debug tap must never break the main forward pass.
                // std::cerr << "ERROR: Exception while getting layer1_0_shortcut_output: " << e.what() << std::endl;
            }
        } else {
            // std::cout << "DEBUG: layer1 is null or empty, cannot get shortcut output." << std::endl;
        }
    }
    // Remaining stages run unconditionally; outputs are captured on request.
    torch::Tensor x_current = x_after_layer1; // Continue with the output of layer1
    x_current = layer2->forward(x_current.clone());
    if (should_output("layer2")) outputs["layer2"] = x_current;
    x_current = layer3->forward(x_current.clone());
    if (should_output("layer3")) outputs["layer3"] = x_current;
    x_current = layer4->forward(x_current.clone());
    if (should_output("layer4")) outputs["layer4"] = x_current;
    if (should_output("features")) outputs["features"] = x_current; // 'features' is typically layer4 output
    return outputs;
}
// For ResNet-50, layers are [3, 4, 6, 3]
// Factory for a ResNet-50 backbone: bottleneck counts per stage are {3, 4, 6, 3}.
ResNet resnet50(const std::string& base_weights_dir,
                const std::vector<std::string>& output_layers,
                const torch::Device& device) {
    const std::vector<int64_t> layer_dims{3, 4, 6, 3};
    return ResNet(ResNetImpl(base_weights_dir, layer_dims, output_layers, device));
}
} // namespace resnet
} // namespace cimp

82
cimp/resnet/resnet.h

@ -0,0 +1,82 @@
#ifndef CPP_TRACKER_RESNET_H
#define CPP_TRACKER_RESNET_H
#include <torch/torch.h>
#include <vector>
#include <string>
#include <map>
#include <optional>
namespace cimp {
namespace resnet {
// ResNet-50 Bottleneck block IMPLementation
struct BottleneckImpl : torch::nn::Module {
    // Constructor: loads all block weights from .pt files under base_weights_dir;
    // parameter file names are formed as block_param_prefix + "<layer>.<param>"
    // (e.g. "layer1.0.conv1.weight").
    BottleneckImpl(const std::string& base_weights_dir,
    const std::string& block_param_prefix,
    int64_t inplanes, int64_t planes,
    const torch::Device& device,
    int64_t stride = 1,
    std::optional<torch::nn::Sequential> downsample_module_opt = std::nullopt,
    int64_t expansion_factor_arg = 4);
    // Forward: conv1/bn1/relu -> conv2/bn2/relu -> conv3/bn3, then residual add
    // (through projection_shortcut when present) and a final ReLU.
    torch::Tensor forward(torch::Tensor x);
    // Member layers (must be declared in the Impl struct)
    torch::nn::Conv2d conv1{nullptr}, conv2{nullptr}, conv3{nullptr};
    torch::nn::BatchNorm2d bn1{nullptr}, bn2{nullptr}, bn3{nullptr};
    torch::nn::ReLU relu{nullptr};
    // 1x1 conv + BN applied to the residual when shapes change; null otherwise.
    torch::nn::Sequential projection_shortcut{nullptr};
    int64_t expansion_factor; // Store the expansion factor (output channels = planes * this)
    int64_t stride_member; // To avoid conflict with constructor param name
};
// This macro defines the 'Bottleneck' type based on 'BottleneckImpl'
// It effectively creates: using Bottleneck = torch::nn::ModuleHolder<BottleneckImpl>;
TORCH_MODULE(Bottleneck);
struct ResNetImpl : torch::nn::Module {
    // Constructor: builds the stem and the four stages (block counts given by
    // `layers`), loading every parameter from .pt files under base_weights_dir.
    // `output_layers` names the intermediate activations forward() returns.
    ResNetImpl(const std::string& base_weights_dir,
    const std::vector<int64_t>& layers,
    const std::vector<std::string>& output_layers,
    const torch::Device& device);
    // Returns a map from requested layer name to its activation tensor.
    std::map<std::string, torch::Tensor> forward(torch::Tensor x);
    // Initial layers
    torch::nn::Conv2d conv1{nullptr};
    torch::nn::BatchNorm2d bn1{nullptr};
    torch::nn::ReLU relu{nullptr};
    torch::nn::MaxPool2d maxpool{nullptr};
    // ResNet layers
    torch::nn::Sequential layer1{nullptr};
    torch::nn::Sequential layer2{nullptr};
    torch::nn::Sequential layer3{nullptr};
    torch::nn::Sequential layer4{nullptr}; // We'll build it, even if not always outputting
private:
    // Builds one stage of `blocks` Bottlenecks; the first block takes `stride`
    // and a projection shortcut when the shape changes.
    torch::nn::Sequential _make_layer(int64_t planes, int64_t blocks,
    const std::string& layer_param_prefix,
    const torch::Device& device,
    int64_t stride = 1);
    // Running input-channel count while stages are being built (starts at 64).
    int64_t inplanes = 64;
    std::vector<std::string> _output_layers;
    std::string _base_weights_dir; // Store base weights directory, e.g. ../exported_weights/raw_backbone
    static const int expansion = 4; // Bottleneck expansion factor for ResNet layers
};
TORCH_MODULE(ResNet);
// Factory function for ResNet-50
ResNet resnet50(const std::string& base_weights_dir,
const std::vector<std::string>& output_layers,
const torch::Device& device);
} // namespace resnet
} // namespace cimp
#endif //CPP_TRACKER_RESNET_H

156
cmake-build-debug/CMakeCache.txt

@ -14,100 +14,82 @@
# EXTERNAL cache entries
########################
//No help, variable specified on the command line.
CMAKE_BUILD_TYPE:UNINITIALIZED=Debug
//Path to a program.
CMAKE_ADDR2LINE:FILEPATH=/usr/bin/addr2line
//Value Computed by CMake.
CMAKE_FIND_PACKAGE_REDIRECTS_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug/CMakeFiles/pkgRedirects
//Path to a program.
CMAKE_AR:FILEPATH=/usr/bin/ar
//No help, variable specified on the command line.
CMAKE_MAKE_PROGRAM:UNINITIALIZED=/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/ninja/linux/x64/ninja
//Value Computed by CMake
CMAKE_PROJECT_DESCRIPTION:STATIC=
//Value Computed by CMake
CMAKE_PROJECT_HOMEPAGE_URL:STATIC=
//Value Computed by CMake
CMAKE_PROJECT_NAME:STATIC=cpp_tracker
CMAKE_BUILD_TYPE:UNINITIALIZED=Debug
//No help, variable specified on the command line.
CMAKE_TOOLCHAIN_FILE:UNINITIALIZED=/media/mht/ADATA/repos/cpp_tracker/vcpkg//scripts/buildsystems/vcpkg.cmake
//CUDA architectures
CMAKE_CUDA_ARCHITECTURES:STRING=
//Automatically copy dependencies into the output directory for
// executables.
VCPKG_APPLOCAL_DEPS:BOOL=ON
//CUDA compiler
CMAKE_CUDA_COMPILER:FILEPATH=CMAKE_CUDA_COMPILER-NOTFOUND
//Additional options to bootstrap vcpkg
VCPKG_BOOTSTRAP_OPTIONS:STRING=
//CXX compiler
CMAKE_CXX_COMPILER:FILEPATH=/usr/bin/c++
//The directory which contains the installed libraries for each
// triplet
VCPKG_INSTALLED_DIR:PATH=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug/vcpkg_installed
//A wrapper around 'ar' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_CXX_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-11
//Additional install options to pass to vcpkg
VCPKG_INSTALL_OPTIONS:STRING=
//A wrapper around 'ranlib' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_CXX_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-11
//The path to the vcpkg manifest directory.
VCPKG_MANIFEST_DIR:PATH=/media/mht/ADATA/repos/cpp_tracker
//Path to a program.
CMAKE_DLLTOOL:FILEPATH=CMAKE_DLLTOOL-NOTFOUND
//Install the dependencies listed in your manifest:
//\n If this is off, you will have to manually install your dependencies.
//\n See https://github.com/microsoft/vcpkg/tree/master/docs/specifications/manifests.md
// for more info.
//\n
VCPKG_MANIFEST_INSTALL:BOOL=ON
//Value Computed by CMake.
CMAKE_FIND_PACKAGE_REDIRECTS_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug/CMakeFiles/pkgRedirects
//Use manifest mode, as opposed to classic mode.
VCPKG_MANIFEST_MODE:BOOL=ON
//Path to a program.
CMAKE_LINKER:FILEPATH=/usr/bin/ld
//Overlay ports to use for vcpkg install in manifest mode
VCPKG_OVERLAY_PORTS:STRING=
//Path to a program.
CMAKE_MAKE_PROGRAM:FILEPATH=/usr/bin/gmake
//Overlay triplets to use for vcpkg install in manifest mode
VCPKG_OVERLAY_TRIPLETS:STRING=
//Path to a program.
CMAKE_NM:FILEPATH=/usr/bin/nm
//Appends the vcpkg paths to CMAKE_PREFIX_PATH, CMAKE_LIBRARY_PATH
// and CMAKE_FIND_ROOT_PATH so that vcpkg libraries/packages are
// found after toolchain/system libraries/packages.
VCPKG_PREFER_SYSTEM_LIBS:BOOL=OFF
//Path to a program.
CMAKE_OBJCOPY:FILEPATH=/usr/bin/objcopy
//Enable the setup of CMAKE_PROGRAM_PATH to vcpkg paths
VCPKG_SETUP_CMAKE_PROGRAM_PATH:BOOL=ON
//Path to a program.
CMAKE_OBJDUMP:FILEPATH=/usr/bin/objdump
//Vcpkg target triplet (ex. x86-windows)
VCPKG_TARGET_TRIPLET:STRING=x64-linux
//Value Computed by CMake
CMAKE_PROJECT_DESCRIPTION:STATIC=
//Trace calls to find_package()
VCPKG_TRACE_FIND_PACKAGE:BOOL=OFF
//Value Computed by CMake
CMAKE_PROJECT_HOMEPAGE_URL:STATIC=
//Enables messages from the VCPKG toolchain for debugging purposes.
VCPKG_VERBOSE:BOOL=OFF
//Value Computed by CMake
CMAKE_PROJECT_NAME:STATIC=ImageSimilarityTracker
//(experimental) Automatically copy dependencies into the install
// target directory for executables. Requires CMake 3.14.
X_VCPKG_APPLOCAL_DEPS_INSTALL:BOOL=OFF
//Path to a program.
CMAKE_RANLIB:FILEPATH=/usr/bin/ranlib
//(experimental) Add USES_TERMINAL to VCPKG_APPLOCAL_DEPS to force
// serialization.
X_VCPKG_APPLOCAL_DEPS_SERIALIZED:BOOL=OFF
//Path to a program.
CMAKE_READELF:FILEPATH=/usr/bin/readelf
//Path to a program.
Z_VCPKG_CL:FILEPATH=Z_VCPKG_CL-NOTFOUND
CMAKE_STRIP:FILEPATH=/usr/bin/strip
//The directory which contains the installed libraries for each
// triplet
_VCPKG_INSTALLED_DIR:PATH=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug/vcpkg_installed
//Path to a program.
CMAKE_TAPI:FILEPATH=CMAKE_TAPI-NOTFOUND
//Value Computed by CMake
cimp_BINARY_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug
ImageSimilarityTracker_BINARY_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug
//Value Computed by CMake
cimp_IS_TOP_LEVEL:STATIC=ON
ImageSimilarityTracker_IS_TOP_LEVEL:STATIC=ON
//Value Computed by CMake
cimp_SOURCE_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker
ImageSimilarityTracker_SOURCE_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker
//Value Computed by CMake
cpp_tracker_BINARY_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug
@ -123,6 +105,10 @@ cpp_tracker_SOURCE_DIR:STATIC=/media/mht/ADATA/repos/cpp_tracker
# INTERNAL cache entries
########################
//ADVANCED property for variable: CMAKE_ADDR2LINE
CMAKE_ADDR2LINE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_AR
CMAKE_AR-ADVANCED:INTERNAL=1
//This is the directory where this CMakeCache.txt was created
CMAKE_CACHEFILE_DIR:INTERNAL=/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug
//Major version of cmake used to create the current loaded cache
@ -137,10 +123,22 @@ CMAKE_COMMAND:INTERNAL=/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/c
CMAKE_CPACK_COMMAND:INTERNAL=/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/bin/cpack
//Path to ctest program executable.
CMAKE_CTEST_COMMAND:INTERNAL=/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/bin/ctest
//ADVANCED property for variable: CMAKE_CUDA_COMPILER
CMAKE_CUDA_COMPILER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_COMPILER
CMAKE_CXX_COMPILER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_COMPILER_AR
CMAKE_CXX_COMPILER_AR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_COMPILER_RANLIB
CMAKE_CXX_COMPILER_RANLIB-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_DLLTOOL
CMAKE_DLLTOOL-ADVANCED:INTERNAL=1
//Executable file format
CMAKE_EXECUTABLE_FORMAT:INTERNAL=ELF
//Name of external makefile project generator.
CMAKE_EXTRA_GENERATOR:INTERNAL=
//Name of generator.
CMAKE_GENERATOR:INTERNAL=Ninja
CMAKE_GENERATOR:INTERNAL=Unix Makefiles
//Generator instance identifier.
CMAKE_GENERATOR_INSTANCE:INTERNAL=
//Name of generator platform.
@ -150,20 +148,30 @@ CMAKE_GENERATOR_TOOLSET:INTERNAL=
//Source directory with the top level CMakeLists.txt file for this
// project
CMAKE_HOME_DIRECTORY:INTERNAL=/media/mht/ADATA/repos/cpp_tracker
//ADVANCED property for variable: CMAKE_LINKER
CMAKE_LINKER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MAKE_PROGRAM
CMAKE_MAKE_PROGRAM-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_NM
CMAKE_NM-ADVANCED:INTERNAL=1
//number of local generators
CMAKE_NUMBER_OF_MAKEFILES:INTERNAL=1
//ADVANCED property for variable: CMAKE_OBJCOPY
CMAKE_OBJCOPY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_OBJDUMP
CMAKE_OBJDUMP-ADVANCED:INTERNAL=1
//Platform information initialized
CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1
//ADVANCED property for variable: CMAKE_RANLIB
CMAKE_RANLIB-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_READELF
CMAKE_READELF-ADVANCED:INTERNAL=1
//Path to CMake installation.
CMAKE_ROOT:INTERNAL=/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/share/cmake-3.30
//ADVANCED property for variable: CMAKE_TOOLCHAIN_FILE
CMAKE_TOOLCHAIN_FILE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STRIP
CMAKE_STRIP-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_TAPI
CMAKE_TAPI-ADVANCED:INTERNAL=1
//uname command
CMAKE_UNAME:INTERNAL=/usr/bin/uname
//ADVANCED property for variable: VCPKG_VERBOSE
VCPKG_VERBOSE-ADVANCED:INTERNAL=1
//Making sure VCPKG_MANIFEST_MODE doesn't change
Z_VCPKG_CHECK_MANIFEST_MODE:INTERNAL=ON
//Vcpkg root directory
Z_VCPKG_ROOT_DIR:INTERNAL=/media/mht/ADATA/repos/cpp_tracker/vcpkg

13
cmake-build-debug/CMakeFiles/clion-Debug-log.txt

@ -1,14 +1,9 @@
/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/bin/cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_MAKE_PROGRAM=/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/ninja/linux/x64/ninja -DCMAKE_TOOLCHAIN_FILE=/media/mht/ADATA/repos/cpp_tracker/vcpkg//scripts/buildsystems/vcpkg.cmake -G Ninja -S /media/mht/ADATA/repos/cpp_tracker -B /media/mht/ADATA/repos/cpp_tracker/cmake-build-debug
-- Running vcpkg install
/media/mht/ADATA/repos/cpp_tracker/vcpkg/ports/torch: error: torch does not exist
-- Running vcpkg install - failed
CMake Error at vcpkg/scripts/buildsystems/vcpkg.cmake:938 (message):
vcpkg install failed. See logs for more information:
/media/mht/ADATA/repos/cpp_tracker/cmake-build-debug/vcpkg-manifest-install.log
/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/bin/cmake -DCMAKE_BUILD_TYPE=Debug -G "Unix Makefiles" -S /media/mht/ADATA/repos/cpp_tracker -B /media/mht/ADATA/repos/cpp_tracker/cmake-build-debug
CMake Error at /home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/share/cmake-3.30/Modules/Internal/CMakeCUDAArchitecturesValidate.cmake:7 (message):
CMAKE_CUDA_ARCHITECTURES must be non-empty if set.
Call Stack (most recent call first):
/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/share/cmake-3.30/Modules/CMakeDetermineSystem.cmake:146 (include)
/home/mht/.local/share/JetBrains/Toolbox/apps/clion/bin/cmake/linux/x64/share/cmake-3.30/Modules/CMakeDetermineCUDACompiler.cmake:112 (cmake_cuda_architectures_validate)
CMakeLists.txt:2 (project)
CMake Error: CMAKE_CXX_COMPILER not set, after EnableLanguage
-- Configuring incomplete, errors occurred!

BIN
cmake-build-debug/CMakeFiles/clion-environment.txt

178
export_resnet_individual_tensors.py

@ -0,0 +1,178 @@
import torch
import torchvision.models as models
import os
from collections import OrderedDict
def export_weights(model, output_dir, doc_filename):
    """Export every tensor in ``model.state_dict()`` as an individual .pt file.

    Also writes a plain-text documentation file listing each tensor's key,
    shape, dtype and the file it was saved to.

    Filename convention: '.' in state_dict keys is replaced by '_' and '.pt'
    is appended (e.g. ``layer1.0.conv1.weight`` -> ``layer1_0_conv1_weight.pt``),
    matching DiMPTorchScriptWrapper expectations.

    Args:
        model: any object exposing a ``state_dict()`` method returning a
            mapping of name -> torch.Tensor.
        output_dir: directory the .pt files are written into (created if absent).
        doc_filename: name of the documentation file written inside output_dir.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created directory: {output_dir}")
    doc_lines = ["# Auto-generated weights documentation\n"]
    state_dict = model.state_dict()
    print(f"Exporting {len(state_dict)} tensors to {output_dir}...")
    processed = 0
    for key, tensor_data in state_dict.items():
        # Use underscore naming convention for filename, matching
        # DiMPTorchScriptWrapper expectations.
        file_name = key.replace('.', '_') + '.pt'
        file_path = os.path.join(output_dir, file_name)
        # Detach and move to CPU so the saved file is device-independent.
        torch.save(tensor_data.clone().detach().cpu(), file_path)
        # Add entry to documentation file.
        doc_lines.append(f"## {key}\n")
        doc_lines.append(f"Shape: {list(tensor_data.shape)}\n")
        doc_lines.append(f"Dtype: {tensor_data.dtype}\n")
        doc_lines.append(f"File: {file_name}\n\n")
        processed += 1
        # BUGFIX: the original tested (len(doc_lines) % 50) == 0, which can
        # never be true (len is always 1 + 4*k, i.e. odd), so progress was
        # never reported. Count exported tensors directly instead.
        if processed % 50 == 0:  # Print progress periodically
            print(f"  Processed {processed} tensors...")
    doc_file_path = os.path.join(output_dir, doc_filename)
    with open(doc_file_path, 'w') as f:
        f.writelines(doc_lines)
    print(f"Successfully exported {len(state_dict)} tensors.")
    print(f"Documentation file created: {doc_file_path}")
if __name__ == "__main__":
    # Entry point: build an LTR-structured ResNet-50 state_dict filled with
    # torchvision's pretrained weights, then export each tensor individually
    # via export_weights().
    # --- Configuration ---
    # For ResNet-50, the original DiMP seems to use a ResNet variant that might not
    # exactly match torchvision's default ResNet-50 in terms of all parameter names
    # or structure, especially if it was modified for specific feature extraction.
    # The ltr.models.backbone.resnet.resnet50 is the one used by DiMPTorchScriptWrapper.
    # We need to ensure the keys from this model are used for saving, so that
    # DiMPTorchScriptWrapper can load them correctly.
    print("Loading reference ResNet-50 model structure (ltr.models.backbone.resnet)...")
    # This import assumes your project structure allows this script to find ltr.
    # You might need to adjust sys.path if this script is placed outside the main project tree.
    # For example, if cpp_tracker is the root:
    import sys
    # Assuming this script is in cpp_tracker/ or cpp_tracker/test/.
    # Adjust based on actual location if needed.
    # If script is in cpp_tracker root, this is fine.
    # If in cpp_tracker/test/, then '../' to get to cpp_tracker/.
    project_root = os.path.dirname(os.path.abspath(__file__))  # If in root
    # project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # If in a subdir like /test
    # sys.path.insert(0, project_root)  # Add project root to allow ltr import
    # Let's assume the script is in the root for now, or that ltr is in PYTHONPATH.
    try:
        from ltr.models.backbone.resnet import resnet50 as ltr_resnet50
    except ImportError as e:
        print(f"Error importing ltr.models.backbone.resnet: {e}")
        print("Please ensure that the 'ltr' module is in your PYTHONPATH, or adjust sys.path in this script.")
        print("You might need to run this script from the root of the cpp_tracker workspace or ensure correct setup.")
        sys.exit(1)
    # 1. Create an instance of the LTR ResNet-50 to get the correct parameter names and structure.
    #    This model will define the *target* state_dict keys we want to save.
    print("Instantiating LTR ResNet-50 (for structure and param names)...")
    # output_layers doesn't strictly matter here as we only need its state_dict keys,
    # but use a common setting.
    ltr_model = ltr_resnet50(output_layers=['layer1','layer2','layer3','layer4'], pretrained=False)
    ltr_model_state_dict_keys = ltr_model.state_dict().keys()
    print(f"LTR ResNet-50 instantiated. It has {len(ltr_model_state_dict_keys)} parameters/buffers.")
    # 2. Load the actual pretrained weights from torchvision.
    print("Loading pretrained ResNet-50 weights from torchvision...")
    torchvision_model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
    torchvision_state_dict = torchvision_model.state_dict()
    print("Torchvision ResNet-50 pretrained weights loaded.")
    # 3. Create a new state_dict that only contains keys present in *both*
    #    the LTR model and the torchvision model, using torchvision's weights.
    #    This handles potential mismatches like torchvision having a full 'fc' layer
    #    that the LTR ResNet variant might not use or name identically.
    #    Also, ltr.models.backbone.resnet.py applies its own normalization to conv layers
    #    and fills batchnorm weights/biases. The torchvision pretrained=True model already has these.
    #    So, we directly use the torchvision weights for matching keys.
    aligned_state_dict = OrderedDict()
    copied_keys = 0
    torchvision_only_keys = []
    ltr_only_keys_not_in_torchvision = []
    for key in ltr_model_state_dict_keys:
        if key in torchvision_state_dict:
            # Same key exists in both models: take torchvision's tensor only
            # when the shapes agree.
            if ltr_model.state_dict()[key].shape == torchvision_state_dict[key].shape:
                aligned_state_dict[key] = torchvision_state_dict[key].clone()
                copied_keys += 1
            else:
                print(f" Shape mismatch for key '{key}': LTR shape {ltr_model.state_dict()[key].shape}, Torchvision shape {torchvision_state_dict[key].shape}. Skipping.")
                ltr_only_keys_not_in_torchvision.append(key + " (shape mismatch)")
        else:
            # If a key from LTR model is not in torchvision, it might be an architectural difference
            # or a buffer that torchvision doesn't save in its state_dict explicitly (e.g. num_batches_tracked for BN).
            # The LTR model initializes these, so we can take them from the un-trained ltr_model instance.
            # This is important for BN running_mean, running_var, and num_batches_tracked if not in torchvision sd.
            print(f" Key '{key}' in LTR model but not in Torchvision state_dict. Using LTR model's initial value for this key.")
            aligned_state_dict[key] = ltr_model.state_dict()[key].clone()  # Use the initial value from ltr_model
            ltr_only_keys_not_in_torchvision.append(key + " (taken from LTR init)")
            copied_keys +=1  # Counting this as copied for completeness
    print(f"Matched and copied {copied_keys} Tensors from Torchvision to LTR structure.")
    # Report keys that could not be taken from torchvision.
    if ltr_only_keys_not_in_torchvision:
        print(f" Keys in LTR model structure but not found in Torchvision pretrained state_dict (or shape mismatch): {len(ltr_only_keys_not_in_torchvision)}")
        for k_info in ltr_only_keys_not_in_torchvision[:10]:  # Print first 10
            print(f" - {k_info}")
        if len(ltr_only_keys_not_in_torchvision) > 10: print(" ...")
    # Report torchvision keys the LTR structure does not use (e.g. a differing fc head).
    for key in torchvision_state_dict.keys():
        if key not in ltr_model_state_dict_keys:
            torchvision_only_keys.append(key)
    if torchvision_only_keys:
        print(f" Keys in Torchvision pretrained state_dict but not in LTR model structure: {len(torchvision_only_keys)}")
        for k in torchvision_only_keys[:10]:  # Print first 10
            print(f" - {k}")
        if len(torchvision_only_keys) > 10: print(" ...")
    # 4. Populate the LTR model instance with these aligned weights.
    #    This isn't strictly necessary for saving, but it's a good check.
    print("Loading aligned state_dict into LTR model instance...")
    missing_keys, unexpected_keys = ltr_model.load_state_dict(aligned_state_dict, strict=False)  # Use strict=False due to potential key differences
    if missing_keys:
        print(f" Warning: Missing keys when loading aligned_state_dict into LTR model: {missing_keys}")
    if unexpected_keys:
        print(f" Warning: Unexpected keys when loading aligned_state_dict into LTR model: {unexpected_keys}")
    if not missing_keys and not unexpected_keys:
        print(" Successfully loaded aligned state_dict into LTR model instance.")
    # 5. Now, use this populated ltr_model (or rather, its aligned_state_dict which has the correct keys and torchvision weights)
    #    for the export_weights function. The export_weights function expects a model, but we can
    #    give it an object that has a .state_dict() method returning our aligned_state_dict.
    class TempModelWrapper:
        # Minimal duck-typed stand-in: export_weights only calls .state_dict().
        def __init__(self, state_dict_to_serve):
            self._state_dict = state_dict_to_serve
        def state_dict(self):
            return self._state_dict
    model_to_export_from = TempModelWrapper(aligned_state_dict)
    output_directory = "exported_weights/backbone_regenerated"
    doc_file = "backbone_regenerated_weights_doc.txt"
    print(f"\nStarting export process to '{output_directory}'...")
    export_weights(model_to_export_from, output_directory, doc_file)
    print("\nScript finished.")
    print(f"Please check the '{output_directory}' for the .pt files and '{doc_file}'.")
    print("Next steps:")
    print("1. Update DiMPTorchScriptWrapper in pytracking/features/net_wrappers.py to use this new directory and doc file for ResNet.")
    print("2. Update C++ ResNet loading in cimp/resnet/resnet.cpp (and test_models.cpp) to use this new directory.")
    print("3. Re-run all tests (build.sh, then test/run_tests.sh).")

205
export_resnet_raw.py

@ -0,0 +1,205 @@
import torch
import torch.nn as nn
import os
import argparse
import sys
from collections import OrderedDict
# Add ltr to path to import ResNet.
# Module-level setup: assumes this script lives in the workspace root, with
# the 'ltr' package as a sibling directory — TODO confirm if relocated.
workspace_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))
print(f"Workspace root: {workspace_root}")
ltr_path = os.path.join(workspace_root, 'ltr')
if not os.path.isdir(ltr_path):
    # Without the ltr package nothing below can run; fail fast.
    print(f"Error: 'ltr' directory not found in {workspace_root}")
    sys.exit(1)
sys.path.insert(0, workspace_root)
try:
    # We might not strictly need resnet50 from ltr if loading a JIT model,
    # but good to have for potential type checking or structure reference.
    from ltr.models.backbone.resnet import resnet50
    print("Successfully imported ResNet from ltr.models.backbone.resnet")
except ImportError as e:
    # Import failure is only a warning here; __main__ below will still fail
    # if resnet50 is actually needed and missing.
    print(f"Warning: Could not import ResNet from ltr.models.backbone.resnet: {e}")
    # Depending on the JIT model, this might not be fatal.
class TensorContainer(nn.Module):
    """Tiny nn.Module holding exactly one tensor under a caller-chosen
    attribute name.

    Scripting an instance with torch.jit lets the wrapped tensor be saved
    to disk and retrieved later via that attribute (e.g. from C++).
    """

    def __init__(self, tensor_to_wrap, tensor_name="tensor"):
        super().__init__()
        # register_buffer/register_parameter demand string keys that are
        # valid Python identifiers; a plain attribute assignment does not,
        # so we attach the tensor with setattr instead.
        setattr(self, tensor_name, tensor_to_wrap)
def convert_param_name_to_filename(param_name):
    """Translate a dotted state_dict key into its on-disk file name.

    Example: ``layer1.0.conv1.weight`` -> ``layer1_0_conv1_weight.pt``.
    """
    stem = param_name.replace('.', '_')
    return '{}.pt'.format(stem)
def load_weights_from_individual_files(model_to_populate, source_dir):
    """Populate ``model_to_populate`` from per-tensor .pt files in ``source_dir``.

    For every named parameter and buffer of the model, looks for a file named
    by the underscore convention (see convert_param_name_to_filename). Each
    file may hold a plain tensor or a TorchScript module wrapping one (the
    tensor is then extracted via .attr('tensor') or as the module's single
    parameter/buffer). Shape-mismatched, malformed or missing entries are
    skipped and reported; loading happens with strict=False. Exits the
    process via sys.exit(1) if nothing at all could be loaded.
    """
    print(f"Attempting to load weights from individual files in: {source_dir} using underscore naming convention.")
    new_state_dict = OrderedDict()
    loaded_count = 0
    missed_params = []
    missed_buffers = []
    # Parameters
    for name, param_tensor_template in model_to_populate.named_parameters():
        expected_filename = convert_param_name_to_filename(name)
        filepath = os.path.join(source_dir, expected_filename)
        if os.path.exists(filepath):
            try:
                # print(f" Loading parameter: {name} from {filepath}")
                tensor_data = torch.load(filepath, map_location=torch.device('cpu'))
                if isinstance(tensor_data, torch.jit.ScriptModule):
                    # If it's a JIT module (e.g. from previous save attempts or other sources)
                    # try to extract the tensor, assuming it was wrapped with a known key like 'tensor'
                    # or if it's a module with a single parameter/buffer.
                    try:
                        tensor_data = tensor_data.attr("tensor").toTensor()
                        print(f" INFO: Extracted tensor via .attr('tensor') from ScriptModule: {filepath}")
                    except RuntimeError:
                        # No 'tensor' attribute: fall back to a module with
                        # exactly one parameter or one buffer.
                        params = list(tensor_data.parameters())
                        buffers = list(tensor_data.buffers())
                        if len(params) == 1:
                            tensor_data = params[0]
                            print(f" INFO: Extracted tensor from single parameter of ScriptModule: {filepath}")
                        elif len(buffers) == 1 and not params:
                            tensor_data = buffers[0]
                            print(f" INFO: Extracted tensor from single buffer of ScriptModule: {filepath}")
                        else:
                            raise ValueError(f"ScriptModule at {filepath} doesn't have .attr('tensor') and not single param/buffer.")
                if not isinstance(tensor_data, torch.Tensor):
                    raise TypeError(f"Loaded data from {filepath} is not a tensor (type: {type(tensor_data)})")
                if tensor_data.shape != param_tensor_template.data.shape:
                    print(f" WARNING: Shape mismatch for param {name}. Expected {param_tensor_template.data.shape}, got {tensor_data.shape} from {filepath}. Skipping.")
                    missed_params.append(name)
                    continue
                new_state_dict[name] = tensor_data
                loaded_count += 1
            except Exception as e:
                print(f" ERROR loading or processing {filepath} for param {name}: {e}. Skipping.")
                missed_params.append(name)
        else:
            # print(f" File not found for parameter {name}: {filepath}. Will be missed.")
            missed_params.append(name)
    # Buffers (same procedure; note the single-buffer case is preferred here,
    # mirroring the fact that these files should hold buffers, not params)
    for name, buffer_tensor_template in model_to_populate.named_buffers():
        expected_filename = convert_param_name_to_filename(name)
        filepath = os.path.join(source_dir, expected_filename)
        if os.path.exists(filepath):
            try:
                # print(f" Loading buffer: {name} from {filepath}")
                tensor_data = torch.load(filepath, map_location=torch.device('cpu'))
                if isinstance(tensor_data, torch.jit.ScriptModule):
                    try:
                        tensor_data = tensor_data.attr("tensor").toTensor()
                        print(f" INFO: Extracted tensor via .attr('tensor') from ScriptModule: {filepath}")
                    except RuntimeError:
                        params = list(tensor_data.parameters())
                        buffers = list(tensor_data.buffers())
                        if len(buffers) == 1 and not params:
                            tensor_data = buffers[0]
                            print(f" INFO: Extracted tensor from single buffer of ScriptModule: {filepath}")
                        elif len(params) == 1 and not buffers:
                            tensor_data = params[0]
                            print(f" INFO: Extracted tensor from single param of ScriptModule: {filepath}")
                        else:
                            raise ValueError(f"ScriptModule at {filepath} doesn't have .attr('tensor') and not single param/buffer.")
                if not isinstance(tensor_data, torch.Tensor):
                    raise TypeError(f"Loaded data from {filepath} is not a tensor (type: {type(tensor_data)})")
                if tensor_data.shape != buffer_tensor_template.data.shape:
                    print(f" WARNING: Shape mismatch for buffer {name}. Expected {buffer_tensor_template.data.shape}, got {tensor_data.shape} from {filepath}. Skipping.")
                    missed_buffers.append(name)
                    continue
                new_state_dict[name] = tensor_data
                loaded_count += 1
            except Exception as e:
                print(f" ERROR loading or processing {filepath} for buffer {name}: {e}. Skipping.")
                missed_buffers.append(name)
        else:
            # print(f" File not found for buffer {name}: {filepath}. Will be missed.")
            missed_buffers.append(name)
    if loaded_count > 0:
        print(f"Attempting to load {loaded_count} found tensors into model state_dict.")
        # strict=False: we accept partial coverage and report the gaps below.
        result = model_to_populate.load_state_dict(new_state_dict, strict=False)
        print("State_dict loading result:")
        if result.missing_keys:
            print(f" Strict load missing_keys ({len(result.missing_keys)}): {result.missing_keys[:20]}...")  # Print first 20
        if result.unexpected_keys:
            print(f" Strict load unexpected_keys ({len(result.unexpected_keys)}): {result.unexpected_keys[:20]}...")
        # Cross check with our own missed lists
        print(f"Manually tracked missed parameters ({len(missed_params)}): {missed_params[:20]}...")
        print(f"Manually tracked missed buffers ({len(missed_buffers)}): {missed_buffers[:20]}...")
        # Check if all expected params/buffers in the model were loaded
        all_model_keys = set(model_to_populate.state_dict().keys())
        loaded_keys = set(new_state_dict.keys())
        truly_missing_from_model = all_model_keys - loaded_keys
        if truly_missing_from_model:
            print(f"CRITICAL: Keys in model NOT found in source_dir ({len(truly_missing_from_model)}): {list(truly_missing_from_model)[:20]}...")
        if not truly_missing_from_model and not result.unexpected_keys:
            print("Successfully loaded weights from individual files into the model.")
        else:
            print("WARNING: Some weights might be missing or unexpected after loading from individual files.")
        if not loaded_keys:  # if we loaded nothing
            print("ERROR: No weights were successfully loaded from individual files. Aborting.")
            sys.exit(1)
    else:
        print("ERROR: No weights were found or loaded from individual files. Aborting.")
        sys.exit(1)
def export_jit_wrapped_tensors(model, output_dir):
    """Export every parameter and buffer of ``model`` as a scripted TensorContainer.

    Each tensor is wrapped in a TensorContainer under the attribute "tensor"
    (the key the C++ loader reads), scripted with torch.jit, and saved as
    ``<dotted_state_dict_name>.pt`` inside ``output_dir``.

    Args:
        model: an nn.Module whose named_parameters()/named_buffers() are exported.
        output_dir: destination directory (created if absent).
    """
    TENSOR_KEY_IN_CONTAINER = "tensor"  # The key used in TensorContainer and for C++ loading
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created output directory: {output_dir}")

    def _save_wrapped(name, data, kind_label):
        # Shared by both loops below: wrap one tensor, script it, save it
        # under its dotted state_dict name.
        # NOTE(review): the original progress message read "(as (unknown))",
        # which looks like dump garbling of "(as {filename})" — restored here.
        filename = name + '.pt'
        filepath = os.path.join(output_dir, filename)
        print(f"Exporting JIT-wrapped {kind_label}: {name} (as {filename}) to {filepath} with shape {data.shape}")
        container = TensorContainer(data.clone().detach().cpu(), TENSOR_KEY_IN_CONTAINER)
        scripted_container = torch.jit.script(container)
        torch.jit.save(scripted_container, filepath)

    # The original duplicated this loop body for parameters and buffers;
    # deduplicated into _save_wrapped with identical behavior.
    for name, param in model.named_parameters():
        _save_wrapped(name, param.data, "parameter")
    for name, buf in model.named_buffers():
        _save_wrapped(name, buf.data, "buffer")
    print(f"All params/buffers exported as JIT-wrapped tensors to {output_dir} (using dot naming, key '{TENSOR_KEY_IN_CONTAINER}').")
if __name__ == "__main__":
    # CLI driver: read underscore-named per-tensor files from a source dir,
    # load them into a fresh LTR ResNet-50, then re-export every tensor as a
    # JIT-wrapped (TensorContainer) dot-named .pt file for the C++ loader.
    parser = argparse.ArgumentParser(description="Load ResNet-50 weights from a directory of individual underscore_named .pt files, then re-export them as JIT-wrapped (TensorContainer) dot_named .pt files for C++ loading.")
    parser.add_argument('--source_individual_weights_dir', type=str, required=True,
                        help="Directory containing the source underscore_named .pt files (e.g., 'exported_weights/backbone/').")
    parser.add_argument('--output_jit_wrapped_tensors_dir', type=str, required=True,
                        help="Directory to save the re-exported JIT-wrapped dot_named .pt files (e.g., 'exported_weights/raw_backbone/').")
    args = parser.parse_args()
    print("Instantiating a new ResNet-50 model (will be populated from source dir)...")
    # pretrained=False: all weights come from the source directory, not a download.
    model = resnet50(output_layers=['layer4'], pretrained=False)
    print("ResNet-50 model instantiated.")
    load_weights_from_individual_files(model, args.source_individual_weights_dir)
    export_jit_wrapped_tensors(model, args.output_jit_wrapped_tensors_dir)
    print("Process complete. Weights loaded from source (underscore_named) and re-exported as JIT-wrapped tensors (dot_named).")

BIN
exported_weights/backbone_regenerated/bn1_bias.pt

BIN
exported_weights/backbone_regenerated/bn1_num_batches_tracked.pt

BIN
exported_weights/backbone_regenerated/bn1_running_mean.pt

BIN
exported_weights/backbone_regenerated/bn1_running_var.pt

BIN
exported_weights/backbone_regenerated/bn1_weight.pt

BIN
exported_weights/backbone_regenerated/conv1_weight.pt

BIN
exported_weights/backbone_regenerated/fc_bias.pt

BIN
exported_weights/backbone_regenerated/fc_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn1_bias.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn1_num_batches_tracked.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn1_running_mean.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn1_running_var.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn1_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn2_bias.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn2_num_batches_tracked.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn2_running_mean.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn2_running_var.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn2_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn3_bias.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn3_num_batches_tracked.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn3_running_mean.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn3_running_var.pt

BIN
exported_weights/backbone_regenerated/layer1_0_bn3_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_conv1_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_conv2_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_conv3_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_downsample_0_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_0_downsample_1_bias.pt

BIN
exported_weights/backbone_regenerated/layer1_0_downsample_1_num_batches_tracked.pt

BIN
exported_weights/backbone_regenerated/layer1_0_downsample_1_running_mean.pt

BIN
exported_weights/backbone_regenerated/layer1_0_downsample_1_running_var.pt

BIN
exported_weights/backbone_regenerated/layer1_0_downsample_1_weight.pt

BIN
exported_weights/backbone_regenerated/layer1_1_bn1_bias.pt

BIN
exported_weights/backbone_regenerated/layer1_1_bn1_num_batches_tracked.pt

BIN
exported_weights/backbone_regenerated/layer1_1_bn1_running_mean.pt

BIN
exported_weights/backbone_regenerated/layer1_1_bn1_running_var.pt

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save