#include "bb_regressor.h"
#include <iostream>
#include <fstream>
#include <torch/script.h>
#include <torch/serialize.h>
#include <vector>
#include <stdexcept>
// CUDA includes (this module requires a CUDA build)
#include <cuda_runtime.h>
#include <ATen/cuda/CUDAContext.h>
#include <sys/stat.h>
#include <sys/types.h>
// Use the PrRoIPooling implementation
#include "prroi_pooling_gpu.h"
#include "prroi_pooling_gpu_impl.cuh"
#include "utils.h"
// PrRoIPool2D implementation (requires CUDA)
PrRoIPool2D::PrRoIPool2D(int pooled_height, int pooled_width, float spatial_scale)
    : pooled_height_(pooled_height), pooled_width_(pooled_width), spatial_scale_(spatial_scale) {}

torch::Tensor PrRoIPool2D::forward(torch::Tensor feat, torch::Tensor rois) {
    // Print shape info for debugging
    std::cout << "  PrRoIPool2D inputs:" << std::endl;
    std::cout << "    Features: [" << feat.size(0) << ", " << feat.size(1) << ", "
              << feat.size(2) << ", " << feat.size(3) << "]" << std::endl;
    std::cout << "    ROIs: [" << rois.size(0) << ", " << rois.size(1) << "]" << std::endl;
    std::cout << "    Pooled size: [" << pooled_height_ << ", " << pooled_width_ << "]" << std::endl;
    std::cout << "    Spatial scale: " << spatial_scale_ << std::endl;

    // Output shape is [num_rois, channels, pooled_height, pooled_width]
    int channels = feat.size(1);
    int num_rois = rois.size(0);

    // Both tensors must live on the GPU; there is no CPU fallback.
    if (!feat.is_cuda() || !rois.is_cuda()) {
        throw std::runtime_error("PrRoIPool2D requires CUDA tensors - CPU mode is not supported");
    }
    feat = feat.contiguous();
    rois = rois.contiguous();

    // Create the output tensor on the same (CUDA) device as the features.
    auto output = torch::zeros({num_rois, channels, pooled_height_, pooled_width_},
                               feat.options());

    // Pass GPU pointers directly to the CUDA kernel; do NOT round-trip the
    // tensors through the CPU. The kernel writes into `output` in place.
    std::cout << "  Calling prroi_pooling_forward_cuda with GPU data..." << std::endl;
    prroi_pooling_forward_cuda(
        feat.data_ptr<float>(),
        rois.data_ptr<float>(),   // rois must already be float32
        output.data_ptr<float>(),
        channels,
        feat.size(2),
        feat.size(3),
        num_rois,
        pooled_height_,
        pooled_width_,
        spatial_scale_
    );
    std::cout << "  prroi_pooling_forward_cuda completed" << std::endl;

    // Output was filled in place on the GPU; no copy back is needed.
    return output;
}
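
// Usage sketch (illustrative values; assumes a CUDA build):
//   PrRoIPool2D pool(3, 3, 0.125f);                    // 3x3 output bins, 1/8 feature stride
//   auto feat = torch::rand({1, 512, 36, 36}, torch::kCUDA);
//   auto rois = torch::tensor({{0.f, 8.f, 8.f, 120.f, 120.f}},
//                             torch::TensorOptions().device(torch::kCUDA));
//   auto pooled = pool.forward(feat, rois);            // -> [1, 512, 3, 3]
// Each ROI row is [batch_index, x1, y1, x2, y2] in image coordinates; the pool
// scales the box by spatial_scale to index into the feature map.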
// LinearBlock implementation
LinearBlock::LinearBlock(int in_planes, int out_planes, int input_sz, bool bias, bool batch_norm, bool relu) {
    // Create the linear layer; it consumes the flattened (C * H * W) pooled features.
    auto linear_options = torch::nn::LinearOptions(in_planes * input_sz * input_sz, out_planes).bias(bias);
    linear = register_module("linear", torch::nn::Linear(linear_options));
    use_bn = batch_norm;
    if (use_bn) {
        bn = register_module("bn", torch::nn::BatchNorm1d(torch::nn::BatchNorm1dOptions(out_planes)));
    }
    use_relu = relu;
    if (use_relu) {
        relu_ = register_module("relu", torch::nn::ReLU(torch::nn::ReLUOptions().inplace(true)));
    }
}

torch::Tensor LinearBlock::forward(torch::Tensor x) {
    // Flatten for the linear layer: x.reshape(x.shape[0], -1) in Python
    x = x.reshape({x.size(0), -1});
    x = linear->forward(x);
    if (use_bn) {
        // BatchNorm1d expects (N, C) or (N, C, L); here x is (N, C).
        x = bn->forward(x);
    }
    if (use_relu) {
        x = relu_->forward(x);
    }
    // Ensure the output is 2D (batch_size, features); this is a no-op if the
    // shape is already correct after bn/relu.
    x = x.reshape({x.size(0), -1});
    return x;
}
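
// Shape sketch for fc3_rt = LinearBlock(256, 256, 5) (values illustrative):
//   input [N, 256, 5, 5] -> flatten [N, 6400] -> Linear(6400, 256)
//   -> BatchNorm1d(256) -> ReLU -> output [N, 256]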
// Create a convolutional block: Conv2d -> BatchNorm2d -> ReLU(inplace)
torch::nn::Sequential BBRegressor::create_conv_block(int in_planes, int out_planes,
                                                     int kernel_size, int stride,
                                                     int padding, int dilation) {
    // Print dimensions for debugging
    std::cout << "Creating conv block: in_planes=" << in_planes << ", out_planes=" << out_planes << std::endl;
    torch::nn::Sequential seq;
    // Convolutional layer
    seq->push_back(torch::nn::Conv2d(torch::nn::Conv2dOptions(in_planes, out_planes, kernel_size)
                                         .stride(stride).padding(padding).dilation(dilation).bias(true)));
    // Batch normalization layer
    seq->push_back(torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(out_planes)));
    // ReLU activation
    seq->push_back(torch::nn::ReLU(torch::nn::ReLUOptions().inplace(true)));
    return seq;
}
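
// Example (mirrors the Python conv(...) helper): create_conv_block(512, 128, 3, 1, 1, 1)
// builds Conv2d(512, 128, kernel_size=3, stride=1, padding=1, dilation=1)
// -> BatchNorm2d(128) -> ReLU(inplace=True), matching self.conv3_1r in AtomIoUNet.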
// Helper function to verify BatchNorm dimensions
void BBRegressor::verify_batchnorm_dimensions() {
    std::cout << "Verifying BatchNorm dimensions..." << std::endl;
    // Report the module count and the type of the (expected) BatchNorm at index 1.
    auto report = [](torch::nn::Sequential& seq, const std::string& name) {
        std::cout << name << " has " << seq->size() << " modules" << std::endl;
        if (seq->size() > 1) {
            auto module = seq[1];
            std::cout << name << " module[1] type: " << module->name() << std::endl;
        }
    };
    report(conv3_1r, "conv3_1r");
    report(conv3_1t, "conv3_1t");
    report(conv3_2t, "conv3_2t");
}
// Helper function to read a whole file into a byte buffer
std::vector<char> BBRegressor::read_file_to_bytes(const std::string& file_path) {
    std::ifstream file(file_path, std::ios::binary | std::ios::ate);
    if (!file.is_open()) {
        throw std::runtime_error("Could not open file: " + file_path);
    }
    std::streamsize size = file.tellg();
    file.seekg(0, std::ios::beg);
    std::vector<char> buffer(size);
    if (!file.read(buffer.data(), size)) {
        throw std::runtime_error("Could not read file: " + file_path);
    }
    return buffer;
}
// Load a single tensor from a .pt file and move it to the model device
torch::Tensor BBRegressor::load_tensor(const std::string& file_path) {
    try {
        // Read the file into memory, then unpickle it.
        std::vector<char> data = read_file_to_bytes(file_path);
        torch::Tensor tensor = torch::pickle_load(data).toTensor();
        // Always move the tensor to the model device.
        return tensor.to(device);
    } catch (const c10::Error& e) {
        std::cerr << "Error loading tensor from " << file_path << ": " << e.what() << std::endl;
        throw;
    }
}
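
// The .pt files are assumed to each hold one bare tensor exported from Python,
// e.g. (hypothetical export step):
//   torch.save(state_dict['conv3_1r.0.weight'], 'conv3_1r_0_weight.pt')
// torch::pickle_load() returns an IValue, which toTensor() unwraps.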
// Constructor
BBRegressor::BBRegressor(const std::string& model_weights_dir, torch::Device dev)
    : device(dev), model_dir(model_weights_dir),
      fc3_rt(256, 256, 5, true, true, true),
      fc4_rt(256, 256, 3, true, true, true) {
    // Check that the model directory exists
    if (!fs::exists(model_dir)) {
        throw std::runtime_error("Model directory does not exist: " + model_dir);
    }

    // Initialize conv blocks to match Python's AtomIoUNet implementation exactly
    std::cout << "Initializing conv blocks..." << std::endl;
    // In Python: self.conv3_1r = conv(input_dim[0], 128, kernel_size=3, stride=1)
    conv3_1r = create_conv_block(512, 128, 3, 1, 1, 1);
    // In Python: self.conv3_1t = conv(input_dim[0], 256, kernel_size=3, stride=1)
    conv3_1t = create_conv_block(512, 256, 3, 1, 1, 1);
    // In Python: self.conv3_2t = conv(256, pred_input_dim[0], kernel_size=3, stride=1)
    conv3_2t = create_conv_block(256, 256, 3, 1, 1, 1);

    // Pooling sizes match the Python model exactly
    // In Python: self.prroi_pool3r = PrRoIPool2D(3, 3, 1/8)
    prroi_pool3r = std::make_shared<PrRoIPool2D>(3, 3, 0.125);   // 1/8 scale for layer2
    // In Python: self.prroi_pool3t = PrRoIPool2D(5, 5, 1/8)
    prroi_pool3t = std::make_shared<PrRoIPool2D>(5, 5, 0.125);   // 1/8 scale for layer2

    // In Python: self.fc3_1r = conv(128, 256, kernel_size=3, stride=1, padding=0)
    fc3_1r = create_conv_block(128, 256, 3, 1, 0, 1);            // padding=0 for this layer
    // In Python: self.conv4_1r = conv(input_dim[1], 256, kernel_size=3, stride=1)
    conv4_1r = create_conv_block(1024, 256, 3, 1, 1, 1);
    // In Python: self.conv4_1t = conv(input_dim[1], 256, kernel_size=3, stride=1)
    conv4_1t = create_conv_block(1024, 256, 3, 1, 1, 1);
    // In Python: self.conv4_2t = conv(256, pred_input_dim[1], kernel_size=3, stride=1)
    conv4_2t = create_conv_block(256, 256, 3, 1, 1, 1);

    // In Python: self.prroi_pool4r = PrRoIPool2D(1, 1, 1/16)
    prroi_pool4r = std::make_shared<PrRoIPool2D>(1, 1, 0.0625);  // 1/16 scale for layer3
    // In Python: self.prroi_pool4t = PrRoIPool2D(3, 3, 1/16)
    prroi_pool4t = std::make_shared<PrRoIPool2D>(3, 3, 0.0625);  // 1/16 scale for layer3

    // In Python: self.fc34_3r = conv(256 + 256, pred_input_dim[0], kernel_size=1, stride=1, padding=0)
    fc34_3r = create_conv_block(512, 256, 1, 1, 0, 1);           // kernel_size=1, padding=0
    // In Python: self.fc34_4r = conv(256 + 256, pred_input_dim[1], kernel_size=1, stride=1, padding=0)
    fc34_4r = create_conv_block(512, 256, 1, 1, 0, 1);           // kernel_size=1, padding=0

    // Linear blocks - dimensions and parameters exactly match the Python implementation
    // In Python: self.fc3_rt = LinearBlock(pred_input_dim[0], pred_inter_dim[0], 5)
    fc3_rt = LinearBlock(256, 256, 5, true, true, true);
    // In Python: self.fc4_rt = LinearBlock(pred_input_dim[1], pred_inter_dim[1], 3)
    fc4_rt = LinearBlock(256, 256, 3, true, true, true);
    // In Python: self.iou_predictor = nn.Linear(pred_inter_dim[0]+pred_inter_dim[1], 1, bias=True)
    iou_predictor = torch::nn::Linear(torch::nn::LinearOptions(256 + 256, 1).bias(true));

    // Load all weights, switch to inference mode, and move to the target device.
    load_weights();
    this->eval();
    this->to(device);
    std::cout << "BB Regressor initialized in evaluation mode" << std::endl;
}
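
// Construction sketch (hypothetical weight directory; requires a CUDA device):
//   BBRegressor regressor("exported_weights/bb_regressor", torch::Device(torch::kCUDA, 0));
//   regressor.print_model_info();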
// Set the model to evaluation mode
void BBRegressor::eval() {
    // Set all sequential modules to eval mode
    conv3_1r->eval();
    conv3_1t->eval();
    conv3_2t->eval();
    fc3_1r->eval();
    conv4_1r->eval();
    conv4_1t->eval();
    conv4_2t->eval();
    fc34_3r->eval();
    fc34_4r->eval();
    // Linear blocks must also be in eval mode so their BatchNorm layers use
    // running statistics rather than batch statistics.
    fc3_rt.eval();
    fc4_rt.eval();
    // eval() is a no-op for a plain Linear layer, but is included for completeness.
    iou_predictor->eval();
}
// Load weights
void BBRegressor::load_weights() {
    // Helper lambda: load conv + batch-norm weights for one sequential block.
    auto load_sequential_weights = [this](torch::nn::Sequential& seq, const std::string& prefix) {
        try {
            // Conv layer weights (Sequential index 0)
            std::string weight_path = model_dir + "/" + prefix + "_0_weight.pt";
            std::string bias_path = model_dir + "/" + prefix + "_0_bias.pt";
            if (fs::exists(weight_path) && fs::exists(bias_path)) {
                auto conv_weight = load_tensor(weight_path);
                auto conv_bias = load_tensor(bias_path);
                // Derive the conv configuration from the weight tensor itself.
                int out_channels = conv_weight.size(0);
                int in_channels = conv_weight.size(1);
                int kernel_size = conv_weight.size(2);
                std::cout << "Loading " << prefix << " conv weights: "
                          << "[out_ch=" << out_channels
                          << ", in_ch=" << in_channels
                          << ", kernel=" << kernel_size << "]" << std::endl;
                // Use the correct padding for each layer name.
                int padding = 1;  // default padding
                if (prefix == "fc3_1r" || prefix == "fc34_3r" || prefix == "fc34_4r") {
                    padding = 0;  // these layers use padding=0 in the Python implementation
                }
                std::cout << "  Using padding=" << padding << " for " << prefix << std::endl;
                auto conv_options = torch::nn::Conv2dOptions(in_channels, out_channels, kernel_size)
                                        .stride(1).padding(padding).bias(true);
                auto conv_module = torch::nn::Conv2d(conv_options);
                // Assign weights and bias directly; load_tensor() has already
                // placed these tensors on the target device.
                conv_module->weight = conv_weight;
                conv_module->bias = conv_bias;
                // Debug info - print some weight stats
                std::cout << "  Conv weight stats: mean=" << conv_weight.mean().item<float>()
                          << ", std=" << conv_weight.std().item<float>()
                          << ", min=" << conv_weight.min().item<float>()
                          << ", max=" << conv_weight.max().item<float>() << std::endl;
                // Build a fresh sequence around the configured conv module.
                auto new_seq = torch::nn::Sequential();
                new_seq->push_back(conv_module);
                // Batch-norm parameters (Sequential index 1)
                std::string bn_weight_path = model_dir + "/" + prefix + "_1_weight.pt";
                std::string bn_bias_path = model_dir + "/" + prefix + "_1_bias.pt";
                std::string bn_mean_path = model_dir + "/" + prefix + "_1_running_mean.pt";
                std::string bn_var_path = model_dir + "/" + prefix + "_1_running_var.pt";
                if (fs::exists(bn_weight_path) && fs::exists(bn_bias_path) &&
                    fs::exists(bn_mean_path) && fs::exists(bn_var_path)) {
                    auto bn_weight = load_tensor(bn_weight_path);
                    auto bn_bias = load_tensor(bn_bias_path);
                    auto bn_mean = load_tensor(bn_mean_path);
                    auto bn_var = load_tensor(bn_var_path);
                    // Create the BatchNorm with the feature count taken from the weights.
                    int num_features = bn_weight.size(0);
                    std::cout << "  Creating BatchNorm2d with num_features=" << num_features << std::endl;
                    auto bn_options = torch::nn::BatchNorm2dOptions(num_features)
                                          .eps(1e-5)       // match Python default
                                          .momentum(0.1)   // match Python default
                                          .affine(true)
                                          .track_running_stats(true);
                    auto bn_module = torch::nn::BatchNorm2d(bn_options);
                    // Set batch norm parameters
                    bn_module->weight = bn_weight;
                    bn_module->bias = bn_bias;
                    bn_module->running_mean = bn_mean;
                    bn_module->running_var = bn_var;
                    // Debug info - print some batch norm stats
                    std::cout << "  BN weight stats: mean=" << bn_weight.mean().item<float>()
                              << ", std=" << bn_weight.std().item<float>() << std::endl;
                    std::cout << "  BN running_mean stats: mean=" << bn_mean.mean().item<float>()
                              << ", std=" << bn_mean.std().item<float>() << std::endl;
                    std::cout << "  BN running_var stats: mean=" << bn_var.mean().item<float>()
                              << ", std=" << bn_var.std().item<float>() << std::endl;
                    new_seq->push_back(bn_module);
                }
                // ReLU with inplace=true to match Python
                new_seq->push_back(torch::nn::ReLU(torch::nn::ReLUOptions().inplace(true)));
                // Replace the old sequence with the newly built one.
                seq = new_seq;
                std::cout << "Loaded weights for " << prefix << std::endl;
            } else {
                std::cerr << "Weight files not found for " << prefix << std::endl;
            }
        } catch (const std::exception& e) {
            std::cerr << "Error loading weights for " << prefix << ": " << e.what() << std::endl;
            throw;  // re-throw to stop execution
        }
    };
    // Helper lambda: load linear + batch-norm weights for one LinearBlock.
    auto load_linear_block_weights = [this](LinearBlock& block, const std::string& prefix) {
        try {
            // Linear layer weights
            std::string weight_path = model_dir + "/" + prefix + "_linear_weight.pt";
            std::string bias_path = model_dir + "/" + prefix + "_linear_bias.pt";
            if (fs::exists(weight_path) && fs::exists(bias_path)) {
                block.linear->weight = load_tensor(weight_path);
                block.linear->bias = load_tensor(bias_path);
                // Batch-norm parameters
                std::string bn_weight_path = model_dir + "/" + prefix + "_bn_weight.pt";
                std::string bn_bias_path = model_dir + "/" + prefix + "_bn_bias.pt";
                std::string bn_mean_path = model_dir + "/" + prefix + "_bn_running_mean.pt";
                std::string bn_var_path = model_dir + "/" + prefix + "_bn_running_var.pt";
                if (fs::exists(bn_weight_path) && fs::exists(bn_bias_path) &&
                    fs::exists(bn_mean_path) && fs::exists(bn_var_path)) {
                    block.bn->weight = load_tensor(bn_weight_path);
                    block.bn->bias = load_tensor(bn_bias_path);
                    block.bn->running_mean = load_tensor(bn_mean_path);
                    block.bn->running_var = load_tensor(bn_var_path);
                }
                std::cout << "Loaded weights for " << prefix << std::endl;
            } else {
                std::cerr << "Weight files not found for " << prefix << std::endl;
            }
        } catch (const std::exception& e) {
            std::cerr << "Error loading weights for " << prefix << ": " << e.what() << std::endl;
            throw;  // re-throw to stop execution
        }
    };
    // Load weights for all layers
    load_sequential_weights(conv3_1r, "conv3_1r");
    load_sequential_weights(conv3_1t, "conv3_1t");
    load_sequential_weights(conv3_2t, "conv3_2t");
    load_sequential_weights(fc3_1r, "fc3_1r");
    load_sequential_weights(conv4_1r, "conv4_1r");
    load_sequential_weights(conv4_1t, "conv4_1t");
    load_sequential_weights(conv4_2t, "conv4_2t");
    load_sequential_weights(fc34_3r, "fc34_3r");
    load_sequential_weights(fc34_4r, "fc34_4r");
    load_linear_block_weights(fc3_rt, "fc3_rt");
    load_linear_block_weights(fc4_rt, "fc4_rt");

    // Load IoU predictor weights
    try {
        std::string weight_path = model_dir + "/iou_predictor_weight.pt";
        std::string bias_path = model_dir + "/iou_predictor_bias.pt";
        if (fs::exists(weight_path) && fs::exists(bias_path)) {
            iou_predictor->weight = load_tensor(weight_path);
            iou_predictor->bias = load_tensor(bias_path);
            std::cout << "Loaded weights for iou_predictor" << std::endl;
        } else {
            std::cerr << "Weight files not found for iou_predictor" << std::endl;
        }
    } catch (const std::exception& e) {
        std::cerr << "Error loading weights for iou_predictor: " << e.what() << std::endl;
        throw;  // re-throw to stop execution
    }
}
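
// Expected weight file layout under model_dir, one tensor per file (names as
// read by the loaders above):
//   <block>_0_weight.pt / <block>_0_bias.pt                                conv (index 0)
//   <block>_1_weight.pt / _1_bias.pt / _1_running_mean.pt / _1_running_var.pt  batch norm (index 1)
//   <linear_block>_linear_weight.pt / _linear_bias.pt / _bn_*.pt           LinearBlock
//   iou_predictor_weight.pt / iou_predictor_bias.pt                        final predictor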
// Move model to device
void BBRegressor::to(torch::Device device) {
    // Verify the device is a CUDA device
    if (!device.is_cuda()) {
        throw std::runtime_error("BBRegressor requires a CUDA device");
    }
    this->device = device;
    // Move all components to the device. Every submodule must be listed
    // explicitly, since load_weights() rebuilds the sequential blocks.
    conv3_1r->to(device);
    conv3_1t->to(device);
    conv3_2t->to(device);
    fc3_1r->to(device);
    conv4_1r->to(device);
    conv4_1t->to(device);
    conv4_2t->to(device);
    fc34_3r->to(device);
    fc34_4r->to(device);
    fc3_rt.to(device);
    fc4_rt.to(device);
    iou_predictor->to(device);
}
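
// Note: load_tensor() already places every loaded tensor on the model device,
// so this pass mainly covers modules whose parameters were default-initialized.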
// Get IoU features from backbone features
std::vector<torch::Tensor> BBRegressor::get_iou_feat(std::vector<torch::Tensor> feat2_input, int sample_idx) {
    std::cout << "[DEBUG] Entered get_iou_feat with sample_idx=" << sample_idx << std::endl;
    torch::Tensor feat3_t_original = feat2_input[0];
    torch::Tensor feat4_t_original = feat2_input[1];
    // Collapse 5D inputs to 4D exactly as in the Python implementation.
    if (feat3_t_original.dim() == 5) {
        auto shape = feat3_t_original.sizes();
        feat3_t_original = feat3_t_original.reshape({-1, shape[2], shape[3], shape[4]});
    }
    if (feat4_t_original.dim() == 5) {
        auto shape = feat4_t_original.sizes();
        feat4_t_original = feat4_t_original.reshape({-1, shape[2], shape[3], shape[4]});
    }
    // Ensure conv inputs are contiguous float32 (the ResNet output should already be float32).
    torch::Tensor feat3_t = feat3_t_original.contiguous().to(torch::kFloat32);
    torch::Tensor feat4_t = feat4_t_original.contiguous().to(torch::kFloat32);

    torch::NoGradGuard no_grad;

    // Ensure the debug directory exists for sample 0. Note: mkdir() creates only
    // the last path component, so the parent directories must already exist.
    if (sample_idx == 0) {
        const char* debug_dir = "test/output/bb_regressor";
        struct stat st = {0};
        if (stat(debug_dir, &st) == -1) {
            mkdir(debug_dir, 0777);
        }
    }

    // conv3_1t (run layer by layer so the intermediates can be inspected)
    auto c3_1t_conv = conv3_1t[0]->as<torch::nn::Conv2d>()->forward(feat3_t);
    auto c3_1t_bn = conv3_1t[1]->as<torch::nn::BatchNorm2d>()->forward(c3_1t_conv);
    auto c3_1t_relu = conv3_1t[2]->as<torch::nn::ReLU>()->forward(c3_1t_bn);
    if (sample_idx == 0) {
        std::cout << "[DEBUG] Saving debug tensors for conv3_1t, sample_idx == 0" << std::endl;
        save_tensor_to_file(c3_1t_bn.cpu(), "test/output/bb_regressor/sample_0_debug_conv3_1t_bn.pt");
        save_tensor_to_file(c3_1t_relu.cpu(), "test/output/bb_regressor/sample_0_debug_conv3_1t_relu.pt");
        std::cout << "conv3_1t_bn: dtype=" << c3_1t_bn.dtype() << ", device=" << c3_1t_bn.device() << ", shape=" << c3_1t_bn.sizes() << std::endl;
        std::cout << "conv3_1t_relu: dtype=" << c3_1t_relu.dtype() << ", device=" << c3_1t_relu.device() << ", shape=" << c3_1t_relu.sizes() << std::endl;
    }
    auto c3_t_1 = c3_1t_relu;

    // conv3_2t
    auto c3_2t_conv = conv3_2t[0]->as<torch::nn::Conv2d>()->forward(c3_t_1);
    auto c3_2t_bn = conv3_2t[1]->as<torch::nn::BatchNorm2d>()->forward(c3_2t_conv);
    auto c3_2t_relu = conv3_2t[2]->as<torch::nn::ReLU>()->forward(c3_2t_bn);
    if (sample_idx == 0) {
        std::cout << "[DEBUG] Saving debug tensors for conv3_2t, sample_idx == 0" << std::endl;
        save_tensor_to_file(c3_2t_bn.cpu(), "test/output/bb_regressor/sample_0_debug_conv3_2t_bn.pt");
        save_tensor_to_file(c3_2t_relu.cpu(), "test/output/bb_regressor/sample_0_debug_conv3_2t_relu.pt");
        std::cout << "conv3_2t_bn: dtype=" << c3_2t_bn.dtype() << ", device=" << c3_2t_bn.device() << ", shape=" << c3_2t_bn.sizes() << std::endl;
        std::cout << "conv3_2t_relu: dtype=" << c3_2t_relu.dtype() << ", device=" << c3_2t_relu.device() << ", shape=" << c3_2t_relu.sizes() << std::endl;
    }
    auto c3_t = c3_2t_relu;

    // conv4_1t
    auto c4_1t_conv = conv4_1t[0]->as<torch::nn::Conv2d>()->forward(feat4_t);
    auto c4_1t_bn = conv4_1t[1]->as<torch::nn::BatchNorm2d>()->forward(c4_1t_conv);
    auto c4_1t_relu = conv4_1t[2]->as<torch::nn::ReLU>()->forward(c4_1t_bn);
    if (sample_idx == 0) {
        std::cout << "[DEBUG] Saving debug tensors for conv4_1t, sample_idx == 0" << std::endl;
        save_tensor_to_file(c4_1t_bn.cpu(), "test/output/bb_regressor/sample_0_debug_conv4_1t_bn.pt");
        save_tensor_to_file(c4_1t_relu.cpu(), "test/output/bb_regressor/sample_0_debug_conv4_1t_relu.pt");
        std::cout << "conv4_1t_bn: dtype=" << c4_1t_bn.dtype() << ", device=" << c4_1t_bn.device() << ", shape=" << c4_1t_bn.sizes() << std::endl;
        std::cout << "conv4_1t_relu: dtype=" << c4_1t_relu.dtype() << ", device=" << c4_1t_relu.device() << ", shape=" << c4_1t_relu.sizes() << std::endl;
    }
    auto c4_t_1 = c4_1t_relu;

    // conv4_2t
    auto c4_2t_conv = conv4_2t[0]->as<torch::nn::Conv2d>()->forward(c4_t_1);
    auto c4_2t_bn = conv4_2t[1]->as<torch::nn::BatchNorm2d>()->forward(c4_2t_conv);
    auto c4_2t_relu = conv4_2t[2]->as<torch::nn::ReLU>()->forward(c4_2t_bn);
    if (sample_idx == 0) {
        std::cout << "[DEBUG] Saving debug tensors for conv4_2t, sample_idx == 0" << std::endl;
        save_tensor_to_file(c4_2t_bn.cpu(), "test/output/bb_regressor/sample_0_debug_conv4_2t_bn.pt");
        save_tensor_to_file(c4_2t_relu.cpu(), "test/output/bb_regressor/sample_0_debug_conv4_2t_relu.pt");
        std::cout << "conv4_2t_bn: dtype=" << c4_2t_bn.dtype() << ", device=" << c4_2t_bn.device() << ", shape=" << c4_2t_bn.sizes() << std::endl;
        std::cout << "conv4_2t_relu: dtype=" << c4_2t_relu.dtype() << ", device=" << c4_2t_relu.device() << ", shape=" << c4_2t_relu.sizes() << std::endl;
    }
    auto c4_t = c4_2t_relu;

    // Return contiguous tensors (both are already float32 at this point).
    return {c3_t.contiguous(), c4_t.contiguous()};
}
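
// Shape sketch (illustrative, e.g. for a 288x288 search area): feat3_t is
// [B, 512, 36, 36] at stride 8 and feat4_t is [B, 1024, 18, 18] at stride 16;
// the returned features are then [B, 256, 36, 36] and [B, 256, 18, 18].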
// Get modulation vectors for the target
std::vector<torch::Tensor> BBRegressor::get_modulation(std::vector<torch::Tensor> feat, torch::Tensor bb) {
    // feat contains two tensors: feat3_r and feat4_r (backbone features).
    // bb is the initial bounding box, [batch, 1, 4] or [batch, 4] in (x, y, w, h).
    torch::NoGradGuard no_grad;  // no gradients are needed at inference time
    auto feat3_r = feat[0].to(device);
    auto feat4_r = feat[1].to(device);
    auto current_bb = bb.to(device);

    // Reshape bb from [batch, 1, 4] to [batch, 4] if necessary.
    if (current_bb.dim() == 3 && current_bb.size(1) == 1) {
        current_bb = current_bb.squeeze(1);
    }
    if (current_bb.dim() != 2 || current_bb.size(1) != 4) {
        throw std::runtime_error("BBRegressor::get_modulation: bb must be [batch, 4] or [batch, 1, 4]");
    }

    // Pass through the early conv layers (reference branch).
    // Python: c3_r = self.conv3_1r(feat3_r)
    auto c3_r = conv3_1r->forward(feat3_r);

    // Prepare ROIs: convert bb from [x, y, w, h] to [batch_idx, x1, y1, x2, y2].
    int batch_size = current_bb.size(0);
    auto batch_index = torch::arange(0, batch_size, current_bb.options().dtype(torch::kFloat)).reshape({-1, 1});
    // Convert from xywh to xyxy (Python: bb[:, 2:4] = bb[:, 0:2] + bb[:, 2:4])
    auto bb_xyxy = current_bb.clone();
    bb_xyxy.index_put_({torch::indexing::Slice(), torch::indexing::Slice(2, 4)},
                       bb_xyxy.index({torch::indexing::Slice(), torch::indexing::Slice(0, 2)}) +
                       bb_xyxy.index({torch::indexing::Slice(), torch::indexing::Slice(2, 4)}));
    // Python: roi1 = torch.cat((batch_index, bb), dim=1)
    auto roi1 = torch::cat({batch_index, bb_xyxy}, 1).to(device);

    // Python: roi3r = self.prroi_pool3r(c3_r, roi1)
    auto roi3r = prroi_pool3r->forward(c3_r, roi1);
    // Python: c4_r = self.conv4_1r(feat4_r)
    auto c4_r = conv4_1r->forward(feat4_r);
    // Python: roi4r = self.prroi_pool4r(c4_r, roi1)
    auto roi4r = prroi_pool4r->forward(c4_r, roi1);
    // Python: fc3_r = self.fc3_1r(roi3r)
    auto fc3_r = fc3_1r->forward(roi3r);
    // Python: fc34_r = torch.cat((fc3_r, roi4r), dim=1)
    auto fc34_r = torch::cat({fc3_r, roi4r}, 1);
    // Python: fc34_3_r = self.fc34_3r(fc34_r); fc34_4_r = self.fc34_4r(fc34_r)
    auto fc34_3_r = fc34_3r->forward(fc34_r);
    auto fc34_4_r = fc34_4r->forward(fc34_r);
    return {fc34_3_r, fc34_4_r};
}
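
// Coordinate sketch (illustrative numbers): an initial box (x=10, y=20, w=30, h=40)
// for batch item 0 becomes the ROI row [0, 10, 20, 40, 60] in xyxy form;
// prroi_pool3r then samples it from c3_r at 1/8 scale into a 3x3 grid.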
// Predict IoU for proposals
torch::Tensor BBRegressor::predict_iou(std::vector<torch::Tensor> modulation,
                                       std::vector<torch::Tensor> feat,
                                       torch::Tensor proposals) {
    // Ensure all inputs are on the correct device
    auto target_device = device;
    for (auto& t : feat) { t = t.to(target_device); }
    for (auto& m : modulation) { m = m.to(target_device); }
    proposals = proposals.to(target_device);

    // Batch size and number of proposals
    int batch_size = proposals.size(0);
    int num_proposals = proposals.size(1);

    // Modulation is applied BEFORE PrRoIPooling, matching the Python implementation.
    auto fc34_3_r = modulation[0].to(target_device);
    auto fc34_4_r = modulation[1].to(target_device);
    auto c3_t = feat[0].to(target_device);
    auto c4_t = feat[1].to(target_device);

    // Reshape modulation vectors (Python: fc34_3_r.reshape(batch_size, -1, 1, 1))
    if (fc34_3_r.dim() == 2) {
        fc34_3_r = fc34_3_r.reshape({batch_size, -1, 1, 1});
    }
    if (fc34_4_r.dim() == 2) {
        fc34_4_r = fc34_4_r.reshape({batch_size, -1, 1, 1});
    }
    // Python: c3_t_att = c3_t * fc34_3_r.reshape(batch_size, -1, 1, 1)
    auto c3_t_att = c3_t * fc34_3_r;
    auto c4_t_att = c4_t * fc34_4_r;

    // Convert proposals from xywh to xyxy format (matching Python)
    auto proposals_xy = proposals.index({torch::indexing::Slice(), torch::indexing::Slice(), torch::indexing::Slice(0, 2)});
    auto proposals_wh = proposals.index({torch::indexing::Slice(), torch::indexing::Slice(), torch::indexing::Slice(2, 4)});
    auto proposals_xyxy = torch::cat({proposals_xy, proposals_xy + proposals_wh}, 2);

    // Prepend the batch index to every proposal and flatten to [B * P, 5].
    auto batch_index = torch::arange(0, batch_size, proposals.options().dtype(torch::kFloat)).reshape({-1, 1});
    auto batch_index_expanded = batch_index.reshape({batch_size, -1, 1}).expand({-1, num_proposals, -1});
    auto roi2 = torch::cat({batch_index_expanded, proposals_xyxy}, 2);
    roi2 = roi2.reshape({-1, 5}).to(proposals_xyxy.device());

    // Apply PrRoIPooling to the MODULATED features (matching Python)
    auto roi3t = prroi_pool3t->forward(c3_t_att, roi2);
    auto roi4t = prroi_pool4t->forward(c4_t_att, roi2);

    // Forward through the linear blocks
    fc3_rt.to(target_device);
    fc4_rt.to(target_device);
    auto fc3_rt_output = fc3_rt.forward(roi3t);
    auto fc4_rt_output = fc4_rt.forward(roi4t);

    // Concatenate features and predict IoU (matching Python)
    auto fc34_rt_cat = torch::cat({fc3_rt_output, fc4_rt_output}, 1);
    iou_predictor->to(target_device);
    auto iou_pred = iou_predictor->forward(fc34_rt_cat).reshape({batch_size, num_proposals});
    return iou_pred;
}
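
// End-to-end usage sketch (tensor names and shapes are illustrative):
//   auto mod      = regressor.get_modulation({feat3_r, feat4_r}, init_bb);        // init_bb: [B, 4] xywh
//   auto iou_feat = regressor.get_iou_feat({feat3_t, feat4_t}, /*sample_idx=*/1); // test-frame features
//   auto iou      = regressor.predict_iou(mod, iou_feat, proposals);              // proposals: [B, P, 4] -> [B, P]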
// Print model information
void BBRegressor::print_model_info() {
    std::cout << "BBRegressor Model Information:" << std::endl;
    std::cout << " - Model directory: " << model_dir << std::endl;
    std::cout << " - Device: CUDA:" << device.index() << std::endl;
    std::cout << " - CUDA Device Count: " << torch::cuda::device_count() << std::endl;
    std::cout << " - Using PreciseRoIPooling: " <<
#ifdef WITH_PRROI_POOLING
        "Yes"
#else
        "No (will fail)"
#endif
        << std::endl;
}
// Compute statistics for a tensor
BBRegressor::TensorStats BBRegressor::compute_stats(const torch::Tensor& tensor) {
    TensorStats stats;
    // Record the shape
    for (int i = 0; i < tensor.dim(); i++) {
        stats.shape.push_back(tensor.size(i));
    }
    // Basic statistics, each reduced over all elements to a scalar
    stats.mean = tensor.mean().item<float>();
    stats.std_dev = tensor.std().item<float>();
    stats.min_val = tensor.min().item<float>();
    stats.max_val = tensor.max().item<float>();
    stats.sum = tensor.sum().item<float>();

    // Sample values at specific positions
    if (tensor.dim() >= 4) {
        // 4D tensors (batch, channel, height, width)
        stats.samples.push_back(tensor.index({0, 0, 0, 0}).item<float>());
        if (tensor.size(1) > 1 && tensor.size(2) > 1 && tensor.size(3) > 1) {
            int mid_c = static_cast<int>(tensor.size(1) / 2);
            int mid_h = static_cast<int>(tensor.size(2) / 2);
            int mid_w = static_cast<int>(tensor.size(3) / 2);
            stats.samples.push_back(tensor.index({0, mid_c, mid_h, mid_w}).item<float>());
            int64_t last_c_idx = tensor.size(1) - 1;
            int64_t last_h_idx = tensor.size(2) - 1;
            int64_t last_w_idx = tensor.size(3) - 1;
            // Cap the indices at 10 so the sampled position is fixed and
            // comparable across tensors of different sizes.
            if (last_c_idx > 10) last_c_idx = 10;
            if (last_h_idx > 10) last_h_idx = 10;
            if (last_w_idx > 10) last_w_idx = 10;
            // static_cast to int avoids an int64_t/int type mismatch in index()
            stats.samples.push_back(tensor.index({0, static_cast<int>(last_c_idx),
                                                  static_cast<int>(last_h_idx),
                                                  static_cast<int>(last_w_idx)}).item<float>());
        }
    } else if (tensor.dim() == 3) {
        // 3D tensors
        stats.samples.push_back(tensor.index({0, 0, 0}).item<float>());
        if (tensor.size(1) > 1 && tensor.size(2) > 1) {
            int mid_h = static_cast<int>(tensor.size(1) / 2);
            int mid_w = static_cast<int>(tensor.size(2) / 2);
            stats.samples.push_back(tensor.index({0, mid_h, mid_w}).item<float>());
            int last_h = static_cast<int>(tensor.size(1) - 1);
            int last_w = static_cast<int>(tensor.size(2) - 1);
            stats.samples.push_back(tensor.index({0, last_h, last_w}).item<float>());
        }
    } else if (tensor.dim() == 2) {
        // 2D tensors
        stats.samples.push_back(tensor.index({0, 0}).item<float>());
        if (tensor.size(0) > 1 && tensor.size(1) > 1) {
            int mid_h = static_cast<int>(tensor.size(0) / 2);
            int mid_w = static_cast<int>(tensor.size(1) / 2);
            stats.samples.push_back(tensor.index({mid_h, mid_w}).item<float>());
            int last_h = static_cast<int>(tensor.size(0) - 1);
            int last_w = static_cast<int>(tensor.size(1) - 1);
            stats.samples.push_back(tensor.index({last_h, last_w}).item<float>());
        }
    } else if (tensor.dim() == 0) {
        // 0-dim (scalar) tensors cannot be indexed; read the value directly.
        stats.samples.push_back(tensor.item<float>());
    } else {
        // 1D tensors
        if (tensor.numel() > 0) {
            stats.samples.push_back(tensor.index({0}).item<float>());
            if (tensor.size(0) > 1) {
                int mid = static_cast<int>(tensor.size(0) / 2);
                stats.samples.push_back(tensor.index({mid}).item<float>());
                int last = static_cast<int>(tensor.size(0) - 1);
                stats.samples.push_back(tensor.index({last}).item<float>());
            }
        }
    }
    return stats;
}
// Save tensor statistics to a file
void BBRegressor::save_stats(const std::vector<TensorStats>& all_stats, const std::string& filepath) {
    std::ofstream file(filepath);
    if (!file.is_open()) {
        std::cerr << "Error opening file for writing: " << filepath << std::endl;
        return;
    }
    for (size_t i = 0; i < all_stats.size(); i++) {
        const auto& stats = all_stats[i];
        file << "Output " << i << ":" << std::endl;
        file << "  Shape: [";
        for (size_t j = 0; j < stats.shape.size(); j++) {
            file << stats.shape[j];
            if (j < stats.shape.size() - 1) file << ", ";
        }
        file << "]" << std::endl;
        file << "  Mean: " << stats.mean << std::endl;
        file << "  Std: " << stats.std_dev << std::endl;
        file << "  Min: " << stats.min_val << std::endl;
        file << "  Max: " << stats.max_val << std::endl;
        file << "  Sum: " << stats.sum << std::endl;
        file << "  Sample values: [";
        for (size_t j = 0; j < stats.samples.size(); j++) {
            file << stats.samples[j];
            if (j < stats.samples.size() - 1) file << ", ";
        }
        file << "]" << std::endl << std::endl;
    }
    file.close();
}
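
// Stats usage sketch (illustrative):
//   std::vector<BBRegressor::TensorStats> all_stats;
//   all_stats.push_back(regressor.compute_stats(iou));
//   regressor.save_stats(all_stats, "test/output/bb_regressor/stats.txt");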