@ -26,7 +26,7 @@ from pytracking.features.net_wrappers import DiMPTorchScriptWrapper
# For loading AtomIoUNet from source
# For loading AtomIoUNet from source
from ltr.models.bbreg.atom_iou_net import AtomIoUNet
from ltr.models.bbreg.atom_iou_net import AtomIoUNet
# Add import for new modular comparison
# Add import for new modular comparison
from model_comparison.bbreg_comparison import compare_debug_tensors
from model_comparison.bbreg_comparison import compare_debug_tensors , compare_resnet_debug_tensors
SCRIPT_DIR_FOR_INIT = os . path . dirname ( os . path . abspath ( __file__ ) )
SCRIPT_DIR_FOR_INIT = os . path . dirname ( os . path . abspath ( __file__ ) )
ROOT_DIR_FOR_INIT = os . path . dirname ( SCRIPT_DIR_FOR_INIT )
ROOT_DIR_FOR_INIT = os . path . dirname ( SCRIPT_DIR_FOR_INIT )
@ -764,13 +764,12 @@ class ComparisonRunner:
processed_input_tensor = input_tensor . to ( self . device ) # Ensure device
processed_input_tensor = input_tensor . to ( self . device ) # Ensure device
# --- END REINSTATED INPUT LOADING AND PREPROCESSING ---
# --- END REINSTATED INPUT LOADING AND PREPROCESSING ---
# --- Save preprocessed input for sample 0 ---
if sample_idx == 0 :
preprocessed_dir = Path ( self . cpp_output_dir ) / ' resnet '
preprocessed_dir . mkdir ( parents = True , exist_ok = True )
py_preprocessed_path = preprocessed_dir / f ' sample_{sample_idx}_image_preprocessed_python.pt '
torch . save ( processed_input_tensor . cpu ( ) , py_preprocessed_path )
print ( f " Saved Python preprocessed image for sample {sample_idx} to {py_preprocessed_path} " )
# --- Save preprocessed input for every sample ---
preprocessed_dir = Path ( self . cpp_output_dir ) / ' resnet '
preprocessed_dir . mkdir ( parents = True , exist_ok = True )
py_preprocessed_path = preprocessed_dir / f ' sample_{sample_idx}_image_preprocessed_python.pt '
torch . save ( processed_input_tensor . cpu ( ) , py_preprocessed_path )
print ( f " Saved Python preprocessed image for sample {sample_idx} to {py_preprocessed_path} " )
# --- END save preprocessed input ---
# --- END save preprocessed input ---
# Initialize dictionaries to store Python-side outputs for the current sample
# Initialize dictionaries to store Python-side outputs for the current sample
@ -987,6 +986,16 @@ class ComparisonRunner:
# processed_samples += 1 # This variable is no longer used as loop is range-based
# processed_samples += 1 # This variable is no longer used as loop is range-based
print ( " --- ResNet Output Comparison Complete --- " )
print ( " --- ResNet Output Comparison Complete --- " )
def compare_resnet_debug_outputs(self, sample_idx=0, verbose=True):
    """Compare intermediate ResNet debug outputs between C++ and Python.

    Delegates the tensor-level comparison to ``compare_resnet_debug_tensors``
    and records the resulting per-stage statistics on the instance so the
    HTML report generator can pick them up.

    Args:
        sample_idx: Index of the sample whose debug tensors are compared.
        verbose: Forwarded to the comparison helper to control its logging.
    """
    # C++ debug tensors are written under <cpp_output_dir>/resnet, while the
    # Python side dumps its intermediates under <python_output_dir>/resnet_debug.
    cpp_debug_dir = os.path.join(self.cpp_output_dir, 'resnet')
    python_debug_dir = os.path.join(self.python_output_dir, 'resnet_debug')

    stats = compare_resnet_debug_tensors(
        cpp_debug_dir,
        python_debug_dir,
        sample_idx=sample_idx,
        verbose=verbose,
    )

    # Store the stats twice: as a dedicated attribute for direct access, and
    # under the shared 'ResNetDebug' key consumed by generate_html_report().
    self.resnet_debug_results = stats
    self.all_comparison_stats['ResNetDebug'] = stats
def generate_html_report ( self ) :
def generate_html_report ( self ) :
print ( " \n Generating HTML report... " )
print ( " \n Generating HTML report... " )
report_path = os . path . join ( self . comparison_dir , " report.html " )
report_path = os . path . join ( self . comparison_dir , " report.html " )
@ -1260,6 +1269,23 @@ class ComparisonRunner:
f . write ( html_content )
f . write ( html_content )
print ( f " HTML report generated at {report_path} " )
print ( f " HTML report generated at {report_path} " )
# Add ResNet Debug Output Comparison Section
if ' ResNetDebug ' in self . all_comparison_stats :
html_content + = " <h2>ResNet Intermediate Debug Output Comparison</h2> "
html_content + = " <table><tr><th>Stage</th><th>Cosine Similarity</th><th>Allclose</th><th>Max Abs Diff</th><th>CPP Shape</th><th>PY Shape</th></tr> "
for stage , stats in self . all_comparison_stats [ ' ResNetDebug ' ] . items ( ) :
html_content + = f " <tr><td>{stage}</td><td>{stats[ ' cosine_similarity ' ]:.6f}</td><td>{stats[ ' allclose ' ]}</td><td>{stats[ ' max_abs_diff ' ]:.6f}</td><td>{stats[ ' cpp_shape ' ]}</td><td>{stats[ ' py_shape ' ]}</td></tr> "
html_content + = " </table> "
# ... rest of HTML ...
html_content + = " </body></html> "
# Save HTML
report_path = os . path . join ( self . comparison_dir , " comparison_report.html " )
with open ( report_path , " w " ) as f :
f . write ( html_content )
print ( f " HTML report generated at: {report_path} " )
return report_path
def _generate_single_plot ( self , error_array , title , plot_path , mean_val , std_abs_err , mae , max_err ) :
def _generate_single_plot ( self , error_array , title , plot_path , mean_val , std_abs_err , mae , max_err ) :
if error_array is None or len ( error_array ) == 0 or np . all ( np . isnan ( error_array ) ) :
if error_array is None or len ( error_array ) == 0 or np . all ( np . isnan ( error_array ) ) :
# print(f"Skipping plot for {title} as error_array is empty or all NaNs.")
# print(f"Skipping plot for {title} as error_array is empty or all NaNs.")
@ -1287,6 +1313,10 @@ class ComparisonRunner:
self . compare_classifier ( )
self . compare_classifier ( )
self . compare_bb_regressor ( )
self . compare_bb_regressor ( )
self . compare_preprocessed_inputs ( ) # ADDED
self . compare_preprocessed_inputs ( ) # ADDED
# Compare ResNet debug outputs before generating HTML
print ( " \n Comparing ResNet intermediate debug outputs... " )
self . compare_resnet_debug_outputs ( sample_idx = 0 , verbose = True )
# ... rest of tests ...
self . generate_html_report ( )
self . generate_html_report ( )
print ( " All tests completed! " )
print ( " All tests completed! " )