/*
 * Copyright (c) ICG. All rights reserved.
 * See copyright.txt for more information.
 *
 * Institute for Computer Graphics and Vision
 * Graz, University of Technology / Austria
 *
 * This software is distributed WITHOUT ANY WARRANTY; without even
 * the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the above copyright notices for more information.
 *
 * Project     : MIPItkProjects
 * Module      : Evaluation
 * Class       : $RCSfile: CompareWarpedImages.cxx,v $
 * Language    : C++
 * Author      : Martin Urschler
 * EMail       : urschler@icg.tu-graz.ac.at
 * Date        : $Date: 2007-04-23 11:46:49 $
 * Version     : $Revision: 1.21 $
 * Full Id     : $Id: CompareWarpedImages.cxx,v 1.21 2007-04-23 11:46:49 urschler Exp $
 */

#include "commonItkTypedefs.h"
#include "VolumeIOWrapper.h"
#include "SmallUtilityMethods.h"

#include "itkJoinImageFilter.h"
#include "itkImageToHistogramGenerator.h"
#include "itkImageRegionConstIterator.h"
#include "itkCannyEdgeDetectionImageFilter.h"
#include "itkRecursiveGaussianImageFilter.h"

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <vector>

// FIXXME: at the moment this measure also takes the resampleDefaultValue
// into account and creates bins including these values -> this will bias
// the evaluation

/**
 * Computes joint entropy, marginal entropies, mutual information and
 * normalized mutual information (NMI) of two equally sized images and
 * returns the NMI in \a nmi. Follows the joint-histogram approach of the
 * ITK Software Guide mutual information example.
 *
 * \param image1 first input image (e.g. the fixed image)
 * \param image2 second input image (e.g. the warped moving image)
 * \param nmi    output: 2*MI / (H1 + H2)
 */
void calculateMutualInformationStatistics(
  const Int16ImageType::Pointer image1,
  const Int16ImageType::Pointer image2,
  double& nmi )
{
  // Join both inputs into a single two-component (vector) image so that a
  // joint histogram can be computed over intensity pairs.
  typedef itk::JoinImageFilter< Int16ImageType, Int16ImageType > JoinFilterType;
  JoinFilterType::Pointer joinFilter = JoinFilterType::New();
  joinFilter->SetInput1( image1 );
  joinFilter->SetInput2( image2 );
  joinFilter->Update();

  // The histogram generator is templated over the joined (vector) image type.
  typedef JoinFilterType::OutputImageType VectorImageType;
  typedef itk::Statistics::ImageToHistogramGenerator< VectorImageType >
    HistogramGeneratorType;
  HistogramGeneratorType::Pointer histogramGenerator =
    HistogramGeneratorType::New();

  // The marginal scale defines the precision used when classifying values
  // into histogram bins.
  histogramGenerator->SetInput( joinFilter->GetOutput() );
  histogramGenerator->SetMarginalScale( 10.0 );

  // Joint histogram: 255 bins per intensity channel.
  typedef HistogramGeneratorType::SizeType SizeType;
  SizeType size;
  size[0] = 255;  // number of bins for the first channel
  size[1] = 255;  // number of bins for the second channel
  histogramGenerator->SetNumberOfBins( size );
  histogramGenerator->Compute();

  typedef HistogramGeneratorType::HistogramType HistogramType;
  const HistogramType* histogram = histogramGenerator->GetOutput();

  // Walk over all bins and accumulate the joint entropy. Frequencies are
  // normalized by the total frequency to obtain probability estimates.
  HistogramType::ConstIterator itr = histogram->Begin();
  HistogramType::ConstIterator end = histogram->End();
  const double Sum = histogram->GetTotalFrequency();

  // Bins with zero counts are skipped to avoid log(0); log base 2 yields
  // entropy in bits.
  double JointEntropy = 0.0;
  while ( itr != end )
  {
    const double count = itr.GetFrequency();
    if ( count > 0.0 )
    {
      const double probability = count / Sum;
      JointEntropy += -probability * log( probability ) / log( 2.0 );
    }
    ++itr;
  }
  std::cout << "Joint Entropy = " << JointEntropy << " bits " << std::endl;

  // Marginal entropy of image1: collapse the second channel to a single bin
  // and recompute the histogram.
  size[0] = 255;  // number of bins for the first channel
  size[1] = 1;    // number of bins for the second channel
  histogramGenerator->SetNumberOfBins( size );
  histogramGenerator->Compute();

  itr = histogram->Begin();
  end = histogram->End();
  double Entropy1 = 0.0;
  while ( itr != end )
  {
    const double count = itr.GetFrequency();
    if ( count > 0.0 )
    {
      const double probability = count / Sum;
      Entropy1 += -probability * log( probability ) / log( 2.0 );
    }
    ++itr;
  }
  std::cout << "Image1 Entropy = " << Entropy1 << " bits " << std::endl;

  // Marginal entropy of image2: swap the bin counts.
  size[0] = 1;    // number of bins for the first channel
  size[1] = 255;  // number of bins for the second channel
  histogramGenerator->SetNumberOfBins( size );
  histogramGenerator->Compute();

  itr = histogram->Begin();
  end = histogram->End();
  double Entropy2 = 0.0;
  while ( itr != end )
  {
    const double count = itr.GetFrequency();
    if ( count > 0.0 )
    {
      const double probability = count / Sum;
      Entropy2 += -probability * log( probability ) / log( 2.0 );
    }
    ++itr;
  }
  std::cout << "Image2 Entropy = " << Entropy2 << " bits " << std::endl;

  const double MutualInformation = Entropy1 + Entropy2 - JointEntropy;
  std::cout << "Mutual Information = " << MutualInformation << " bits "
            << std::endl;

  // Normalized Mutual Information: MI divided by the mean entropy of the
  // two inputs.
  const double NormalizedMutualInformation1 =
    2.0 * MutualInformation / ( Entropy1 + Entropy2 );
  std::cout << "Normalized Mutual Information 1 = "
            << NormalizedMutualInformation1 << std::endl;

  nmi = NormalizedMutualInformation1;
}

/**
 * Extracts differential characteristics (currently the mean curvature Lvv)
 * of image1 according to Hellier03a and Florack92, writes a debug volume and
 * returns 0.0.
 *
 * NOTE(review): image2 and warpingDefaultValue are not used yet — the
 * actual comparison measure is unfinished (see the debug write below).
 */
double calculateDifferentialCharacteristicMeasure(
  const Int16ImageType::Pointer image1,
  const Int16ImageType::Pointer image2,
  const int warpingDefaultValue )
{
  const unsigned int Dimensions =
    itk::GetImageDimension<Int16ImageType>::ImageDimension;

  // Gaussian scale used for every derivative direction.
  const double sigma[3] = { 1.0, 1.0, 1.0 };

  // First derivatives are taken from the int16 input, second derivatives
  // from the float first-derivative volumes.
  typedef itk::RecursiveGaussianImageFilter< Int16ImageType, FloatImageType >
    DerivativeFilterTypeA;
  typedef itk::RecursiveGaussianImageFilter< FloatImageType, FloatImageType >
    DerivativeFilterTypeB;

  {
    Int16ImageType::Pointer image = image1;
    FloatImageType::Pointer gradFirst[3];
    FloatImageType::Pointer gradSecond[6];

    // First order derivatives gX, gY, gZ.
    for ( unsigned int dim = 0; dim < Dimensions; ++dim )
    {
      DerivativeFilterTypeA::Pointer filter = DerivativeFilterTypeA::New();
      filter->SetDirection( dim );
      filter->SetOrder( DerivativeFilterTypeA::FirstOrder );
      filter->SetSigma( sigma[dim] );
      filter->SetNormalizeAcrossScale( false );
      filter->SetInput( image );
      ITK_EXCEPTION_CHECKED( "First Derivative of image!",
                             filter->Update(), 0 );
      gradFirst[dim] = filter->GetOutput();
    }

    // Second order derivatives in the order XX, XY, XZ, YY, YZ, ZZ
    // (dim2 starts at dim1, which fills exactly the 6 slots).
    unsigned int counter = 0;
    for ( unsigned int dim1 = 0; dim1 < Dimensions; ++dim1 )
    {
      for ( unsigned int dim2 = dim1; dim2 < Dimensions; ++dim2 )
      {
        DerivativeFilterTypeB::Pointer filter = DerivativeFilterTypeB::New();
        filter->SetDirection( dim2 );
        filter->SetOrder( DerivativeFilterTypeB::FirstOrder );
        filter->SetSigma( sigma[dim2] );
        filter->SetNormalizeAcrossScale( false );
        filter->SetInput( gradFirst[dim1] );
        ITK_EXCEPTION_CHECKED( "Second Derivative of image!",
                               filter->Update(), 0 );
        gradSecond[counter] = filter->GetOutput();
        ++counter;
      }
    }

    std::cout << "start with allocation!" << std::endl;
    ITK_IMAGE_ALLOCATE_MACRO( FloatImageType, differentialCharacteristicImage,
                              Int16ImageType, image );
    std::cout << "start with calculation!" << std::endl;

    itk::ImageRegionIterator<FloatImageType> diffIter(
      differentialCharacteristicImage,
      differentialCharacteristicImage->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itX(
      gradFirst[0], gradFirst[0]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itY(
      gradFirst[1], gradFirst[1]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itZ(
      gradFirst[2], gradFirst[2]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itXX(
      gradSecond[0], gradSecond[0]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itXY(
      gradSecond[1], gradSecond[1]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itXZ(
      gradSecond[2], gradSecond[2]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itYY(
      gradSecond[3], gradSecond[3]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itYZ(
      gradSecond[4], gradSecond[4]->GetLargestPossibleRegion() );
    itk::ImageRegionConstIterator<FloatImageType> itZZ(
      gradSecond[5], gradSecond[5]->GetLargestPossibleRegion() );

    while ( !diffIter.IsAtEnd() )
    {
      const float gX  = itX.Get();
      const float gY  = itY.Get();
      const float gZ  = itZ.Get();
      const float gXX = itXX.Get();
      const float gYY = itYY.Get();
      const float gZZ = itZZ.Get();
      const float gXY = itXY.Get();
      const float gXZ = itXZ.Get();
      const float gYZ = itYZ.Get();

      // Mean curvature Lvv in gauge coordinates (Florack92).
      // NOTE(review): a zero gradient magnitude produces a division by
      // zero here, exactly as in the original implementation.
      const float lvv = -1.0f / ( 2.0f * ( gX * gX + gY * gY + gZ * gZ ) ) *
        ( ( gX * gX * ( gYY + gZZ ) - 2.0f * gY * gZ * gYZ ) +
          ( gY * gY * ( gXX + gZZ ) - 2.0f * gX * gZ * gXZ ) +
          ( gZ * gZ * ( gYY + gXX ) - 2.0f * gY * gX * gXY ) );

      // // gaussian curvature
      // // float lvv = (gX*gX*(gYY*gZZ-gYZ*gYZ) + gY*gY*(gXX*gZZ-gXZ*gXZ) +
      // //   gZ*gZ*(gYY*gXX-gXY*gXY) + 2.0f*(gY*gZ*(gXZ*gXY-gXX*gYZ) +
      // //   gX*gZ*(gYZ*gXY-gYY*gXZ) + gX*gY*(gXZ*gYZ-gZZ*gXY)));
      // // if (lvv > 4096.0f) lvv = 4096.0f;
      // // if (lvv < -2048.0f) lvv = 2048.0f;

      diffIter.Set( lvv );

      ++itX; ++itY; ++itZ;
      ++itXX; ++itXY; ++itXZ; ++itYY; ++itYZ; ++itZZ;
      ++diffIter;
    }

    std::cout << SmallUtilityMethods::getMinimumVoxelValue(
                   differentialCharacteristicImage ) << std::endl;
    std::cout << SmallUtilityMethods::getMaximumVoxelValue(
                   differentialCharacteristicImage ) << std::endl;

    // Debug output volume (hard-coded path, development leftover).
    VolumeIOWrapper::writeITKVolume16Bit(
      differentialCharacteristicImage, "D:\\temp\\florack1.hdr", "" );
  }

  return 0.0;
}

/**
 * Compares the Canny edge maps of both images: the mean absolute difference
 * of the edge responses over all voxels whose 3x3x3 original-intensity
 * neighborhood contains no warpingDefaultValue.
 *
 * \return mean absolute edge-map difference (0.0 if no voxel qualifies)
 */
double calculateEdgeOverlapMeasure(
  const Int16ImageType::Pointer image1,
  const Int16ImageType::Pointer image2,
  const int warpingDefaultValue )
{
  double edgeOverlapMeasure = 0.0;

  // Canny edge detection on both images; the variance is derived from the
  // voxel spacing (anisotropic: 3x in-plane direction 0, 2x otherwise).
  const Int16ImageType::SpacingType spacing1 = image1->GetSpacing();
  const Int16ImageType::SpacingType spacing2 = image2->GetSpacing();

  typedef itk::CannyEdgeDetectionImageFilter< FloatImageType, FloatImageType >
    CannyFilterType;

  FloatImageType::Pointer edges1 = 0;
  {
    // TODO(review): helper name reconstructed from mangled source — confirm
    // against SmallUtilityMethods.h.
    FloatImageType::Pointer image1_float =
      SmallUtilityMethods::castItkVolume< Int16ImageType, FloatImageType >(
        image1 );
    CannyFilterType::Pointer canny = CannyFilterType::New();
    canny->SetInput( image1_float );
    CannyFilterType::ArrayType variance;
    variance[0] = vnl_math_sqr( 3.0 * spacing1[0] );
    variance[1] = vnl_math_sqr( 2.0 * spacing1[1] );
    variance[2] = vnl_math_sqr( 2.0 * spacing1[2] );
    canny->SetVariance( variance );
    ITK_EXCEPTION_CHECKED( "perform canny on image 1", canny->Update(), 0.0 );
    edges1 = canny->GetOutput();
  }
  std::cout << SmallUtilityMethods::getMinimumVoxelValue( edges1 ) << std::endl;
  std::cout << SmallUtilityMethods::getMaximumVoxelValue( edges1 ) << std::endl;
  //VolumeIOWrapper::writeITKVolume16Bit(
  //  edges1, "D:\\temp\\edges1.hdr", "" );

  FloatImageType::Pointer edges2 = 0;
  {
    FloatImageType::Pointer image2_float =
      SmallUtilityMethods::castItkVolume< Int16ImageType, FloatImageType >(
        image2 );
    CannyFilterType::Pointer canny = CannyFilterType::New();
    canny->SetInput( image2_float );
    CannyFilterType::ArrayType variance;
    variance[0] = vnl_math_sqr( 3.0 * spacing2[0] );
    variance[1] = vnl_math_sqr( 2.0 * spacing2[1] );
    variance[2] = vnl_math_sqr( 2.0 * spacing2[2] );
    canny->SetVariance( variance );
    ITK_EXCEPTION_CHECKED( "perform canny on image 2", canny->Update(), 0.0 );
    edges2 = canny->GetOutput();
  }
  std::cout << SmallUtilityMethods::getMinimumVoxelValue( edges2 ) << std::endl;
  std::cout << SmallUtilityMethods::getMaximumVoxelValue( edges2 ) << std::endl;
  //VolumeIOWrapper::writeITKVolume16Bit(
  //  edges2, "D:\\temp\\edges2.hdr", "" );

  const unsigned int neighborhoodRadius = 1;
  typedef itk::ConstNeighborhoodIterator< Int16ImageType > IterType;
  IterType::RadiusType radius;
  radius.Fill( neighborhoodRadius );

  IterType origIt1( radius, image1, image1->GetLargestPossibleRegion() );
  IterType origIt2( radius, image2, image2->GetLargestPossibleRegion() );
  itk::ImageRegionConstIterator<FloatImageType> it1(
    edges1, edges1->GetLargestPossibleRegion() );
  itk::ImageRegionConstIterator<FloatImageType> it2(
    edges2, edges2->GetLargestPossibleRegion() );

  unsigned long nrVoxels = 0L;
  const unsigned int totalNbhBufferSize =
    origIt1.GetSize()[0] * origIt1.GetSize()[1] * origIt1.GetSize()[2];

  while ( !it1.IsAtEnd() )
  {
    // Look at all original pixel values in the 3x3x3 neighborhood; if any of
    // them equals the warpingDefaultValue this voxel is skipped.
    // TODO(review): the exact skip condition was reconstructed from mangled
    // source — confirm whether both neighborhoods or only origIt1 are tested.
    bool skip = false;
    for ( unsigned int nb = 0; nb < totalNbhBufferSize; ++nb )
    {
      if ( origIt1.GetPixel( nb ) == warpingDefaultValue ||
           origIt2.GetPixel( nb ) == warpingDefaultValue )
      {
        skip = true;
        break;
      }
    }

    if ( !skip )
    {
      const float value1 = it1.Get();
      const float value2 = it2.Get();
      const double difference = vcl_fabs(
        static_cast<double>( value1 ) - static_cast<double>( value2 ) );
      edgeOverlapMeasure += difference;
      ++nrVoxels;
    }

    ++it1;
    ++it2;
    ++origIt1;
    ++origIt2;
  }

  // Guard against division by zero when every voxel was skipped.
  if ( nrVoxels > 0L )
    edgeOverlapMeasure /= static_cast<double>( nrVoxels );

  std::cout << "edgeOverlapMeasure: " << edgeOverlapMeasure << std::endl;
  return edgeOverlapMeasure;
}

/**
 * Median absolute deviation (MAD) of the voxel-wise intensity differences.
 * Voxels where either image carries the warpingDefaultValue are ignored,
 * since a registration can never estimate those values.
 *
 * \return MAD of |image1 - image2| (0.0 if no voxel qualifies)
 */
double calculateMedianAbsoluteDeviationIntensity(
  const Int16ImageType::Pointer image1,
  const Int16ImageType::Pointer image2,
  const int warpingDefaultValue )
{
  itk::ImageRegionConstIterator<Int16ImageType> it1(
    image1, image1->GetLargestPossibleRegion() );
  itk::ImageRegionConstIterator<Int16ImageType> it2(
    image2, image2->GetLargestPossibleRegion() );

  double medianDeviationIntensity = 0.0;

  const Int16ImageType::SizeType imageSize =
    image1->GetLargestPossibleRegion().GetSize();
  const unsigned long vectorSize = imageSize[0] * imageSize[1] * imageSize[2];
  std::cout << "imageSize: " << imageSize << std::endl;
  std::cout << "vectorSize: " << vectorSize << std::endl;

  // Pass 1: median of the absolute differences.
  int medianDifference = 0;
  {
    std::vector<int> intensityDifferences;
    intensityDifferences.reserve( vectorSize );
    while ( !it1.IsAtEnd() )
    {
      const Int16ImageType::PixelType value1 = it1.Get();
      const Int16ImageType::PixelType value2 = it2.Get();
      // Ignore voxels where the synthetic transformation produced the
      // warpingDefaultValue; these can never be estimated by registration.
      if ( value1 != warpingDefaultValue && value2 != warpingDefaultValue )
      {
        const int difference = static_cast<int>( vcl_fabs(
          static_cast<double>( value1 ) - static_cast<double>( value2 ) ) );
        intensityDifferences.push_back( difference );
      }
      ++it1;
      ++it2;
    }

    // nth_element gives the median in O(n); guard against an empty vector
    // (dereferencing end() would be undefined behavior).
    if ( !intensityDifferences.empty() )
    {
      const std::vector<int>::iterator medianIterator =
        intensityDifferences.begin() + intensityDifferences.size() / 2L;
      std::nth_element( intensityDifferences.begin(), medianIterator,
                        intensityDifferences.end() );
      medianDifference = *medianIterator;
    }
  }
  std::cout << "median intensity difference: " << medianDifference
            << std::endl;

  // Pass 2: median of |difference - median|.
  it1.GoToBegin();
  it2.GoToBegin();
  {
    std::vector<int> intensityDifferences;
    intensityDifferences.reserve( vectorSize );
    while ( !it1.IsAtEnd() )
    {
      const Int16ImageType::PixelType value1 = it1.Get();
      const Int16ImageType::PixelType value2 = it2.Get();
      if ( value1 != warpingDefaultValue && value2 != warpingDefaultValue )
      {
        const int difference = static_cast<int>( vcl_fabs(
          vcl_fabs( static_cast<double>( value1 ) -
                    static_cast<double>( value2 ) ) -
          static_cast<double>( medianDifference ) ) );
        intensityDifferences.push_back( difference );
      }
      ++it1;
      ++it2;
    }

    if ( !intensityDifferences.empty() )
    {
      const std::vector<int>::iterator medianIterator =
        intensityDifferences.begin() + intensityDifferences.size() / 2L;
      std::nth_element( intensityDifferences.begin(), medianIterator,
                        intensityDifferences.end() );
      medianDeviationIntensity = *medianIterator;
    }
  }
  std::cout << "median deviation: " << medianDeviationIntensity << std::endl;

  return medianDeviationIntensity;
}

/**
 * Robust maximum (95th percentile) of the voxel-wise absolute intensity
 * differences, ignoring warpingDefaultValue voxels in either image.
 *
 * \return 95th percentile of |image1 - image2| (0.0 if no voxel qualifies)
 */
double calculateRobustMaxAbsoluteDeviationIntensity(
  const Int16ImageType::Pointer image1,
  const Int16ImageType::Pointer image2,
  const int warpingDefaultValue )
{
  itk::ImageRegionConstIterator<Int16ImageType> it1(
    image1, image1->GetLargestPossibleRegion() );
  itk::ImageRegionConstIterator<Int16ImageType> it2(
    image2, image2->GetLargestPossibleRegion() );

  double robustMaxDeviationIntensity = 0.0;

  const Int16ImageType::SizeType imageSize =
    image1->GetLargestPossibleRegion().GetSize();
  const unsigned long vectorSize = imageSize[0] * imageSize[1] * imageSize[2];
  std::cout << "imageSize: " << imageSize << std::endl;
  std::cout << "vectorSize: " << vectorSize << std::endl;

  {
    std::vector<int> intensityDifferences;
    intensityDifferences.reserve( vectorSize );
    while ( !it1.IsAtEnd() )
    {
      const Int16ImageType::PixelType value1 = it1.Get();
      const Int16ImageType::PixelType value2 = it2.Get();
      // Ignore voxels where the synthetic transformation produced the
      // warpingDefaultValue; these can never be estimated by registration.
      if ( value1 != warpingDefaultValue && value2 != warpingDefaultValue )
      {
        const int difference = static_cast<int>( vcl_fabs(
          static_cast<double>( value1 ) - static_cast<double>( value2 ) ) );
        intensityDifferences.push_back( difference );
      }
      ++it1;
      ++it2;
    }

    // 95th percentile via nth_element; guard against an empty vector.
    if ( !intensityDifferences.empty() )
    {
      const std::vector<int>::iterator robustMaxIterator =
        intensityDifferences.begin() +
        ( intensityDifferences.size() / 100L ) * 95L;
      std::nth_element( intensityDifferences.begin(), robustMaxIterator,
                        intensityDifferences.end() );
      robustMaxDeviationIntensity = *robustMaxIterator;
    }
  }
  std::cout << "robust max intensity difference: "
            << robustMaxDeviationIntensity << std::endl;

  return robustMaxDeviationIntensity;
}

/**
 * Entry point: compares a fixed image against (a) the original moving image
 * and (b) the moving image warped back to the fixed image, using intensity
 * difference statistics, robust deviations, edge overlap and NMI. Optionally
 * appends two CSV rows (before/after registration) to a result file.
 */
int main( int argc, char* argv[] )
{
  if ( argc < 5 || argc > 6 )
  {
    std::cerr << "Usage: " << std::endl;
    std::cerr << argv[0] << " original_fixed_image original_moving_image"
              << " moving_image_warped_to_fixed resampleDefaultValue"
              << " [result_file.csv]" << std::endl;
    return EXIT_FAILURE;
  }

  bool writeCSVFile = false;
  if ( argc == 6 )
    writeCSVFile = true;

  std::string original_fixed_image_filename( argv[1] );
  std::string original_moving_image_filename( argv[2] );
  std::string unwarped_image_filename( argv[3] );
  std::string result_statistics_filename;
  if ( writeCSVFile )
    result_statistics_filename = std::string( argv[5] );

  const Int16ImageType::PixelType warpingDefaultValue = atoi( argv[4] );
  std::cout << "warpingDefaultValue: " << warpingDefaultValue << std::endl;

  Int16ImageType::Pointer original_fixed_image =
    VolumeIOWrapper::readITKVolume( original_fixed_image_filename, "" );
  Int16ImageType::Pointer original_moving_image =
    VolumeIOWrapper::readITKVolume( original_moving_image_filename, "" );
  Int16ImageType::Pointer unwarped_image =
    VolumeIOWrapper::readITKVolume( unwarped_image_filename, "" );

  if ( original_fixed_image->GetLargestPossibleRegion().GetSize() !=
       unwarped_image->GetLargestPossibleRegion().GetSize() )
  {
    std::cerr << "images have to be of the same size!" << std::endl;
    return EXIT_FAILURE;
  }

  // HERE THE VALIDATION STARTS

  // Differences above this threshold are clamped to it. Large differences
  // are mismatches no matter how large, so clamping makes the evaluation
  // more robust; it also handles resampleDefaultValue voxels in the
  // unwarped image by assigning them the clamp threshold.
  const double intensity_difference_clamp_threshold = 100.0;

  itk::ImageRegionConstIterator<Int16ImageType> fixed_it(
    original_fixed_image, original_fixed_image->GetLargestPossibleRegion() );
  itk::ImageRegionConstIterator<Int16ImageType> moving_it(
    original_moving_image, original_moving_image->GetLargestPossibleRegion() );
  itk::ImageRegionConstIterator<Int16ImageType> unwarp_it(
    unwarped_image, unwarped_image->GetLargestPossibleRegion() );

  // The metric accumulators.
  long numberOfMeanSquaredIntensityVoxels = 0L;
  long numberOfMeanSquaredIntensityVoxelsUnwarped = 0L;
  double mean_intensity_difference = 0.0;
  double std_intensity_difference = 0.0;
  double rms_intensity_difference = 0.0;
  double min_intensity_difference = vcl_numeric_limits<double>::max();
  // Absolute differences are nonnegative, so 0.0 is a correct lower bound
  // for the running maximum (numeric_limits<double>::min() would be the
  // smallest positive double, not the most negative value).
  double max_intensity_difference = 0.0;
  double mean_intensity_difference_unwarped = 0.0;
  double std_intensity_difference_unwarped = 0.0;
  double rms_intensity_difference_unwarped = 0.0;
  double min_intensity_difference_unwarped = vcl_numeric_limits<double>::max();
  double max_intensity_difference_unwarped = 0.0;

  while ( !fixed_it.IsAtEnd() )
  {
    const Int16ImageType::PixelType fixedValue = fixed_it.Get();
    const Int16ImageType::PixelType movingValue = moving_it.Get();
    {
      // Ignore voxels where the synthetic transformation produced the
      // warpingDefaultValue; these can never be estimated by registration.
      if ( fixedValue != warpingDefaultValue &&
           movingValue != warpingDefaultValue )
      {
        const double difference = vcl_fabs(
          static_cast<double>( fixedValue ) -
          static_cast<double>( movingValue ) );
        const double clamped_difference =
          ( difference > intensity_difference_clamp_threshold ) ?
            intensity_difference_clamp_threshold : difference;
        if ( difference < min_intensity_difference )
          min_intensity_difference = difference;
        if ( difference > max_intensity_difference )
          max_intensity_difference = difference;
        mean_intensity_difference += clamped_difference;
        rms_intensity_difference += vnl_math_sqr( clamped_difference );
        ++numberOfMeanSquaredIntensityVoxels;
      }
    }

    const Int16ImageType::PixelType unwarpedValue = unwarp_it.Get();
    {
      if ( fixedValue != warpingDefaultValue )
      {
        const double difference = vcl_fabs(
          static_cast<double>( fixedValue ) -
          static_cast<double>( unwarpedValue ) );
        // A default-valued unwarped voxel is treated as a maximal mismatch.
        const double clamped_difference =
          ( unwarpedValue == warpingDefaultValue ||
            difference > intensity_difference_clamp_threshold ) ?
            intensity_difference_clamp_threshold : difference;
        if ( difference < min_intensity_difference_unwarped )
          min_intensity_difference_unwarped = difference;
        if ( difference > max_intensity_difference_unwarped )
          max_intensity_difference_unwarped = difference;
        mean_intensity_difference_unwarped += clamped_difference;
        rms_intensity_difference_unwarped +=
          vnl_math_sqr( clamped_difference );
        ++numberOfMeanSquaredIntensityVoxelsUnwarped;
      }
    }

    ++fixed_it;
    ++moving_it;
    ++unwarp_it;
  }

  // Never change the order of these three calculations: the standard
  // deviation is derived from the running sums (mean and sum of squares)
  // before rms is normalized.
  mean_intensity_difference /= (double)numberOfMeanSquaredIntensityVoxels;
  std_intensity_difference =
    sqrt( 1.0 / ( (double)( numberOfMeanSquaredIntensityVoxels - 1.0 ) ) *
          ( rms_intensity_difference -
            numberOfMeanSquaredIntensityVoxels *
            mean_intensity_difference * mean_intensity_difference ) );
  rms_intensity_difference =
    sqrt( rms_intensity_difference /
          (double)numberOfMeanSquaredIntensityVoxels );

  // Same running-statistics scheme for the unwarped comparison.
  mean_intensity_difference_unwarped /=
    (double)numberOfMeanSquaredIntensityVoxelsUnwarped;
  std_intensity_difference_unwarped =
    sqrt( 1.0 /
          ( (double)( numberOfMeanSquaredIntensityVoxelsUnwarped - 1.0 ) ) *
          ( rms_intensity_difference_unwarped -
            numberOfMeanSquaredIntensityVoxelsUnwarped *
            mean_intensity_difference_unwarped *
            mean_intensity_difference_unwarped ) );
  rms_intensity_difference_unwarped =
    sqrt( rms_intensity_difference_unwarped /
          (double)numberOfMeanSquaredIntensityVoxelsUnwarped );

  const double medianDeviationIntensity =
    calculateMedianAbsoluteDeviationIntensity(
      original_fixed_image, original_moving_image, warpingDefaultValue );
  const double medianDeviationIntensityUnwarped =
    calculateMedianAbsoluteDeviationIntensity(
      original_fixed_image, unwarped_image, warpingDefaultValue );

  const double robustMaxDeviationIntensity =
    calculateRobustMaxAbsoluteDeviationIntensity(
      original_fixed_image, original_moving_image, warpingDefaultValue );
  const double robustMaxDeviationIntensityUnwarped =
    calculateRobustMaxAbsoluteDeviationIntensity(
      original_fixed_image, unwarped_image, warpingDefaultValue );

  // Edge overlap comparison.
  const double edgeOverlap = calculateEdgeOverlapMeasure(
    original_fixed_image, original_moving_image, warpingDefaultValue );
  const double edgeOverlapUnwarped = calculateEdgeOverlapMeasure(
    original_fixed_image, unwarped_image, warpingDefaultValue );

  //const double diffCharacteristicMeasure =
  //  calculateDifferentialCharacteristicMeasure(original_fixed_image,
  //    original_moving_image, warpingDefaultValue);

  // Print statistics.
  std::cout << "numberOfMeanSquaredIntensityVoxels: "
            << numberOfMeanSquaredIntensityVoxels << std::endl;
  std::cout << "Mean intensity difference: " << mean_intensity_difference
            << std::endl;
  std::cout << "STD intensity difference: " << std_intensity_difference
            << std::endl;
  std::cout << "RMS intensity difference: " << rms_intensity_difference
            << std::endl;
  //std::cout << "min intensity difference: " << min_intensity_difference <<
  //  std::endl;
  std::cout << "max intensity difference: " << max_intensity_difference
            << std::endl << std::endl;

  std::cout << "numberOfMeanSquaredIntensityVoxelsUnwarped: "
            << numberOfMeanSquaredIntensityVoxelsUnwarped << std::endl;
  std::cout << "Mean intensity difference unwarped: "
            << mean_intensity_difference_unwarped << std::endl;
  std::cout << "STD intensity difference unwarped: "
            << std_intensity_difference_unwarped << std::endl;
  std::cout << "RMS intensity difference unwarped: "
            << rms_intensity_difference_unwarped << std::endl;
  //std::cout << "min intensity difference unwarped: " <<
  //  min_intensity_difference_unwarped << std::endl;
  std::cout << "max intensity difference unwarped: "
            << max_intensity_difference_unwarped << std::endl << std::endl;

  // Mutual information statistics.
  double nmi = 0.0;
  double nmi_unwarped = 0.0;
  calculateMutualInformationStatistics(
    original_fixed_image, original_moving_image, nmi );
  calculateMutualInformationStatistics(
    original_fixed_image, unwarped_image, nmi_unwarped );
  std::cout << "nmi before: " << nmi << std::endl;
  std::cout << "nmi after: " << nmi_unwarped << std::endl;

  if ( writeCSVFile )
  {
    // Append two semicolon-separated rows (before/after registration).
    std::cout << "trying to open file " << result_statistics_filename.c_str()
              << std::endl;
    std::ofstream result_file;
    try
    {
      result_file.open( result_statistics_filename.c_str(),
                        std::fstream::app );
      if ( result_file.is_open() )
      {
        result_file << original_fixed_image_filename << ";"
                    << original_moving_image_filename << ";"
                    << rms_intensity_difference << ";"
                    << std_intensity_difference << ";"
                    << min_intensity_difference << ";"
                    << robustMaxDeviationIntensity << ";"
                    << nmi << ";"
                    << medianDeviationIntensity << ";"
                    << edgeOverlap << ";"
                    << mean_intensity_difference << ";" << std::endl;
        result_file << original_fixed_image_filename << ";"
                    << unwarped_image_filename << ";"
                    << rms_intensity_difference_unwarped << ";"
                    << std_intensity_difference_unwarped << ";"
                    << min_intensity_difference_unwarped << ";"
                    << robustMaxDeviationIntensityUnwarped << ";"
                    << nmi_unwarped << ";"
                    << medianDeviationIntensityUnwarped << ";"
                    << edgeOverlapUnwarped << ";"
                    << mean_intensity_difference_unwarped << ";" << std::endl;
        result_file.close();
      }
      else
      {
        std::cerr << "file couldn't be opened!" << std::endl;
      }
    }
    catch ( std::exception& )
    {
      std::cerr << "file io exception!" << std::endl;
    }
  }

  return EXIT_SUCCESS;
}

/*typedef itk::NormalizedMutualInformationHistogramImageToImageMetric<
    Int16ImageType, Int16ImageType> MetricType;
  MetricType::Pointer metric = MetricType::New();
  metric->SetFixedImage( original_fixed_image );
  metric->SetMovingImage( original_moving_image );
  itk::AffineTransform::Pointer transform = itk::AffineTransform::New();
  transform->SetIdentity();
  metric->SetTransform(transform);
  itk::LinearInterpolateImageFunction::Pointer interpolator =
    itk::LinearInterpolateImageFunction::New();
  interpolator->SetInputImage( original_moving_image );
  metric->SetInterpolator(interpolator);
  std::cout << "Mutual Information initial: "
            << metric->GetValue(transform->GetParameters()) << std::endl;
  interpolator->SetInputImage( unwarped_image );
  metric->SetMovingImage( unwarped_image );
  std::cout << "Mutual Information unwarped: "
            << metric->GetValue(transform->GetParameters()) << std::endl;*/