From dbfc2756b2b7dd17b7415b30a029185b1f716514 Mon Sep 17 00:00:00 2001
From: Benjamin Loison <12752145+Benjamin-Loison@users.noreply.github.com>
Date: Mon, 13 May 2024 15:21:30 +0200
Subject: [PATCH] Remove commented code

---
 datasets/raise/attribute_source_camera.py | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/datasets/raise/attribute_source_camera.py b/datasets/raise/attribute_source_camera.py
index 6ba705e..9690f62 100755
--- a/datasets/raise/attribute_source_camera.py
+++ b/datasets/raise/attribute_source_camera.py
@@ -74,7 +74,6 @@ def getMultipleColorsImage(singleColorChannelImages):
     return multipleColorsImage
 
 def getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera):
-    #singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) if DENOISER != Denoiser.MEAN else (cameraColorMeans[camera][color] if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else (cameraColorMeans[camera][color].mean if )) for color in Color}
     singleColorChannelDenoisedImages = {}
     for color in Color:
         if DENOISER != Denoiser.MEAN:
@@ -85,15 +84,6 @@ def getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, c
                 singleColorChannelDenoisedImage = cameraColorMean
             else:
                 cameraColorCurrentMean = cameraColorMean.mean
-                if cameraColorCurrentMean is None:
-                    print('`cameraColorCurrentMean` is `None`!')
-                    exit(2)
-                '''
-                if cameraColorCurrentMean is None:
-                    singleColorChannelDenoisedImage = singleColorChannelImages[color]
-                else:
-                    singleColorChannelDenoisedImage = cameraColorCurrentMean
-                '''
                 singleColorChannelDenoisedImage = cameraColorCurrentMean
         singleColorChannelDenoisedImages[color] = singleColorChannelDenoisedImage
     multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
@@ -149,10 +139,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
             imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
 
             cameraIterativeMean = camerasIterativeMean[camera]
-            #print(f'{imagePrnuEstimateNpArray=}')
-            #exit(2)
             cameraIterativeMean.add(imagePrnuEstimateNpArray)
-            #continue
             # If we are considering the last camera and (not `PREDICT_ONLY_ON_WHOLE_TRAINING_SET` or we are considering the last training image), then we proceeded an additional image for all cameras and we can predict the accuracy at this learning step.
             if cameraIndex == numberOfCameras - 1 and (not PREDICT_ONLY_ON_WHOLE_TRAINING_SET or cameraTrainingImageIndex == numberOfTrainingImages - 1):
                 numberOfTrainingImagesAccuracy = 0
@@ -172,7 +159,6 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
                     multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
 
                     cameraTestingImageNoise = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
-                    #print(f'{cameraTestingImageNoise = }')
                     distance = rmsDiffNumpy(cameraTestingImageNoise, camerasIterativeMean[camera].mean)
                     print(f'{cameraTrainingImageIndex=} {cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
                     if minimalDistance is None or distance < minimalDistance: