#72: Implement, possibly incorrectly, first choice
parent 5e6702e007
commit 31704c6e78
@@ -7,7 +7,6 @@ from utils import denoise, iterativeMean, getColorChannel, escapeFilePath, Color
import sys
import os
import random
import scipy

sys.path.insert(0, '../../algorithms/distance/')

@@ -75,7 +74,28 @@ def getMultipleColorsImage(singleColorChannelImages):
    return multipleColorsImage

def getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera):
    singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) if DENOISER != Denoiser.MEAN else cameraColorMeans[camera][color] for color in Color}
    #singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) if DENOISER != Denoiser.MEAN else (cameraColorMeans[camera][color] if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else (cameraColorMeans[camera][color].mean if )) for color in Color}
    singleColorChannelDenoisedImages = {}
    for color in Color:
        if DENOISER != Denoiser.MEAN:
            singleColorChannelDenoisedImage = denoise(singleColorChannelImages[color], DENOISER)
        else:
            cameraColorMean = cameraColorMeans[camera][color]
            if PREDICT_ONLY_ON_WHOLE_TRAINING_SET:
                singleColorChannelDenoisedImage = cameraColorMean
            else:
                cameraColorCurrentMean = cameraColorMean.mean
                if cameraColorCurrentMean is None:
                    print('`cameraColorCurrentMean` is `None`!')
                    exit(2)
                '''
                if cameraColorCurrentMean is None:
                    singleColorChannelDenoisedImage = singleColorChannelImages[color]
                else:
                    singleColorChannelDenoisedImage = cameraColorCurrentMean
                '''
                singleColorChannelDenoisedImage = cameraColorCurrentMean
        singleColorChannelDenoisedImages[color] = singleColorChannelDenoisedImage
    multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
    imagePrnuEstimateNpArray = multipleColorsImage - multipleColorsDenoisedImage
    return imagePrnuEstimateNpArray
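For context, the `MEAN` denoiser branch above leans on a property of the `iterativeMean` helper imported from `utils`: its `mean` attribute stays `None` until the first `add()` call, which is why the function aborts with `exit(2)` when the running mean is still empty. A minimal sketch of such an accumulator, stated as an assumption about how `iterativeMean` behaves rather than its actual implementation:

import numpy as np

class RunningMean:
    # Hypothetical stand-in for `iterativeMean` from `utils`:
    # keeps the mean of all arrays added so far.
    def __init__(self):
        self.mean = None          # stays `None` until the first `add()` call
        self.numberOfElements = 0

    def add(self, element):
        self.numberOfElements += 1
        element = np.asarray(element, dtype=float)
        if self.mean is None:
            self.mean = element.copy()
        else:
            # Incremental update: mean += (element - mean) / n
            self.mean += (element - self.mean) / self.numberOfElements

Under that assumption, `cameraColorMeans[camera][color].mean` is a NumPy array once at least one training image has been added for that camera and color, and `None` before, which matches the guard above.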
@@ -85,7 +105,7 @@ imagesCamerasFilePaths = {camera: [f'{IMAGES_CAMERAS_FOLDER[camera]}/{imagesCame
# If `PREDICT_ONLY_ON_WHOLE_TRAINING_SET`, then compute the means of camera images to empower the `MEAN` denoiser.
# Otherwise initialize these means of camera images to `iterativeMean`.
if DENOISER == Denoiser.MEAN:
    cameraColorMeans = {camera: (getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else iterativeMean()) for camera in imagesCamerasFilePaths}
    cameraColorMeans = {camera: (getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else {color: iterativeMean() for color in Color}) for camera in imagesCamerasFilePaths}

from utils import silentTqdm
#tqdm = silentTqdm
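The replaced line changes the shape of `cameraColorMeans` in the incremental mode (`PREDICT_ONLY_ON_WHOLE_TRAINING_SET` false): the earlier version held a single accumulator per camera, whereas the training loop and `getImagePrnuEstimateNpArray` introduced in this commit index it as `cameraColorMeans[camera][color]`, so a per-color dictionary is needed. A small illustration of the difference, reusing the hypothetical `RunningMean` sketch above and made-up camera and color names:

cameras = ['cameraA', 'cameraB']
colors = ['red', 'green', 'blue']

# Old initialization: one accumulator per camera.
oldMeans = {camera: RunningMean() for camera in cameras}
# oldMeans['cameraA']['red'] raises TypeError: 'RunningMean' object is not subscriptable.

# New initialization: one accumulator per camera and per color,
# matching the cameraColorMeans[camera][color] lookups elsewhere in the script.
newMeans = {camera: {color: RunningMean() for color in colors} for camera in cameras}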
@@ -123,10 +143,16 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
            minColor, maxColor = updateExtremes(multipleColorsImage, minColor, maxColor)
            continue

        if DENOISER == Denoiser.MEAN:
            for color in Color:
                cameraColorMeans[camera][color].add(singleColorChannelImages[color])
        imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)

        cameraIterativeMean = camerasIterativeMean[camera]
        #print(f'{imagePrnuEstimateNpArray=}')
        #exit(2)
        cameraIterativeMean.add(imagePrnuEstimateNpArray)
        #continue
        # If we are considering the last camera and (not `PREDICT_ONLY_ON_WHOLE_TRAINING_SET` or we are considering the last training image), then we have processed an additional image for all cameras and we can predict the accuracy at this learning step.
        if cameraIndex == numberOfCameras - 1 and (not PREDICT_ONLY_ON_WHOLE_TRAINING_SET or cameraTrainingImageIndex == numberOfTrainingImages - 1):
            numberOfTrainingImagesAccuracy = 0
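The long comment above packs the evaluation trigger into a single condition. Restated as a standalone helper with the same variable meanings, purely as an illustration (this function does not exist in the repository):

def shouldEvaluateAccuracy(cameraIndex, numberOfCameras, cameraTrainingImageIndex, numberOfTrainingImages, predictOnlyOnWholeTrainingSet):
    # The current round is finished once the last camera has received its image...
    isLastCamera = cameraIndex == numberOfCameras - 1
    # ...and, when predicting only on the whole training set, accuracy is measured only after the last training image.
    isLastTrainingImage = cameraTrainingImageIndex == numberOfTrainingImages - 1
    return isLastCamera and (not predictOnlyOnWholeTrainingSet or isLastTrainingImage)

For example, with 2 cameras and 3 training images, incremental prediction evaluates after every round (cameraTrainingImageIndex 0, 1 and 2), whereas whole-training-set prediction evaluates once, after the last round.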
@@ -139,8 +165,16 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
        #plt.imsave(f'{escapeFilePath(actualCamera)}_{cameraTestingImageIndex}.png', cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex])
        # Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
        for camera in IMAGES_CAMERAS_FOLDER:
            distance = abs(scipy.stats.pearsonr(cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex].flatten(), camerasIterativeMean[camera].mean.flatten()).statistic - 1)
            print(f'{cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
            if DENOISER != Denoiser.MEAN:
                cameraTestingImageNoise = cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex]
            else:
                singleColorChannelImages = getSingleColorChannelImages(camera, numberOfTrainingImages + cameraTestingImageIndex)
                multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
                cameraTestingImageNoise = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)

            #print(f'{cameraTestingImageNoise = }')
            distance = rmsDiffNumpy(cameraTestingImageNoise, camerasIterativeMean[camera].mean)
            print(f'{cameraTrainingImageIndex=} {cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
            if minimalDistance is None or distance < minimalDistance:
                minimalDistance = distance
                cameraPredicted = camera
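This hunk replaces the Pearson-correlation distance with `rmsDiffNumpy`, whose implementation is not part of this diff. Assuming it computes a plain root-mean-square difference, a sketch would be:

import numpy as np

def rmsDiff(imageA, imageB):
    # Assumed behaviour of `rmsDiffNumpy`: root-mean-square difference of two equally shaped arrays.
    difference = np.asarray(imageA, dtype=float) - np.asarray(imageB, dtype=float)
    return np.sqrt(np.mean(difference ** 2))

Both metrics treat 0 as a perfect match: the removed expression abs(scipy.stats.pearsonr(...).statistic - 1) is 0 for perfectly correlated noise patterns, and the RMS difference is 0 for identical ones, so the `minimalDistance` selection of the closest camera works the same way.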