Compare commits


No commits in common. "36b17eb3cc94b3ed7d04257d7a31d5b64612e7b9" and "5e6702e007b708972421b356a5bed228fd530c04" have entirely different histories.

2 changed files with 13 additions and 90 deletions

View File

@@ -7,6 +7,7 @@ from utils import denoise, iterativeMean, getColorChannel, escapeFilePath, Color
import sys
import os
import random
import scipy
sys.path.insert(0, '../../algorithms/distance/')
@@ -43,14 +44,13 @@ camerasIterativeMean = {camera: iterativeMean() for camera in IMAGES_CAMERAS_FOL
# Compute the minimal color channel camera resolution.
# Assume that for each camera, its images have the same resolution.
# The following considers a given color channel resolution, assuming they all have the same resolution.
minimalColorChannelCameraResolution = (100, 100)#None
if minimalColorChannelCameraResolution is None:
    for camera in IMAGES_CAMERAS_FOLDER:
        imageFileName = imagesCamerasFileNames[camera][0]
        imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
        singleColorChannelImagesShape = getColorChannel(imageFilePath, Color.RED).shape
        if minimalColorChannelCameraResolution is None or singleColorChannelImagesShape < minimalColorChannelCameraResolution:
            minimalColorChannelCameraResolution = singleColorChannelImagesShape
minimalColorChannelCameraResolution = None
for camera in IMAGES_CAMERAS_FOLDER:
    imageFileName = imagesCamerasFileNames[camera][0]
    imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
    singleColorChannelImagesShape = getColorChannel(imageFilePath, Color.RED).shape
    if minimalColorChannelCameraResolution is None or singleColorChannelImagesShape < minimalColorChannelCameraResolution:
        minimalColorChannelCameraResolution = singleColorChannelImagesShape
minColor = 0#None
maxColor = 7952#None
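The hunk above scans one image per camera and keeps the smallest color-channel shape, relying on Python's lexicographic tuple comparison for `shape < minimalColorChannelCameraResolution`. A minimal standalone sketch of that scan, with made-up shapes instead of the repository's loaders:

```python
# Illustrative sketch only: the shapes below are invented; the real code obtains
# them via getColorChannel(imageFilePath, Color.RED).shape for one image per camera.
exampleShapes = {'cameraA': (3000, 4000), 'cameraB': (2000, 3008)}

minimalResolution = None
for camera, shape in exampleShapes.items():
    # Tuples compare lexicographically, so (2000, 3008) < (3000, 4000).
    if minimalResolution is None or shape < minimalResolution:
        minimalResolution = shape
print(minimalResolution)  # (2000, 3008)
```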
@@ -75,18 +75,7 @@ def getMultipleColorsImage(singleColorChannelImages):
    return multipleColorsImage
def getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera):
    singleColorChannelDenoisedImages = {}
    for color in Color:
        if DENOISER != Denoiser.MEAN:
            singleColorChannelDenoisedImage = denoise(singleColorChannelImages[color], DENOISER)
        else:
            cameraColorMean = cameraColorMeans[camera][color]
            if PREDICT_ONLY_ON_WHOLE_TRAINING_SET:
                singleColorChannelDenoisedImage = cameraColorMean
            else:
                cameraColorCurrentMean = cameraColorMean.mean
                singleColorChannelDenoisedImage = cameraColorCurrentMean
        singleColorChannelDenoisedImages[color] = singleColorChannelDenoisedImage
    singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) if DENOISER != Denoiser.MEAN else cameraColorMeans[camera][color] for color in Color}
    multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
    imagePrnuEstimateNpArray = multipleColorsImage - multipleColorsDenoisedImage
    return imagePrnuEstimateNpArray
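The function in this hunk estimates an image's PRNU contribution as the residual between the image and a denoised version of it, merged back through the Bayer filter. A minimal sketch of that residual idea, using a Gaussian blur from SciPy as a stand-in for the project's `denoise` helper (an assumption, not the repository's implementation):

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def noiseResidual(image, sigma = 1.0):
    # The residual keeps high-frequency content such as a sensor's PRNU pattern.
    denoised = gaussian_filter(image, sigma = sigma)
    return image - denoised

rng = np.random.default_rng(0)
flatField = 0.5 + 0.01 * rng.standard_normal((100, 100))
print(noiseResidual(flatField).shape)  # (100, 100)
```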
@@ -96,24 +85,13 @@ imagesCamerasFilePaths = {camera: [f'{IMAGES_CAMERAS_FOLDER[camera]}/{imagesCame
# If `PREDICT_ONLY_ON_WHOLE_TRAINING_SET`, then compute the means of camera images to empower the `MEAN` denoiser.
# Otherwise initialize these means of camera images to `iterativeMean`.
if DENOISER == Denoiser.MEAN:
    cameraColorMeans = {camera: (getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else {color: iterativeMean() for color in Color}) for camera in imagesCamerasFilePaths}
    cameraColorMeans = {camera: (getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else iterativeMean()) for camera in imagesCamerasFilePaths}
from utils import silentTqdm
#tqdm = silentTqdm
returnSingleColorChannelImage = lambda singleColorChannelImage, _minColor, _maxColor: singleColorChannelImage
# Assume `{min,max}Color` are hardcoded.
print('Load training images to memory')
rescaleIfNeeded = rescaleRawImageForDenoiser
cameraTrainingImages = {}
for cameraTrainingImageIndex in tqdm(range(numberOfTrainingImages), 'Camera training image index'):
    for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
        singleColorChannelImages = getSingleColorChannelImages(camera, cameraTrainingImageIndex)
        multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
        cameraTrainingImages[camera] = cameraTrainingImages.get(camera, []) + [multipleColorsImage]
print('Training images loaded to memory')
# 2 loops:
# - the first one is about computing `{min,max}Color`
# - the second one is about estimating the PRNU of each camera better and better, as we consider more and more training images, and measuring the resulting camera attribution
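As the comment above describes, the main loop runs twice: a first pass only collects the global `{min,max}Color` extremes (skipped when they are hardcoded), and a second pass trains the per-camera PRNU estimates using them. A minimal sketch of that two-pass pattern, with made-up data and an inline rescaling in place of the project's helpers:

```python
import numpy as np

images = [np.random.default_rng(seed).integers(0, 8000, (4, 4)) for seed in range(3)]

minColor, maxColor = None, None
for computeExtremes in [True, False]:
    for image in images:
        if computeExtremes:
            # First pass: only update the extremes, as updateExtremes does.
            minColor = image.min() if minColor is None else min(minColor, image.min())
            maxColor = image.max() if maxColor is None else max(maxColor, image.max())
            continue
        # Second pass: use the extremes, e.g. to rescale the image for the denoiser.
        rescaled = (image - minColor) / (maxColor - minColor)
print(minColor, maxColor)
```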
@@ -145,17 +123,10 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
        minColor, maxColor = updateExtremes(multipleColorsImage, minColor, maxColor)
        continue
    if DENOISER == Denoiser.MEAN:
        for color in Color:
            cameraColorMeans[camera][color].add(singleColorChannelImages[color])
    imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
    cameraIterativeMean = camerasIterativeMean[camera]
    if DENOISER != Denoiser.MEAN or PREDICT_ONLY_ON_WHOLE_TRAINING_SET:
        cameraIterativeMean.add(imagePrnuEstimateNpArray)
    else:
        # Still use `cameraIterativeMean` to simplify the implementation.
        cameraIterativeMean.mean = np.mean([cameraTrainingImage - mergeSingleColorChannelImagesAccordingToBayerFilter({color: cameraColorMeans[camera][color].mean for color in Color}) for cameraTrainingImage in cameraTrainingImages[camera][:cameraTrainingImageIndex + 1]], axis = 0)
    cameraIterativeMean.add(imagePrnuEstimateNpArray)
    # If we are considering the last camera and (not `PREDICT_ONLY_ON_WHOLE_TRAINING_SET` or we are considering the last training image), then we have processed an additional image for all cameras and can measure the accuracy at this learning step.
    if cameraIndex == numberOfCameras - 1 and (not PREDICT_ONLY_ON_WHOLE_TRAINING_SET or cameraTrainingImageIndex == numberOfTrainingImages - 1):
        numberOfTrainingImagesAccuracy = 0
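`camerasIterativeMean` and `cameraColorMeans` rely on the project's `iterativeMean` helper, whose code is not shown in this diff; the sketch below assumes it behaves like a running mean with an `add` method and a `mean` attribute, updated one array at a time:

```python
import numpy as np

class RunningMean:
    """Assumed behavior of `iterativeMean`: incremental mean over equally weighted arrays."""
    def __init__(self):
        self.mean = None
        self.count = 0

    def add(self, value):
        self.count += 1
        if self.mean is None:
            self.mean = np.array(value, dtype = float)
        else:
            # Incremental update: mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n
            self.mean += (value - self.mean) / self.count

runningMean = RunningMean()
for value in (1.0, 2.0, 3.0):
    runningMean.add(np.full((2, 2), value))
print(runningMean.mean)  # every entry equals 2.0
```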
@@ -168,15 +139,8 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
    #plt.imsave(f'{escapeFilePath(actualCamera)}_{cameraTestingImageIndex}.png', cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex])
    # Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
    for camera in IMAGES_CAMERAS_FOLDER:
        if DENOISER != Denoiser.MEAN:
            cameraTestingImageNoise = cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex]
        else:
            singleColorChannelImages = getSingleColorChannelImages(camera, numberOfTrainingImages + cameraTestingImageIndex)
            multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
            cameraTestingImageNoise = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
        distance = rmsDiffNumpy(cameraTestingImageNoise, camerasIterativeMean[camera].mean)
        print(f'{cameraTrainingImageIndex=} {cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
        distance = abs(scipy.stats.pearsonr(cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex].flatten(), camerasIterativeMean[camera].mean.flatten()).statistic - 1)
        print(f'{cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
        if minimalDistance is None or distance < minimalDistance:
            minimalDistance = distance
            cameraPredicted = camera
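This hunk replaces the RMS-difference distance (`rmsDiffNumpy`) with a correlation-based one, `abs(pearsonr(noise, prnu) - 1)`: a correlation close to 1 gives a distance close to 0, and the camera with the smallest distance is predicted. A minimal sketch of both distances on toy arrays (the helper names are illustrative; the `.statistic` attribute requires SciPy >= 1.9):

```python
import numpy as np
import scipy.stats

def rmsDiff(a, b):
    return float(np.sqrt(np.mean((a - b) ** 2)))

def correlationDistance(a, b):
    # abs(r - 1): 0 when perfectly correlated, up to 2 when anti-correlated.
    return abs(scipy.stats.pearsonr(a.flatten(), b.flatten()).statistic - 1)

rng = np.random.default_rng(0)
prnuEstimate = rng.standard_normal((100, 100))
sameCameraNoise = prnuEstimate + 0.5 * rng.standard_normal((100, 100))
otherCameraNoise = rng.standard_normal((100, 100))
print(correlationDistance(prnuEstimate, sameCameraNoise) < correlationDistance(prnuEstimate, otherCameraNoise))  # True
print(rmsDiff(prnuEstimate, sameCameraNoise) < rmsDiff(prnuEstimate, otherCameraNoise))  # True
```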

View File

@@ -1,41 +0,0 @@
#!/usr/bin/env python
from utils import getColorChannel, Color
import os
from tqdm import tqdm
import numpy as np
from enum import Enum, auto
IMAGES_CAMERAS_FOLDER = {
    'RAISE': 'flat-field/nef',
    'Rafael 23/04/24': 'rafael/230424',
}
class Operation(Enum):
    LOAD_RAW = auto()
    SAVE = auto()
    LOAD_NPY = auto()
OPERATION = Operation.LOAD_RAW
RESOLUTION = 100
for camera in tqdm(IMAGES_CAMERAS_FOLDER, 'Camera'):
    imagesCameraFolder = IMAGES_CAMERAS_FOLDER[camera]
    for file in tqdm(os.listdir(imagesCameraFolder), 'Image'):
        if file.endswith('.NEF'):
            #print(file)
            imageFilePath = f'{imagesCameraFolder}/{file}'
            numpyFilePath = f'{imageFilePath}.npy'
            for color in Color:
                if OPERATION in [Operation.LOAD_RAW, Operation.SAVE]:
                    rawColorChannel = getColorChannel(imageFilePath, color)
                if OPERATION == Operation.SAVE:
                    np.save(numpyFilePath, {color: rawColorChannel})
                if OPERATION == Operation.LOAD_NPY:
                    rawColorChannel = np.load(numpyFilePath, allow_pickle = True)
                    #print(type(rawColorChannel))
                    #print(rawColorChannel)
                    #print(rawColorChannel.shape)
                    print(rawColorChannel.item()[color].mean())
        break
    break
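The deleted script caches each raw color channel by saving a `{color: array}` dict with `np.save`, which stores it as a 0-d object array; reading it back therefore needs `allow_pickle = True` and `.item()` to recover the dict, as the script's `LOAD_NPY` branch does. A minimal round-trip sketch with an illustrative path and key:

```python
import numpy as np

channel = np.zeros((100, 100), dtype = np.uint16)
np.save('example.NEF.npy', {'RED': channel})  # the dict is pickled inside a 0-d object array

loaded = np.load('example.NEF.npy', allow_pickle = True)
print(loaded.item()['RED'].mean())  # 0.0
```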