Compare commits

...

7 Commits

Author | SHA1 | Message | Date
Benjamin Loison | 36b17eb3cc | #62: Add benchmark_load_part_of_images.py | 2024-05-13 17:52:49 +02:00
Benjamin Loison | fb0f78e069 | Add the ability to hardcode resolution | 2024-05-13 16:27:53 +02:00
Benjamin Loison | 86093a40c0 | Load training images to memory to ease implementation and make execution faster | 2024-05-13 16:26:58 +02:00
Benjamin Loison | 54178c1101 | #72: Verify actual data type pointer | 2024-05-13 16:10:11 +02:00
Benjamin Loison | 50058d5d2e | #72: Verify pointer before actually implementing the correct second choice | 2024-05-13 15:55:59 +02:00
Benjamin Loison | dbfc2756b2 | Remove commented code | 2024-05-13 15:21:30 +02:00
Benjamin Loison | 31704c6e78 | #72: Implement, possibly incorrectly, first choice | 2024-05-13 15:20:17 +02:00
2 changed files with 90 additions and 13 deletions

View File

@@ -7,7 +7,6 @@ from utils import denoise, iterativeMean, getColorChannel, escapeFilePath, Color
import sys
import os
import random
import scipy
sys.path.insert(0, '../../algorithms/distance/')
@@ -44,13 +43,14 @@ camerasIterativeMean = {camera: iterativeMean() for camera in IMAGES_CAMERAS_FOL
# Compute the minimal color channel camera resolution.
# Assume that for each camera, its images have the same resolution.
# The following considers a single color channel's resolution, assuming all color channels have the same resolution.
minimalColorChannelCameraResolution = None
for camera in IMAGES_CAMERAS_FOLDER:
    imageFileName = imagesCamerasFileNames[camera][0]
    imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
    singleColorChannelImagesShape = getColorChannel(imageFilePath, Color.RED).shape
    if minimalColorChannelCameraResolution is None or singleColorChannelImagesShape < minimalColorChannelCameraResolution:
        minimalColorChannelCameraResolution = singleColorChannelImagesShape
minimalColorChannelCameraResolution = (100, 100)#None
if minimalColorChannelCameraResolution is None:
    for camera in IMAGES_CAMERAS_FOLDER:
        imageFileName = imagesCamerasFileNames[camera][0]
        imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
        singleColorChannelImagesShape = getColorChannel(imageFilePath, Color.RED).shape
        if minimalColorChannelCameraResolution is None or singleColorChannelImagesShape < minimalColorChannelCameraResolution:
            minimalColorChannelCameraResolution = singleColorChannelImagesShape
minColor = 0#None
maxColor = 7952#None
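Note that the `<` comparison between `.shape` tuples in the fallback loop above is lexicographic, so the "minimal" resolution is decided by the first differing dimension rather than by total pixel count. A standalone illustration (not code from the repository):

```python
# Lexicographic tuple comparison: the first differing dimension decides,
# even when the "smaller" shape has more pixels overall.
shapes = [(3000, 4000), (2848, 4256), (100, 100)]
print(min(shapes))                    # (100, 100)
print((2848, 4256) < (3000, 4000))    # True, although 2848 * 4256 > 3000 * 4000
```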
@@ -75,7 +75,18 @@ def getMultipleColorsImage(singleColorChannelImages):
    return multipleColorsImage
def getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera):
    singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) if DENOISER != Denoiser.MEAN else cameraColorMeans[camera][color] for color in Color}
    singleColorChannelDenoisedImages = {}
    for color in Color:
        if DENOISER != Denoiser.MEAN:
            singleColorChannelDenoisedImage = denoise(singleColorChannelImages[color], DENOISER)
        else:
            cameraColorMean = cameraColorMeans[camera][color]
            if PREDICT_ONLY_ON_WHOLE_TRAINING_SET:
                singleColorChannelDenoisedImage = cameraColorMean
            else:
                cameraColorCurrentMean = cameraColorMean.mean
                singleColorChannelDenoisedImage = cameraColorCurrentMean
        singleColorChannelDenoisedImages[color] = singleColorChannelDenoisedImage
    multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
    imagePrnuEstimateNpArray = multipleColorsImage - multipleColorsDenoisedImage
    return imagePrnuEstimateNpArray
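The `iterativeMean` helper comes from `utils` and is not shown in this comparison; the surrounding hunks only rely on it exposing an `add` method and a `mean` attribute. A minimal sketch of such a running-mean accumulator, under that assumed interface (not the repository implementation):

```python
import numpy as np

class RunningMeanSketch:
    """Incremental mean over equally weighted arrays (assumed `add`/`mean` interface)."""
    def __init__(self):
        self.mean = None
        self.count = 0

    def add(self, array):
        # mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n
        self.count += 1
        if self.mean is None:
            self.mean = np.asarray(array, dtype = np.float64).copy()
        else:
            self.mean += (np.asarray(array, dtype = np.float64) - self.mean) / self.count
```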
@@ -85,13 +96,24 @@ imagesCamerasFilePaths = {camera: [f'{IMAGES_CAMERAS_FOLDER[camera]}/{imagesCame
# If `PREDICT_ONLY_ON_WHOLE_TRAINING_SET`, then compute the means of camera images to empower the `MEAN` denoiser.
# Otherwise initialize these means of camera images to `iterativeMean`.
if DENOISER == Denoiser.MEAN:
    cameraColorMeans = {camera: (getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else iterativeMean()) for camera in imagesCamerasFilePaths}
    cameraColorMeans = {camera: (getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else {color: iterativeMean() for color in Color}) for camera in imagesCamerasFilePaths}
from utils import silentTqdm
#tqdm = silentTqdm
returnSingleColorChannelImage = lambda singleColorChannelImage, _minColor, _maxColor: singleColorChannelImage
# Assume `{min,max}Color` are hardcoded.
print('Load training images to memory')
rescaleIfNeeded = rescaleRawImageForDenoiser
cameraTrainingImages = {}
for cameraTrainingImageIndex in tqdm(range(numberOfTrainingImages), 'Camera training image index'):
    for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
        singleColorChannelImages = getSingleColorChannelImages(camera, cameraTrainingImageIndex)
        multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
        cameraTrainingImages[camera] = cameraTrainingImages.get(camera, []) + [multipleColorsImage]
print('Training images loaded to memory')
# 2 loops:
# - the first one is about computing `{min,max}Color`
# - the second one is about estimating the PRNU of each camera better and better, as more and more training images are considered, and measuring the resulting camera attribution accuracy
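Schematically, the two loops described in this comment nest as below (a simplified standalone sketch with placeholder values, not the actual file):

```python
# Pass 1 (only if the extremes are unknown): scan training images to find the
# global {min,max}Color. Pass 2: grow each camera's PRNU estimate one training
# image at a time and evaluate attribution after every step.
minColor, maxColor = None, None
numberOfTrainingImages = 3
cameras = ['RAISE', 'Rafael 23/04/24']

for computeExtremes in ([True] if minColor is None or maxColor is None else []) + [False]:
    for cameraTrainingImageIndex in range(numberOfTrainingImages):
        for cameraIndex, camera in enumerate(cameras):
            if computeExtremes:
                minColor, maxColor = 0, 7952   # placeholder for updateExtremes(...)
                continue
            # Refine this camera's PRNU estimate with one more training image here;
            # once the last camera got it, measure attribution at this learning step.
            if cameraIndex == len(cameras) - 1:
                print(f'evaluate attribution after {cameraTrainingImageIndex + 1} training image(s)')
```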
@@ -123,10 +145,17 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
                minColor, maxColor = updateExtremes(multipleColorsImage, minColor, maxColor)
                continue
            if DENOISER == Denoiser.MEAN:
                for color in Color:
                    cameraColorMeans[camera][color].add(singleColorChannelImages[color])
            imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
            cameraIterativeMean = camerasIterativeMean[camera]
            if DENOISER != Denoiser.MEAN or PREDICT_ONLY_ON_WHOLE_TRAINING_SET:
                cameraIterativeMean.add(imagePrnuEstimateNpArray)
            else:
                # Still use `cameraIterativeMean` to simplify the implementation.
                cameraIterativeMean.mean = np.mean([cameraTrainingImage - mergeSingleColorChannelImagesAccordingToBayerFilter({color: cameraColorMeans[camera][color].mean for color in Color}) for cameraTrainingImage in cameraTrainingImages[camera][:cameraTrainingImageIndex + 1]], axis = 0)
            # If we are considering the last camera and (not `PREDICT_ONLY_ON_WHOLE_TRAINING_SET` or we are considering the last training image), then we have processed an additional image for every camera and can measure the prediction accuracy at this learning step.
            if cameraIndex == numberOfCameras - 1 and (not PREDICT_ONLY_ON_WHOLE_TRAINING_SET or cameraTrainingImageIndex == numberOfTrainingImages - 1):
                numberOfTrainingImagesAccuracy = 0
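A side note on the `else` branch above: since the merged per-color mean is the same constant for every stored training image, averaging `image - constant` over the images seen so far equals `mean(images) - constant`. A standalone check of that identity:

```python
import numpy as np

rng = np.random.default_rng(0)
images = [rng.normal(size = (4, 4)) for _ in range(5)]
constantMean = rng.normal(size = (4, 4))

lhs = np.mean([image - constantMean for image in images], axis = 0)
rhs = np.mean(images, axis = 0) - constantMean
print(np.allclose(lhs, rhs))  # True
```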
@@ -139,8 +168,15 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
                        #plt.imsave(f'{escapeFilePath(actualCamera)}_{cameraTestingImageIndex}.png', cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex])
                        # Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
                        for camera in IMAGES_CAMERAS_FOLDER:
                            distance = abs(scipy.stats.pearsonr(cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex].flatten(), camerasIterativeMean[camera].mean.flatten()).statistic - 1)
                            print(f'{cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
                            if DENOISER != Denoiser.MEAN:
                                cameraTestingImageNoise = cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex]
                            else:
                                singleColorChannelImages = getSingleColorChannelImages(camera, numberOfTrainingImages + cameraTestingImageIndex)
                                multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
                                cameraTestingImageNoise = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
                            distance = rmsDiffNumpy(cameraTestingImageNoise, camerasIterativeMean[camera].mean)
                            print(f'{cameraTrainingImageIndex=} {cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
                            if minimalDistance is None or distance < minimalDistance:
                                minimalDistance = distance
                                cameraPredicted = camera
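`rmsDiffNumpy` replaces the Pearson-correlation distance in this hunk and presumably comes from the `../../algorithms/distance/` path added to `sys.path` in the first hunk; it is not shown in this comparison. Assuming it is the usual root-mean-square difference (lower means closer, consistent with keeping the smallest `distance` as the predicted camera), an equivalent sketch:

```python
import numpy as np

def rmsDiffSketch(a, b):
    # Root-mean-square difference between two equally shaped arrays
    # (assumed behaviour of the imported `rmsDiffNumpy`; sketch only).
    difference = np.asarray(a, dtype = np.float64) - np.asarray(b, dtype = np.float64)
    return np.sqrt(np.mean(difference ** 2))
```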

View File

@@ -0,0 +1,41 @@
#!/usr/bin/env python

from utils import getColorChannel, Color
import os
from tqdm import tqdm
import numpy as np
from enum import Enum, auto

IMAGES_CAMERAS_FOLDER = {
    'RAISE': 'flat-field/nef',
    'Rafael 23/04/24': 'rafael/230424',
}

class Operation(Enum):
    LOAD_RAW = auto()
    SAVE = auto()
    LOAD_NPY = auto()

OPERATION = Operation.LOAD_RAW
RESOLUTION = 100

for camera in tqdm(IMAGES_CAMERAS_FOLDER, 'Camera'):
    imagesCameraFolder = IMAGES_CAMERAS_FOLDER[camera]
    for file in tqdm(os.listdir(imagesCameraFolder), 'Image'):
        if file.endswith('.NEF'):
            #print(file)
            imageFilePath = f'{imagesCameraFolder}/{file}'
            numpyFilePath = f'{imageFilePath}.npy'
            for color in Color:
                if OPERATION in [Operation.LOAD_RAW, Operation.SAVE]:
                    rawColorChannel = getColorChannel(imageFilePath, color)
                if OPERATION == Operation.SAVE:
                    np.save(numpyFilePath, {color: rawColorChannel})
                if OPERATION == Operation.LOAD_NPY:
                    rawColorChannel = np.load(numpyFilePath, allow_pickle = True)
                    #print(type(rawColorChannel))
                    #print(rawColorChannel)
                    #print(rawColorChannel.shape)
                    print(rawColorChannel.item()[color].mean())
            break
        break
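The `.item()[color]` call at the end works because `np.save` is given a plain `dict`, which NumPy stores as a 0-d object array; hence `allow_pickle = True` on load and `.item()` to get the dictionary back. A standalone illustration of that round trip (hypothetical file path and key):

```python
import numpy as np

payload = {'RED': np.zeros((4, 4))}
np.save('/tmp/example_channel.npy', payload)                # stored as a 0-d object array

loaded = np.load('/tmp/example_channel.npy', allow_pickle = True)
print(type(loaded), loaded.shape)                           # <class 'numpy.ndarray'> ()
print(loaded.item()['RED'].mean())                          # 0.0
```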