Add comments to attribute_source_camera.py

This commit is contained in:
Benjamin Loison 2024-05-13 02:48:22 +02:00
parent 226936020e
commit b2ffc20473
No known key found for this signature in database

View File

@ -12,6 +12,8 @@ sys.path.insert(0, '../../algorithms/distance/')
from rms_diff import rmsDiffNumpy
# Configuration:
DENOISER = Denoiser.MEAN
IMAGES_CAMERAS_FOLDER = {
'RAISE': 'flat-field/nef',
@ -23,11 +25,13 @@ PREDICT_ONLY_ON_WHOLE_TRAINING_SET = False
setting = ','.join([escapeFilePath(imageCameraFolder) for imageCameraFolder in IMAGES_CAMERAS_FOLDER]) + f'_{DENOISER}'
imagesCamerasFileNames = {camera: os.listdir(imageCameraFolder) for camera, imageCameraFolder in IMAGES_CAMERAS_FOLDER.items()}
# Fix randomness for reproducibility.
random.seed(0)
# To not have a bias (chronological for instance) when split to make training and testing sets.
# Randomize order to not have a bias (chronological for instance) when split to make training and testing sets.
for camera in IMAGES_CAMERAS_FOLDER:
random.shuffle(imagesCamerasFileNames[camera])
# Limit the number of images per camera to that of the camera having the fewest images.
minimumNumberOfImagesCameras = min([len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER])
for camera in IMAGES_CAMERAS_FOLDER:
imagesCamerasFileNames[camera] = imagesCamerasFileNames[camera][:minimumNumberOfImagesCameras]
@ -36,6 +40,7 @@ for camera in IMAGES_CAMERAS_FOLDER:
numberOfCameras = len(IMAGES_CAMERAS_FOLDER)
camerasIterativeMean = {camera: iterativeMean() for camera in IMAGES_CAMERAS_FOLDER}
# Compute the minimal color channel camera resolution.
# Assume that for each camera, its images have the same resolution.
# The following considers a given color channel resolution, assuming all color channels have the same resolution.
minimalColorChannelCameraResolution = None
@ -75,28 +80,40 @@ def getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, c
return imagePrnuEstimateNpArray
imagesCamerasFilePaths = {camera: [f'{IMAGES_CAMERAS_FOLDER[camera]}/{imagesCamerasFileName}' for imagesCamerasFileName in imagesCamerasFileNames[camera]] for camera in imagesCamerasFileNames}
cameraColorMeans = {camera: getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) for camera in imagesCamerasFilePaths}
# If the denoiser is `MEAN`:
# If `PREDICT_ONLY_ON_WHOLE_TRAINING_SET`, then compute the means of camera images to empower the `MEAN` denoiser.
# Otherwise initialize these means of camera images to `iterativeMean`.
if DENOISER == Denoiser.MEAN:
cameraColorMeans = {camera: (getColorMeans(imagesCamerasFilePaths[camera][:numberOfTrainingImages], Color, minimalColorChannelCameraResolution) if PREDICT_ONLY_ON_WHOLE_TRAINING_SET else iterativeMean()) for camera in imagesCamerasFilePaths}
from utils import silentTqdm
#tqdm = silentTqdm
returnSingleColorChannelImage = lambda singleColorChannelImage, _minColor, _maxColor: singleColorChannelImage
# 2 loops:
# - the first one is about computing `{min,max}Color`
# - the second one is about estimating better and better the PRNU of each camera, as it considers more and more training images, and measuring the resulting camera attribution accuracy
for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else []) + [False], 'Compute extremes'):
rescaleIfNeeded = returnSingleColorChannelImage if computeExtremes else rescaleRawImageForDenoiser
# The second loop first computes the noises of the testing images.
# What about `MEAN` denoiser condition?
if not computeExtremes:
print(f'{minColor=} {maxColor=}')
print('Extracting noise of testing images')
for camera in tqdm(IMAGES_CAMERAS_FOLDER, 'Camera'):
for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
print(f'{camera=} {numberOfTrainingImages + cameraTestingImageIndex=}')
if DENOISER != Denoiser.MEAN or PREDICT_ONLY_ON_WHOLE_TRAINING_SET:
print('Extracting noise of testing images')
for camera in tqdm(IMAGES_CAMERAS_FOLDER, 'Camera'):
for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
print(f'{camera=} {numberOfTrainingImages + cameraTestingImageIndex=}')
singleColorChannelImages = getSingleColorChannelImages(camera, numberOfTrainingImages + cameraTestingImageIndex)
multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
singleColorChannelImages = getSingleColorChannelImages(camera, numberOfTrainingImages + cameraTestingImageIndex)
multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage, camera)
cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [imagePrnuEstimateNpArray]
cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [imagePrnuEstimateNpArray]
# The first loop computes `{min,max}Color` based on the testing images.
# The second loop improves the PRNU estimation by considering an additional training image and, if needed, measures the accuracy of the current learning step.
for cameraTrainingImageIndex in tqdm(range(minimumNumberOfImagesCameras if computeExtremes else numberOfTrainingImages), 'Camera training image index'):
for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
singleColorChannelImages = getSingleColorChannelImages(camera, cameraTrainingImageIndex)
@ -110,6 +127,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
cameraIterativeMean = camerasIterativeMean[camera]
cameraIterativeMean.add(imagePrnuEstimateNpArray)
# If we are considering the last camera and (not `PREDICT_ONLY_ON_WHOLE_TRAINING_SET` or we are considering the last training image), then we have processed an additional image for all cameras and we can measure the accuracy at this learning step.
if cameraIndex == numberOfCameras - 1 and (not PREDICT_ONLY_ON_WHOLE_TRAINING_SET or cameraTrainingImageIndex == numberOfTrainingImages - 1):
numberOfTrainingImagesAccuracy = 0
print(f'{numberOfTestingImages=} {numberOfCameras=}')
@ -118,6 +136,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
cameraPredicted = None
minimalDistance = None
#plt.imsave(f'{escapeFilePath(actualCamera)}_{cameraTestingImageIndex}.png', cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex])
# Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
for camera in IMAGES_CAMERAS_FOLDER:
distance = rmsDiffNumpy(cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex], camerasIterativeMean[camera].mean)
@ -130,9 +149,11 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
numberOfTrainingImagesAccuracy += 1
accuracy += [numberOfTrainingImagesAccuracy / (numberOfTestingImages * numberOfCameras)]
# Save the estimated PRNU of each camera after having trained on the whole training set.
for camera in IMAGES_CAMERAS_FOLDER:
plt.imsave(f'{setting}_estimated_prnu_camera_{escapeFilePath(camera)}.png', (camerasIterativeMean[camera].mean))
# Plot and save the accuracy of camera source attribution thanks to a given number of images to estimate PRNUs with `DENOISER` denoiser.
plt.title(f'Accuracy of camera source attribution thanks to a given number of images to estimate PRNUs with {DENOISER} denoiser')
plt.xlabel('Number of images to estimate PRNU')
plt.ylabel('Accuracy of camera source attribution')