#!/usr/bin/env python
|
|
|
|
import numpy as np
|
|
import matplotlib.pyplot as plt
|
|
from tqdm import tqdm
|
|
from utils import denoise, iterativeMean, getColorChannel, escapeFilePath, Color, mergeSingleColorChannelImagesAccordingToBayerFilter, rescaleRawImageForDenoiser, updateExtremes, saveNpArray
|
|
import sys
|
|
import os
|
|
import random
|
|
|
|
sys.path.insert(0, '../../algorithms/distance/')
|
|
|
|
from rms_diff import rmsDiffNumpy
|
|
|
|
# Denoiser used to extract each image's noise residue (PRNU estimate).
DENOISER = 'wavelet'

# Maps a human-readable camera name to the folder containing its raw images.
IMAGES_CAMERAS_FOLDER = {
    'RAISE': 'flat-field/nef',
    'Rafael 23/04/24': 'rafael/230424',
}

# Fraction of each camera's images used to estimate its PRNU (the rest is for testing).
TRAINING_PORTION = 0.5

# File-name prefix identifying this run's configuration (cameras + denoiser).
# Note: iterating the dict yields the camera names (keys).
setting = ','.join(escapeFilePath(camera) for camera in IMAGES_CAMERAS_FOLDER) + f'_{DENOISER}'
|
|
|
|
# List the image file names available for each camera.
imagesCamerasFileNames = {
    camera: os.listdir(folder)
    for camera, folder in IMAGES_CAMERAS_FOLDER.items()
}

# Deterministic shuffle: avoids a bias (chronological for instance) when
# splitting into training and testing sets, while keeping runs reproducible.
random.seed(0)
for camera in IMAGES_CAMERAS_FOLDER:
    random.shuffle(imagesCamerasFileNames[camera])

# Cap on images per camera. NOTE(review): hard-coded to 16; the commented-out
# expression suggests it was meant to be the minimum count across cameras.
minimumNumberOfImagesCameras = 16  # min(len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER)
for camera in IMAGES_CAMERAS_FOLDER:
    imagesCamerasFileNames[camera] = imagesCamerasFileNames[camera][:minimumNumberOfImagesCameras]
    print(camera, imagesCamerasFileNames[camera])
|
|
|
|
numberOfCameras = len(IMAGES_CAMERAS_FOLDER)

# One running mean of noise residues per camera; each converges to that
# camera's PRNU estimate as training images are added.
camerasIterativeMean = {camera: iterativeMean() for camera in IMAGES_CAMERAS_FOLDER}

# Assume that for each camera, its images have the same resolution.
# Find the smallest single-color-channel resolution across cameras (probed on
# the red channel, assuming all channels share one resolution) so every image
# can be cropped to a common shape.
# NOTE(review): `<` on shape tuples is lexicographic, not elementwise — confirm
# that is the intended ordering.
minimalColorChannelCameraResolution = None
for camera in IMAGES_CAMERAS_FOLDER:
    firstImageFileName = imagesCamerasFileNames[camera][0]
    firstImageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{firstImageFileName}'
    channelShape = getColorChannel(firstImageFilePath, Color.RED).shape
    if minimalColorChannelCameraResolution is None or channelShape < minimalColorChannelCameraResolution:
        minimalColorChannelCameraResolution = channelShape

# Color extremes over training images; when None they are computed by a first
# pass (previously cached values: 13 and 7497).
minColor = None
maxColor = None

# Attribution accuracy as a function of the number of training images used.
accuracy = []

numberOfTrainingImages = int(minimumNumberOfImagesCameras * TRAINING_PORTION)
numberOfTestingImages = minimumNumberOfImagesCameras - numberOfTrainingImages

# Noise residues of the testing images, keyed by camera.
cameraTestingImagesNoise = {}
|
|
|
|
def getImageFilePath(camera, cameraImageIndex):
    """Return the path of image number `cameraImageIndex` of `camera`."""
    folder = IMAGES_CAMERAS_FOLDER[camera]
    fileName = imagesCamerasFileNames[camera][cameraImageIndex]
    return f'{folder}/{fileName}'
|
|
|
|
def getSingleColorChannelImages(camera, cameraImageIndex):
    """Load every Bayer color channel of the given camera image, cropped to the
    common minimal resolution and passed through `rescaleIfNeeded` with the
    current color extremes."""
    imageFilePath = getImageFilePath(camera, cameraImageIndex)
    height = minimalColorChannelCameraResolution[0]
    width = minimalColorChannelCameraResolution[1]
    singleColorChannelImages = {}
    for color in Color:
        channel = getColorChannel(imageFilePath, color)[:height, :width]
        singleColorChannelImages[color] = rescaleIfNeeded(channel, minColor, maxColor)
    return singleColorChannelImages
|
|
|
|
def getMultipleColorsImage(singleColorChannelImages):
    """Recombine the per-color channel images into a single image following the
    Bayer filter layout."""
    return mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelImages)
|
|
|
|
def getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage):
    """Estimate the image's PRNU as the residue between the image and its
    denoised version (denoised per channel, then recombined)."""
    singleColorChannelDenoisedImages = {
        color: denoise(image, DENOISER)
        for color, image in singleColorChannelImages.items()
    }
    multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
    return multipleColorsImage - multipleColorsDenoisedImage
|
|
|
|
from utils import silentTqdm
|
|
#tqdm = silentTqdm
|
|
|
|
# Identity rescaler used during the extremes-computing pass: returns the
# channel unchanged and ignores the (not yet known) extremes.
def returnSingleColorChannelImage(singleColorChannelImage, _minColor, _maxColor):
    return singleColorChannelImage
|
|
|
|
# Run at most two passes: a first pass computing the color extremes (needed to
# rescale raw images for the denoiser) when they are unknown, then the actual
# training/testing pass.
for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else []) + [False], 'Compute extremes'):
    # During the extremes pass, channels must not be rescaled: the extremes are
    # precisely what is being computed.
    rescaleIfNeeded = returnSingleColorChannelImage if computeExtremes else rescaleRawImageForDenoiser
    if not computeExtremes:
        print(f'{minColor=} {maxColor=}')
        print('Extracting noise of testing images')
        # Extract once the noise residue of every testing image (indices after
        # the training images in each camera's shuffled file list).
        for camera in tqdm(IMAGES_CAMERAS_FOLDER, 'Camera'):
            for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
                print(f'{camera=} {numberOfTrainingImages + cameraTestingImageIndex=}')

                singleColorChannelImages = getSingleColorChannelImages(camera, numberOfTrainingImages + cameraTestingImageIndex)
                multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)

                # Fix: was `getImagePrnuEstimatedNpArray`, an undefined name
                # (the helper is `getImagePrnuEstimateNpArray`) -> NameError.
                imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage)

                cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [imagePrnuEstimateNpArray]
    # Training pass: during the extremes pass all images are scanned; otherwise
    # only the training portion is used.
    for cameraTrainingImageIndex in tqdm(range(minimumNumberOfImagesCameras if computeExtremes else numberOfTrainingImages), 'Camera training image index'):
        for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
            singleColorChannelImages = getSingleColorChannelImages(camera, cameraTrainingImageIndex)
            multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)

            if computeExtremes:
                minColor, maxColor = updateExtremes(multipleColorsImage, minColor, maxColor)
                continue

            # Fix: same undefined-name call as above.
            imagePrnuEstimateNpArray = getImagePrnuEstimateNpArray(singleColorChannelImages, multipleColorsImage)

            # Fold this training image's residue into the camera's PRNU estimate.
            cameraIterativeMean = camerasIterativeMean[camera]
            cameraIterativeMean.add(imagePrnuEstimateNpArray)
            # Once the current training image has been added for every camera,
            # evaluate attribution accuracy with the PRNU estimates so far.
            if cameraIndex == numberOfCameras - 1:
                numberOfTrainingImagesAccuracy = 0
                print(f'{numberOfTestingImages=} {numberOfCameras=}')
                # Loop over each camera testing image folder.
                for actualCamera in IMAGES_CAMERAS_FOLDER:
                    for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
                        cameraPredicted = None
                        minimalDistance = None
                        # Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
                        # NOTE(review): this `camera` shadows the enclosing loop
                        # variable; harmless here since this only runs on the
                        # last camera of the enclosing loop.
                        for camera in IMAGES_CAMERAS_FOLDER:
                            distance = rmsDiffNumpy(cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex], camerasIterativeMean[camera].mean)
                            print(f'{cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
                            # Attribute the image to the camera whose PRNU is closest.
                            if minimalDistance is None or distance < minimalDistance:
                                minimalDistance = distance
                                cameraPredicted = camera
                        print(f'Predicted camera {cameraPredicted} {"good" if cameraPredicted == actualCamera else "bad"}')
                        if cameraPredicted == actualCamera:
                            numberOfTrainingImagesAccuracy += 1
                accuracy += [numberOfTrainingImagesAccuracy / (numberOfTestingImages * numberOfCameras)]
|
|
|
|
# Save each camera's estimated PRNU as an image.
for camera in IMAGES_CAMERAS_FOLDER:
    estimatedPrnu = camerasIterativeMean[camera].mean
    plt.imsave(f'{setting}_estimated_prnu_camera_{escapeFilePath(camera)}.png', estimatedPrnu)

# Plot the accuracy curve, persist its raw values and save the figure.
plt.title(f'Accuracy of camera source attribution thanks to a given number of images to estimate PRNUs with {DENOISER} denoiser')
plt.xlabel('Number of images to estimate PRNU')
plt.ylabel('Accuracy of camera source attribution')
plt.plot(accuracy)

internalTitle = f'{setting}_accuracy_of_camera_source_attribution'
saveNpArray(internalTitle, accuracy)
plt.savefig(f'{internalTitle}.svg')
|