Compare commits

2 Commits

Author           SHA1        Message               Date
Benjamin Loison  53c3935bab  Clean some debugging  2024-04-30 06:48:17 +02:00
Benjamin Loison  7a807b91d8  #63: Add debugging    2024-04-30 06:45:28 +02:00

@@ -27,15 +27,14 @@ random.seed(0)
for camera in IMAGES_CAMERAS_FOLDER:
random.shuffle(imagesCamerasFileNames[camera])
minimumNumberOfImagesCameras = 4#min([len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER])
minimumNumberOfImagesCameras = 16#min([len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER])
for camera in IMAGES_CAMERAS_FOLDER:
imagesCamerasFileNames[camera] = imagesCamerasFileNames[camera][:minimumNumberOfImagesCameras]
print(camera, imagesCamerasFileNames[camera])
numberOfCameras = len(IMAGES_CAMERAS_FOLDER)
camerasIterativeMean = {camera: iterativeMean() for camera in IMAGES_CAMERAS_FOLDER}
minColor = None
maxColor = None
# Assume that for each camera, its images have the same resolution.
# The following consider a given color channel resolution, assuming they all have the same resolution.
minimalColorChannelCameraResolution = None
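
The hunk above builds one iterativeMean per camera to accumulate PRNU estimates. iterativeMean comes from the repository's utils and is not shown in this diff, so the following is only a minimal sketch, assuming it keeps a running mean of same-shaped NumPy arrays through an add() method and a mean attribute.

import numpy as np

class IterativeMean:
    # Hypothetical stand-in for utils.iterativeMean: incremental mean of arrays.
    def __init__(self):
        self.mean = None
        self.count = 0

    def add(self, array):
        # mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n
        self.count += 1
        if self.mean is None:
            self.mean = array.astype(np.float64)
        else:
            self.mean += (array - self.mean) / self.count
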
@@ -46,14 +45,17 @@ for camera in IMAGES_CAMERAS_FOLDER:
if minimalColorChannelCameraResolution is None or singleColorChannelImagesShape < minimalColorChannelCameraResolution:
minimalColorChannelCameraResolution = singleColorChannelImagesShape
minColor = 13#None
maxColor = 7497#None
minColor = None#13#None
maxColor = None#7497#None
accuracy = []
numberOfTrainingImages = int(minimumNumberOfImagesCameras * TRAINING_PORTION)
numberOfTestingImages = minimumNumberOfImagesCameras - int(minimumNumberOfImagesCameras * TRAINING_PORTION)
cameraTestingImagesNoise = {}
from utils import silentTqdm
#tqdm = silentTqdm
returnSingleColorChannelImage = lambda singleColorChannelImage, _minColor, _maxColor: singleColorChannelImage
for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else []) + [False], 'Compute extremes'):
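
The computeExtremes loop above runs a first pass (only when minColor or maxColor is None) to collect global pixel extremes, then a second pass that rescales every image with them. A minimal sketch of that two-pass idea, with illustrative names (updateExtremes, rescale) rather than the repository's rescaleIfNeeded:

import numpy as np

def updateExtremes(image, minColor, maxColor):
    # First pass: widen the global extremes with this image's min and max.
    imageMin, imageMax = float(image.min()), float(image.max())
    minColor = imageMin if minColor is None else min(minColor, imageMin)
    maxColor = imageMax if maxColor is None else max(maxColor, imageMax)
    return minColor, maxColor

def rescale(image, minColor, maxColor):
    # Second pass: map raw values to [0, 1] so all cameras share one scale.
    return (image.astype(np.float64) - minColor) / (maxColor - minColor)
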
@@ -66,6 +68,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
# Should make a function
imageFileName = imagesCamerasFileNames[camera][numberOfTrainingImages + cameraTestingImageIndex]
imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
print(f'{imageFilePath=}')
# Should make a function
singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color)[:minimalColorChannelCameraResolution[0],:minimalColorChannelCameraResolution[1]], minColor, maxColor) for color in Color}
@@ -74,7 +77,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
imagePrnuEstimateNpArray = multipleColorsImage - multipleColorsDenoisedImage
cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [multipleColorsDenoisedImage]
cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [imagePrnuEstimateNpArray]
for cameraTrainingImageIndex in tqdm(range(minimumNumberOfImagesCameras if computeExtremes else numberOfTrainingImages), 'Camera training image index'):
for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
imageFileName = imagesCamerasFileNames[camera][cameraTrainingImageIndex]
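
The change in the previous hunk stores the noise residual imagePrnuEstimateNpArray (the image minus its denoised version) instead of the denoised image itself. A minimal sketch of that residual extraction, using a Gaussian blur only as a placeholder since the actual DENOISER is configured elsewhere in the repository:

import numpy as np
from scipy.ndimage import gaussian_filter

def noiseResidual(image):
    # PRNU-style residual: the image minus a denoised version of itself.
    # gaussian_filter is only a placeholder for the configured DENOISER.
    denoised = gaussian_filter(image.astype(np.float64), sigma=2)
    return image - denoised
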
@@ -94,6 +97,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
cameraIterativeMean.add(imagePrnuEstimateNpArray)
if cameraIndex == numberOfCameras - 1:
numberOfTrainingImagesAccuracy = 0
print(f'{numberOfTestingImages=} {numberOfCameras=}')
# Loop over each camera testing image folder.
for actualCamera in IMAGES_CAMERAS_FOLDER:
for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
@@ -101,17 +105,18 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
minimalDistance = None
# Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
for camera in IMAGES_CAMERAS_FOLDER:
distance = rmsDiffNumpy(cameraTestingImagesNoise[camera][cameraTestingImageIndex], camerasIterativeMean[camera].mean)
distance = rmsDiffNumpy(cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex], camerasIterativeMean[camera].mean)
print(f'{cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
if minimalDistance is None or distance < minimalDistance:
minimalDistance = distance
cameraPredicted = camera
print(f'Predicted camera {cameraPredicted} {"good" if cameraPredicted == actualCamera else "bad"}')
if cameraPredicted == actualCamera:
numberOfTrainingImagesAccuracy += 1
accuracy += [numberOfTrainingImagesAccuracy / (numberOfTestingImages * numberOfCameras)]
for camera in IMAGES_CAMERAS_FOLDER:
plt.imsave(f'{setting}_estimated_prnu_subgroup_{escapeFilePath(camera)}.png', (camerasIterativeMean[camera].mean))
plt.imsave(f'{setting}_estimated_prnu_camera_{escapeFilePath(camera)}.png', (camerasIterativeMean[camera].mean))
plt.title(f'Accuracy of camera source attribution thanks to a given number of images to estimate PRNUs with {DENOISER} denoiser')
plt.xlabel('Number of images to estimate PRNU')
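
For reference, the attribution step in the last hunk predicts, for each testing image's noise residual, the camera whose mean PRNU estimate is closest in the root-mean-square sense. A minimal sketch of that step, assuming rmsDiffNumpy computes the root mean square of the element-wise difference (rmsDiff and predictCamera below are illustrative names, not the repository's API):

import numpy as np

def rmsDiff(a, b):
    # Root mean square of the element-wise difference, as rmsDiffNumpy presumably computes.
    return np.sqrt(np.mean((a - b) ** 2))

def predictCamera(noiseResidual, camerasMeanPrnu):
    # camerasMeanPrnu maps a camera name to its mean PRNU estimate (NumPy array).
    return min(camerasMeanPrnu, key=lambda camera: rmsDiff(noiseResidual, camerasMeanPrnu[camera]))
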