WIP (#63)
		@@ -27,30 +27,42 @@ random.seed(0)
for camera in IMAGES_CAMERAS_FOLDER:
    random.shuffle(imagesCamerasFileNames[camera])
-minimumNumberOfImagesCameras = 10#min([len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER])
+minimumNumberOfImagesCameras = 4#min([len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER])
for camera in IMAGES_CAMERAS_FOLDER:
-    IMAGES_CAMERAS_FOLDER[camera] = IMAGES_CAMERAS_FOLDER[camera][:minimumNumberOfImagesCameras]
+    imagesCamerasFileNames[camera] = imagesCamerasFileNames[camera][:minimumNumberOfImagesCameras]
numberOfCameras = len(IMAGES_CAMERAS_FOLDER)
camerasIterativeMean = {camera: iterativeMean() for camera in IMAGES_CAMERAS_FOLDER}

minColor = None
maxColor = None
# Assume that for each camera, its images have the same resolution.
# The following considers a given color channel resolution, assuming they all have the same resolution.
minimalColorChannelCameraResolution = None
for camera in IMAGES_CAMERAS_FOLDER:
    imageFileName = imagesCamerasFileNames[camera][0]
    imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
    singleColorChannelImagesShape = getColorChannel(imageFilePath, Color.RED).shape
    #print(singleColorChannelImagesShape)
    if minimalColorChannelCameraResolution is None or singleColorChannelImagesShape < minimalColorChannelCameraResolution:
        minimalColorChannelCameraResolution = singleColorChannelImagesShape
#print(minimalColorChannelCameraResolution)
#exit(1)

accuracy = []
numberOfTrainingImages = int(minimumNumberOfImagesCameras * TRAINING_PORTION)
numberOfTestingImages = minimumNumberOfImagesCameras - int(minimumNumberOfImagesCameras * TRAINING_PORTION)
-cameraTestingImagesNoise = {}#{camera: [] for camera in IMAGES_CAMERAS_FOLDER}
+cameraTestingImagesNoise = {}
returnSingleColorChannelImage = lambda singleColorChannelImage, _minColor, _maxColor: singleColorChannelImage
for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else []) + [False], 'Compute extremes'):
    rescaleIfNeeded = returnSingleColorChannelImage if computeExtremes else rescaleRawImageForDenoiser
    for cameraTrainingImageIndex in tqdm(range(minimumNumberOfImagesCameras if computeExtremes else numberOfTrainingImages), 'Camera training image index'):
-        for camera in tqdm(IMAGES_CAMERAS_FOLDER, 'Camera'):
+        for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
            imageFileName = imagesCamerasFileNames[camera][cameraTrainingImageIndex]
            imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
-            singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color), minColor, maxColor) for color in Color}
+            singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color)[:minimalColorChannelCameraResolution[0],:minimalColorChannelCameraResolution[1]], minColor, maxColor) for color in Color}
            multipleColorsImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelImages)
            if computeExtremes:
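# Note: rescaleRawImageForDenoiser, selected above for the second pass, is not shown in
# this diff. The first pass (computeExtremes == True) presumably scans the raw images to
# determine the global minColor / maxColor extremes printed later. A minimal sketch of
# the rescaling, assuming a plain linear mapping of raw values to [0, 1]:
def rescaleRawImageForDenoiser(singleColorChannelImage, minColor, maxColor):
    # Map raw sensor values from [minColor, maxColor] to [0, 1] so the denoiser
    # receives a normalized image; the actual implementation may differ.
    return (singleColorChannelImage - minColor) / (maxColor - minColor)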
@@ -64,29 +76,41 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
            cameraIterativeMean = camerasIterativeMean[camera]
            cameraIterativeMean.add(imagePrnuEstimateNpArray)
            if cameraIndex == numberOfCameras - 1:
                for cameraTestingImageIndex in tqdm(range(numberOfTrainingImages), 'Camera testing image index'):
                    singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color), minColor, maxColor) for color in Color}
                    multipleColorsImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelImages)
                    singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) for color in Color}
                    multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
                    rms = rmsDiffNumpy(subgroupIterativeMean.mean, subgroupsIterativeMean[1 - cameraIndex].mean)
                accuracy += [rms]
                numberOfTrainingImagesAccuracy = 0
                # Loop over each camera testing image folder.
                for actualCamera in IMAGES_CAMERAS_FOLDER:
                    for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
                        cameraPredicted = None
                        minimalDistance = None
                        # Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
                        for camera in IMAGES_CAMERAS_FOLDER:
                            distance = rmsDiffNumpy(cameraTestingImagesNoise[camera][cameraTestingImageIndex], camerasIterativeMean[camera].mean)
                            print(f'{cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
                            if minimalDistance is None or distance < minimalDistance:
                                minimalDistance = distance
                                cameraPredicted = camera
                        if cameraPredicted == actualCamera:
                            numberOfTrainingImagesAccuracy += 1
                accuracy += [numberOfTrainingImagesAccuracy / (numberOfTestingImages * numberOfCameras)]
    if computeExtremes:
        print(f'{minColor=} {maxColor=}')
        print('Extracting noise of testing images')
        for camera in tqdm(IMAGES_CAMERAS_FOLDER, 'Camera'):
            for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
                imageFilePath = 
                singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color), minColor, maxColor) for color in Color}
                # Should make a function
                imageFileName = imagesCamerasFileNames[camera][numberOfTrainingImages + cameraTestingImageIndex]
                imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'

                # Should make a function
                singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color)[:minimalColorChannelCameraResolution[0],:minimalColorChannelCameraResolution[1]], minColor, maxColor) for color in Color}
                multipleColorsImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelImages)
                singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) for color in Color}
                multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
                imagePrnuEstimateNpArray = multipleColorsImage - multipleColorsDenoisedImage

                cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [multipleColorsDenoisedImage]
-for camera in range(IMAGES_CAMERAS_FOLDER):
+for camera in IMAGES_CAMERAS_FOLDER:
    plt.imsave(f'{setting}_estimated_prnu_subgroup_{escapeFilePath(camera)}.png', (camerasIterativeMean[camera].mean))
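# Note: iterativeMean, whose .mean attribute is saved just above and used as each
# camera's PRNU estimate, is not defined in this diff. A minimal sketch of what it
# presumably provides (a running mean with an add() method), given as an assumption:
class iterativeMean:
    def __init__(self):
        self.mean = None              # running mean of the arrays added so far
        self.numberOfElements = 0     # number of arrays averaged so far

    def add(self, element):
        # Update the running mean without keeping every added array in memory.
        if self.mean is None:
            self.mean = element
        else:
            self.mean = (self.mean * self.numberOfElements + element) / (self.numberOfElements + 1)
        self.numberOfElements += 1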
plt.title(f'Accuracy of camera source attribution thanks to a given number of images to estimate PRNUs with {DENOISER} denoiser')
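# Note: rmsDiffNumpy, used above as the distance between a testing image's noise and
# each camera's mean PRNU estimate, is also not defined in this diff. A minimal sketch,
# assuming a plain root-mean-square difference between two same-shaped NumPy arrays:
import numpy as np

def rmsDiffNumpy(firstImage, secondImage):
    # Root mean square of the element-wise difference between the two arrays.
    return np.sqrt(np.mean((np.asarray(firstImage, dtype=float) - np.asarray(secondImage, dtype=float)) ** 2))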
 
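# Note: the two "# Should make a function" comments above mark a block that is repeated
# for training and testing images. A possible refactoring sketch, reusing the names that
# appear in this diff (getColorChannel, mergeSingleColorChannelImagesAccordingToBayerFilter,
# denoise, DENOISER, Color, minimalColorChannelCameraResolution); the signature is assumed:
def getImagePrnuEstimateNpArray(imageFilePath, rescaleIfNeeded, minColor, maxColor, minimalColorChannelCameraResolution):
    # Load each color channel, crop to the smallest camera resolution and rescale if needed.
    singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color)[:minimalColorChannelCameraResolution[0],:minimalColorChannelCameraResolution[1]], minColor, maxColor) for color in Color}
    # Merge the channels into a single image, denoise each channel, then merge the denoised channels.
    multipleColorsImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelImages)
    singleColorChannelDenoisedImages = {color: denoise(singleColorChannelImages[color], DENOISER) for color in Color}
    multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
    # The PRNU estimate is the residual between the image and its denoised version.
    return multipleColorsImage - multipleColorsDenoisedImage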