diff --git a/datasets/raise/attribute_source_camera.py b/datasets/raise/attribute_source_camera.py
index 3ecc75b..3e5aeaf 100755
--- a/datasets/raise/attribute_source_camera.py
+++ b/datasets/raise/attribute_source_camera.py
@@ -27,15 +27,14 @@ random.seed(0)
 
 for camera in IMAGES_CAMERAS_FOLDER:
     random.shuffle(imagesCamerasFileNames[camera])
-minimumNumberOfImagesCameras = 4#min([len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER])
+minimumNumberOfImagesCameras = 16#min([len(imagesCamerasFileNames[camera]) for camera in IMAGES_CAMERAS_FOLDER])
 
 for camera in IMAGES_CAMERAS_FOLDER:
-    imagesCamerasFileNames[camera] = imagesCamerasFileNames[camera][:minimumNumberOfImagesCameras]
+    imagesCamerasFileNames[camera] = imagesCamerasFileNames[camera][:minimumNumberOfImagesCameras]#[imagesCamerasFileNames[camera][0]] * minimumNumberOfImagesCameras#[:minimumNumberOfImagesCameras]
+    print(camera, imagesCamerasFileNames[camera])
 
 numberOfCameras = len(IMAGES_CAMERAS_FOLDER)
 camerasIterativeMean = {camera: iterativeMean() for camera in IMAGES_CAMERAS_FOLDER}
-minColor = None
-maxColor = None
 # Assume that for each camera, its images have the same resolution.
 # The following consider a given color channel resolution, assuming they all have the same resolution.
 minimalColorChannelCameraResolution = None
@@ -46,14 +45,17 @@ for camera in IMAGES_CAMERAS_FOLDER:
     if minimalColorChannelCameraResolution is None or singleColorChannelImagesShape < minimalColorChannelCameraResolution:
         minimalColorChannelCameraResolution = singleColorChannelImagesShape
 
-minColor = 13#None
-maxColor = 7497#None
+minColor = None#13#None
+maxColor = None#7497#None
 accuracy = []
 
 numberOfTrainingImages = int(minimumNumberOfImagesCameras * TRAINING_PORTION)
 numberOfTestingImages = minimumNumberOfImagesCameras - int(minimumNumberOfImagesCameras * TRAINING_PORTION)
 cameraTestingImagesNoise = {}
 
+from utils import silentTqdm
+#tqdm = silentTqdm
+
 returnSingleColorChannelImage = lambda singleColorChannelImage, _minColor, _maxColor: singleColorChannelImage
 
 for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else []) + [False], 'Compute extremes'):
@@ -66,6 +68,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
             # Should make a function
             imageFileName = imagesCamerasFileNames[camera][numberOfTrainingImages + cameraTestingImageIndex]
             imageFilePath = f'{IMAGES_CAMERAS_FOLDER[camera]}/{imageFileName}'
+            print(f'{imageFilePath=}')
 
             # Should make a function
             singleColorChannelImages = {color: rescaleIfNeeded(getColorChannel(imageFilePath, color)[:minimalColorChannelCameraResolution[0],:minimalColorChannelCameraResolution[1]], minColor, maxColor) for color in Color}
@@ -74,7 +77,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
             multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
             imagePrnuEstimateNpArray = multipleColorsImage - multipleColorsDenoisedImage
 
-            cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [multipleColorsDenoisedImage]
+            cameraTestingImagesNoise[camera] = cameraTestingImagesNoise.get(camera, []) + [imagePrnuEstimateNpArray]#multipleColorsDenoisedImage]
     for cameraTrainingImageIndex in tqdm(range(minimumNumberOfImagesCameras if computeExtremes else numberOfTrainingImages), 'Camera training image index'):
         for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
             imageFileName = imagesCamerasFileNames[camera][cameraTrainingImageIndex]
@@ -90,10 +93,16 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
             multipleColorsDenoisedImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelDenoisedImages)
             imagePrnuEstimateNpArray = multipleColorsImage - multipleColorsDenoisedImage
 
+            #print(camera)
+            #plt.imshow(multipleColorsImage)
+            #plt.show()
+            #exit(1)
             cameraIterativeMean = camerasIterativeMean[camera]
             cameraIterativeMean.add(imagePrnuEstimateNpArray)
+            plt.imsave(f'm_{escapeFilePath(camera)}.png', camerasIterativeMean[camera].mean)
             if cameraIndex == numberOfCameras - 1:
                 numberOfTrainingImagesAccuracy = 0
+                print(f'{numberOfTestingImages=} {numberOfCameras=}')
                 # Loop over each camera testing image folder.
                 for actualCamera in IMAGES_CAMERAS_FOLDER:
                     for cameraTestingImageIndex in tqdm(range(numberOfTestingImages), 'Camera testing image index'):
@@ -101,11 +110,23 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
                         minimalDistance = None
                         # Loop over each camera to compute closeness between the considered testing image noise and the estimated PRNUs of the various cameras.
                         for camera in IMAGES_CAMERAS_FOLDER:
-                            distance = rmsDiffNumpy(cameraTestingImagesNoise[camera][cameraTestingImageIndex], camerasIterativeMean[camera].mean)
+                            distance = rmsDiffNumpy(cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex], camerasIterativeMean[camera].mean)
+                            '''
+                            print(f'{camerasIterativeMean[camera].numberOfElementsInMean=}')
+                            print(f'{cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex].min()=}')
+                            print(f'{cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex].max()=}')
+                            print(f'{camerasIterativeMean[camera].mean.min()=}')
+                            print(f'{camerasIterativeMean[camera].mean.max()=}')
+                            plt.imsave(f'a_{actualCamera}.png', cameraTestingImagesNoise[actualCamera][cameraTestingImageIndex])
+                            plt.imsave(f'b_{camera}.png', camerasIterativeMean[camera].mean)
+                            plt.show()
+                            '''
                             print(f'{cameraTestingImageIndex=} {camera=} {actualCamera=} {distance=}')
+                            #exit(1)
                            if minimalDistance is None or distance < minimalDistance:
                                 minimalDistance = distance
                                 cameraPredicted = camera
+                        print(f'Predicted camera {cameraPredicted} {"good" if cameraPredicted == actualCamera else "bad"}')
                         if cameraPredicted == actualCamera:
                             numberOfTrainingImagesAccuracy += 1
                 accuracy += [numberOfTrainingImagesAccuracy / (numberOfTestingImages * numberOfCameras)]
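
For reference, the prediction step corrected in the last hunk (comparing the testing residual of actualCamera, rather than of camera, against every camera's mean PRNU) is a nearest-mean classifier on noise residuals. Below is a minimal, self-contained sketch of that idea using NumPy only; rms_diff, predict_camera and the camera names are hypothetical stand-ins for illustration, not the repository's rmsDiffNumpy / iterativeMean helpers.

import numpy as np

def rms_diff(a, b):
    # Root-mean-square difference between two equally shaped arrays.
    return np.sqrt(np.mean((a - b) ** 2))

def predict_camera(testing_residual, camera_prnu_estimates):
    # Attribute the residual to the camera whose mean PRNU estimate is closest
    # in RMS, mirroring the corrected rmsDiffNumpy loop above.
    return min(camera_prnu_estimates,
               key=lambda camera: rms_diff(testing_residual, camera_prnu_estimates[camera]))

rng = np.random.default_rng(0)
# Hypothetical "true" PRNU patterns for two cameras.
true_prnus = {'camera_a': rng.normal(size=(8, 8)), 'camera_b': rng.normal(size=(8, 8))}

# Training: estimate each camera's PRNU as the mean of its noisy training residuals.
camera_prnu_estimates = {
    camera: np.mean([prnu + rng.normal(scale=0.5, size=prnu.shape) for _ in range(16)], axis=0)
    for camera, prnu in true_prnus.items()
}

# Testing: a residual coming from camera_a should be attributed to camera_a.
testing_residual = true_prnus['camera_a'] + rng.normal(scale=0.5, size=(8, 8))
print(predict_camera(testing_residual, camera_prnu_estimates))  # expected: camera_a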