Load training images to memory to ease implementation and make execution faster
@@ -102,6 +102,17 @@ from utils import silentTqdm
 
 returnSingleColorChannelImage = lambda singleColorChannelImage, _minColor, _maxColor: singleColorChannelImage
 
+# Assume to have `{min,max}Color` hardcoded.
+print('Load training images to memory')
+rescaleIfNeeded = rescaleRawImageForDenoiser
+cameraTrainingImages = {}
+for cameraTrainingImageIndex in tqdm(range(numberOfTrainingImages), 'Camera training image index'):
+    for cameraIndex, camera in enumerate(tqdm(IMAGES_CAMERAS_FOLDER, 'Camera')):
+        singleColorChannelImages = getSingleColorChannelImages(camera, cameraTrainingImageIndex)
+        multipleColorsImage = getMultipleColorsImage(singleColorChannelImages)
+        cameraTrainingImages[camera] = cameraTrainingImages.get(camera, []) + [multipleColorsImage]
+print('Training images loaded to memory')
+
 # 2 loops:
 # - the first one is about computing `{min,max}Color`
 # - the second one is about estimating better and better the PRNU of each camera, as consider more and more training images and measuring the resulting attribution of cameras
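Note: the added block above caches every training image in a per-camera list before the two main loops run. Below is a minimal, self-contained sketch of that accumulation pattern; the camera names and the loader are hypothetical stand-ins for getSingleColorChannelImages/getMultipleColorsImage, and collections.defaultdict is just one way to avoid the `.get(camera, []) + [...]` re-allocation used in the diff.

from collections import defaultdict

import numpy as np

# Hypothetical stand-ins for the repository's camera folders and image loaders.
cameras = ['camera_0', 'camera_1']
numberOfTrainingImages = 3

def loadMultipleColorsImage(camera, index):
    # Placeholder for getSingleColorChannelImages + getMultipleColorsImage.
    return np.full((4, 4), index, dtype=np.float64)

cameraTrainingImages = defaultdict(list)
for cameraTrainingImageIndex in range(numberOfTrainingImages):
    for camera in cameras:
        # Each image is read once and kept in memory so later passes can reuse it.
        cameraTrainingImages[camera].append(loadMultipleColorsImage(camera, cameraTrainingImageIndex))

assert all(len(images) == numberOfTrainingImages for images in cameraTrainingImages.values())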
@@ -143,11 +154,7 @@ for computeExtremes in tqdm(([True] if minColor is None or maxColor is None else
                 cameraIterativeMean.add(imagePrnuEstimateNpArray)
             else:
                 # Still use `cameraIterativeMean` to simplify the implementation.
-                cameraIterativeMean.mean = np.mean([cameraTrainingImage - mergeSingleColorChannelImagesAccordingToBayerFilter({color: cameraColorMeans[camera][color].mean for color in Color}) for cameraTrainingImage in [multipleColorsImage]], axis = 0)#[:cameraTrainingImageIndex + 1]])
-                print(f'{cameraIterativeMean.mean = }')
-                print(f'{camerasIterativeMean[camera].mean = }')
-                if cameraIterativeMean.mean[0, 0] != 0:
-                    exit(1)
+                cameraIterativeMean.mean = np.mean([cameraTrainingImage - mergeSingleColorChannelImagesAccordingToBayerFilter({color: cameraColorMeans[camera][color].mean for color in Color}) for cameraTrainingImage in cameraTrainingImages[camera][:cameraTrainingImageIndex + 1]], axis = 0)
             # If we are considering the last camera and (not `PREDICT_ONLY_ON_WHOLE_TRAINING_SET` or we are considering the last training image), then we proceeded an additional image for all cameras and we can predict the accuracy at this learning step.
             if cameraIndex == numberOfCameras - 1 and (not PREDICT_ONLY_ON_WHOLE_TRAINING_SET or cameraTrainingImageIndex == numberOfTrainingImages - 1):
                 numberOfTrainingImagesAccuracy = 0
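Note: with the images cached, the second hunk replaces the temporary debug branch (which only used the current multipleColorsImage and then exited) by a mean recomputed over the cached prefix cameraTrainingImages[camera][:cameraTrainingImageIndex + 1]. The small self-contained check below shows that such a prefix recomputation agrees with a running mean, under the assumption that the repository's iterative-mean helper behaves like a standard running average; the residual arrays here are made up for illustration.

import numpy as np

rng = np.random.default_rng(0)
# Stand-ins for `image - merged per-color mean` residuals; the real ones come from camera images.
residuals = [rng.normal(size=(4, 4)) for _ in range(5)]

runningMean = np.zeros((4, 4))
for index, residual in enumerate(residuals):
    # Classic running-mean update, akin to what `cameraIterativeMean.add(...)` is assumed to do.
    runningMean += (residual - runningMean) / (index + 1)
    # Recomputation over the cached prefix, as in the new `else` branch above.
    recomputedMean = np.mean(residuals[:index + 1], axis = 0)
    assert np.allclose(runningMean, recomputedMean)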