from enum import Enum, auto

import builtins as __builtin__
import os
from datetime import datetime

import imageio
import numpy as np
import rawpy
# `scipy.stats` is needed by `getPearsonCorrelationDistance` below.
import scipy.stats
import skimage.restoration
from PIL import Image, ImageDraw
from scipy import fftpack
from scipy.ndimage import gaussian_filter
from skimage import img_as_float
from skimage.restoration import estimate_sigma
from tqdm import tqdm

class Color(Enum):
    RED = auto()
    GREEN_RIGHT = auto()
    GREEN_BOTTOM = auto()
    BLUE = auto()

    def __str__(self):
        return self.name.lower()

class Denoiser(Enum):
    WAVELET = auto()
    BILATERAL = auto()
    TV_CHAMBOLLE = auto()
    MEAN = auto()
    NL_MEANS = auto()
    LOW_PASS = auto()

    def __str__(self):
        return self.name.lower()

class Distance(Enum):
    ROOT_MEAN_SQUARE = auto()
    PEARSON_CORRELATION = auto()

    def __str__(self):
        return self.name.lower()

    def getEndUserName(self):
        return self.name.replace('_', ' ').title()

def getPearsonCorrelationDistance(firstImage, secondImage):
    pearsonCorrelation = scipy.stats.pearsonr(firstImage.flatten(), secondImage.flatten()).statistic
    if pearsonCorrelation < 0:
        print('Negative Pearson correlation, investigate further!')
        exit(1)
    return abs(pearsonCorrelation - 1)

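# For instance, `getPearsonCorrelationDistance(image, image)` is 0 for any
# non-constant `image`, and the distance approaches 1 as the two images become
# uncorrelated.
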
# Source: https://stackoverflow.com/a/43346070
def getGaussianKernel(size, sigma):
    axis = np.linspace(-(size - 1) / 2., (size - 1) / 2., size)
    gauss = np.exp(-0.5 * np.square(axis) / np.square(sigma))
    kernel = np.outer(gauss, gauss)
    return kernel / np.sum(kernel)

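# Illustration only: `getGaussianKernel(5, 1)` returns a 5x5 array whose
# entries sum to 1 (up to floating-point rounding).
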
# `denoiser` can be all values of `Denoiser` except `MEAN`.
def denoise(imageNpArray, denoiser):
    if denoiser != Denoiser.LOW_PASS:
        skImageRestorationDenoise = getattr(skimage.restoration, f'denoise_{denoiser}')

    match denoiser:
        case Denoiser.WAVELET:
            imageDenoisedNpArray = skImageRestorationDenoise(imageNpArray, rescale_sigma=True)
        case Denoiser.BILATERAL:
            imageDenoisedNpArray = skImageRestorationDenoise(imageNpArray, sigma_color=0.05, sigma_spatial=15)
        case Denoiser.TV_CHAMBOLLE:
            imageDenoisedNpArray = skImageRestorationDenoise(imageNpArray, weight=0.2)
        case Denoiser.NL_MEANS:
            sigmaEst = np.mean(estimate_sigma(imageNpArray, channel_axis = -1))
            imageDenoisedNpArray = skImageRestorationDenoise(imageNpArray, patch_size = 7, patch_distance = 4, h = 0.8 * sigmaEst, fast_mode = True)
        case Denoiser.LOW_PASS:
            # Based on [the Stack Overflow answer 54662336](https://stackoverflow.com/a/54662336).
            # FFT of the image.
            fft1 = fftpack.fftshift(fftpack.fft2(imageNpArray))

            # Padding as follows may not be perfectly correct but seems fine enough.
            GAUSSIAN_SIGMA = 50

            imageNpArrayShape = imageNpArray.shape
            lowPassNpArraySize = min(imageNpArrayShape)
            lowPassNpArray = getGaussianKernel(lowPassNpArraySize, GAUSSIAN_SIGMA)

            wantedHeight, wantedWidth = imageNpArrayShape
            verticalPadSize, horizontalPadSize = [(wantedDimension - lowPassNpArraySize) // 2 for wantedDimension in [wantedHeight, wantedWidth]]
            rectangleLowPassNpArray = np.pad(lowPassNpArray, [(verticalPadSize, verticalPadSize), (horizontalPadSize, horizontalPadSize)])

            # Unsure that doing such *normalization* is appropriate.
            rectangleLowPassNpArray /= rectangleLowPassNpArray.max()

            # Multiply the image spectrum by the low-pass filter.
            filtered = np.multiply(fft1, rectangleLowPassNpArray)

            # Inverse FFT.
            ifft2 = np.real(fftpack.ifft2(fftpack.ifftshift(filtered)))
            ifft2 = np.maximum(0, np.minimum(ifft2, 255))

            imageDenoisedNpArray = ifft2.astype(np.uint8)
    return imageDenoisedNpArray

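# Illustration only: a single colour channel already rescaled to [0, 1] (see
# `rescaleRawImageForDenoiser`) can be denoised with, for instance:
#
#     imageDenoisedNpArray = denoise(imageNpArray, Denoiser.WAVELET)
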
class iterativeMean:
    mean = None
    numberOfElementsInMean = 0

    def add(self, element):
        if self.mean is None:
            self.mean = element
        else:
            self.mean = ((self.mean * self.numberOfElementsInMean) + element) / (self.numberOfElementsInMean + 1)
        self.numberOfElementsInMean += 1

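# Illustration only: the running mean matches a plain mean over the same data.
#
#     runningMean = iterativeMean()
#     for element in [1., 2., 3.]:
#         runningMean.add(element)
#     # `runningMean.mean` is now 2.
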
RAW_IMAGE_FILE_EXTENSIONS = [
    'arw',
    'nef',
]

def getRawColorChannel(raw, color):
    colorDesc = raw.color_desc.decode('ascii')
    assert colorDesc == 'RGBG'
    assert np.array_equal(raw.raw_pattern, np.array([[0, 1], [3, 2]], dtype = np.uint8))
    # Assumed Bayer pattern:
    # RG
    # GB
    rawImageVisible = raw.raw_image_visible.copy()

    redRawImageVisible = rawImageVisible[::2, ::2]
    greenRightRawImageVisible = rawImageVisible[::2, 1::2]

    greenBottomRawImageVisible = rawImageVisible[1::2, ::2]
    blueRawImageVisible = rawImageVisible[1::2, 1::2]

    match color:
        case Color.RED:
            imageNpArray = redRawImageVisible
        case Color.GREEN_RIGHT:
            imageNpArray = greenRightRawImageVisible
        case Color.GREEN_BOTTOM:
            imageNpArray = greenBottomRawImageVisible
        case Color.BLUE:
            imageNpArray = blueRawImageVisible
    return imageNpArray

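# Illustration only, for some hypothetical raw file `image.arw` with the
# expected 'RGBG' Bayer pattern:
#
#     with rawpy.imread('image.arw') as raw:
#         redRawImageVisible = getRawColorChannel(raw, Color.RED)
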
def isARawImage(imageFilePath):
    return any([imageFilePath.lower().endswith(f'.{rawImageFileExtension}') for rawImageFileExtension in RAW_IMAGE_FILE_EXTENSIONS])

def getColorChannel(imageFilePath, color):
    if isARawImage(imageFilePath):
        numpyFilePath = f'{imageFilePath}.{color}.npy'
        # Loading from the `.npy` cache is currently disabled by the `False and`.
        if False and os.path.isfile(numpyFilePath):
            imageNpArray = np.load(numpyFilePath)
        else:
            with rawpy.imread(imageFilePath) as raw:
                imageNpArray = getRawColorChannel(raw, color)
    else:
        imagePil = Image.open(imageFilePath)
        imageNpArray = img_as_float(np.array(imagePil))
    return imageNpArray

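# Illustration only, for hypothetical file paths: raw files go through rawpy,
# anything else through PIL (in which case `color` is ignored).
#
#     redChannel = getColorChannel('image.arw', Color.RED)
#     pngImage = getColorChannel('image.png', Color.RED)
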
def escapeFilePath(filePath):
    return filePath.replace('/', '_')


def getNewIndex(index, offset):
    newIndex = (index - offset) * 2 + offset
    return newIndex

# Drop-in replacement for `tqdm` that disables the progress bar.
def silentTqdm(data, desc = None):
    return data

def mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelImages):
    colorImage = singleColorChannelImages[list(Color)[0]]
    multipleColorsImage = np.empty([dimension * 2 for dimension in colorImage.shape], dtype = np.float64)
    '''
    Assume Bayer Filter:
    RG
    GB
    '''
    # Place each channel at the offsets used by `getRawColorChannel`.
    multipleColorsImage[::2, ::2] = singleColorChannelImages[Color.RED]
    multipleColorsImage[::2, 1::2] = singleColorChannelImages[Color.GREEN_RIGHT]
    multipleColorsImage[1::2, ::2] = singleColorChannelImages[Color.GREEN_BOTTOM]
    multipleColorsImage[1::2, 1::2] = singleColorChannelImages[Color.BLUE]
    return multipleColorsImage

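# Illustration only, for some hypothetical raw file `image.arw`: merging the
# four channels extracted by `getColorChannel` reassembles the Bayer mosaic.
#
#     singleColorChannelImages = {color: getColorChannel('image.arw', color) for color in Color}
#     multipleColorsImage = mergeSingleColorChannelImagesAccordingToBayerFilter(singleColorChannelImages)
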
def saveNpArray(fileName, npArray):
    with open(f'{fileName}.npy', 'wb') as f:
        np.save(f, npArray)

def rescaleRawImageForDenoiser(imageNpArray, minColor, maxColor):
    imageNpArray = (imageNpArray - minColor) / (maxColor - minColor)
    return imageNpArray

def updateExtremes(imageNpArray, minColor, maxColor):
    colorRawImageVisibleMin = imageNpArray.min()
    colorRawImageVisibleMax = imageNpArray.max()
    if minColor is None or colorRawImageVisibleMin < minColor:
        minColor = colorRawImageVisibleMin
    if maxColor is None or colorRawImageVisibleMax > maxColor:
        maxColor = colorRawImageVisibleMax
    return minColor, maxColor

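# Illustration only: `updateExtremes` is meant to be folded over several images
# before rescaling, e.g. for some hypothetical list `imageNpArrays`:
#
#     minColor, maxColor = None, None
#     for imageNpArray in imageNpArrays:
#         minColor, maxColor = updateExtremes(imageNpArray, minColor, maxColor)
#     rescaledImageNpArray = rescaleRawImageForDenoiser(imageNpArrays[0], minColor, maxColor)
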
# Shadow the built-in `print` with a timestamped version relying on
# `tqdm.write`, so that messages do not break the progress bars.
def print(*toPrint):
    #__builtin__.print(datetime.now(), *toPrint)
    tqdm.write(f'{datetime.now()} {" ".join(str(element) for element in toPrint)}')

def getColorMeans(imagesFileNames, colors, singleColorChannelCropResolution = None):
    colorMeans = {}
    for color in colors:
        colorIterativeMean = iterativeMean()
        for imageFileName in tqdm(imagesFileNames, f'Computing mean of {str(color).replace("_", " ")} colored images'):
            imageNpArray, minColor_, maxColor_ = getImageNpArray(imageFileName, False, color, Denoiser.MEAN, None, None)
            if singleColorChannelCropResolution is not None:
                imageNpArray = getImageCrop(imageNpArray, singleColorChannelCropResolution)
            imageNpArray = gaussian_filter(imageNpArray, sigma = 5)
            colorIterativeMean.add(imageNpArray)
        colorMeans[color] = colorIterativeMean.mean
    return colorMeans

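# Illustration only, for some hypothetical list of raw file paths `imagesFileNames`:
#
#     colorMeans = getColorMeans(imagesFileNames, list(Color), singleColorChannelCropResolution = (100, 100))
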
def getImageNpArray(imageFilePath, computeExtremes, color, denoiser, minColor, maxColor):
    imageNpArray = getColorChannel(imageFilePath, color)

    if computeExtremes:
        minColor, maxColor = updateExtremes(imageNpArray, minColor, maxColor)
        return None, minColor, maxColor

    if isARawImage(imageFilePath) and denoiser != Denoiser.MEAN:
        imageNpArray = rescaleRawImageForDenoiser(imageNpArray, minColor, maxColor)
    # Pay attention to the range of values expected by the denoiser.
    # Indeed, if the raw image with values in the thousands is provided as is, the denoiser still returns values between 0 and 1, which makes comparing the two pointless.
    return imageNpArray, minColor, maxColor

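# Illustration only, for some hypothetical raw file `image.arw`: a first pass
# with `computeExtremes = True` gathers `minColor`/`maxColor`, a second pass
# with `False` returns the rescaled channel.
#
#     _, minColor, maxColor = getImageNpArray('image.arw', True, Color.RED, Denoiser.WAVELET, None, None)
#     imageNpArray, _, _ = getImageNpArray('image.arw', False, Color.RED, Denoiser.WAVELET, minColor, maxColor)
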
def getImageCrop(image, cropResolution):
    imageCrop = image[:cropResolution[0], :cropResolution[1]]
    return imageCrop