diff --git a/algorithms/context_adaptative_interpolator/context_adaptive_interpolator.py b/algorithms/context_adaptive_interpolator/context_adaptive_interpolator.py
similarity index 85%
rename from algorithms/context_adaptative_interpolator/context_adaptive_interpolator.py
rename to algorithms/context_adaptive_interpolator/context_adaptive_interpolator.py
index da680b1..f96f28f 100644
--- a/algorithms/context_adaptative_interpolator/context_adaptive_interpolator.py
+++ b/algorithms/context_adaptive_interpolator/context_adaptive_interpolator.py
@@ -6,14 +6,15 @@ from wiener_filter import wienerFilter
 
 # Assume greyscale PIL image passed.
 # What about other color channels? See #11.
-def contextAdaptiveInterpolator(I, IImage):
+def contextAdaptiveInterpolator(I, IImage, showProgress = False):
     rImage = Image.new('L', (IImage.size[0] - 2, IImage.size[1] - 2))
     r = rImage.load()
 
     # This threshold is debatable. See #13.
     THRESHOLD = 20
 
-    print('before for loops')
+    if showProgress:
+        print('before for loops')
     # Equation (10)
     # Accelerate computation. See #15.
     for m in range(1, IImage.size[0] - 1):
@@ -40,7 +41,8 @@ def contextAdaptiveInterpolator(I, IImage):
             else:
                 newPixel = I[m, n] - median(A)
             r[m - 1, n - 1] = round(newPixel)
-    print('after for loops')
+    if showProgress:
+        print('after for loops')
 
     # Why need to rotate the image? See #14.
     #rImage.rotate(-90).show()
@@ -49,5 +51,6 @@ def contextAdaptiveInterpolator(I, IImage):
     # $\sigma_0^2$ is the noise variance.
     sigma_0 = 9 ** 0.5
 
-    print('before wiener filter')
-    return wienerFilter(r, rImage, Q, sigma_0)
\ No newline at end of file
+    if showProgress:
+        print('before wiener filter')
+    return wienerFilter(r, rImage, Q, sigma_0, showProgress)
\ No newline at end of file
diff --git a/algorithms/context_adaptative_interpolator/test.py b/algorithms/context_adaptive_interpolator/test.py
similarity index 100%
rename from algorithms/context_adaptative_interpolator/test.py
rename to algorithms/context_adaptive_interpolator/test.py
diff --git a/algorithms/context_adaptative_interpolator/wiener_filter.py b/algorithms/context_adaptive_interpolator/wiener_filter.py
similarity index 79%
rename from algorithms/context_adaptative_interpolator/wiener_filter.py
rename to algorithms/context_adaptive_interpolator/wiener_filter.py
index 33d3577..3e0343e 100644
--- a/algorithms/context_adaptative_interpolator/wiener_filter.py
+++ b/algorithms/context_adaptive_interpolator/wiener_filter.py
@@ -1,7 +1,7 @@
 from PIL import Image
 from tqdm import tqdm
 
-def wienerFilter(r, rImage, Q, sigma_0):
+def wienerFilter(r, rImage, Q, sigma_0, showProgress):
     h_wImage = Image.new('L', (rImage.size[0], rImage.size[1]))
     h_wImagePixels = h_wImage.load()
 
@@ -29,10 +29,13 @@ def wienerFilter(r, rImage, Q, sigma_0):
         B_q = [(x, z) for x in getPixelIndexesAround(i, numberOfPixelsInEachDirection) for z in getPixelIndexesAround(j, numberOfPixelsInEachDirection)]
         return max(0, (1 / q ** 2) * sum([h[getPixelWithinImage(x, hImage.size[0]), getPixelWithinImage(z, hImage.size[1])] ** 2 - sigma_0 ** 2 for (x, z) in B_q]))
 
-    print('wiener filter start for loops')
-    for i in tqdm(range(rImage.size[0])):
+    if showProgress:
+        print('wiener filter start for loops')
+    rImageSize0Range = range(rImage.size[0])
+    for i in tqdm(rImageSize0Range) if showProgress else rImageSize0Range:
         for j in range(rImage.size[1]):
             h_wImagePixels[i, j] = round(h_w(rImage, r, i, j))
-    print('wiener filter end for loops')
+    if showProgress:
+        print('wiener filter end for loops')
 
     return h_wImage.rotate(-90)
\ No newline at end of file
diff --git a/datasets/fake/generate_dataset.py b/datasets/fake/generate_dataset.py
index d06b093..a6d419f 100644
--- a/datasets/fake/generate_dataset.py
+++ b/datasets/fake/generate_dataset.py
@@ -7,11 +7,14 @@ sys.path.insert(0, '../../algorithms/distance/')
 
 from rmsdiff import rmsdiff
 
+sys.path.insert(0, '../../algorithms/context_adaptive_interpolator/')
+
+from context_adaptive_interpolator import contextAdaptiveInterpolator
+
 IMAGE_SIZE = 64
 
 def randomImage(scale):
-    # Is `np.clip` necessary? See `toPilImage`.
-    return np.random.normal(loc = 0, scale = scale, size = (IMAGE_SIZE, IMAGE_SIZE))
+    return np.maximum(np.random.normal(loc = 0, scale = scale, size = (IMAGE_SIZE, IMAGE_SIZE)), 0)
 
 prnu = randomImage(scale = 1)
 
@@ -19,12 +22,16 @@ images = [randomImage(scale = 10) + prnu for _ in range(10)]
 allImages = [prnu] + images
 
 def toPilImage(npArray):
-    nonNegativeArray = npArray - np.min(allImages)
-    nonNegativeArray = np.round(255 * nonNegativeArray / np.max(allImages))
-    return Image.fromarray(np.uint8(nonNegativeArray))
+    npArray = np.round(255 * npArray / np.max(allImages))
+    return Image.fromarray(np.uint8(npArray))
 
-toPilImage(prnu).show()
+prnu = toPilImage(prnu)
+#prnu.show()
+
+images = [toPilImage(image) for image in images]
+#images[0].show()
 
 for image in images:
-    print(rmsdiff(image, prnu))
-    print(rmsdiff(contextAdaptiveInterpolator(image), prnu))
\ No newline at end of file
+    initialRmsDiff = rmsdiff(image, prnu)
+    caiRmsDiff = rmsdiff(contextAdaptiveInterpolator(image.load(), image), prnu)
+    print(f'{initialRmsDiff=} {caiRmsDiff=}')
\ No newline at end of file
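For reference, a minimal usage sketch of the renamed module with the new showProgress flag. The input path 'sample.png' and the variable names are placeholders; the call signature and the (image.load(), image) argument pair follow the diff above.

    from PIL import Image

    from context_adaptive_interpolator import contextAdaptiveInterpolator

    # Any small greyscale image works here; 'sample.png' is a placeholder path.
    image = Image.open('sample.png').convert('L')

    # The interpolator takes the pixel-access object plus the PIL image itself.
    # With showProgress = True it prints the stage markers and wraps the Wiener
    # filter's outer loop in a tqdm progress bar; by default it stays silent.
    caiImage = contextAdaptiveInterpolator(image.load(), image, showProgress = True)
    caiImage.show()

generate_dataset.py leaves the flag at its default, so the fake-dataset run produces no per-image progress output.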