[
  {
    "path": "BM3D/BM3D-SAPCA/README-BM3D-SAPCA.txt",
    "content": "--------------------------------------------------------------------\n\n BM3D-SAPCA : BM3D with Shape-Adaptive Principal Component Analysis\n                            v1.00, 2009\n\n--------------------------------------------------------------------\n\nCopyright (c) 2009-2011 Tampere University of Technology. \nAll rights reserved.\nThis work should be used for nonprofit purposes only.\n\nAuthor:                      Alessandro Foi\n\nBM3D web page:               http://www.cs.tut.fi/~foi/GCF-BM3D\n\n\n\nBM3D-SAPCA is an algorithm for attenuation of additive white\nGaussian noise (AWGN) from grayscale images.\n\n\nThis software package reproduces the results from the article:\n\n K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian,\n \"BM3D Image Denoising with Shape-Adaptive Principal\n Component Analysis\", Proc. Workshop on Signal Processing\n with Adaptive Sparse Structured Representations (SPARS'09),\n Saint-Malo, France, April 2009.\n \n ( PDF available at  http://www.cs.tut.fi/~foi/GCF-BM3D )\n\n \n--------------------------------------------------------------------\n \nThis demo package includes routines from both the\nLASIP 2D demobox (http://www.cs.tut.fi/~lasip/2D/)  and the\nPointwise SA-DCT demobox (http://www.cs.tut.fi/~foi/SA-DCT/).\n\n--------------------------------------------------------------------\n\n\n\n--------------------------------------------------------------------\n Disclaimer\n--------------------------------------------------------------------\n\nAny unauthorized use of these routines for industrial or profit-\noriented activities is expressively prohibited. By downloading \nand/or using any of these files, you implicitly agree to all the \nterms of the TUT limited license, as specified in the document\nLegal_Notice.txt (included in this package) and online at\nhttp://www.cs.tut.fi/~foi/GCF-BM3D/legal_notice.html\n\n"
  },
  {
    "path": "BM3D/BM3D-SAPCA/demo_BM3DSAPCA.m",
    "content": "% BM3D-SAPCA : BM3D with Shape-Adaptive Principal Component Analysis  (v1.00, 2009)\n% (demo script)\n%\n% BM3D-SAPCA is an algorithm for attenuation of additive white Gaussian noise (AWGN)\n% from grayscale images. This algorithm reproduces the results from the article:\n%  K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"BM3D Image Denoising with\n%  Shape-Adaptive Principal Component Analysis\", Proc. Workshop on Signal Processing\n%  with Adaptive Sparse Structured Representations (SPARS'09), Saint-Malo, France,\n%  April 2009.     (PDF available at  http://www.cs.tut.fi/~foi/GCF-BM3D )\n%\n%\n% SYNTAX:\n%\n%     y_est = BM3DSAPCA2009(z, sigma)\n%\n% where  z  is an image corrupted by AWGN with noise standard deviation  sigma\n% and  y_est  is an estimate of the noise-free image.\n% Signals are assumed on the intensity range [0,1].\n%\n%\n% USAGE EXAMPLE:\n%\n%     y = im2double(imread('Cameraman256.png'));\n%     sigma=25/255;\n%     z=y+sigma*randn(size(y));\n%     y_est = BM3DSAPCA2009(z,sigma);\n%\n%\n%\n% Copyright (c) 2009-2011 Tampere University of Technology.   All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n% author:  Alessandro Foi,   email:  firstname.lastname@tut.fi\n%\n%%\n\nclear all\n\ny = im2double(imread('Cameraman256.png'));\n% y = im2double(imread('Lena512.png'));\nrandn('seed',0);\n\nsigma=25/255;\nz=y+sigma*randn(size(y));\n\ny_est = BM3DSAPCA2009(z,sigma);\n\nPSNR = 10*log10(1/mean((y(:)-y_est(:)).^2));\ndisp(['PSNR = ',num2str(PSNR)])\nif exist('ssim_index')\n    [mssim ssim_map] = ssim_index(y*255, y_est*255);\n    disp(['SSIM = ',num2str(mssim)])\nend\n\n"
  },
  {
    "path": "BM3D/BM3D-SAPCA/function_CreateLPAKernels.m",
    "content": "% Creates LPA kernels cell array    (function_CreateLPAKernels)\n%\n% Alessandro Foi - Tampere University of Technology - 2003-2005\n% ---------------------------------------------------------------\n%\n%  Builds kernels cell arrays kernels{direction,size}\n%                  and        kernels_higher_order{direction,size,1:2}\n%               kernels_higher_order{direction,size,1}  is the 3D matrix\n%                   of all kernels for that particular direction/size\n%               kernels_higher_order{direction,size,2}  is the 2D matrix\n%                   containing the orders indices for the kernels\n%                   contained in kernels_higher_order{direction,size,1}\n%\n%   ---------------------------------------------------------------------\n% \n%   kernels_higher_order{direction,size,1}(:,:,1) is the funcion estimate kernel\n%   kernels_higher_order{direction,size,1}(:,:,2) is a first derivative estimate kernel\n%\n%   kernels_higher_order{direction,size,1}(:,:,n) is a higher order derivative estimate kernel\n%   whose orders with respect to x and y are specified in\n%   kernels_higher_order{direction,size,2}(n,:)=\n%                           =[xorder yorder xorder+yorder]\n%   \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfunction [kernels, kernels_higher_order]=function_createLPAkernels(m,h1,h2,TYPE,window_type,directional_resolution,sig_winds,beta)\n\n%--------------------------------------------------------------------------\n% LPA ORDER AND KERNELS SIZES\n%--------------------------------------------------------------------------\n%    m=[2,0];        % THE VECTOR ORDER OF LPA;\n\n%    h1=[1 2 3 4 5];    %   sizes of the kernel\n%    h2=[1 2 3 4 5];    %   row vectors h1 and h2 need to have the same lenght\n\n\n%--------------------------------------------------------------------------\n% WINDOWS 
PARAMETERS\n%--------------------------------------------------------------------------\n%    sig_winds=[h1*1 ; h1*1];    % Gaussian parameter\n%    beta=1;                     % Parameter of window 6\n\n%    window_type=1 ;  % window_type=1 for uniform, window_type=2 for Gaussian\n% window_type=6 for exponentions with beta\n% window_type=8 for Interpolation\n\n%    TYPE=00;        % TYPE IS A SYMMETRY OF THE WINDOW\n                     % 00 SYMMETRIC\n                     % 10 NONSYMMETRIC ON X1 and SYMMETRIC ON X2\n                     % 11 NONSYMMETRIC ON X1,X2  (Quadrants)\n                     % \n                     % for rotated directional kernels the method that is used for rotation can be specified by adding \n                     % a binary digit in front of these types, as follows:\n                     % \n                     % 10 \n                     % 11  ARE \"STANDARD\" USING NN (Nearest Neighb.) (you can think of these numbers with a 0 in front)\n                     % 00\n                     % \n                     % 110\n                     % 111  ARE EXACT SAMPLING OF THE EXACT ROTATED KERNEL\n                     % 100\n                     % \n                     % 210\n                     % 211  ARE WITH BILINEAR INTERP\n                     % 200\n                     % \n                     % 310\n                     % 311  ARE WITH BICUBIC INTERP (not reccomended)\n                     % 300\n\n%--------------------------------------------------------------------------\n% DIRECTIONAL PARAMETERS\n%--------------------------------------------------------------------------\n%    directional_resolution=4;       % number of 
directions\n\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%% From this point onwards this file and the create_LPA_kernels.m should be identical %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\n\n\n\n\n\n\nlenh=max(length(h1),length(h2));\nclear kernels\nclear kernels_higher_order\nkernels=cell(directional_resolution,lenh);\nkernels_higher_order=cell(directional_resolution,lenh,2);\nTHETASTEP=2*pi/directional_resolution;\nTHETA=[0:THETASTEP:2*pi-THETASTEP];\n\n\ns1=0;\nfor theta=THETA,\n    s1=s1+1;\n    for s=1:lenh,\n        \n        \n        [gh,gh1,gh1degrees]=function_LPAKernelMatrixTheta(ceil(h2(s)),ceil(h1(s)),window_type,[sig_winds(1,s) sig_winds(2,s)],TYPE,theta, m);\n        kernels{s1,s}=gh;                          % degree=0 kernel\n        kernels_higher_order{s1,s,1}=gh1;          % degree>=0 kernels\n        kernels_higher_order{s1,s,2}=gh1degrees;   % polynomial indexes matrix\n       \nend   % different lengths loop\nend   % different directions loop\n\n"
  },
  {
    "path": "BM3D/BM3D-SAPCA/function_LPAKernelMatrixTheta.m",
    "content": "% Return the discrete kernels for LPA estimation and their degrees matrix\n%\n% function [G, G1, index_polynomials]=function_LPAKernelMatrixTheta(h2,h1,window_type,sig_wind,TYPE,theta, m)\n%\n%\n% Outputs:\n%\n% G  kernel for function estimation\n% G1 kernels for function and derivative estimation\n%       G1(:,:,j), j=1 for function estimation, j=2 for d/dx, j=3 for d/dy,\n%                contains 0 and all higher order kernels (sorted by degree:\n%                 1 x y y^2 x^3 x^2y xy^2 y^3 etc...)\n% index_polynomials  matrix of degrees first column x powers, second\n%                    column y powers, third column total degree\n%\n%\n% Inputs:\n%\n% h2, h1  size of the kernel (size of the \"asymmetrical portion\")\n% m=[m(1) m(2)] the vector order of the LPA    any order combination should work \n% \"theta\" is an angle of the directrd window\n% \"TYPE\" is a type of the window support\n% \"sig_wind\" - vector - sigma parameters of the Gaussian wind\n% \"beta\"- parameter of the power in some weights for the window function\n%           (these last 3 parameters are fed into function_Window2D function)\n%\n%\n% Alessandro Foi, 6 march 2004\n\nfunction [G, G1, index_polynomials]=function_LPAKernelMatrixTheta(h2,h1,window_type,sig_wind,TYPE,theta, m)\nglobal beta\n\n%G1=0;\nm(1)=min(h1,m(1));\nm(2)=min(h2,m(2));\n\n% builds ordered matrix of the monomes powers\nnumber_of_polynomials=(min(m)+1)*(max(m)-min(m)+1)+(min(m)+1)*min(m)/2;   %   =size(index_polynomials,1) \nindex_polynomials=zeros(number_of_polynomials,2);\nindex3=1;\nfor index1=1:min(m)+1\n    for index2=1:max(m)+2-index1\n        index_polynomials(index3,:)=[index1-1,index2-1];\n        index3=index3+1;\n    end\nend\nif m(1)>m(2)\n    index_polynomials=fliplr(index_polynomials);\nend\nindex_polynomials(:,3)=index_polynomials(:,1)+index_polynomials(:,2);    %calculates degrees of polynomials\nindex_polynomials=sortrows(sortrows(index_polynomials,2),3);             %sorts polynomials 
by degree (x first)\n\n%=====================================================================================================================================\n\nhalfH=max(h1,h2);\nH=-halfH+1:halfH-1;\n\n\n% creates window function and then rotates it\n%   win_fun=zeros(halfH-1,halfH-1);\nfor x1=H\n    for x2=H\n        if TYPE==00|TYPE==200|TYPE==300 % SYMMETRIC WINDOW\n            win_fun1(x2+halfH,x1+halfH)=function_Window2D(x1/h1/(1-1000*eps),x2/h2/(1-1000*eps),window_type,sig_wind,beta,h2/h1); % weight        \n        end\n        if TYPE==11|TYPE==211|TYPE==311 % NONSYMMETRIC ON X1,X2 WINDOW\n            win_fun1(x2+halfH,x1+halfH)=(x1>=-0.05)*(x2>=-0.05)*function_Window2D(x1/h1/(1-1000*eps),x2/h2/(1-1000*eps),window_type,sig_wind,beta,h2/h1); % weight\n        end\n        if TYPE==10|TYPE==210|TYPE==310 % NONSYMMETRIC ON X1 WINDOW\n            win_fun1(x2+halfH,x1+halfH)=(x1>=-0.05)*function_Window2D(x1/h1/(1-1000*eps),x2/h2/(1-1000*eps),window_type,sig_wind,beta,h2/h1); % weight\n        end\n        \n        if TYPE==100|TYPE==110|TYPE==111 % exact sampling\n            xt1=x1*cos(-theta)+x2*sin(-theta);\n            xt2=x2*cos(-theta)-x1*sin(-theta);\n            if TYPE==100 % SYMMETRIC WINDOW\n                win_fun1(x2+halfH,x1+halfH)=function_Window2D(xt1/h1/(1-1000*eps),xt2/h2/(1-1000*eps),window_type,sig_wind,beta,h2/h1); % weight        \n            end\n            if TYPE==111 % NONSYMMETRIC ON X1,X2 WINDOW\n                \n                win_fun1(x2+halfH,x1+halfH)=(xt1>=-0.05)*(xt2>=-0.05)*function_Window2D(xt1/h1/(1-1000*eps),xt2/h2/(1-1000*eps),window_type,sig_wind,beta,h2/h1); % weight\n            end\n            if TYPE==110 % NONSYMMETRIC ON X1 WINDOW\n                \n                win_fun1(x2+halfH,x1+halfH)=(xt1>=-0.05)*function_Window2D(xt1/h1/(1-1000*eps),xt2/h2/(1-1000*eps),window_type,sig_wind,beta,h2/h1); % weight\n            end\n        end\n        \n    end\nend\nwin_fun=win_fun1;\nif (theta~=0)&(TYPE<100)\n    
win_fun=imrotate(win_fun1,theta*180/pi,'nearest');     % use 'nearest' or 'bilinear' for different interpolation schemes ('bicubic'...?)\nend\nif (theta~=0)&(TYPE>=200)&(TYPE<300)\n    win_fun=imrotate(win_fun1,theta*180/pi,'bilinear');     % use 'nearest' or 'bilinear' for different interpolation schemes ('bicubic'...?)\nend\nif (theta~=0)&(TYPE>=300)\n    win_fun=imrotate(win_fun1,theta*180/pi,'bicubic');     % use 'nearest' or 'bilinear' for different interpolation schemes ('bicubic'...?)\nend\n\n\n\n% make the weight support a square\nwin_fun2=zeros(max(size(win_fun)));\nwin_fun2((max(size(win_fun))-size(win_fun,1))/2+1:max(size(win_fun))-((max(size(win_fun))-size(win_fun,1))/2),(max(size(win_fun))-size(win_fun,2))/2+1:max(size(win_fun))-((max(size(win_fun))-size(win_fun,2))/2))=win_fun;\nwin_fun=win_fun2;\n\n\n%=====================================================================================================================================\n\n\n%%%%  rotated coordinates\nH=-(size(win_fun,1)-1)/2:(size(win_fun,1)-1)/2;\nhalfH=(size(win_fun,1)+1)/2;\nh_radious=halfH;\nHcos=H*cos(theta); Hsin=H*sin(theta);\n\n\n%%%% Calculation of FI matrix\nFI=zeros(number_of_polynomials);\ni1=0;\nfor s1=H\n    i1=i1+1;\n    i2=0;\n    for s2=H\n        i2=i2+1;\n        x1=Hcos(s1+h_radious)-Hsin(s2+h_radious);\n        x2=Hsin(s1+h_radious)+Hcos(s2+h_radious);\n        phi=sqrt(win_fun(s2+halfH,s1+halfH))*(prod(((ones(number_of_polynomials,1)*[x1 x2]).^index_polynomials(:,1:2)),2)./prod(gamma(index_polynomials(:,1:2)+1),2).*(-ones(number_of_polynomials,1)).^index_polynomials(:,3));\n        FI=FI+phi*phi';\n    end % end of s2\nend % end of s1\n\n%FI_inv=((FI+1*eps*eye(size(FI)))^(-1));  % invert FI matrix\nFI_inv=pinv(FI);   % invert FI matrix (using pseudoinverse)\nG1=zeros([size(H,2) size(H,2) number_of_polynomials]);\n\n%%%% Calculation of mask\ni1=0;\nfor s1=H\n    i1=i1+1;\n    i2=0;\n    for s2=H\n        i2=i2+1;\n        x1=Hcos(s1+h_radious)-Hsin(s2+h_radious);\n  
      x2=Hsin(s1+h_radious)+Hcos(s2+h_radious);\n        phi=FI_inv*win_fun(s2+halfH,s1+halfH)*(prod(((ones(number_of_polynomials,1)*[x1 x2]).^index_polynomials(:,1:2)),2)./prod(gamma(index_polynomials(:,1:2)+1),2).*(-ones(number_of_polynomials,1)).^index_polynomials(:,3));\n        G(i2,i1,1)=phi(1);              %   Function Est\n        G1(i2,i1,:)=phi(:)';  % Function est & Der est on X Y etc...\n    end % end of s1\nend % end of s2\n%keyboard"
  },
  {
    "path": "BM3D/BM3D-SAPCA/function_Window2D.m",
    "content": "% Returns a scalar/matrix weights (window function) for the LPA estimates\n% function w=function_Window2D(X,Y,window,sig_wind, beta);\n% X,Y scalar/matrix variables\n% window - type of the window weight\n% sig_wind - std scaling for the Gaussian ro-weight\n% beta -parameter of the degree in the weights\n%----------------------------------------------------------------------------------\n% V. Katkovnik & A. Foi - Tampere University of Technology -  2002-2005\n\n\nfunction w=function_Window2D(X,Y,window,sig_wind, beta,ratio);\n\nif nargin == 5\n    ratio=1;\nend\n\nIND=(abs(X)<=1)&(abs(Y)<=1);\nIND2=((X.^2+Y.^2)<=1);\nIND3=((X.^2+(Y*ratio).^2)<=1);\n   \n\nif window==1           % rectangular symmetric window\nw=IND; end\n\nif window==2   %Gaussian\n  \nX=X/sig_wind(1);\nY=Y/sig_wind(2);\nw = IND.*exp(-(X.^2 + Y.^2)/2); %*(abs(Y)<=0.1*abs(X));%.*IND2; %((X.^2+Y.^2)<=1); \nend\n\nif window==3  % Quadratic window\n    w=(1-(X.^2+Y.^2)).*((X.^2+Y.^2)<=1); end\n\nif window==4           % triangular symmetric window\n w=(1-abs(X)).*(1-abs(Y)).*((X.^2+Y.^2)<=1); end\n  \n    \nif window==5           % Epanechnikov symmetric window\n  w=(1-X.^2).*(1-Y.^2).*((X.^2+Y.^2)<=1); \nend\n\nif window==6   % Generalized Gaussian\n  \nX=X/sig_wind;\nY=Y/sig_wind;\nw = exp(-((X.^2 + Y.^2).^beta)/2).*((X.^2+Y.^2)<=1); end\n\n\nif window==7\n      \nX=X/sig_wind;\nY=Y/sig_wind;\nw = exp(-abs(X) - abs(Y)).*IND; end\n\nif window==8 % Interpolation\n    \nw=(1./(abs(X).^4+abs(Y).^4+0.0001)).*IND2; \nend\n\nif window==9 % Interpolation\n    \n    NORM=(abs(X)).^2+(abs(Y)).^2+0.0001;\nw=(1./NORM.*(1-sqrt(NORM)).^2).*(NORM<=1); \nend\n\nif window==10\n    w=((X.^2+Y.^2)<=1);\nend\n\n\nif window==11\n  \ntemp=asin(Y./sqrt(X.^2+Y.^2+eps));\ntemp=temp*0.6; % Width of Beam\ntemp=(temp>0)*min(temp,1)+(temp<=0)*max(temp,-1);\n\nw=max(0,IND.*cos(pi*temp));   \n    \n  \nend\n\n    \n\nif window==111\n  \ntemp=asin(Y./sqrt(X.^2+Y.^2+eps));\ntemp=temp*0.8; % Width of 
Beam\ntemp=(temp>0)*min(temp,1)+(temp<=0)*max(temp,-1);\n\nw=max(0,IND3.*(cos(pi*temp)>0));   \n% w=((X.^2+Y.^2)<=1);    \nend\n\nif window==112\n  \ntemp=atan(Y/(X+eps));\n%temp=temp*0.8; % Width of Beam\n%temp=(temp>0)*min(temp,1)+(temp<=0)*max(temp,-1);\nw=max(0,IND3.*((abs(temp))<=pi/4));   \n% w=((X.^2+Y.^2)<=1);    \n\nend\n\n    \n    \n    \n    \n        \n    \n    \n    \n\n\n "
  },
  {
    "path": "BM3D/BM3D.m",
    "content": "function [PSNR, SSIM, y_est] = BM3D(y, z, sigma, profile, print_to_screen)\n\nimage_name = [\n%     'montage.png'\n     'Cameraman256.png'\n%     'boat.png'\n%     'Lena512.png'\n%     'house.png'\n%     'barbara.png'\n%     'peppers256.png'\n%     'fingerprint.png'\n%     'couple.png'\n%     'hill.png'\n%     'man.png'\n    ];\n\nif (exist('profile') ~= 1)\n    profile         = 'np'; %% default profile\nend\n\nif (exist('sigma') ~= 1)\n    sigma               = 10; %% default standard deviation of the AWGN\nend\n\n%%%% Following are the parameters for the Normal Profile.\n\n%%%% Select transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name     = 'bior1.5'; %% transform used for the HT filt. of size N1 x N1\ntransform_2D_Wiener_name = 'dct';     %% transform used for the Wiener filt. of size N1_wiener x N1_wiener\ntransform_3rd_dim_name   = 'haar';    %% transform used in the 3-rd dim, the same for HT and Wiener filt.\n\n%%%% Hard-thresholding (HT) parameters:\nN1                  = 8;   %% N1 x N1 is the block size used for the hard-thresholding (HT) filtering\nNstep               = 3;   %% sliding step to process every next reference block\nN2                  = 16;  %% maximum number of similar blocks (maximum size of the 3rd dimension of a 3D array)\nNs                  = 39;  %% length of the side of the search neighborhood for full-search block-matching (BM), must be odd\ntau_match           = 3000;%% threshold for the block-distance (d-distance)\nlambda_thr2D        = 0;   %% threshold parameter for the coarse initial denoising used in the d-distance measure\nlambda_thr3D        = 2.7; %% threshold parameter for the hard-thresholding in 3D transform domain\nbeta                = 2.0; %% parameter of the 2D Kaiser window used in the reconstruction\n\n%%%% Wiener filtering parameters:\nN1_wiener           = 8;\nNstep_wiener        = 3;\nN2_wiener           = 32;\nNs_wiener           = 
39;\ntau_match_wiener    = 400;\nbeta_wiener         = 2.0;\n\n%%%% Block-matching parameters:\nstepFS              = 1;  %% step that forces to switch to full-search BM, \"1\" implies always full-search\nsmallLN             = 'not used in np'; %% if stepFS > 1, then this specifies the size of the small local search neighb.\nstepFSW             = 1;\nsmallLNW            = 'not used in np';\nthrToIncStep        = 8;  % if the number of non-zero coefficients after HT is less than thrToIncStep,\n                          % than the sliding step to the next reference block is incresed to (nm1-1)\n\nif strcmp(profile, 'lc') == 1\n\n    Nstep               = 6;\n    Ns                  = 25;\n    Nstep_wiener        = 5;\n    N2_wiener           = 16;\n    Ns_wiener           = 25;\n\n    thrToIncStep        = 3;\n    smallLN             = 3;\n    stepFS              = 6*Nstep;\n    smallLNW            = 2;\n    stepFSW             = 5*Nstep_wiener;\n\nend\n\nif (strcmp(profile, 'vn') == 1) || (sigma > 40)\n\n    N2                  = 32;\n    Nstep               = 4;\n \n    N1_wiener           = 11;\n    Nstep_wiener        = 6;\n\n    lambda_thr3D        = 2.8;\n    thrToIncStep        = 3;\n    tau_match_wiener    = 3500;\n    tau_match           = 25000;\n    \n    Ns_wiener           = 39;\n    \nend\n\n% The 'vn_old' profile corresponds to the original parameters for strong noise proposed in [1].\nif (strcmp(profile, 'vn_old') == 1) && (sigma > 40)\n\n    transform_2D_HT_name = 'dct'; \n    \n    N1                  = 12;\n    Nstep               = 4;\n \n    N1_wiener           = 11;\n    Nstep_wiener        = 6;\n\n    lambda_thr3D        = 2.8;\n    lambda_thr2D        = 2.0;\n    thrToIncStep        = 3;\n    tau_match_wiener    = 3500;\n    tau_match           = 5000;\n    \n    Ns_wiener           = 39;\n    \nend\n\ndecLevel = 0;        %% dec. levels of the dyadic wavelet 2D transform for blocks (0 means full decomposition, higher values decrease the dec. 
number)\nthr_mask = ones(N1); %% N1xN1 mask of threshold scaling coeff. --- by default there is no scaling, however the use of different thresholds for different wavelet decompoistion subbands can be done with this matrix\n\nif strcmp(profile, 'high') == 1 %% this profile is not documented in [1]\n    \n    decLevel     = 1; \n    Nstep        = 2;\n    Nstep_wiener = 2;\n    lambda_thr3D = 2.5;\n    vMask = ones(N1,1); vMask((end/4+1):end/2)= 1.01; vMask((end/2+1):end) = 1.07; %% this allows to have different threhsolds for the finest and next-to-the-finest subbands\n    thr_mask = vMask * vMask'; \n    beta         = 2.5;\n    beta_wiener  = 1.5;\n    \nend\n\n%%% Check whether to dump information to the screen or remain silent\ndump_output_information = 1;\nif (exist('print_to_screen') == 1) && (print_to_screen == 0)\n    dump_output_information = 0;\nend\n\n%%%% Create transform matrices, etc.\n%%%%\n[Tfor, Tinv]   = getTransfMatrix(N1, transform_2D_HT_name, decLevel);     %% get (normalized) forward and inverse transform matrices\n[TforW, TinvW] = getTransfMatrix(N1_wiener, transform_2D_Wiener_name, 0); %% get (normalized) forward and inverse transform matrices\n\nif (strcmp(transform_3rd_dim_name, 'haar') == 1) || (strcmp(transform_3rd_dim_name(end-2:end), '1.1') == 1)\n    %%% If Haar is used in the 3-rd dimension, then a fast internal transform is used, thus no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. 
The transforms are later applied by\n    %%% matrix-vector multiplication for the 1D case.\n    for hpow = 0:ceil(log2(max(N2,N2_wiener)))\n        h = 2^hpow;\n        [Tfor3rd, Tinv3rd]   = getTransfMatrix(h, transform_3rd_dim_name, 0);\n        hadper_trans_single_den{h}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{h} = single(Tinv3rd');\n    end\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% 2D Kaiser windows used in the aggregation of block-wise estimates\n%%%%\nif beta_wiener==2 && beta==2 && N1_wiener==8 && N1==8 % hardcode the window function so that the signal processing toolbox is not needed by default\n    Wwin2D = [ 0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924];\n    Wwin2D_wiener = Wwin2D;\nelse\n    Wwin2D           = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the aggregation of the HT part\n    Wwin2D_wiener    = kaiser(N1_wiener, beta_wiener) * kaiser(N1_wiener, beta_wiener)'; % Kaiser window used in the aggregation of the Wiener filt. 
part\nend\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% If needed, read images, generate noise, or scale the images to the \n%%%% [0,1] interval\n%%%%\nif (exist('y') ~= 1) || (exist('z') ~= 1)\n    y        = im2double(imread(image_name));  %% read a noise-free image and put in intensity range [0,1]\n    randn('seed', 0);                          %% generate seed\n    z        = y + (sigma/255)*randn(size(y)); %% create a noisy image\nelse  % external images\n    \n    image_name = 'External image';\n    \n    % convert z to double precision if needed\n    z = double(z);\n    \n    % convert y to double precision if needed\n    y = double(y);\n    \n    % if z's range is [0, 255], then convert to [0, 1]\n    if (max(z(:)) > 10) % a naive check for intensity range\n        z = z / 255;\n    end\n    \n    % if y's range is [0, 255], then convert to [0, 1]\n    if (max(y(:)) > 10) % a naive check for intensity range\n        y = y / 255;\n    end\nend\n\n\n\nif (size(z,3) ~= 1) || (size(y,3) ~= 1)\n    error('BM3D accepts only grayscale 2D images.');\nend\n\n\n% Check if the true image y is a valid one; if not, then we cannot compute PSNR, etc.\ny_is_invalid_image = (length(size(z)) ~= length(size(y))) | (size(z,1) ~= size(y,1)) | (size(z,2) ~= size(y,2));\nif (y_is_invalid_image)\n    dump_output_information = 0;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%% Print image information to the screen\n%%%%\nif dump_output_information == 1\n    fprintf('Image: %s (%dx%d), sigma: %.1f\\n', image_name, size(z,1), size(z,2), sigma);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 1. 
Produce the basic estimate by HT filtering\n%%%%\ntic;\ny_hat = bm3d_thr(z, hadper_trans_single_den, Nstep, N1, N2, lambda_thr2D,...\n\tlambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, (sigma/255), thrToIncStep, single(Tfor), single(Tinv)', inverse_hadper_trans_single_den, single(thr_mask), Wwin2D, smallLN, stepFS );\nestimate_elapsed_time = toc;\n\nif dump_output_information == 1\n    PSNR_INITIAL_ESTIMATE = 10*log10(1/mean((y(:)-double(y_hat(:))).^2));\n    fprintf('BASIC ESTIMATE, PSNR: %.2f dB\\n', PSNR_INITIAL_ESTIMATE);\nend\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 2. Produce the final estimate by Wiener filtering (using the \n%%%%  hard-thresholding initial estimate)\n%%%%\ntic;\ny_est = bm3d_wiener(z, y_hat, hadper_trans_single_den, Nstep_wiener, N1_wiener, N2_wiener, ...\n    'unused arg', tau_match_wiener*N1_wiener*N1_wiener/(255*255), (Ns_wiener-1)/2, (sigma/255), 'unused arg', single(TforW), single(TinvW)', inverse_hadper_trans_single_den, Wwin2D_wiener, smallLNW, stepFSW, single(ones(N1_wiener)) );\nwiener_elapsed_time = toc;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Calculate the final estimate's PSNR, print it, and show the\n%%%% denoised image next to the noisy one\n%%%%\ny_est = double(y_est);\n\nPSNR = 0; %% Remains 0 if the true image y is not available\nSSIM = 0;\nif (~y_is_invalid_image) % checks if y is a valid image\n    PSNR = 10*log10(1/mean((y(:)-y_est(:)).^2)); % y is valid\n    SSIM = ssim(y, y_est);\nend\n\nif dump_output_information == 1\n    fprintf('FINAL ESTIMATE (total time: %.1f sec), PSNR: %.2f dB\\n', ...\n        wiener_elapsed_time + estimate_elapsed_time, PSNR);\n\n    figure, imshow(z); title(sprintf('Noisy %s, PSNR: %.3f dB (sigma: %d)', ...\n        image_name(1:end-4), 10*log10(1/mean((y(:)-z(:)).^2)), sigma));\n\n    figure, imshow(y_est); title(sprintf('Denoised %s, PSNR: %.3f dB', ...\n        image_name(1:end-4), PSNR));\n    
\nend\n\nreturn;\n\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Some auxiliary functions \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\n\nfunction [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% reconstruction. The forward transform matrix is normalized so that the \n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is \n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. 
Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels') ~= 1\n    dec_levels = 0;\nend\n\nif N == 1\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1\n    Tforward    = hadamard(N);\nelseif (N == 8) && strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n       0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n      -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n       0.707106781186547  -0.707106781186547                   0                   0                   0                   0                   0                   0;\n                       0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n                       0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n                       0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];   \nelseif (N == 8) && strcmp(transform_type, 'dct')==1 % hardcoded transform 
so that the signal processing toolbox is not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n       0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n       0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n       0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n       0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n       0.191341716182545  -0.461939766255643   0.461939766255643  -0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n       0.097545161008064  -0.277785116509801   0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) && strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n       0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n       0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863         
          0   0.408248290463863   0.408248290463863;\n       0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n       0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n       0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n       0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n       0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif strcmp(transform_type, 'dct') == 1\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x); \n    if (Q(1) < 0)\n        Q = -Q; \n    end\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% Set periodic boundary conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');  \n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))'; \n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;\n\n"
  },
  {
    "path": "BM3D/BM3DDEB.m",
    "content": "function [ISNR, y_hat_RWI] = BM3DDEB(experiment_number, test_image_name)\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Copyright (c) 2008-2014 Tampere University of Technology. All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n% AUTHORS:\n%     Kostadin Dabov\n%     Alessandro Foi         email: alessandro.foi _at_ tut.fi\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n%  This function implements the image deblurring method proposed in:\n%\n%  [1] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Image\n%   restoration  by sparse 3D transform-domain collaborative filtering,\"\n%   Proc SPIE Electronic Imaging, January 2008.\n%\n%  FUNCTION INTERFACE:\n%\n%  [PSNR, y_hat_RWI] = BM3DDEB(experiment_number, test_image_name)\n%\n%  INPUT:\n%   1) experiment_number: 1 -> PSF 1, sigma^2 = 2\n%                         2 -> PSF 1, sigma^2 = 8\n%                         3 -> PSF 2, sigma^2 = 0.308\n%                         4 -> PSF 3, sigma^2 = 49\n%                         5 -> PSF 4, sigma^2 = 4\n%                         6 -> PSF 5, sigma^2 = 64\n%\n%   2) test_image_name:   a valid filename of a grayscale test image\n%\n%  OUTPUT:\n%   1) ISNR:              the output improvement in SNR, dB\n%   2) y_hat_RWI:         the restored image\n%\n%  ! The function can work without any of the input arguments,\n%   in which case, the internal default ones are used !\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%% Fixed regularization parameters (obtained empirically after a rough optimization)\nRegularization_alpha_RI = 4e-4;\nRegularization_alpha_RWI = 5e-3;\n\n%%%% Experiment number (see below for details, e.g. 
how the blur is generated, etc.)\nif (exist('experiment_number') ~= 1)\n    experiment_number = 3; % 1 -- 6\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Select a single image filename (might contain path)\n%%%%\nif (exist('test_image_name') ~= 1)\n    test_image_name = [\n        %        'Lena512.png'\n        'Cameraman256.png'\n        %        'barbara.png'\n        %        'house.png'\n        ];\nend\n\n%%%% Select 2D transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name      = 'dst'; %% 2D transform (of size N1 x N1) used in Step 1\ntransform_2D_Wiener_name  = 'dct'; %% 2D transform (of size N1_wiener x N1_wiener) used in Step 2\ntransform_3rd_dimage_name = 'haar'; %% 1D tranform used in the 3-rd dim, the same for both steps\n\n%%%% Step 1 (BM3D with collaborative hard-thresholding) parameters:\nN1                  = 8;   %% N1 x N1 is the block size\nNstep               = 3;   %% sliding step to process every next refernece block\nN2                  = 16;  %% maximum number of similar blocks (maximum size of the 3rd dimensiona of a 3D array)\nNs                  = 39;  %% length of the side of the search neighborhood for full-search block-matching (BM)\ntau_match           = 6000;%% threshold for the block distance (d-distance)\nlambda_thr2D        = 0;   %% threshold for the coarse initial denoising used in the d-distance measure\nlambda_thr3D        = 2.9; %% threshold for the hard-thresholding\nbeta                = 0; %% the beta parameter of the 2D Kaiser window used in the reconstruction\n\n%%%% Step 2 (BM3D with collaborative Wiener filtering) parameters:\nN1_wiener           = 8;\nNstep_wiener        = 2;\nN2_wiener           = 16;\nNs_wiener           = 39;\ntau_match_wiener    = 800;\nbeta_wiener         = 0;\n\n%%%%  Specify whether to print results and display images\nprint_to_screen     = 
1;\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Note: touch below this point only if you know what you are doing!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Make parameters compatible with the interface of the mex-functions\n%%%%\n\n[Tfor, Tinv]   = getTransfMatrix(N1, transform_2D_HT_name, 0); %% get (normalized) forward and inverse transform matrices\n[TforW, TinvW] = getTransfMatrix(N1_wiener, transform_2D_Wiener_name, 0); %% get (normalized) forward and inverse transform matrices\n\nif (strcmp(transform_3rd_dimage_name, 'haar') == 1),\n    %%% Fast internal transform is used, no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. 
The transforms are later applied by\n    %%% vector-matrix multiplications\n    for hpow = 0:ceil(log2(max(N2,N2_wiener))),\n        h = 2^hpow;\n        [Tfor3rd, Tinv3rd] = getTransfMatrix(h, transform_3rd_dimage_name, 0);\n        hadper_trans_single_den{h}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{h} = single(Tinv3rd');\n    end\nend\n\nif beta == 0 & beta_wiener == 0\n    Wwin2D = ones(N1,N1);\n    Wwin2D_wiener = ones(N1_wiener,N1_wiener);\nelse\n    Wwin2D        = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the hard-thresholding part\n    Wwin2D_wiener = kaiser(N1_wiener, beta_wiener) * kaiser(N1_wiener, beta_wiener)'; % Kaiser window used in the Wiener filtering part\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Read an image and generate a blurred and noisy image\n%%%%\ny = im2double(imread(test_image_name));\n\nif experiment_number==1\n    sigma=sqrt(2)/255;\n    for x1=-7:7; for x2=-7:7; v(x1+8,x2+8)=1/(x1^2+x2^2+1); end, end; v=v./sum(v(:));\nend\nif experiment_number==2\n    sigma=sqrt(8)/255;\n    s1=0; for a1=-7:7; s1=s1+1; s2=0; for a2=-7:7; s2=s2+1; v(s1,s2)=1/(a1^2+a2^2+1); end, end;  v=v./sum(v(:));\nend\nif experiment_number==3\n    BSNR=40; sigma=-1; % if \"sigma=-1\", then the value of sigma depends on the BSNR\n    v=ones(9); v=v./sum(v(:));\nend\nif experiment_number==4\n    sigma=7/255;\n    v=[1 4 6 4 1]'*[1 4 6 4 1]; v=v./sum(v(:));  % PSF\nend\nif experiment_number==5\n    sigma=2/255;\n    v=fspecial('gaussian', 25, 1.6);\nend\nif experiment_number==6\n    sigma=8/255;\n    v=fspecial('gaussian', 25, .4);\nend\n\n\n[Xv, Xh]  = size(y);\n[ghy,ghx] = size(v);\nbig_v  = zeros(Xv,Xh); big_v(1:ghy,1:ghx)=v; big_v=circshift(big_v,-round([(ghy-1)/2 (ghx-1)/2])); % pad PSF with zeros to whole image domain, and center it\nV      = fft2(big_v); % frequency response of the PSF\ny_blur = imfilter(y, v(end:-1:1,end:-1:1), 'circular'); % performs blurring (by 
circular convolution)\n\nrandn('seed',0);  %%% fix seed for the random number generator\nif sigma == -1;   %% check whether to use BSNR in order to define value of sigma\n    sigma=sqrt(norm(y_blur(:)-mean(y_blur(:)),2)^2 /(Xh*Xv*10^(BSNR/10))); % compute sigma from the desired BSNR\nend\n\n%%%% Create a blurred and noisy observation\nz = y_blur + sigma*randn(Xv,Xh);\n\ntic;\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 1: Final estimate by Regularized Inversion (RI) followed by\n%%%% BM3D with collaborative hard-thresholding\n%%%%\n\n%%%% Step 1.1. Regularized Inversion\nRI= conj(V)./( (abs(V).^2) + Regularization_alpha_RI * Xv*Xh*sigma^2); % Transfer Matrix for RI    %% Standard Tikhonov Regularization\nzRI=real(ifft2( fft2(z).* RI ));   % Regularized Inverse Estimate (RI OBSERVATION)\n\nstdRI = zeros(N1, N1);\nfor ii = 1:N1,\n    for jj = 1:N1,\n        UnitMatrix = zeros(N1,N1); UnitMatrix(ii,jj)=1;\n        BasisElementPadded = zeros(Xv, Xh); BasisElementPadded(1:N1,1:N1) = Tinv*UnitMatrix*Tinv';\n        TransfBasisElementPadded = fft2(BasisElementPadded);\n        stdRI(ii,jj) = sqrt( (1/(Xv*Xh)) * sum(sum(abs(TransfBasisElementPadded.*RI).^2)) )*sigma;\n    end,\nend\n\n%%%% Step 1.2. Colored noise suppression by BM3D with collaborative hard-\n%%%% thresholding\n\ny_hat_RI = bm3d_thr_colored_noise(zRI, hadper_trans_single_den, Nstep, N1, N2, lambda_thr2D,...\n    lambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, sigma, 0, single(Tfor), single(Tinv)',...\n    inverse_hadper_trans_single_den, single(stdRI'), Wwin2D, 0, 1 );\n\nPSNR_INITIAL_ESTIMATE = 10*log10(1/mean((y(:)-y_hat_RI(:)).^2));\nISNR_INITIAL_ESTIMATE = PSNR_INITIAL_ESTIMATE - 10*log10(1/mean((y(:)-z(:)).^2));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 2: Final estimate by Regularized Wiener Inversion (RWI) followed\n%%%% by BM3D with collaborative Wiener filtering\n%%%%\n\n%%%% Step 2.1. 
Regularized Wiener Inversion\nWiener_Pilot = abs(fft2(double(y_hat_RI)));   %%% Wiener reference estimate\nRWI  = conj(V).*Wiener_Pilot.^2./(Wiener_Pilot.^2.*(abs(V).^2) + Regularization_alpha_RWI*Xv*Xh*sigma^2);   % Transfer Matrix for RWI (uses standard regularization 'a-la-Tikhonov')\nzRWI = real(ifft2(fft2(z).*RWI));   % RWI OBSERVATION\n\nstdRWI = zeros(N1_wiener, N1_wiener);\nfor ii = 1:N1_wiener,\n    for jj = 1:N1_wiener,\n        UnitMatrix = zeros(N1_wiener,N1_wiener); UnitMatrix(ii,jj)=1;\n        BasisElementPadded = zeros(Xv, Xh); BasisElementPadded(1:N1_wiener,1:N1_wiener) = TinvW*UnitMatrix*TinvW';\n        TransfBasisElementPadded = fft2(BasisElementPadded);\n        stdRWI(ii,jj) = sqrt( (1/(Xv*Xh)) * sum(sum(abs(TransfBasisElementPadded.*RWI).^2)) )*sigma;\n    end,\nend\n\n%%%% Step 2.2. Colored noise suppression by BM3D with collaborative Wiener\n%%%% filtering\ny_hat_RWI = bm3d_wiener_colored_noise(zRWI, y_hat_RI, hadper_trans_single_den, Nstep_wiener, N1_wiener, N2_wiener, ...\n    0, tau_match_wiener*N1_wiener*N1_wiener/(255*255), (Ns_wiener-1)/2, 0, single(stdRWI'), single(TforW), single(TinvW)',...\n    inverse_hadper_trans_single_den, Wwin2D_wiener, 0, 1, single(ones(N1_wiener)) );\n\nelapsed_time = toc;\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Calculate the final estimate's PSNR and ISNR, print them, and show the\n%%%% restored image\n%%%%\nPSNR = 10*log10(1/mean((y(:)-y_hat_RWI(:)).^2));\nISNR = PSNR - 10*log10(1/mean((y(:)-z(:)).^2));\n\nif print_to_screen == 1\n    fprintf('Image: %s, Exp %d, Time: %.1f sec, PSNR-RI: %.2f dB, PSNR-RWI: %.2f, ISNR-RWI: %.2f dB\\n', ...\n        test_image_name, experiment_number, elapsed_time, PSNR_INITIAL_ESTIMATE, PSNR, ISNR);\n    figure,imshow(z);\n    
figure,imshow(double(y_hat_RWI));\nend\n\nreturn;\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Some auxiliary functions\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\nfunction [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% reconstruction. The forward transform matrix is normalized so that the\n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is\n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. 
Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels') ~= 1,\n    dec_levels = 0;\nend\n\nif N == 1,\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1,\n    Tforward    = hadamard(N);\nelseif (N == 8) & strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n        0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n        0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n        -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n        0.707106781186547  -0.707106781186547                   0                   0                   0                   0                   0                   0;\n        0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n        0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n        0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];\nelseif (N == 8) & strcmp(transform_type, 'dct')==1 % hardcoded transform so that the signal processing toolbox is 
not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n        0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n        0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n        0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n        0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n        0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n        0.191341716182545  -0.461939766255643   0.461939766255643  -0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n        0.097545161008064  -0.277785116509801   0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) & strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n        0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n        0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863                   0   0.408248290463863  
 0.408248290463863;\n        0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n        0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n        0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n        0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n        0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif strcmp(transform_type, 'dct') == 1,\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1,\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1,\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x);\n    if (Q(1) < 0),\n        Q = -Q;\n    end;\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% Set periodic boundary conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');\n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))';\n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;"
  },
  {
    "path": "BM3D/BM3DSHARP.m",
    "content": "function [y_hat] = BM3DSHARP(z, sigma, alpha_sharp, profile, print_to_screen)\n%\n%  Joint sharpening and denoising with BM3D. This is implementation of the \n%  BM3D-SH3D sharpening method that is developed in:\n%\n%  [1] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Joint image\n%   sharpening and denoising by 3D transform-domain collaborative filtering,\" \n%   Proc. 2007 Int. TICSP Workshop Spectral Meth. Multirate Signal Process.,\n%   SMMSP 2007, Moscow, Russia, September 2007.\n%\n%  FUNCTION INTERFACE:\n%\n%  [ysharp] = BM3DSHARP(z, sigma, alpha_sharp, profile, print_to_screen)\n%\n%  The function can work without any of the input arguments, hence they are\n%  optional!\n%\n%  INPUTS (OPTIONAL):\n%\n%        1) z (matrix, size MxN)       : Input image (noisy and with poor contrast)\n%        2) sigma (double)             : Noise (IF ANY noise) standard deviation (signal assumed\n%                                          in the range [0, 255])\n%        3) alpha_sharp (double)       : Sharpening parameter (default: 1.5):\n%                                         (1,inf) -> sharpen\n%                                          1      -> no sharpening\n%                                         (0,1)   -> de-sharpen\n%        4) profile (char vector)      : 'lc' --> fast \n%                                        'np' --> normal (default)\n%        5) print_to_screen (boolean)  : 0 --> do not print output\n%                                          information (and do not plot figures)\n%                                        1 --> print figures (default)\n%\n%   OUTPUTS:\n%        1) ysharp (matrix, size MxN)  : Sharpened image (in the range [0,1])\n%\n%  BASIC USAGE EXAMPLES:     \n%     \n%     sigma = 10;\n%     z = im2double(imread('cameraman.tif'));\n%     z = z + (sigma/255)*randn(size(z));\n%     alpha_sharp = 1.3;\n%     [ysharp] = BM3DSHARP(z, sigma, 
alpha_sharp);\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Copyright  2007 Tampere University of Technology. All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n% AUTHORS:\n%     Kostadin Dabov (2007), email: kostadin.dabov _at_ tut.fi\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% In case, an input image z is not provided, then use the filename \n%%%%  below to read an original image (might contain path also). Later, \n%%%%  artificial AWGN noise is added and this noisy image is processed \n%%%%  by the BM3D-SH3D.\n%%%%\nif (exist('image_name') ~= 1)\n    image_name = [\n    % \n    %%%% Grayscale images\n    %     'barco.png'\n    %     'pentagon.tif'\n         'Cameraman256.png'   \n    %     'boat.png'\n    %     'Lena512.png'\n    %     'house.png'\n    %     'barbara.png'\n];\nend\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Quality/complexity trade-off profile selection\n%%%%\n%%%%  'np' --> Normal Profile (balanced quality)\n%%%%  'lc' --> Low Complexity Profile (fast, lower quality)\n%%%%\n%%%%  'high' --> High Profile (high quality, not documented in [1])\n%%%%\nif (exist('profile') ~= 1)\n    profile         = 'np'; %% default profile\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Specify the std. dev. of the corrupting noise\n%%%%\nif (exist('sigma') ~= 1),\n    if (exist('z') ~= 1)\n        sigma = 20; %% default standard deviation of the AWGN\n    else\n        fprintf('Please specify value for the s.t.d. 
\"sigma\"\\n');\n        y_hat = 0;\n        return;\n    end\nend\n    \nif (exist('alpha_sharp') ~= 1)\n    alpha_sharp = 3/2;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Following are the parameters for the Normal Profile.\n%%%%\n\n%%%% Select transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name     = 'bior1.5'; %% transform used for the HT filt. of size N1 x N1\ntransform_3rd_dim_name   = 'haar';    %% transform used in the 3-rd dim, the same for HT and Wiener filt.\n\n%%%% Hard-thresholding (HT) parameters:\nN1                  = 8;   %% N1 x N1 is the block size used for the hard-thresholding (HT) filtering\nNstep               = 3;   %% sliding step to process every next reference block\nN2                  = 16;  %% maximum number of similar blocks (maximum size of the 3rd dimension of a 3D array)\nNs                  = 39;  %% length of the side of the search neighborhood for full-search block-matching (BM), must be odd\ntau_match           = 3000;%% threshold for the block-distance (d-distance)\nlambda_thr2D        = 0;   %% threshold parameter for the coarse initial denoising used in the d-distance measure\nlambda_thr3D        = 2.7; %% threshold parameter for the hard-thresholding in 3D transform domain\nbeta                = 2.0; %% parameter of the 2D Kaiser window used in the reconstruction\n\n%%%% Block-matching parameters:\nstepFS              = 1;  %% step that forces to switch to full-search BM, \"1\" implies always full-search\nsmallLN             = 'not used in np'; %% if stepFS > 1, then this specifies the size of the small local search neighb.\nthrToIncStep        = 8;  %% used in the HT filtering to increase the sliding step in uniform regions\n\nif strcmp(profile, 'lc') == 1,\n\n    Nstep               = 6;\n    Ns                  = 25;\n\n    thrToIncStep        = 3;\n    smallLN             = 3;\n    stepFS              = 
6*Nstep;\n\nend\n\nif (strcmp(profile, 'vn') == 1) | (sigma > 40),\n\n    transform_2D_HT_name = 'dct'; \n    \n    N1                  = 12;\n    Nstep               = 4;\n \n    lambda_thr3D        = 2.8;\n    lambda_thr2D        = 2.0;\n    thrToIncStep        = 3;\n    tau_match           = 5000;\n    \nend\n\ndecLevel = 0;        %% dec. levels of the dyadic wavelet 2D transform for blocks (0 means full decomposition, higher values decrease the dec. number)\nthr_mask = ones(N1); %% N1xN1 mask of threshold scaling coeff. --- by default there is no scaling, however the use of different thresholds for different wavelet decompoistion subbands can be done with this matrix\n\nif strcmp(profile, 'high') == 1, %% this profile is not documented in [1]\n    \n    decLevel     = 1; \n    Nstep        = 2;\n    lambda_thr3D = 2.5;\n    vMask = ones(N1,1); vMask((end/4+1):end/2)= 1.01; vMask((end/2+1):end) = 1.07; %% this allows to have different threhsolds for the finest and next-to-the-finest subbands\n    thr_mask = vMask * vMask'; \n    beta         = 2.5;\n    beta_wiener  = 1.5;\n    \nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Note: touch below this point only if you know what you are doing!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%% Check whether to dump information to the screen or remain silent\ndump_output_information = 1;\nif (exist('print_to_screen') == 1) & (print_to_screen == 0),\n    dump_output_information = 0;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Create transform matrices, etc.\n%%%%\n[Tfor, Tinv]   = getTransfMatrix(N1, transform_2D_HT_name, decLevel);     %% get (normalized) forward and inverse transform matrices\n\nif (strcmp(transform_3rd_dim_name, 'haar') == 1) | (strcmp(transform_3rd_dim_name(end-2:end), '1.1') == 1),\n    %%% If Haar is 
used in the 3-rd dimension, then a fast internal transform is used, thus no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. The transforms are later applied by\n    %%% matrix-vector multiplication for the 1D case.\n    for hpow = 0:ceil(log2(max(N2,N2_wiener))),\n        h = 2^hpow;\n        [Tfor3rd, Tinv3rd]   = getTransfMatrix(h, transform_3rd_dim_name, 0);\n        hadper_trans_single_den{h}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{h} = single(Tinv3rd');\n    end\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% 2D Kaiser windows used in the aggregation of block-wise estimates\n%%%%\nif beta==2 & N1==8 % hardcode the window function so that the signal processing toolbox is not needed by default\n    Wwin2D = [ 0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924];\nelse\n    Wwin2D = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the aggregation of the HT part\nend\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% If needed, read images, generate noise, or scale the images to the \n%%%% [0,1] interval\n%%%%\nif (exist('z') ~= 1)\n    y        = im2double(imread(image_name));  %% read a noise-free image 
and put in intensity range [0,1]\n    randn('seed', 0);                          %% generate seed\n    z        = y + (sigma/255)*randn(size(y)); %% create a noisy image\nelse  % external images\n    \n    image_name = 'External image';\n    \n    % convert z to double precision if needed\n    z = double(z);\n    \n    % if z's range is [0, 255], then convert to [0, 1]\n    if (max(z(:)) > 10), % a naive check for intensity range\n        z = z / 255;\n    end\n    \nend\n\nif (size(z,3) ~= 1),\n    error('BM3D-SH3D accepts only grayscale 2D images.');\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%% Print image information to the screen\n%%%%\nif dump_output_information == 1,\n    fprintf('Image: %s (%dx%d), sigma: %.1f\\n', image_name, size(z,1), size(z,2), sigma);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Apply the filtering MEX-subroutine\n%%%%\ntic;\ny_hat = bm3d_thr_sharpen_var(z, hadper_trans_single_den, Nstep, N1, N2, lambda_thr2D,...\n         lambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, (sigma/255), thrToIncStep, single(Tfor), single(Tinv)', inverse_hadper_trans_single_den, single(thr_mask), Wwin2D, smallLN, stepFS, 1/alpha_sharp );\nestimate_elapsed_time = toc;\n\nif dump_output_information == 1,\n    fprintf('SHARPENING COMPLETED (total time: %.1f sec)\\n', ...\n        estimate_elapsed_time);\n    imshow(z); figure, imshow(double(y_hat));\nend\n\nreturn;\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Some auxiliary functions \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\nfunction [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% 
reconstruction. The forward transform matrix is normalized so that the \n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is \n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels') ~= 1,\n    dec_levels = 0;\nend\n\nif N == 1,\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1,\n    Tforward    = hadamard(N);\nelseif (N == 8) & strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n       0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n      -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n       0.707106781186547  
-0.707106781186547                   0                   0                   0                   0                   0                   0;\n                       0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n                       0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n                       0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];   \nelseif (N == 8) & strcmp(transform_type, 'dct')==1 % hardcoded transform so that the signal processing toolbox is not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n       0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n       0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n       0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n       0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n       0.191341716182545  -0.461939766255643   0.461939766255643  -0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n       0.097545161008064  
-0.277785116509801   0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) & strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n       0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n       0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863                   0   0.408248290463863   0.408248290463863;\n       0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n       0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n       0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n       0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n       0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif strcmp(transform_type, 'dct') == 1,\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1,\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1,\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x); \n    if (Q(1) < 0), \n        Q = -Q; \n    end;\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% 
Set periodic boundary conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');  \n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))'; \n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;\n\n"
  },
  {
    "path": "BM3D/BM3D_CFA.m",
    "content": "function [varargout] = BM3D_CFA(z, sigma)\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n%  BM3D_CFA is the modification of the BM3D algorithm for attenuation of additive white Gaussian noise from \n%  Bayer CFA images. This algorithm reproduces the results from the article:\n%\n%  [1] A. Danielyan, M. Vehvilinen, A. Foi, V. Katkovnik, and K. Egiazarian,\n%       Cross-color BM3D filtering of noisy raw data, \n%       Proc. Int. Workshop on Local and Non-Local Approx. in Image Process.,\n%       LNLA 2009, Tuusula, Finland, pp. 125-129, August 2009.\n%\n%  FUNCTION INTERFACE:\n%\n%  [y_wiener, y_ht] = BM3D(z, sigma)\n%\n%  ! The function can work without any of the input arguments, \n%   in which case, the internal default ones are used !\n\n%  INPUT ARGUMENTS (OPTIONAL):\n%\n%     2) z (matrix M x N): Noisy image (intensities in range [0,1] or [0,255])\n%     3) sigma (double)  : Std. dev. of the noise (corresponding to intensities\n%                          in range [0,255] even if the range of z is [0,1])\n%  OUTPUTS:                                             \n%     1) y_wiener (matrix M x N): Final(wiener) estimate (in the range [0,1])\n%     2) y_ht (matrix M x N): Basic (hard-thresholding) estimate (in the range [0,1])\n%\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Copyright (c) 2009-2014 Tampere University of Technology.\n% All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n% AUTHORS:\n%     Aram Danielyan, email: aram dot danielyan _at_ .tut.fi\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% In case, a noisy image z is not provided, then use the filename \n%%%%  below to read an original image (might contain path also). 
Later, \n%%%%  artificial AWGN noise is added and this noisy image is processed \n%%%%  by the BM3D.\n%%%%\nimage_name = [\n    'kodim07.png'\n%     'kodim08.png'\n%     'kodim19.png'\n%     'kodim23.png'\n    ];\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Quality/complexity trade-off profile selection\n%%%%\n%%%%  'np' --> Normal Profile (balanced quality)\n\nif ~exist('profile','var')\n    profile         = 'np'; %% default profile\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Specify the std. dev. of the corrupting noise\n%%%%\nif ~exist('sigma','var')\n    sigma               = 25; %% default standard deviation of the AWGN\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Following are the parameters for the Normal Profile.\n%%%%\n\n%%%% Select transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name     = 'dct'; %% transform used for the HT filt. 
of size N1 x N1\ntransform_2D_Wiener_name = 'dct';\ntransform_3rd_dim_name   = 'haar';    %% transform used in the 3-rd dim, the same for HT and Wiener filt.\n\n%%%% Hard-thresholding (HT) parameters:\nN1                  = 5;   %% N1 x N1 is the block size used for the hard-thresholding (HT) filtering\nNstep               = 3;   %% sliding step to process every next reference block\nN2                  = 16;  %% maximum number of similar blocks (maximum size of the 3rd dimension of a 3D array)\nNs                  = 39;  %% length of the side of the search neighborhood for full-search block-matching (BM), must be odd\nlambda_thr2D        = 0;\ntau_match           = 3000;%% threshold for the block-distance (d-distance)\nlambda_thr3D        = 2.7; %% threshold parameter for the hard-thresholding in 3D transform domain\nbeta                = 2.0; %% parameter of the 2D Kaiser window used in the reconstruction\n\n%%%% Step 2: Wiener filtering parameters:\nN1_wiener           = 6;\nNstep_wiener        = 3;\nN2_wiener           = 32;\nNs_wiener           = 39;\ntau_match_wiener    = 400;\nbeta_wiener         = 2.0;\n\n\n%%%% Block-matching parameters:\nstepFS              = 1;  %% step that forces to switch to full-search BM, \"1\" implies always full-search\nsmallLN             = 'not used in np'; %% if stepFS > 1, then this specifies the size of the small local search neighb.\nstepFSW             = 1;\nsmallLNW            = 'not used in np';\nthrToIncStep        = 8;  % if the number of non-zero coefficients after HT is less than thrToIncStep,\n                          % then the sliding step to the next reference block is increased to (nm1-1)\n\ndecLevel = 0;        %% dec. levels of the dyadic wavelet 2D transform for blocks (0 means full decomposition, higher values decrease the dec. number)\nthr_mask = ones(N1); %% N1xN1 mask of threshold scaling coeff. 
--- by default there is no scaling, however the use of different thresholds for different wavelet decompoistion subbands can be done with this matrix\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Note: touch below this point only if you know what you are doing!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Create transform matrices, etc.\n%%%%\n[Tfor, Tinv]   = getTransfMatrix(N1, transform_2D_HT_name, decLevel);     %% get (normalized) forward and inverse transform matrices\n[TforW, TinvW] = getTransfMatrix(N1_wiener, transform_2D_Wiener_name, 0); %% get (normalized) forward and inverse transform matrices\n\nif (strcmp(transform_3rd_dim_name, 'haar') == 1) | (strcmp(transform_3rd_dim_name(end-2:end), '1.1') == 1),\n    %%% If Haar is used in the 3-rd dimension, then a fast internal transform is used, thus no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. 
The transforms are later applied by\n    %%% matrix-vector multiplication for the 1D case.\n    for hpow = 0:ceil(log2(max(N2,N2_wiener))),\n        h = 2^hpow;\n        [Tfor3rd, Tinv3rd]   = getTransfMatrix(h, transform_3rd_dim_name, 0);\n        hadper_trans_single_den{h}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{h} = single(Tinv3rd');\n    end\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% 2D Kaiser windows used in the aggregation of block-wise estimates\n%%%%\nif beta_wiener==2 & beta==2 & N1_wiener==8 & N1==8 % hardcode the window function so that the signal processing toolbox is not needed by default\n    Wwin2D = [ 0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924];\n    Wwin2D_wiener = Wwin2D;\nelse\n    Wwin2D           = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the aggregation of the HT part\n    Wwin2D_wiener    = kaiser(N1_wiener, beta_wiener) * kaiser(N1_wiener, beta_wiener)'; % Kaiser window used in the aggregation of the Wiener filt. 
part\nend\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% If needed, read images, generate noise, or scale the images to the \n%%%% [0,1] interval\n%%%%\nif ~exist('z','var')\n    yRGB        = im2double(imread(image_name));  %% read a noise-free image and put in intensity range [0,1]\n    y = zeros(size(yRGB,1), size(yRGB,2));\n    y(1:2:end,1:2:end) = yRGB(1:2:end,1:2:end,2);\n    y(2:2:end,2:2:end) = yRGB(2:2:end,2:2:end,2);\n    y(1:2:end,2:2:end) = yRGB(1:2:end,2:2:end,1);\n    y(2:2:end,1:2:end) = yRGB(2:2:end,1:2:end,3);\n    \n    randn('seed', 0);                          %% generate seed\n    z        = y + (sigma/255)*randn(size(y)); %% create a noisy image\n    \nelse  % external images\n    \n    image_name = 'External image';\n    \n    % convert z to double precision if needed\n    z = double(z);\n    y= [];\nend\n\nif (size(z,3) ~= 1)\n    error('BM3D accepts only grayscale 2D images.');\nend\n\n%%% Check whether to dump information to the screen or remain silent\nif isempty(y)\n    dump_output_information = false;\nelse\n    dump_output_information = true;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%% Print image information to the screen\n%%%%\nif dump_output_information\n    fprintf('Image: %s (%dx%d), sigma: %.1f\\n', image_name, size(z,1), size(z,2), sigma);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 1. 
Produce the basic estimate by HT filtering\n%%%%\ntic;\ny_ht = bm3d_CFA_thr(z, hadper_trans_single_den, Nstep, N1, N2, lambda_thr2D,...\n\tlambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, (sigma/255), thrToIncStep, single(Tfor), single(Tinv)', inverse_hadper_trans_single_den, single(thr_mask), Wwin2D, smallLN, stepFS );\nestimate_elapsed_time = toc;\n\nif dump_output_information\n    PSNR_INITIAL_ESTIMATE = 10*log10(1/mean((y(:)-double(y_ht(:))).^2));\n    fprintf('BASIC ESTIMATE, PSNR: %.2f dB\\n', PSNR_INITIAL_ESTIMATE);\nend\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 2. Produce the final estimate by Wiener filtering (using the \n%%%%  hard-thresholding initial estimate)\n%%%%\ntic;\ny_wiener = bm3d_CFA_wiener(z, y_ht, hadper_trans_single_den, Nstep_wiener, N1_wiener, N2_wiener, ...\n    'unused arg', tau_match_wiener*N1_wiener*N1_wiener/(255*255), (Ns_wiener-1)/2, (sigma/255), 'unused arg', single(TforW), single(TinvW)', inverse_hadper_trans_single_den, Wwin2D_wiener, smallLNW, stepFSW, single(ones(N1_wiener)) );\nwiener_elapsed_time = toc;\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Calculate the final estimate's PSNR, print it, and show the\n%%%% denoised image next to the noisy one\n%%%%\ny_wiener = double(y_wiener);\n\n\nif dump_output_information \n    PSNR = 10*log10(1/mean((y(:)-y_wiener(:)).^2)); % y is valid\n    fprintf('FINAL ESTIMATE (total time: %.1f sec), PSNR: %.2f dB\\n', ...\n        wiener_elapsed_time + estimate_elapsed_time, PSNR);\n\n    figure, imshow(z); title(sprintf('Noisy %s, PSNR: %.3f dB (sigma: %d)', ...\n        image_name(1:end-4), 10*log10(1/mean((y(:)-z(:)).^2)), sigma));\n\n    figure, imshow(y_wiener); title(sprintf('Denoised %s, PSNR: %.3f dB', ...\n        image_name(1:end-4), PSNR));\n    \nend\n\nif nargout==0\n    varargout={};\nelse\n    varargout{1}=y_wiener;\n    
varargout{2}=y_ht;\nend\n\nreturn;\n\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Some auxiliary functions \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\n\nfunction [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% reconstruction. The forward transform matrix is normalized so that the \n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is \n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. 
Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels') ~= 1,\n    dec_levels = 0;\nend\n\nif N == 1,\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1,\n    Tforward    = hadamard(N);\nelseif (N == 8) & strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n       0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n      -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n       0.707106781186547  -0.707106781186547                   0                   0                   0                   0                   0                   0;\n                       0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n                       0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n                       0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];   \nelseif (N == 8) & strcmp(transform_type, 'dct')==1 % hardcoded transform 
so that the signal processing toolbox is not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n       0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n       0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n       0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n       0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n       0.191341716182545  -0.461939766255643   0.461939766255643  -0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n       0.097545161008064  -0.277785116509801   0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) & strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n       0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n       0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863          
         0   0.408248290463863   0.408248290463863;\n       0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n       0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n       0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n       0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n       0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif strcmp(transform_type, 'dct') == 1,\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1,\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1,\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x); \n    if (Q(1) < 0), \n        Q = -Q; \n    end;\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% Set periodic boundary conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');  \n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))'; \n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;\n\n"
  },
  {
    "path": "BM3D/CBM3D.m",
    "content": "function [PSNR, yRGB_est] = CBM3D(yRGB, zRGB, sigma, profile, print_to_screen, colorspace)\n%\n%  CBM3D is algorithm for attenuation of additive white Gaussian noise from \n%  color RGB images. This algorithm reproduces the results from the article:\n%\n%  [1] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Color image\n%   denoising via sparse 3D collaborative filtering with grouping constraint in \n%   luminance-chrominance space,\" submitted to IEEE Int. Conf. Image Process., \n%   January 2007, in review, preprint at http://www.cs.tut.fi/~foi/GCF-BM3D.\n%\n%  FUNCTION INTERFACE:\n%\n%  [PSNR, yRGB_est] = CBM3D(yRGB, zRGB, sigma, profile, print_to_screen, colorspace)\n%\n%  ! The function can work without any of the input arguments, \n%   in which case, the internal default ones are used !\n% \n%  BASIC USAGE EXAMPLES:\n%\n%     Case 1) Using the default parameters (i.e., image name, sigma, etc.)\n% \n%      [PSNR, yRGB_est] = CBM3D;\n% \n%     Case 2) Using an external noisy image:\n%\n%      % Read an RGB image and scale its intensities in range [0,1]\n%      yRGB = im2double(imread('image_House256rgb.png')); \n%      % Generate the same seed used in the experimental results of [1]\n%      randn('seed', 0);\n%      % Standard deviation of the noise --- corresponding to intensity \n%      %  range [0,255], despite that the input was scaled in [0,1]\n%      sigma = 25;\n%      % Add the AWGN with zero mean and standard deviation 'sigma'\n%      zRGB = yRGB + (sigma/255)*randn(size(yRGB));\n%      % Denoise 'zRGB'. 
The denoised image is 'yRGB_est', and 'NA = 1'  \n%      %  because the true image was not provided\n%      [NA, yRGB_est] = CBM3D(1, zRGB, sigma); \n%      % Compute the putput PSNR\n%      PSNR = 10*log10(1/mean((yRGB(:)-yRGB_est(:)).^2))\n%      % show the noisy image 'zRGB' and the denoised 'yRGB_est'\n%      figure; imshow(min(max(zRGB,0),1));   \n%      figure; imshow(min(max(yRGB_est,0),1));\n% \n%     Case 3) If the original image yRGB is provided as the first input \n%      argument, then some additional information is printed (PSNRs, \n%      figures, etc.). That is, \"[NA, yRGB_est] = BM3D(1, zRGB, sigma);\" in the\n%      above code should be replaced with:\n% \n%      [PSNR, yRGB_est] = CBM3D(yRGB, zRGB, sigma);\n% \n% \n%  INPUT ARGUMENTS (OPTIONAL):\n%     1) yRGB (M x N x 3): Noise-free RGB image (needed for computing PSNR),\n%                           replace with the scalar 1 if not available.\n%     2) zRGB (M x N x 3): Noisy RGBimage (intensities in range [0,1] or [0,255])\n%     3) sigma (double)  : Std. dev. 
of the noise (corresponding to intensities\n%                            in range [0,255] even if the range of zRGB is [0,1])\n%     4) profile (char)  : 'np' --> Normal Profile \n%                          'lc' --> Fast Profile\n%     5) print_to_screen : 0 --> do not print output information (and do \n%                                not plot figures)\n%                          1 --> print information and plot figures\n%     6) colorspace (char): 'opp'   --> use opponent colorspace\n%                          'yCbCr' --> use yCbCr colorspace\n%\n%  OUTPUTS:\n%     1) PSNR (double)          : Output PSNR (dB), only if the original \n%                                 image is available, otherwise PSNR = 0                                               \n%     2) yRGB_est (M x N x 3): Final RGB estimate (in the range [0,1])\n%\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Copyright (c) 2007-2011 Tampere University of Technology.\n% All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n% AUTHORS:\n%     Kostadin Dabov, email: dabov _at_ cs.tut.fi\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% In case, there is no input image (zRGB or yRGB), then use the filename \n%%%%  below to read an original image (might contain path also). 
Later, \n%%%%  artificial AWGN noise is added and this noisy image is processed \n%%%%  by the CBM3D.\n%%%%\nimage_name = [\n%    'kodim12.png'\n   'image_Lena512rgb.png'\n%     'image_House256rgb.png'\n%    'image_Peppers512rgb.png'\n%    'image_Baboon512rgb.png'\n%    'image_F16_512rgb.png'\n   ];\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Quality/complexity trade-off \n%%%%\n%%%%  'np' --> Normal Profile (balanced quality)\n%%%%  'lc' --> Low Complexity Profile (fast, lower quality)\n%%%%\n%%%%  'high' --> High Profile (high quality, not documented in [1])\n%%%%\n%%%%  'vn' --> This profile is automatically enabled for high noise \n%%%%           when sigma > 40\n%%%%\n%%%%  'vn_old' --> This is the old 'vn' profile that was used in [1].\n%%%%           It gives inferior results than 'vn' in most cases. \n%%%%\nif (exist('profile') ~= 1)\n    profile         = 'np'; %% default profile\nend\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Specify the std. dev. of the corrupting noise\n%%%%\nif (exist('sigma') ~= 1),\n   sigma                = 50; %% default standard deviation of the AWGN\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Colorspace in which we perform denoising. BM is applied to the first\n%%%%  component and the matching information is re-used for the other two.\n%%%%\nif (exist('colorspace') ~= 1),\n    colorspace              = 'opp'; %%% (valid colorspaces are: 'yCbCr' and 'opp')\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Following are the parameters for the Normal Profile.\n%%%%\n\n%%%% Select transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name     = 'bior1.5'; %% transform used for the HT filt. of size N1 x N1\ntransform_2D_Wiener_name = 'dct';     %% transform used for the Wiener filt. 
of size N1_wiener x N1_wiener\ntransform_3rd_dim_name   = 'haar';    %% transform used in the 3-rd dim, the same for HT and Wiener filt.\n\n%%%% Hard-thresholding (HT) parameters:\nN1                  = 8;   %% N1 x N1 is the block size used for the hard-thresholding (HT) filtering\nNstep               = 3;   %% sliding step to process every next reference block\nN2                  = 16;  %% maximum number of similar blocks (maximum size of the 3rd dimension of a 3D array)\nNs                  = 39;  %% length of the side of the search neighborhood for full-search block-matching (BM), must be odd\ntau_match           = 3000;%% threshold for the block-distance (d-distance)\nlambda_thr2D        = 0;   %% threshold parameter for the coarse initial denoising used in the d-distance measure\nlambda_thr3D        = 2.7; %% threshold parameter for the hard-thresholding in 3D transform domain\nbeta                = 2.0; %% parameter of the 2D Kaiser window used in the reconstruction\n\n%%%% Wiener filtering parameters:\nN1_wiener           = 8;\nNstep_wiener        = 3;\nN2_wiener           = 32;\nNs_wiener           = 39;\ntau_match_wiener    = 400;\nbeta_wiener         = 2.0;\n\n%%%% Block-matching parameters:\nstepFS              = 1;  %% step that forces to switch to full-search BM, \"1\" implies always full-search\nsmallLN             = 'not used in np'; %% if stepFS > 1, then this specifies the size of the small local search neighb.\nstepFSW             = 1;\nsmallLNW            = 'not used in np';\nthrToIncStep        = 8;  %% used in the HT filtering to increase the sliding step in uniform regions\n\nif strcmp(profile, 'lc') == 1,\n\n    Nstep               = 6;\n    Ns                  = 25;\n    Nstep_wiener        = 5;\n    N2_wiener           = 16;\n    Ns_wiener           = 25;\n\n    thrToIncStep        = 3;\n    smallLN             = 3;\n    stepFS              = 6*Nstep;\n    smallLNW            = 2;\n    stepFSW             = 5*Nstep_wiener;\n\nend\n\n% 
Profile 'vn' was proposed in \n%  Y. Hou, C. Zhao, D. Yang, and Y. Cheng, 'Comment on \"Image Denoising by Sparse 3D Transform-Domain\n%  Collaborative Filtering\"', accepted for publication, IEEE Trans. on Image Processing, July, 2010.\n% as a better alternative to that initially proposed in [1] (which is currently in profile 'vn_old')\nif (strcmp(profile, 'vn') == 1) | (sigma > 40),\n\n    N2                  = 32;\n    Nstep               = 4;\n \n    N1_wiener           = 11;\n    Nstep_wiener        = 6;\n\n    lambda_thr3D        = 2.8;\n    thrToIncStep        = 3;\n    tau_match_wiener    = 3500;\n    tau_match           = 25000;\n    \n    Ns_wiener           = 39;\n    \nend\n\n% The 'vn_old' profile corresponds to the original parameters for strong noise proposed in [1].\nif (strcmp(profile, 'vn_old') == 1) & (sigma > 40),\n\n    transform_2D_HT_name = 'dct'; \n    \n    N1                  = 12;\n    Nstep               = 4;\n \n    N1_wiener           = 11;\n    Nstep_wiener        = 6;\n\n    lambda_thr3D        = 2.8;\n    lambda_thr2D        = 2.0;\n    thrToIncStep        = 3;\n    tau_match_wiener    = 3500;\n    tau_match           = 5000;\n    \n    Ns_wiener           = 39;\n    \nend\n\n\ndecLevel = 0;        %% dec. levels of the dyadic wavelet 2D transform for blocks (0 means full decomposition, higher values decrease the dec. number)\nthr_mask = ones(N1); %% N1xN1 mask of threshold scaling coeff. 
--- by default there is no scaling, however the use of different thresholds for different wavelet decomposition subbands can be done with this matrix\n\nif strcmp(profile, 'high') == 1,\n    \n    decLevel     = 1; \n    Nstep        = 2;\n    Nstep_wiener = 2;\n    lambda_thr3D = 2.5;\n    vMask = ones(N1,1); vMask((end/4+1):end/2)= 1.01; vMask((end/2+1):end) = 1.07; %% this allows to have different thresholds for the finest and next-to-the-finest subbands\n    thr_mask = vMask * vMask'; \n    beta         = 2.5;\n    beta_wiener  = 1.5;\n    \nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Note: touch below this point only if you know what you are doing!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%% Check whether to dump information to the screen or remain silent\ndump_output_information = 1;\nif (exist('print_to_screen') == 1) & (print_to_screen == 0),\n    dump_output_information = 0;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Create transform matrices, etc.\n%%%%\n[Tfor, Tinv]   = getTransfMatrix(N1, transform_2D_HT_name, decLevel);  %% get (normalized) forward and inverse transform matrices\n[TforW, TinvW] = getTransfMatrix(N1_wiener, transform_2D_Wiener_name); %% get (normalized) forward and inverse transform matrices\n\nif (strcmp(transform_3rd_dim_name, 'haar') == 1) | (strcmp(transform_3rd_dim_name(end-2:end), '1.1') == 1),\n    %%% If Haar is used in the 3-rd dimension, then a fast internal transform is used, thus no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. 
The transforms are later applied by\n    %%% matrix-vector multiplication for the 1D case.\n    for hpow = 0:ceil(log2(max(N2,N2_wiener))),\n        h = 2^hpow;\n        [Tfor3rd, Tinv3rd]   = getTransfMatrix(h, transform_3rd_dim_name, 0);\n        hadper_trans_single_den{h}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{h} = single(Tinv3rd');\n    end\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% 2D Kaiser windows used in the aggregation of block-wise estimates\n%%%%\nif beta_wiener==2 & beta==2 & N1_wiener==8 & N1==8 % hardcode the window function so that the signal processing toolbox is not needed by default\n    Wwin2D = [ 0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924];\n    Wwin2D_wiener = Wwin2D;\nelse\n    Wwin2D           = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the aggregation of the HT part\n    Wwin2D_wiener    = kaiser(N1_wiener, beta_wiener) * kaiser(N1_wiener, beta_wiener)'; % Kaiser window used in the aggregation of the Wiener filt. 
part\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% If needed, read images, generate noise, or scale the images to the \n%%%% [0,1] interval\n%%%%\nif (exist('yRGB') ~= 1) | (exist('zRGB') ~= 1)\n    yRGB        = im2double(imread(image_name));  %% read a noise-free image\n    randn('seed', 0);                          %% generate seed\n    zRGB        = yRGB + (sigma/255)*randn(size(yRGB)); %% create a noisy image\nelse % external images\n    image_name = 'External image';\n    \n    % convert zRGB to double precision\n    zRGB = double(zRGB);\n\n    % convert yRGB to double precision\n    yRGB = double(yRGB);\n    \n    % if zRGB's range is [0, 255], then convert to [0, 1]\n    if (max(zRGB(:)) > 10), % a naive check for intensity range\n        zRGB = zRGB / 255;\n    end\n    \n    % if yRGB's range is [0, 255], then convert to [0, 1]\n    if (max(yRGB(:)) > 10), % a naive check for intensity range\n        yRGB = yRGB / 255;\n    end    \nend\n\n\nif (size(zRGB,3) ~= 3) | (size(zRGB,4) ~= 1),\n    error('CBM3D accepts only input RGB images (i.e. 
matrices of size M x N x 3).');\nend\n\n% Check if the true image yRGB is a valid one; if not, then we cannot compute PSNR, etc.\nyRGB_is_invalid_image = (length(size(zRGB)) ~= length(size(yRGB))) | (size(zRGB,1) ~= size(yRGB,1)) | (size(zRGB,2) ~= size(yRGB,2)) | (size(zRGB,3) ~= size(yRGB,3));\nif (yRGB_is_invalid_image),\n    dump_output_information = 0;\nend\n\n\n[Xv, Xh, numSlices] = size(zRGB);              %%% obtain image sizes\n\nif numSlices ~= 3\n    fprintf('Error, an RGB color image is required!\\n');\n    return;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Change colorspace, compute the l2-norms of the new color channels\n%%%%\n[zColSpace l2normLumChrom] = function_rgb2LumChrom(zRGB, colorspace);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Print image information to the screen\n%%%%\nif dump_output_information == 1,\n    fprintf(sprintf('Image: %s (%dx%dx%d), sigma: %.1f\\n', image_name, Xv, Xh, numSlices, sigma));\nend\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 1. Basic estimate by collaborative hard-thresholding and using\n%%%% the grouping constraint on the chrominances.\n%%%%\ntic;\ny_hat = bm3d_thr_color(zColSpace, hadper_trans_single_den, Nstep, N1, N2, lambda_thr2D,...\n    lambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, sigma/255, thrToIncStep, single(Tfor), single(Tinv)', inverse_hadper_trans_single_den, single(thr_mask), 'unused arg', 'unused arg', l2normLumChrom, Wwin2D, smallLN, stepFS );\nestimate_elapsed_time = toc;\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 2. 
Final estimate by collaborative Wiener filtering and using\n%%%% the grouping constraint on the chrominances.\n%%%%\ntic;\nyRGB_est = bm3d_wiener_color(zColSpace, y_hat, hadper_trans_single_den, Nstep_wiener, N1_wiener, N2_wiener, ...\n    'unused_arg', tau_match_wiener*N1_wiener*N1_wiener/(255*255), (Ns_wiener-1)/2, sigma/255, 'unused arg', single(TforW), single(TinvW)', inverse_hadper_trans_single_den, 'unused arg', 'unused arg', l2normLumChrom, Wwin2D_wiener, smallLNW, stepFSW );\nwiener_elapsed_time = toc;\n\nyRGB_est = double(yRGB_est);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Convert back to RGB colorspace\n%%%%\nyRGB_est = function_LumChrom2rgb(yRGB_est, colorspace);\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Calculate final estimate's PSNR and ISNR, print them, and show the\n%%%% denoised image\n%%%%\nPSNR = 0; %% Remains 0 if the true image yRGB is not available\nif (~yRGB_is_invalid_image), % then we assume yRGB is a valid image\n    PSNR = 10*log10(1/mean((yRGB(:)-yRGB_est(:)).^2));\nend\n\nif dump_output_information == 1,\n    fprintf(sprintf('FINAL ESTIMATE (total time: %.1f sec), PSNR: %.2f dB\\n', ...\n        wiener_elapsed_time + estimate_elapsed_time, PSNR));\n\n    figure, imshow(min(max(zRGB,0),1)); title(sprintf('Noisy %s, PSNR: %.3f dB (sigma: %d)', ...\n        image_name(1:end-4), 10*log10(1/mean((yRGB(:)-zRGB(:)).^2)), sigma));\n\n    figure, imshow(min(max(yRGB_est,0),1)); title(sprintf('Denoised %s, PSNR: %.3f dB', ...\n        image_name(1:end-4), PSNR));\nend\n\nreturn;\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Some auxiliary functions\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\n\nfunction [Tforward, Tinverse] = 
getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% reconstruction. The forward transform matrix is normalized so that the \n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is \n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels') ~= 1,\n    dec_levels = 0;\nend\n\nif N == 1,\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1,\n    Tforward    = hadamard(N);\nelseif (N == 8) & strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n       0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n      -0.083506045090284   0.083506045090284  -0.083506045090284   
0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n       0.707106781186547  -0.707106781186547                   0                   0                   0                   0                   0                   0;\n                       0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n                       0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n                       0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];   \nelseif (N == 8) & strcmp(transform_type, 'dct')==1 % hardcoded transform so that the signal processing toolbox is not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n       0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n       0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n       0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n       0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n       0.191341716182545  -0.461939766255643   0.461939766255643  
-0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n       0.097545161008064  -0.277785116509801   0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) & strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n       0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n       0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863                   0   0.408248290463863   0.408248290463863;\n       0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n       0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n       0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n       0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n       0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif strcmp(transform_type, 'dct') == 1,\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1,\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1,\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x); \n    if 
(Q(1) < 0), \n        Q = -Q; \n    end;\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% Set periodic boundary conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');  \n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))'; \n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;\n\nfunction [y, A, l2normLumChrom]=function_rgb2LumChrom(xRGB, colormode)\n% Forward color-space transformation   ( inverse transformation is function_LumChrom2rgb.m )\n%\n% Alessandro Foi - Tampere University of Technology - 2005 - 2006   Public release v1.03 (March 2006)\n% -----------------------------------------------------------------------------------------------------------------------------------------------\n%\n% SYNTAX:\n%\n%   [y A l2normLumChrom] = function_rgb2LumChrom(xRGB, colormode);\n%\n% INPUTS:\n%   xRGB  is RGB image with range [0 1]^3\n%\n%   colormode = 'opp', 'yCbCr', 'pca', or a custom 3x3 matrix\n%\n%       'opp'     Opponent color space ('opp' is equirange version)\n%       'yCbCr'   The standard yCbCr (e.g. 
for JPEG images)\n%       'pca'     Principal components   (note that this transformation is renormalized to be equirange) \n%\n% OUTPUTS:\n%   y  is color-transformed image (with range typically included in or equal to [0 1]^3, depending on the transformation matrix)\n%\n%   l2normLumChrom (optional) l2-norm of the transformation (useful for noise std calculation)\n%   A  transformation matrix  (used necessarily if colormode='pca')\n%\n%   NOTES:  -  If only two outputs are used, then the second output is l2normLumChrom, unless colormode='pca';\n%           -  'opp' is used by default if no colormode is specified.\n%\n%\n% USAGE EXAMPLE FOR PCA TRANSFORMATION:\n%  %%%%  -- forward color transformation --\n%    if colormode=='pca'\n%       [zLumChrom colormode] = function_rgb2LumChrom(zRGB,colormode); % 'colormode' is assigned a 3x3 transform matrix\n%    else\n%       zLumChrom = function_rgb2LumChrom(zRGB,colormode);\n%    end\n%\n%  %%%% [ ... ]  Some processing  [ ... ]\n%\n%  %%%%  -- inverse color transformation --\n%    zRGB=function_LumChrom2rgb(zLumChrom,colormode);\n%\n\nif nargin==1\n    colormode='opp';\nend\nchange_output=0;\nif size(colormode)==[3 3]\n    A=colormode;\n    l2normLumChrom=sqrt(sum(A.^2,2));\nelse\n    if strcmp(colormode,'opp')\n        A=[1/3 1/3 1/3; 0.5  0  -0.5; 0.25  -0.5  0.25];\n    end\n    if strcmp(colormode,'yCbCr')\n        A=[0.299   0.587   0.114;   -0.16873660714285  -0.33126339285715   0.5;   0.5  -0.4186875  -0.0813125];\n    end\n    if strcmp(colormode,'pca')\n        A=princomp(reshape(xRGB,[size(xRGB,1)*size(xRGB,2) 3]))';\n        A=A./repmat(sum(A.*(A>0),2)-sum(A.*(A<0),2),[1 3]);  %% ranges are normalized to unitary length;\n    else\n        if nargout==2\n            change_output=1;\n        end\n    end\nend\n\n%%%% Make sure that each channel's intensity range is [0,1]\nmaxV = sum(A.*(A>0),2);\nminV = sum(A.*(A<0),2);\nyNormal = (reshape(xRGB,[size(xRGB,1)*size(xRGB,2) 3]) * A' - repmat(minV, [1 
size(xRGB,1)*size(xRGB,2)])') * diag(1./(maxV-minV)); % put in range [0,1]\ny = reshape(yNormal, [size(xRGB,1) size(xRGB,2) 3]);\n\n%%%% The l2-norm of each of the 3 transform basis elements \nl2normLumChrom = diag(1./(maxV-minV))*sqrt(sum(A.^2,2));\n\nif change_output\n    A=l2normLumChrom;\nend\n\nreturn;\n\n\n\n\nfunction yRGB=function_LumChrom2rgb(x,colormode)\n% Inverse color-space transformation   ( forward transformation is function_rgb2LumChrom.m )\n%\n% Alessandro Foi - Tampere University of Technology - 2005 - 2006   Public release v1.03 (March 2006)\n% -----------------------------------------------------------------------------------------------------------------------------------------------\n%\n% SYNTAX:\n%\n%   yRGB = function_LumChrom2rgb(x,colormode);\n%\n% INPUTS:\n%  x  is color-transformed image (with range typically included in or equal to [0 1]^3, depending on the transformation matrix)\n%\n%  colormode = 'opp', 'yCbCr', or a custom 3x3 matrix (e.g. provided by the forward transform when 'pca' is selected)\n%\n%       'opp'      opponent color space ('opp' is equirange version)\n%       'yCbCr'    standard yCbCr (e.g. 
for JPEG images)\n%\n% OUTPUTS:\n%   x  is RGB image (with range [0 1]^3)\n%\n%\n% NOTE:    'opp' is used by default if no colormode is specified\n%\n\nif nargin==1\n    colormode='opp';\nend\nif size(colormode)==[3 3]\n    A=colormode;\n    B=inv(A);\nelse\n    if strcmp(colormode,'opp')\n        A =[1/3 1/3 1/3; 0.5  0  -0.5; 0.25  -0.5  0.25];\n        B =[1 1 2/3;1 0 -4/3;1 -1 2/3];\n    end\n    if strcmp(colormode,'yCbCr')\n        A=[0.299   0.587   0.114;   -0.16873660714285  -0.33126339285715   0.5;   0.5  -0.4186875  -0.0813125];\n        B=inv(A);\n    end\nend\n\n%%%% Make sure that each channel's intensity range is [0,1]\nmaxV = sum(A.*(A>0),2);\nminV = sum(A.*(A<0),2);\nxNormal = reshape(x,[size(x,1)*size(x,2) 3]) * diag(maxV-minV) +  repmat(minV, [1 size(x,1)*size(x,2)])'; % put in range [0,1]\nyRGB = reshape(xNormal * B', [ size(x,1) size(x,2) 3]);\n\nreturn;\n\n"
  },
  {
    "path": "BM3D/CVBM3D.m",
    "content": "function [Xdenoised] = CVBM3D(Xnoisy, sigma, Xorig)\n%  CVBM3D denoising of RGB videos corrupted with AWGN.\n%\n%\n%  [Xdenoised] = CVBM3D(Xnoisy, sigma, Xorig)\n%\n%  INPUTS:\n%\n%   1)  Xnoisy --> Either a filename of a noisy AVI RGB uncompressed video (e.g. 'SMg20.avi') \n%                  or a 4-D matrix of dimensions (M x N x 3 x NumberOfFrames)\n%                  The intensity range is [0,255]!\n%   2)  Sigma -->  Noise standard deviation (assumed intensity range is [0,255])\n%\n%   3)  Xorig     (optional parameter) --> Filename of the original video\n%\n%  OUTPUT: .avi files are written to the current matlab folder\n%\n%   1) Xdenoised --> A 4-D matrix with the denoised RGB-video\n%\n%  USAGE EXAMPLES:\n%   1) To denoise a video:\n%      CVBM3D('SMg20.avi', 20)\n%\n%   2) To denoise a video and print PSNR:\n%      CVBM3D('SMg20.avi', 20, 'SM.avi')\n%\n%   1) To denoise a 4-D matrix representing a noisy RGB video:\n%      CVBM3D(X_4D_matrix, 20)\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Copyright  2009 Tampere University of Technology. 
All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n% If no input argument is provided, then use the internal ones from below:\nif exist('sigma', 'var') ~= 1,\n    Xnoisy = 'SMg20.avi';  sigma = 20;  ;\nend\n\n% Whether or not to print information to the screen\ndump_information = 1;\n\n% If the input is a 4-D matrix, then save it as AVI file that is used as\n% input to the denoising\nif ischar(Xnoisy) == 0;\n    NumberOfFrames = size(Xnoisy,4);\n\n    if NumberOfFrames <= 1\n        error('The input RGB video should be a 4-D matrix (M x N x 3 x NumberOfFrames)');\n    end\n    avi_filename = sprintf('ExternalMatrix_%.6d.avi', round(rand*50000));\n    if exist(avi_filename, 'file') == 2,\n        delete(avi_filename);\n    end\n    mov = avifile(avi_filename, 'Colormap', gray(256), 'compression', 'None', 'fps', 30);\n    if mean2(Xnoisy) <= 1\n        fprintf('Possible error: the input RGB-videos should be in range [0,255] and not in [0,1]!\\n');\n    else\n        for ii = [1:NumberOfFrames],\n            mov = addframe(mov, uint8(Xnoisy(:,:,:,ii)));\n        end        \n    end\n    mov = close(mov);\n    \n    if dump_information == 1\n        fprintf('The input 4-D matrix was written to: %s.\\n', avi_filename);\n    end\n\n    clear Xnoisy\n    Xnoisy = avi_filename;\nend\n\n% Read some properties of the noisy RGB video\nnoi_avi_file_info = aviinfo(Xnoisy);\nNumberOfFrames = noi_avi_file_info.NumFrames;\n\n%%% Read Xorig video --- needed if one wants to compute PSNR and ISNR\nif exist('Xorig', 'var') == 1,\n    if ischar(Xorig) == 1;    \n        org_avi_file_info = aviinfo(Xorig);\n        mo = aviread(Xorig);\n        Xorig = zeros([size(mo(1).cdata), NumberOfFrames], 'single');\n        for cf = 1:NumberOfFrames\n            Xorig(:,:,:,cf) = single(mo(cf).cdata(:,:,:));\n        end\n        clear mo;\n\n        if (org_avi_file_info.NumFrames == 
noi_avi_file_info.NumFrames && org_avi_file_info.FramesPerSecond == noi_avi_file_info.FramesPerSecond && ...\n                org_avi_file_info.Width == noi_avi_file_info.Width && org_avi_file_info.Height == noi_avi_file_info.Height)\n            dump_information = 1;\n        end \n    else\n        Xorig = single(Xorig);\n        if mean2(Xorig) <= 1\n            fprintf('Possible error: the input RGB-videos should be in range [0,255] and not in [0,1]!\\n');\n        end\n\n    end\nend\n\ndenoiseFrames  = min(9, NumberOfFrames);\ndenoiseFramesW = min(9, NumberOfFrames);\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Quality/complexity trade-off\n%%%%\n%%%%  'np' --> Normal Profile (balanced quality)\n%%%%  'lc' --> Low Complexity Profile (fast, lower quality)\n%%%%\nif (exist('bm3dProfile') ~= 1)\n    bm3dProfile         = 'np';\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Following are the parameters for the Normal Profile.\n%%%%\n\n%%%% Select transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name     = 'bior1.5'; %% transform used for the HT filt. of size N1 x N1\ntransform_2D_Wiener_name = 'dct';     %% transform used for the Wiener filt. 
of size N1_wiener x N1_wiener\ntransform_3rd_dim_name   = 'haar'; %% transform used in the 3-rd dim, the same for HT and Wiener filt.\n\n%%%% Step 1: Hard-thresholding (HT) parameters:\nN1                  = 8;  %% N1 x N1 is the block size used for the hard-thresholding (HT) filtering\nNstep               = 5;  %% sliding step to process every next reference block\nN2                  = 8;  %% maximum number of similar blocks (maximum size of the 3rd dimension of the 3D groups)\nNs                  = 7;  %% length of the side of the search neighborhood for full-search block-matching (BM)\nNpr                 = 3;  %% length of the side of the motion-adaptive search neighborhood, used in the predictive-search BM\ntau_match           = 3000; %% threshold for the block distance (d-distance)\nlambda_thr3D        = 2.7; %% threshold parameter for the hard-thresholding in 3D DFT domain\ndsub                = 13;  %% a small value subtracted from the distance of blocks with the same spatial coordinate as the reference one\nNb                  = 2;  %% number of blocks to follow in each next frame, used in the predictive-search BM\nbeta                = 2.0; %% the beta parameter of the 2D Kaiser window used in the reconstruction\n\n\n%%%% Step 2: Wiener filtering parameters:\nN1_wiener           = 7;\nNstep_wiener        = 4;\nN2_wiener           = 8;\nNs_wiener           = 7;\nNpr_wiener          = 3;\ntau_match_wiener    = 1000;\nbeta_wiener         = 2.0;\ndsub_wiener         = 1.5;\nNb_wiener           = 2;\n\n%%%% Block-matching parameters:\nstepFS              = 1; %% step that forces to switch to full-search BM, \"1\" implies always full-search\nstepFSW             = 1;\nthrToIncStep        = 8;  %% used in the HT filtering to increase the sliding step in uniform regions\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Following are the parameters for the Low Complexity Profile.\n%%%%\nif strcmp(bm3dProfile, 'lc') == 1,\n    
lambda_thr3D = 2.8;\n    denoiseFrames  = min(5, NumberOfFrames);\n    denoiseFramesW = min(5, NumberOfFrames);\n    N2_wiener = 4;\n    N2 = 4;\n    Ns = 3;\n    Ns_wiener = 3;\n    Nb = 1;\n    Nb_wiener = 1;\nend\n\nif strcmp(bm3dProfile, 'hi') == 1,\n    Nstep        = 3;\n    Nstep_wiener = 3;\nend\n\nif sigma > 30,\n    N1_wiener = 8;\n    tau_match    = 4500;\n    tau_match_wiener    = 3000;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Note: touch below this point only if you know what you are doing!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Create transform matrices, etc.\n%%%%\ndecLevel                     = 0;    %% dec. levels of the dyadic wavelet 2D transform for blocks (0 means full decomposition, higher values decrease the dec. number)\ndecLevel3                    = 0;    %% dec. level for the wavelet transform in the 3rd dimension\n\n[Tfor, Tinv]   = getTransfMatrix(N1, transform_2D_HT_name, decLevel); %% get (normalized) forward and inverse transform matrices\n[TforW, TinvW] = getTransfMatrix(N1_wiener, transform_2D_Wiener_name); %% get (normalized) forward and inverse transform matrices\n\nif (strcmp(transform_3rd_dim_name, 'haar') == 1 || strcmp(transform_3rd_dim_name(end-2:end), '1.1') == 1),\n    %%% Fast internal transform is used, no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. 
The transforms are later computed by\n    %%% matrix multiplication with them\n    for hh = [1 2 4 8 16 32];\n        [Tfor3rd, Tinv3rd]   = getTransfMatrix(hh, transform_3rd_dim_name, decLevel3);\n        hadper_trans_single_den{hh}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{hh} = single(Tinv3rd');\n    end\nend\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% 2D Kaiser windows that scale the reconstructed blocks\n%%%%\nif beta_wiener==2 & beta==2 & N1_wiener==7 & N1==8 % hardcode the window function so that the signal processing toolbox is not needed by default\n    Wwin2D = [ 0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924 ];\n    Wwin2D_wiener = [ 0.1924    0.3151    0.4055    0.4387    0.4055    0.3151    0.1924;\n        0.3151    0.5161    0.6640    0.7184    0.6640    0.5161    0.3151;\n        0.4055    0.6640    0.8544    0.9243    0.8544    0.6640    0.4055;\n        0.4387    0.7184    0.9243    1.0000    0.9243    0.7184    0.4387;\n        0.4055    0.6640    0.8544    0.9243    0.8544    0.6640    0.4055;\n        0.3151    0.5161    0.6640    0.7184    0.6640    0.5161    0.3151;\n        0.1924    0.3151    0.4055    0.4387    0.4055    0.3151    0.1924 ];\nelse\n    Wwin2D           = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the aggregation 
of the HT part\n    Wwin2D_wiener    = kaiser(N1_wiener, beta_wiener) * kaiser(N1_wiener, beta_wiener)'; % Kaiser window used in the aggregation of the Wiener filt. part\nend\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Read an image, generate noise and add it to the image\n%%%%\n\nif dump_information == 1\n    fprintf('Input video: %s, sigma: %.1f\\n', Xnoisy, sigma);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Determine unique filenames of intermediate avi files\n%%%%\n\nHT_avi_file = sprintf('%s_cvbm3d_step1_0.avi', Xnoisy(1:end-4));\nDenoised_avi_file = sprintf('%s_cvbm3d_0.avi', Xnoisy(1:end-4));\ni = 1;\nwhile (exist(['./' HT_avi_file], 'file') ~= 0) | (exist(['./' Denoised_avi_file],'file') ~= 0)\n    HT_avi_file = sprintf('%s_cvbm3d_step1_%d.avi', Xnoisy(1:end-4),i);\n    Denoised_avi_file = sprintf('%s_cvbm3d_%d.avi', Xnoisy(1:end-4),i);\n    i = i + 1;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Initial estimate by hard-thresholding filtering\nHT_IO = {which(Xnoisy), HT_avi_file};\n\ntic;\nbm3d_thr_video_c(HT_IO, hadper_trans_single_den, Nstep, N1, N2, 0,...\n    lambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, sigma/255, thrToIncStep,...\n    single(Tfor), single(Tinv)', inverse_hadper_trans_single_den, single(ones(N1)),...\n    'unused arg', dsub*dsub/255 * (sigma^2 / 255), ones(NumberOfFrames,1), Wwin2D,...\n    (Npr-1)/2, stepFS, denoiseFrames, Nb, 0 );\nestimate_elapsed_time = toc;\n\nif dump_information == 1\n%     mo = aviread(HT_avi_file);\n%     y_hat = zeros([size(mo(1).cdata(:,:,1)), 3, NumberOfFrames], 'single');\n%     for cf = 1:NumberOfFrames\n%         y_hat(:,:,:,cf) = single(mo(cf).cdata(:,:,:))/255;\n%     end\n%     clear  mo\n% \n%     PSNR_HT_ESTIMATE = 10*log10(1/mean2((Xorig-y_hat).^2));\n%     fprintf('HT ESTIMATE, PSNR: %.3f dB\\n', PSNR_HT_ESTIMATE);\n%     clear y_hat;\n     fprintf('STEP1 
completed!\\n');\nend\n\n%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% %%%% Final estimate by Wiener filtering (using the hard-thresholding\n% initial estimate)\n\nlut_ic = ClipComp16b(sigma/255);\n\nWIE_IO = {which(Xnoisy), HT_avi_file, Denoised_avi_file};\n\ntic;\nbm3d_wiener_video_c(WIE_IO, 'unused', hadper_trans_single_den, Nstep_wiener, N1_wiener, N2_wiener, ...\n    'unused_arg', tau_match_wiener*N1_wiener*N1_wiener/(255*255), (Ns_wiener-1)/2, sigma/255, 'unused arg',...\n    single(TforW), single(TinvW)', inverse_hadper_trans_single_den, 'unused arg', dsub_wiener*dsub_wiener/255*(sigma^2 / 255),...\n    ones(NumberOfFrames,1), Wwin2D_wiener, (Npr_wiener-1)/2, stepFSW, denoiseFramesW, Nb_wiener, 0, lut_ic);\n\nwiener_elapsed_time = toc;\n\nif nargout == 1\n    mo = aviread(Denoised_avi_file);\n    Xdenoised = zeros([size(mo(1).cdata(:,:,1)), 3, NumberOfFrames], 'single');\n    for cf = 1:NumberOfFrames\n        Xdenoised(:,:,:,cf) = single(mo(cf).cdata(:,:,:));\n    end\n    clear  mo\nend\n\nif dump_information == 1\n    if nargout ~= 1\n        mo = aviread(Denoised_avi_file);\n        Xdenoised = zeros([size(mo(1).cdata(:,:,1)), 3, NumberOfFrames], 'single');\n        for cf = 1:NumberOfFrames\n            Xdenoised(:,:,:,cf) = single(mo(cf).cdata(:,:,:));\n        end\n        clear  mo\n    end\n    \n    PSNR_TEXT='';\n    if exist('Xorig', 'var') == 1\n        PSNR = 10*log10(255*255/mean((Xorig(:)-Xdenoised(:)).^2));\n        PSNR_TEXT=sprintf(' PSNR: %.3f dB,', PSNR);\n        New_Denoised_avi_file = sprintf('%s_PSNR%.2f.avi',Denoised_avi_file(1:end-4),PSNR);\n        movefile(Denoised_avi_file, New_Denoised_avi_file);\n        Denoised_avi_file = New_Denoised_avi_file;\n    end\n\n%     PSNRs = zeros(NumberOfFrames,1);\n%     for ii = 1:NumberOfFrames,\n%         PSNRs(ii) = 10*log10(1/mean2( (Xorig(:,:,:,ii)-Xdenoised(:,:,:,ii)).^2));\n%         fprintf('Frame: %d, PSNR: %.2f\\n', ii, PSNRs(ii));\n%     end\n    
if nargout == 0\n        clear Xdenoised\n    end\n\n    fprintf('FILTERING COMPLETED (frames/sec: %.2f,%s denoised video saved as %s)\\n', ...\n        NumberOfFrames/(wiener_elapsed_time + estimate_elapsed_time), PSNR_TEXT, Denoised_avi_file);\n    \nend\n\n\nreturn;\n\n\nfunction [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% reconstruction. The forward transform matrix is normalized so that the \n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is \n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. 
Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels') ~= 1,\n    dec_levels = 0;\nend\n\nif N == 1,\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1,\n    Tforward    = hadamard(N);\nelseif (N == 8) & strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n       0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n      -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n       0.707106781186547  -0.707106781186547                   0                   0                   0                   0                   0                   0;\n                       0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n                       0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n                       0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];   \nelseif (N == 8) & strcmp(transform_type, 'dct')==1 % hardcoded transform 
so that the signal processing toolbox is not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n       0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n       0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n       0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n       0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n       0.191341716182545  -0.461939766255643   0.461939766255643  -0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n       0.097545161008064  -0.277785116509801   0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) & strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n       0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n       0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863          
         0   0.408248290463863   0.408248290463863;\n       0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n       0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n       0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n       0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n       0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif (N == 7) & strcmp(transform_type, 'dct')==1 % hardcoded transform so that the signal processing toolbox is not needed to generate it\n    Tforward =[ 0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227;\n       0.521120889169602   0.417906505941275   0.231920613924330                   0  -0.231920613924330  -0.417906505941275  -0.521120889169602;\n       0.481588117120063   0.118942442321354  -0.333269317528993  -0.534522483824849  -0.333269317528993   0.118942442321354   0.481588117120063;\n       0.417906505941275  -0.231920613924330  -0.521120889169602                   0   0.521120889169602   0.231920613924330  -0.417906505941275;\n       0.333269317528993  -0.481588117120063  -0.118942442321354   0.534522483824849  -0.118942442321354  -0.481588117120063   0.333269317528993;\n       0.231920613924330  -0.521120889169602   0.417906505941275                   0  -0.417906505941275   0.521120889169602  -0.231920613924330;\n       0.118942442321354  -0.333269317528993   0.481588117120063  
-0.534522483824849   0.481588117120063  -0.333269317528993   0.118942442321354];   \nelseif strcmp(transform_type, 'dct') == 1,\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1,\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1,\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x); \n    if (Q(1) < 0), \n        Q = -Q; \n    end;\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% Set periodic boundary conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');  \n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))'; \n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;\n\n\n"
  },
  {
    "path": "BM3D/IDDBM3D/BM3DDEB_init.m",
    "content": "function [ISNR, y_hat_RI,y_hat_RWI,zRI] = BM3DDEB_init(experiment_number, y, z, v, sigma)\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Copyright  2008 Tampere University of Technology. All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n% AUTHORS:\n%     Kostadin Dabov, email: kostadin.dabov _at_ tut.fi\n%     Alessandro Foi\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n%  This function implements the image deblurring method proposed in:\n%\n%  [1] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Image \n%   restoration  by sparse 3D transform-domain collaborative filtering,\" \n%   Proc SPIE Electronic Imaging, January 2008.\n%\n%  FUNCTION INTERFACE:\n%\n%  [PSNR, y_hat_RWI] = BM3DDEB(experiment_number, test_image_name)\n%  \n%  INPUT:\n%   1) experiment_number: 1 -> PSF 1, sigma^2 = 2\n%                         2 -> PSF 1, sigma^2 = 8\n%                         3 -> PSF 2, sigma^2 = 0.308\n%                         4 -> PSF 3, sigma^2 = 49\n%                         5 -> PSF 4, sigma^2 = 4\n%                         6 -> PSF 5, sigma^2 = 64\n%         \n%   2) test_image_name:   a valid filename of a grayscale test image\n%\n%  OUTPUT:\n%   1) ISNR:              the output improvement in SNR, dB\n%   2) y_hat_RWI:         the restored image\n%\n%  ! The function can work without any of the input arguments, \n%   in which case, the internal default ones are used !\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%% Fixed regularization parameters (obtained empirically after a rough optimization)\nRegularization_alpha_RI = 4e-4;\nRegularization_alpha_RWI = 5e-3;\n\n%%%% Experiment number (see below for details, e.g. 
how the blur is generated, etc.)\nif (exist('experiment_number') ~= 1)\n    experiment_number = 3; % 1 -- 6\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Select a single image filename (might contain path)\n%%%%\n% if (exist('test_image_name') ~= 1)\n%     test_image_name = [\n% %        'Lena512.png'\n%         'Cameraman256.png'\n% %        'barbara.png'\n% %        'house.png'\n%     ];\n% end\n\n%%%% Select 2D transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name      = 'dst'; %% 2D transform (of size N1 x N1) used in Step 1 \ntransform_2D_Wiener_name  = 'dct'; %% 2D transform (of size N1_wiener x N1_wiener) used in Step 2 \ntransform_3rd_dimage_name = 'haar'; %% 1D transform used in the 3-rd dim, the same for both steps\n\n%%%% Step 1 (BM3D with collaborative hard-thresholding) parameters:\nN1                  = 8;   %% N1 x N1 is the block size\nNstep               = 3;   %% sliding step to process every next reference block\nN2                  = 16;  %% maximum number of similar blocks (maximum size of the 3rd dimension of a 3D array)\nNs                  = 39;  %% length of the side of the search neighborhood for full-search block-matching (BM)\ntau_match           = 6000;%% threshold for the block distance (d-distance)\nlambda_thr2D        = 0;   %% threshold for the coarse initial denoising used in the d-distance measure\nlambda_thr3D        = 2.9; %% threshold for the hard-thresholding \nbeta                = 0; %% the beta parameter of the 2D Kaiser window used in the reconstruction\n\n%%%% Step 2 (BM3D with collaborative Wiener filtering) parameters:\nN1_wiener           = 8;\nNstep_wiener        = 2;\nN2_wiener           = 16;\nNs_wiener           = 39;\ntau_match_wiener    = 800;\nbeta_wiener         = 0;\n\n%%%%  Specify whether to print results and display images\nprint_to_screen     = 
0;\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Note: touch below this point only if you know what you are doing!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Make parameters compatible with the interface of the mex-functions\n%%%%\n\n[Tfor, Tinv]   = getTransfMatrix(N1, transform_2D_HT_name, 0); %% get (normalized) forward and inverse transform matrices\n[TforW, TinvW] = getTransfMatrix(N1_wiener, transform_2D_Wiener_name, 0); %% get (normalized) forward and inverse transform matrices\n\nif (strcmp(transform_3rd_dimage_name, 'haar') == 1),\n    %%% Fast internal transform is used, no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. 
The transforms are later applied by\n    %%% vector-matrix multiplications\n    for hpow = 0:ceil(log2(max(N2,N2_wiener))),\n        h = 2^hpow;\n        [Tfor3rd, Tinv3rd] = getTransfMatrix(h, transform_3rd_dimage_name, 0);\n        hadper_trans_single_den{h}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{h} = single(Tinv3rd');\n    end\nend\n\nif beta == 0 & beta_wiener == 0\n    Wwin2D = ones(N1,N1);\n    Wwin2D_wiener = ones(N1_wiener,N1_wiener);\nelse\n    Wwin2D        = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the hard-thresholding part\n    Wwin2D_wiener = kaiser(N1_wiener, beta_wiener) * kaiser(N1_wiener, beta_wiener)'; % Kaiser window used in the Wiener filtering part\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% %%%% Read an image and generate a blurred and noisy image\n% %%%%\n% y = im2double(imread(test_image_name));\n% \n% if experiment_number==1\n%     sigma=sqrt(2)/255; \n%     for x1=-7:7; for x2=-7:7; v(x1+8,x2+8)=1/(x1^2+x2^2+1); end, end; v=v./sum(v(:));\n% end\n% if experiment_number==2\n%     sigma=sqrt(8)/255;\n%     s1=0; for a1=-7:7; s1=s1+1; s2=0; for a2=-7:7; s2=s2+1; v(s1,s2)=1/(a1^2+a2^2+1); end, end;  v=v./sum(v(:));\n% end\n% if experiment_number==3\n%     BSNR=40; sigma=-1; % if \"sigma=-1\", then the value of sigma depends on the BSNR\n%     v=ones(9); v=v./sum(v(:));\n% end\n% if experiment_number==4\n%     sigma=7/255;\n%     v=[1 4 6 4 1]'*[1 4 6 4 1]; v=v./sum(v(:));  % PSF\n% end\n% if experiment_number==5\n%     sigma=2/255;\n%     v=fspecial('gaussian', 25, 1.6);\n% end\n% if experiment_number==6\n%     sigma=8/255;\n%     v=fspecial('gaussian', 25, .4);\n% end\n% \n% \n[Xv, Xh]  = size(y);\n[ghy,ghx] = size(v);\nbig_v  = zeros(Xv,Xh); big_v(1:ghy,1:ghx)=v; big_v=circshift(big_v,-round([(ghy-1)/2 (ghx-1)/2])); % pad PSF with zeros to whole image domain, and center it\nV      = fft2(big_v); % frequency response of the PSF\n% y_blur = imfilter(y, 
v, 'circular'); % performs blurring (by circular convolution)\n% \n% randn('seed',0);  %%% fix seed for the random number generator\n% if sigma == -1;   %% check whether to use BSNR in order to define value of sigma\n%     sigma=sqrt(norm(y_blur(:)-mean(y_blur(:)),2)^2 /(Xh*Xv*10^(BSNR/10))); % compute sigma from the desired BSNR\n% end\n% \n% %%%% Create a blurred and noisy observation\n% z = y_blur + sigma*randn(Xv,Xh);\n\n\ntic;\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 1: Final estimate by Regularized Inversion (RI) followed by \n%%%% BM3D with collaborative hard-thresholding\n%%%%\n\n%%%% Step 1.1. Regularized Inversion\nRI= conj(V)./( (abs(V).^2) + Regularization_alpha_RI * Xv*Xh*sigma^2); % Transfer Matrix for RI    %% Standard Tikhonov Regularization\nzRI=real(ifft2( fft2(z).* RI ));   % Regularized Inverse Estimate (RI OBSERVATION)\n\nstdRI = zeros(N1, N1);\nfor ii = 1:N1,\n    for jj = 1:N1,\n        UnitMatrix = zeros(N1,N1); UnitMatrix(ii,jj)=1;\n        BasisElementPadded = zeros(Xv, Xh); BasisElementPadded(1:N1,1:N1) = Tinv*UnitMatrix*Tinv'; \n        TransfBasisElementPadded = fft2(BasisElementPadded);\n        stdRI(ii,jj) = sqrt( (1/(Xv*Xh)) * sum(sum(abs(TransfBasisElementPadded.*RI).^2)) )*sigma;\n    end,\nend\n\n%%%% Step 1.2. 
Colored noise suppression by BM3D with collaborative hard-\n%%%% thresholding \n\ny_hat_RI = bm3d_thr_colored_noise(zRI, hadper_trans_single_den, Nstep, N1, N2, lambda_thr2D,...\n    lambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, sigma, 0, single(Tfor), single(Tinv)',...\n    inverse_hadper_trans_single_den, single(stdRI'), Wwin2D, 0, 1 );\n\nPSNR_INITIAL_ESTIMATE = 10*log10(1/mean((y(:)-y_hat_RI(:)).^2));\nISNR_INITIAL_ESTIMATE = PSNR_INITIAL_ESTIMATE - 10*log10(1/mean((y(:)-z(:)).^2));\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Step 2: Final estimate by Regularized Wiener Inversion (RWI) followed\n%%%% by BM3D with collaborative Wiener filtering\n%%%%\n\n%%%% Step 2.1. Regularized Wiener Inversion\nWiener_Pilot = abs(fft2(double(y_hat_RI)));   %%% Wiener reference estimate\nRWI  = conj(V).*Wiener_Pilot.^2./(Wiener_Pilot.^2.*(abs(V).^2) + Regularization_alpha_RWI*Xv*Xh*sigma^2);   % Transfer Matrix for RWI (uses standard regularization 'a-la-Tikhonov')\nzRWI = real(ifft2(fft2(z).*RWI));   % RWI OBSERVATION\n\nstdRWI = zeros(N1_wiener, N1_wiener);\nfor ii = 1:N1_wiener,\n    for jj = 1:N1_wiener,\n        UnitMatrix = zeros(N1_wiener,N1_wiener); UnitMatrix(ii,jj)=1;\n        BasisElementPadded = zeros(Xv, Xh); BasisElementPadded(1:N1_wiener,1:N1_wiener) = idct2(UnitMatrix); \n        TransfBasisElementPadded = fft2(BasisElementPadded);\n        stdRWI(ii,jj) = sqrt( (1/(Xv*Xh)) * sum(sum(abs(TransfBasisElementPadded.*RWI).^2)) )*sigma;\n    end,\nend\n\n%%%% Step 2.2. 
Colored noise suppression by BM3D with collaborative Wiener\n%%%% filtering\ny_hat_RWI = bm3d_wiener_colored_noise(zRWI, y_hat_RI, hadper_trans_single_den, Nstep_wiener, N1_wiener, N2_wiener, ...\n     0, tau_match_wiener*N1_wiener*N1_wiener/(255*255), (Ns_wiener-1)/2, 0, single(stdRWI'), single(TforW), single(TinvW)',...\n     inverse_hadper_trans_single_den, Wwin2D_wiener, 0, 1, single(ones(N1_wiener)) );\n\nelapsed_time = toc;\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Calculate the final estimate's PSNR and ISNR, print them, and show the\n%%%% restored image\n%%%%\nPSNR = 10*log10(1/mean((y(:)-y_hat_RWI(:)).^2));\nISNR = PSNR - 10*log10(1/mean((y(:)-z(:)).^2));\n\nif print_to_screen == 1\nfprintf('Exp %d, Time: %.1f sec, PSNR-RI: %.2f dB, PSNR-RWI: %.2f, ISNR-RWI: %.2f dB\\n', ...\n    experiment_number, elapsed_time, PSNR_INITIAL_ESTIMATE, PSNR, ISNR);\n    figure,imshow(z);\n    figure,imshow(double(y_hat_RWI));\nend\n\nreturn;\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Some auxiliary functions \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\nfunction [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% reconstruction. 
The forward transform matrix is normalized so that the \n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is \n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels') ~= 1,\n    dec_levels = 0;\nend\n\nif N == 1,\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1,\n    Tforward    = hadamard(N);\nelseif (N == 8) & strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n       0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n      -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n       0.707106781186547  -0.707106781186547       
            0                   0                   0                   0                   0                   0;\n                       0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n                       0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n                       0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];   \nelseif (N == 8) & strcmp(transform_type, 'dct')==1 % hardcoded transform so that the signal processing toolbox is not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n       0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n       0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n       0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n       0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n       0.191341716182545  -0.461939766255643   0.461939766255643  -0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n       0.097545161008064  -0.277785116509801   
0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) & strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n       0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n       0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863                   0   0.408248290463863   0.408248290463863;\n       0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n       0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n       0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n       0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n       0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif strcmp(transform_type, 'dct') == 1,\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1,\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1,\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x); \n    if (Q(1) < 0), \n        Q = -Q; \n    end;\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% Set periodic boundary 
conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');  \n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))'; \n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;"
  },
  {
    "path": "BM3D/IDDBM3D/Demo_IDDBM3D.m",
    "content": "function  [isnr, y_hat] = Demo_IDDBM3D(experiment_number, test_image_name)\n% ------------------------------------------------------------------------------------------\n%\n%     Demo software for BM3D-frame based image deblurring\n%               Public release ver. 0.8 (beta) (June 03, 2011)\n%\n% ------------------------------------------------------------------------------------------\n%\n%  This function implements the IDDBM3D image deblurring algorithm proposed in:\n%\n%  [1] A.Danielyan, V. Katkovnik, and K. Egiazarian, \"BM3D frames and \n%   variational image deblurring,\" submitted to IEEE TIP, May 2011 \n%\n% ------------------------------------------------------------------------------------------\n%\n% authors:               Aram Danielyan\n%                        Vladimir Katkovnik\n%\n% web page:              http://www.cs.tut.fi/~foi/GCF-BM3D/\n%\n% contact:               firstname.lastname@tut.fi\n%\n% ------------------------------------------------------------------------------------------\n% Copyright (c) 2011 Tampere University of Technology.\n% All rights reserved.\n% This work should be used for nonprofit purposes only.\n% ------------------------------------------------------------------------------------------\n%\n% Disclaimer\n% ----------\n%\n% Any unauthorized use of these routines for industrial or profit-oriented activities is\n% expressively prohibited. 
By downloading and/or using any of these files, you implicitly\n% agree to all the terms of the TUT limited license (included in the file Legal_Notice.txt).\n% ------------------------------------------------------------------------------------------\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%  FUNCTION INTERFACE:\n%\n%  [psnr, y_hat] = Demo_IDDBM3D(experiment_number, test_image_name)\n%  \n%  INPUT:\n%   1) experiment_number: 1 -> PSF 1, sigma^2 = 2\n%                         2 -> PSF 1, sigma^2 = 8\n%                         3 -> PSF 2, sigma^2 = 0.308\n%                         4 -> PSF 3, sigma^2 = 49\n%                         5 -> PSF 4, sigma^2 = 4\n%                         6 -> PSF 5, sigma^2 = 64\n%                         7-13 -> experiments 7-13 are not described in [1].\n%                         see this file for the blur and noise parameters.\n%   2) test_image_name:   a valid filename of a grayscale test image\n%\n%  OUTPUT:\n%   1) isnr           the output improvement in SNR, dB\n%   2) y_hat:         the restored image\n%\n%  ! 
The function can work without any of the input arguments, \n%   in which case, the internal default ones are used !\n%   \n%   To run this demo functions within the BM3D package should be accessible to Matlab \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\naddpath('../')\n\nif ~exist('experiment_number','var'), experiment_number=3; end\nif ~exist('test_image_name','var'), test_image_name='Cameraman256.png'; end\n\nfilename=test_image_name;\n\nif 1 % \n    initType = 'bm3ddeb'; %use output of the BM3DDEB to initialize the algorithm\nelse\n\tinitType = 'zeros'; %use zero image to initialize the algorithm\nend\n\nmatchType = 'bm3ddeb'; %build groups using output of the BM3DDEB algorithm\nnumIt = 200;\n\nfprintf('Experiment number: %d\\n', experiment_number);\nfprintf('Image: %s\\n', filename);\n\n%% ------- Generating bservation ---------------------------------------------\ndisp('--- Generating observation ----');\ny=im2double(imread(filename));\n\n[yN,xN]=size(y);\n\nswitch experiment_number\n    case 1\n        sigma=sqrt(2)/255; \n        for x1=-7:7; for x2=-7:7; h(x1+8,x2+8)=1/(x1^2+x2^2+1); end, end; h=h./sum(h(:));\n    case 2\n        sigma=sqrt(8)/255;\n        s1=0; for a1=-7:7; s1=s1+1; s2=0; for a2=-7:7; s2=s2+1; h(s1,s2)=1/(a1^2+a2^2+1); end, end;  h=h./sum(h(:));\n    case 3 \n        BSNR=40;\n        sigma=-1; % if \"sigma=-1\", then the value of sigma depends on the BSNR\n        h=ones(9); h=h./sum(h(:));\n    case 4\n        sigma=7/255;\n        h=[1 4 6 4 1]'*[1 4 6 4 1]; h=h./sum(h(:));  % PSF\n    case 5\n        sigma=2/255;\n        h=fspecial('gaussian', 25, 1.6);\n    case 6\n        sigma=8/255;\n        h=fspecial('gaussian', 25, .4);\n    %extra experiments\n    case 7\n        BSNR=30;\n        sigma=-1;\n        h=ones(9); h=h./sum(h(:));            \n    case 8\n        BSNR=20;\n        sigma=-1;\n        h=ones(9); h=h./sum(h(:));  \n    case 9\n        BSNR=40;\n        sigma=-1;\n        
h=fspecial('gaussian', 25, 1.6);    \n    case 10\n        BSNR=20;\n        sigma=-1;\n        h=fspecial('gaussian', 25, 1.6);            \n    case 11\n        BSNR=15;\n        sigma=-1; \n        h=fspecial('gaussian', 25, 1.6);    \n    case 12\n        BSNR=40;\n        sigma=-1; % if \"sigma=-1\", then the value of sigma depends on the BSNR\n        h=ones(19); h=h./sum(h(:));            \n    case 13\n        BSNR=25;\n        sigma=-1; % if \"sigma=-1\", then the value of sigma depends on the BSNR\n        h=ones(19); h=h./sum(h(:));  \nend\n\ny_blur = imfilter(y, h, 'circular'); % performs blurring (by circular convolution)\n\nif sigma == -1;   %% check whether to use BSNR in order to define value of sigma\n    sigma=sqrt(norm(y_blur(:)-mean(y_blur(:)),2)^2 /(yN*xN*10^(BSNR/10)));\n    %     Xv% compute sigma from the desired BSNR\nend\n\n%%%% Create a blurred and noisy observation\nrandn('seed',0);\nz = y_blur + sigma*randn(yN, xN);\n\nbsnr=10*log10(norm(y_blur(:)-mean(y_blur(:)),2)^2 /sigma^2/yN/xN);\npsnr_z =PSNR(y,z,1,0);\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nfprintf('Observation BSNR: %4.2f, PSNR: %4.2f\\n', bsnr, psnr_z);\n\n%% ----- Computing initial estimate ---------------------\ndisp('--- Computing initial estimate  ----');\n\n[dummy, y_hat_RI,y_hat_RWI,zRI] = BM3DDEB_init(experiment_number, y, z, h, sigma);\n\nswitch lower(initType)\n    case 'zeros'\n        y_hat_init=zeros(size(z));\n    case 'zri'\n        y_hat_init=zRI;\n    case 'ri'\n        y_hat_init=y_hat_RI;\n    case 'bm3ddeb'\n        y_hat_init=y_hat_RWI;\n\nend\n\nswitch lower(matchType)\n    case 'z'\n        match_im = z;\n    case 'y'\n        match_im = y;\n    case 'zri'\n        match_im = zRI;\n    case 'ri'\n        match_im = y_hat_RI;\n    case 'bm3ddeb'\n        match_im = y_hat_RWI;   \nend\n\npsnr_init = PSNR(y, y_hat_init,1,0);\n\nfprintf('Initialization method: %s\\n', initType);\nfprintf('Initial estimate ISNR: %4.2f, PSNR: 
%4.2f\\n', psnr_init-psnr_z, psnr_init);\n\n%% ------- Core algorithm ---------------------\n%------ Description of the parameters of the IDDBM3D function ----------\n%y - true image (use [] if true image is unavaliable)\n%z - observed\n%h - blurring PSF\n%y_hat_init - initial estimate y_0\n%match_im - image used to constuct groups and calculate weights g_r\n%sigma - standard deviation of the noise\n%threshType = 'h'; %use 's' for soft thresholding\n%numIt - number of iterations\n%gamma - regularization parameter see [1]\n%tau - regularization parameter see [1] (thresholding level)\n%xi - regularization parameter see [1], it is always set to 1 in this implementation\n%showFigure - set to True to display figure with current estimate\n%--------------------------------------------------------------------\n\nthreshType = 'h';\nshowFigure = true;\n\nswitch threshType\n    case {'s'}\n        gamma_tau_xi_inits= [\n            0.0004509 0.70 1;%1\n            0.0006803 0.78 1;%2\n            0.0003485 0.65 1;%3\n            0.0005259 0.72 1;%4\n            0.0005327 0.82 1;%5\n            7.632e-05 0.25 1;%6\n            0.0005818 0.81 1;%7\n            0.001149  1.18 1;%8\n            0.0004155 0.74 1;%9\n            0.0005591 0.74 1;%10\n            0.0007989 0.82 1;%11\n            0.0006702 0.75 1;%12\n            0.001931  1.83 1;%13 \n        ];\n    case {'h'}\n        gamma_tau_xi_inits= [ \n            0.00051   3.13 1;%1\n            0.0006004 2.75 1;%2\n            0.0004573 2.91 1;%3\n            0.0005959 2.82 1;%4\n            0.0006018 3.63 1;%5\n            0.0001726 2.24 1;%6\n            0.00062   2.98 1;%7\n            0.001047  3.80 1;%8\n            0.0005125 3.00 1;%9\n            0.0005685 2.80 1;%10\n            0.0005716 2.75 1;%11\n            0.0005938 2.55 1;%12\n            0.001602  4.16 1;%13\n        ];\nend\n\ngamma = gamma_tau_xi_inits(experiment_number,1);\ntau   = gamma_tau_xi_inits(experiment_number,2)/255*2.7;\nxi    = 
gamma_tau_xi_inits(experiment_number,3);\n\ndisp('-------- Start ----------');\nfprintf('Number of iterations to perform: %d\\n', numIt);\nfprintf('Thresholding type: %s\\n', threshType);\n\ny_hat = IDDBM3D(y, h, z, y_hat_init, match_im, sigma, threshType, numIt, gamma, tau, xi, showFigure);\npsnr = PSNR(y,y_hat,1,0);\nisnr = psnr-psnr_z;\n\ndisp('-------- Results --------');\nfprintf('Final estimate ISNR: %4.2f, PSNR: %4.2f\\n', isnr, psnr);\nreturn;\n\nend\n\nfunction PSNRdb = PSNR(x, y, maxval, borders)\n    if ~exist('borders', 'var'), borders = 0; end\n    if ~exist('maxval', 'var'), maxval = 255; end\n    \n    xx=borders+1:size(x,1)-borders;\n    yy=borders+1:size(x,2)-borders;\n            \n    PSNRdb = zeros(1,size(x,3));\n    for fr=1:size(x,3) \n        err = x(xx,yy,fr) - y(xx,yy,fr);\n        PSNRdb(fr) = 10 * log10((maxval^2)/mean2(err.^2));    \n    end\nend"
  },
  {
    "path": "BM3D/LEGAL_NOTICE.txt",
    "content": "Legal Notice\n\nBy accessing these World Wide Web pages you agree to the following terms. If you do not agree to the following terms, please notice that you are not allowed to use the site.\n\nCopyright, author rights, trademarks and other intellectual property rights\n\nThis website and its contents are protected by copyright, author rights and/or other intellectual property rights which are the property of Tampere University of Technology (\"TUT\"), its researchers and/or third parties. Reproduction, modification, and use of the materials (or any information incorporated thereto such as but not limited to reports, publications, software, pictures, diagrams, video material) published on this website are hereby authorized provided that:\n\n(i) reproduction, use, and modification are for informational and non-commercial or personal use only and will not be copied or posted on any network computer or broadcast in any media; and\n\n(ii) any reproduction or modification retains all original notices including proprietary or copyright notices; and\n\n(iii) reference to the original authors is given whenever results, which arise from the use of the provided material or any modification of it, are made public.\n\nNo other use of the materials and of any information incorporated thereto is hereby authorized.\n\nIn addition, be informed that some names are protected by trademarks which are the property of TUT, its researchers and/or other third parties whether a specific mention in that respect is made or not.\n\nDisclaimers\n\nThe material, which is found on this website, is provided for general information only and should not be relied upon or used as the basis for making any transactions of any kind whatsoever. 
All the information and any part thereof provided on this website are provided  AS IS  without warranty of any kind either expressed or implied including, without limitation, warranties of merchantability, fitness for a particular purpose or non infringement of intellectual property rights.\nTUT makes no representations or warranties as to the accuracy or completeness of any materials and information incorporated thereto and contained on this website. TUT makes no representations or warranties that access to this website will be uninterrupted or error-free, that this website (the materials and/or any information incorporated thereto) will be secure and free of virus or other harmful components.\n\nThe use of the materials (or any information incorporated thereto), in whole or in part, contained in this website is your sole responsibility. TUT disclaims any liability for any damages whatsoever including without limitation direct, indirect, incidental and/or consequential damages resulting from access to the website and use of the materials provided therein.\n\nThis website may contain links to third party sites. The links are provided to you only as a convenience and the inclusion of any link do not imply either an endorsement by TUT of the linked sites or any warranty from TUT on said sites. Access to said linked sites is at your own risk.\n\nTransmission of user information\n\nAny and all information or request for information you may direct to TUT through this website or through e-mail as may be linked to this website is to be considered as not confidential.\n\nYou may also address your information or request through mail to TUT's registered office for the attention of the department identified in the relevant part of this website.\n\nModifications\n\nTUT reserves the right to revise the site or withdraw access to them at any time. "
  },
  {
    "path": "BM3D/README.txt",
    "content": "-------------------------------------------------------------------\n\n  BM3D demo software for image/video restoration and enhancement  \n                   Public release v2.00 (30 January 2014) \n\n-------------------------------------------------------------------\n\nCopyright (c) 2006-2014 Tampere University of Technology. \nAll rights reserved.\nThis work should be used for nonprofit purposes only.\n\nAuthors:                     Kostadin Dabov\n                             Aram Danieyan\n                             Alessandro Foi\n\n\nBM3D web page:               http://www.cs.tut.fi/~foi/GCF-BM3D\n\n\n-------------------------------------------------------------------\n Contents\n-------------------------------------------------------------------\n\nThe package comprises these functions\n\n*) BM3D.m        : BM3D grayscale-image denoising [1]\n*) CBM3D.m       : CBM3D RGB-image denoising [2]\n*) VBM3D.m       : VBM3D grayscale-video denoising [3]\n*) CVBM3D.m      : CVBM3D RGB-video denoising\n*) BM3DSHARP.m   : BM3D-SHARP grayscale-image sharepening & \n                   denoising [4]\n*) BM3DDEB.m     : BM3D-DEB grayscale-image deblurring [5]\n*) IDDBM3D\\Demo_IDDBM3D : IDDBM3D grayscale-image deblurring [8]\n*) BM3D-SAPCA\\BM3DSAPCA2009 : BM3D-SAPCA grayscale-image denoising [9]\n*) BM3D_CFA.m    : BM3D denoising of Bayer data [10]\n\nFor help on how to use these scripts, you can e.g. 
use \"help BM3D\"\nor \"help CBM3D\".\n\nEach demo calls MEX-functions that allow to change all possible \nparameters used in the algorithm from within the corresponding \nM-file.\n\n\n-------------------------------------------------------------------\n Installation\n-------------------------------------------------------------------\n\nUnzip both BM3D.zip (contains codes) and BM3D_images.zip (contains \ntest images) in a folder that is in the MATLAB path.\n\n\n-------------------------------------------------------------------\n Requirements\n-------------------------------------------------------------------\n\n*) MS Windows (32 or 64 bit), Linux (32 bit or 64 bit)\n   or Mac OS X (32 or 64 bit)\n*) Matlab v.7.1 or later with installed:\n   -- Image Processing Toolbox (for visualization with \"imshow\")\n*) CVBM3D currently supports only 32-bit and 64-bit Windows.\n*) IDDBM3D currently supports only 32-bit and 64-bit Windows and\n   requires Microsoft Visual C++ 2008 SP1 Redistributable Package\n   to be installed. It can be downloaded from:\n    (x86) http://www.microsoft.com/downloads/en/details.aspx?FamilyID=A5C84275-3B97-4AB7-A40D-3802B2AF5FC2\n    (x64) http://www.microsoft.com/downloads/en/details.aspx?FamilyID=BA9257CA-337F-4B40-8C14-157CFDFFEE4E\n\n\n-------------------------------------------------------------------\n Change log\n-------------------------------------------------------------------\nv2.00   (30 January 2014)\n + Added BM3D_CFA denoising algorithm for Bayer data [10].\n ! Various fixes in BM3DDEB main script: now works correctly with \n   asymmetric PSFs; corrected several typos which caused first or\n   second collaborative filtering stages to fail whenever the block\n   sizes and 2-D transforms differed from the default ones.\n\nv1.9    (26 August 2011)\n + Added BM3D-SAPCA denoising algorithm [9].\n\nv1.8    (4 July 2011)\n + Added IDDBM3D deblurring algorithm [8].\n ! 
Improved float precision of BM3D, CBM3D, and BM3DDEB mex-files.\n \nv1.7.6  (4 February 2011) \n + Added support for Matlab running on Mac OSX 32-bit\n . Changed the strong-noise parameters (\"vn\" profile) in CBM3D.m,\n   as proposed in [6].\n\nv1.7.5  (7 July 2010)\n . Changed the strong-noise parameters (\"vn\" profile) in BM3D.m,\n   as proposed in [6].\n\nv1.7.4  (3 May 2010)\n + Added support for Matlab running on Mac OSX 64-bit\n\nv1.7.3  (15 March 2010)\n ! Fixed a problem with writing to AVI files in CVBM3D\n ! Fixed a problem with VBM3D when the input is a 3-D matrix\n\nv1.7.2  (8 Dec 2009)\n ! Fixed the output of CVBM3D to be in range [0,255] instead of \n   in range [0,1]\n\nv1.7.1  (2 Dec 2009)\n ! Fixed a bug in VBM3D.m introduced in v1.7 that concerns the\n   declipping\n\nv1.7  (12 Nov 2009)\n + Added CVBM3D.m script that performs denoising on RGB-videos with\n   AWGN\n ! Fixed VBM3D.m to use declipping in the case when noisy AVI file\n   is provided\n\nv1.6  (17 June 2009)\n ! Made few fixes to the \"getTransfMatrix\" internal function.\n   If used with default parameters, BM3D no longer requires\n   neither Wavelet, PDE, nor Signal Processing toolbox.\n + Added support for x86_64 Linux\n\nv1.5.1  (20 Nov 2008)\n ! Fixed bugs for older versions of Matlab\n + Added support for 32-bit Linux\n + improved the structure of the VBM3D.m script\n\nv1.5  (18 Oct 2008)\n + Added x86_64 version of the MEX-files that run on 64-bit Matlab \n   under Windows\n + Added a missing function in BM3DDEB.m\n + Improves some of the comments in the codes\n ! Fixed a bug in VBM3D when only a input noisy video is provided\n\nv1.4.1  (26 Feb 2008)\n ! Fixed a bug in the grayscale-image deblurring codes and made\n   these codes compatible with Matlab 7 or newer versions.\n\nv1.4  (1 Feb 2008)\n + Added grayscale-image deblurring\n\nv1.3  (12 Oct 2007)\n + Added grayscale-image joint sharpening and denoising\n\nv1.2.1  (4 Sept 2007)\n ! 
Fixed the output of the VBM3D to be the final Wiener estimate \n   rather than the intermediate basic estimate\n ! Fixed a problem when the original video is provided as a 3D\n   matrix\n\nv1.2  (11 June 2007) \n + Added grayscale-video denoising files\n\nv1.1.3  (4 May 2007)\n + Added support for Linux x86-compatible platforms\n\nv1.1.2 \n ! Fixed bugs related with Matlab v.6.1\n\nv1.1.1  (8 March 2007)\n ! Fixed bugs related with Matlab v.6 (e.g., \"isfloat\" was not \n   available and \"imshow\" did not work with single precision)\n + Improved the usage examples shown by executing \"help BM3D\"\n   or \"help CBM3D\" MATLAB commands\n\nv1.1  (6 March 2007)\n ! Fixed a bug in comparisons of the image sizes, which was\n   causing problems when executing \"CBM3D(1,z,sigma);\"\n ! Fixed a bug that was causing a crash when the input images are\n   of type \"uint8\"\n ! Fixed a problem that has caused some versions of imshow to \n   report an error\n ! Fixed few typos in the comments of the functions\n . Made the parameters of the BM3D and the C-BM3D the same\n\nv1.0  (9 December 2006)\n + Initial version, based on BM3D-DFT [7] package (November 2005)\n\n\n-------------------------------------------------------------------\n References\n-------------------------------------------------------------------\n\n[1] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Image \ndenoising by sparse 3D transform-domain collaborative filtering,\" \nIEEE Trans. Image Process., vol. 16, no. 8, August 2007.\n\n[2] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Color \nimage denoising via sparse 3D collaborative filtering with \ngrouping constraint in luminance-chrominance space,\" Proc. IEEE\nInt. Conf. Image Process., ICIP 2007, San Antonio (TX), USA, \nSeptember 2007.\n\n[3] K. Dabov, A. Foi, and K. Egiazarian, \"Video denoising by \nsparse 3D transform-domain collaborative filtering,\" Proc.\nEuropean Signal Process. 
Conf., EUSIPCO 2007, Poznan, Poland,\nSeptember 2007.\n\n[4] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Joint \nimage sharpening and denoising by 3D transform-domain \ncollaborative filtering,\" Proc. 2007 Int. TICSP Workshop Spectral \nMeth. Multirate Signal Process., SMMSP 2007, Moscow, Russia, \nSeptember 2007.\n\n[5] K. Dabov, A. Foi, and K. Egiazarian, \"Image restoration by \nsparse 3D transform-domain collaborative filtering,\" Proc. SPIE\nElectronic Imaging '08, vol. 6812, no. 6812-1D, San Jose (CA),\nUSA, January 2008.\n\n[6] Y. Hou, C. Zhao, D. Yang, and Y. Cheng, 'Comment on \"Image \nDenoising by Sparse 3D Transform-Domain Collaborative Filtering\"'\naccepted for publication, IEEE Trans. Image Process., July, 2010.\n\n[7] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"Image\ndenoising with block-matching and 3D filtering,\" Proc. SPIE\nElectronic Imaging '06, vol. 6064, no. 6064A-30, San Jose (CA),\nUSA, January 2006.\n\n[8] A.Danielyan, V. Katkovnik, and K. Egiazarian, \"BM3D frames and \nvariational image deblurring,\" accepted for publication in IEEE\nTrans. Image Process.\nPreprint online at http://www.cs.tut.fi/~foi/GCF-BM3D\n\n[9] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, \"BM3D Image\nDenoising with Shape-Adaptive Principal Component Analysis\", Proc.\nWorkshop on Signal Processing with Adaptive Sparse Structured\nRepresentations (SPARS'09), Saint-Malo, France, April 2009.\n\n[10] A. Danielyan, M. Vehvilinen, A. Foi, V. Katkovnik, and\nK. Egiazarian, \"Cross-color BM3D filtering of noisy raw data\", \nProc. Int. Workshop on Local and Non-Local Approx. in Image Process.,\nLNLA 2009, Tuusula, Finland, pp. 125-129, August 2009.\n\n \n-------------------------------------------------------------------\n Disclaimer\n-------------------------------------------------------------------\n\nAny unauthorized use of these routines for industrial or profit-\noriented activities is expressively prohibited. 
By downloading \nand/or using any of these files, you implicitly agree to all the \nterms of the TUT limited license:\nhttp://www.cs.tut.fi/~foi/GCF-BM3D/legal_notice.html\n\n\n-------------------------------------------------------------------\n Feedback\n-------------------------------------------------------------------\n\nIf you have any comment, suggestion, or question, please do\ncontact    Alessandro Foi   at  firstname.lastname@tut.fi\n\n"
  },
  {
    "path": "BM3D/VBM3D.m",
    "content": "function [PSNR_FINAL_ESTIMATE, y_hat_wi] = VBM3D(Xnoisy, sigma, NumberOfFrames, dump_information, Xorig, bm3dProfile)\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n%  VBM3D is a Matlab function for attenuation of additive white Gaussian \n%  noise from grayscale videos. This algorithm reproduces the results from the article:\n%\n%  [1] K. Dabov, A. Foi, and K. Egiazarian, \"Video denoising by sparse 3D\n%  transform-domain collaborative filtering,\" European Signal Processing\n%  Conference (EUSIPCO-2007), September 2007. (accepted)\n%\n%  INTERFACE:\n%\n%  [PSNR, Xest] = VBM3D(Xnoisy, Sigma, NFrames, PrintInfo, Xorig)\n%\n%  INPUTS:\n%   1)  Xnoisy     --> A filename of a noisy .avi video, e.g. Xnoisy = 'gstennisg20.avi'\n%        OR\n%       Xnoisy     --> A 3D matrix of a noisy video in a  (floating point data in range [0,1],\n%                                                     or in [0,255])\n%   2)  Sigma --> Noise standard deviation (assumed range is [0,255], no matter what is\n%                                           the input's range)\n%\n%   3)  NFrames   (optional paremter!) --> Number of frames to process. If set to 0 or \n%                                          ommited, then process all frames (default: 0).\n%\n%   4)  PrintInfo (optional paremter!) --> If non-zero, then print to screen and save \n%                                          the denoised video in .AVI\n%                                          format. (default: 1)\n%\n%   5)  Xorig     (optional paremter!) 
--> Original video's filename or 3D matrix \n%                                          If provided, PSNR, ISNR will be computed.\n%\n%   NOTE: If Xorig == Xnoisy, then artificial noise is added internally and the\n%   obtained noisy video is denoised.\n%\n%  OUTPUTS:\n%  \n%   1) PSNR --> If Xorig is valid video, then this contains the PSNR of the\n%                denoised one\n%\n%   1) Xest --> Final video estimate in a 3D matrix (intensities in range [0,1])\n%\n%   *) If \"PrintInfo\" is non-zero, then save the denoised video in the current \n%       MATLAB folder.\n%\n%  USAGE EXAMPLES:\n%\n%     1) Denoise a noisy (clipped in [0,255] range) video sequence, e.g. \n%        'gsalesmang20.avi' corrupted with AWGN with std. dev. 20:\n%          \n%          Xest = VBM3D('gsalesmang20.avi', 20, 0, 1); \n%     \n%     2) The same, but also print PSNR, ISNR numbers.\n%        \n%          Xest = VBM3D('gsalesmang20.avi', 20, 0, 1, 'gsalesman.avi');\n%\n%     3) Add artificial noise to a video, then denoise it (without \n%        considering clipping in [0,255]):\n%        \n%          Xest = VBM3D('gsalesman.avi', 20, 0, 1, 'gsalesman.avi');\n%  \n%\n%  RESTRICTIONS:\n%\n%     Since the video sequences are read into memory as 3D matrices,\n%     there apply restrictions on the input video size, which are thus\n%     proportional to the maximum memory allocatable by Matlab.\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%\n% Copyright  2007 Tampere University of Technology. 
All rights reserved.\n% This work should only be used for nonprofit purposes.\n%\n% AUTHORS:\n%     Kostadin Dabov, email: dabov _at_ cs.tut.fi\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n% If no input argument is provided, then use these internal ones:\nif exist('sigma', 'var') ~= 1,\n    Xnoisy = 'gsalesmang20.avi'; Xorig = 'gsalesman.avi'; sigma = 20;\n    %Xnoisy = 'gstennisg20.avi';  Xorig = 'gstennis.avi';  sigma = 20;\n    %Xnoisy = 'gflowersg20.avi';   Xorig = 'gflower.avi';   sigma = 20;\n    \n    %Xnoisy = 'gsalesman.avi'; Xorig = Xnoisy; sigma = 20;\n    \n    NumberOfFrames = 0; %% 0 means process ALL frames.\nend\n\n\n\nif exist('dump_information', 'var') ~= 1,\n    dump_information = 1; % 1 -> print informaion to the screen and save the processed video as an AVI file\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Obtain infromation about the input noisy video\n%%%%\nif (ischar(Xnoisy) == 1), % if the input is a video filename\n    isCharacterName = 1;\n    Xnoisy_name = Xnoisy;\n    videoInfo = aviinfo(Xnoisy);\n    videoHeight = videoInfo.Height;\n    videoWidth = videoInfo.Width;\n    TotalFrames = videoInfo.NumFrames;\nelseif length(size(Xnoisy)) == 3% the input argument is a 3D video (spatio-temporal) matrix\n    Xnoisy_name = 'Input 3D matrix';\n    isCharacterName = 0;\n    [videoHeight, videoWidth, TotalFrames] = size(Xnoisy);\nelse\n    fprintf('Oops! 
The input argument Xnoisy should be either a filename or a 3D matrix!\\n');\n    PSNR_FINAL_ESTIMATE = 0;\n    y_hat_wi = 0;\n    return;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Check if we want to process all frames, and save as 'NumberOfFrames' \n%%%% the desired number of frames to process\n%%%%\nif exist('NumberOfFrames', 'var') == 1,\n    if NumberOfFrames <= 0,\n        NumberOfFrames = TotalFrames;\n    else\n        NumberOfFrames = max(min(NumberOfFrames, TotalFrames), 1);\n    end    \nelse\n    NumberOfFrames = TotalFrames;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%  Quality/complexity trade-off\n%%%%\n%%%%  'np' --> Normal Profile (balanced quality)\n%%%%  'lc' --> Low Complexity Profile (fast, lower quality)\n%%%%\nif (exist('bm3dProfile', 'var') ~= 1)\n    bm3dProfile         = 'np';\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Parameters for the Normal Profile.\n%%%%\n%%%% Select transforms ('dct', 'dst', 'hadamard', or anything that is listed by 'help wfilters'):\ntransform_2D_HT_name     = 'bior1.5'; %% transform used for the HT filt. of size N1 x N1\ntransform_2D_Wiener_name = 'dct';     %% transform used for the Wiener filt. 
of size N1_wiener x N1_wiener\ntransform_3rd_dim_name   = 'haar'; %% tranform used in the 3-rd dim, the same for HT and Wiener filt.\n\n%%%% Step 1: Hard-thresholding (HT) parameters:\ndenoiseFrames       = min(9, NumberOfFrames); % number of frames in the temporalwindow (should not exceed the total number of frames 'NumberOfFrames')\nN1                  = 8;  %% N1 x N1 is the block size used for the hard-thresholding (HT) filtering\nNstep               = 6;  %% sliding step to process every next refernece block\nN2                  = 8;  %% maximum number of similar blocks (maximum size of the 3rd dimension of the 3D groups)\nNs                  = 7;  %% length of the side of the search neighborhood for full-search block-matching (BM)\nNpr                 = 5;  %% length of the side of the motion-adaptive search neighborhood, use din the predictive-search BM\ntau_match           = 3000; %% threshold for the block distance (d-distance)\nlambda_thr3D        = 2.7; %% threshold parameter for the hard-thresholding in 3D DFT domain\ndsub                = 7;  %% a small value subtracted from the distnce of blocks with the same spatial coordinate as the reference one \nNb                  = 2;  %% number of blocks to follow in each next frame, used in the predictive-search BM\nbeta                = 2.0; %% the beta parameter of the 2D Kaiser window used in the reconstruction\n\n%%%% Step 2: Wiener filtering parameters:\ndenoiseFramesW      = min(9, NumberOfFrames);\nN1_wiener           = 7;\nNstep_wiener        = 4;\nN2_wiener           = 8;\nNs_wiener           = 7;\nNpr_wiener          = 5;\ntau_match_wiener    = 1500;\nbeta_wiener         = 2.0;\ndsub_wiener         = 3;\nNb_wiener           = 2;\n\n%%%% Block-matching parameters:\nstepFS              = 1; %% step that forces to switch to full-search BM, \"1\" implies always full-search\nsmallLN             = 3; %% if stepFS > 1, then this specifies the size of the small local search neighb.\nstepFSW             = 
1;\nsmallLNW            = 3;\nthrToIncStep        = 8;  %% used in the HT filtering to increase the sliding step in uniform regions\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Parameters for the Low Complexity Profile.\n%%%%\nif strcmp(bm3dProfile, 'lc') == 1,\n    lambda_thr3D = 2.8;\n    smallLN   = 2;\n    smallLNW  = 2;\n    denoiseFrames  = min(5, NumberOfFrames);\n    denoiseFramesW = min(5, NumberOfFrames);\n    N2_wiener = 4;\n    N2 = 4;\n    Ns = 3;\n    Ns_wiener = 3;\n    NB = 1;\n    Nb_wiener = 1;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Parameters for the High Profile.\n%%%%\nif strcmp(bm3dProfile, 'hi') == 1,\n    Nstep        = 3;\n    Nstep_wiener = 3;\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Parameters for the \"Very Noisy\" Profile.\n%%%%\nif sigma > 30,\n    N1 = 8;\n    N1_wiener = 8;\n    Nstep = 6;\n    tau_match    = 4500;\n    tau_match_wiener    = 3000;\nend\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Note: touch below this point only if you know what you are doing!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Extract the input noisy video and make sure intensities are in [0,1]\n%%%% interval, using single-precision float\nif isCharacterName,\n    mno = aviread(Xnoisy_name);\n    z = zeros([videoHeight, videoWidth, NumberOfFrames], 'single');\n    for cf = 1:NumberOfFrames\n        z(:,:,cf) = single(mno(cf).cdata(:,:,1)) * 0.0039216; % 1/255 = 0.0039216\n    end\n    clear  mno\nelse\n    if isinteger(Xnoisy) == 1,\n        z = single(Xnoisy) * 0.0039216; % 1/255 = 0.0039216\n    elseif 
isfloat(Xnoisy) == 0,\n        fprintf('Unknown format of \"Xnoisy\"! Must be a filename (array of char) or a 3D array of either floating point data (range [0,1]) or integer data (range [0,255]). \\n');\n        return;\n    else        \n        z = single(Xnoisy);\n    end\nend\n\nclear Xnoisy;\n\n%%%% If the original video is provided, then extract it to 'Xorig' \n%%%% which is later used to compute PSNR and ISNR\nif exist('Xorig', 'var') == 1,\n    randn('seed', 0);\n    if ischar(Xorig) == 0,\n        if isinteger(Xorig) == 1,\n            y = single(Xorig) * 0.0039216; % 1/255 = 0.0039216\n        elseif isfloat(Xorig) == 0,\n            fprintf('Unknown format of \"Xorig\"! Must be a filename (array of char) or a 3D array of either floating point data (range [0,1]) or integer data (range [0,255]). \\n');\n            return;            \n        else\n            y = single(Xorig);\n        end\n    else        \n        if strcmp(Xorig, Xnoisy_name) == 1, %% special case, noise is aritifically added\n            y = z;\n            z = z + (sigma/255) * randn(size(z));\n        else\n            mo = aviread(Xorig);\n            y = zeros([videoHeight, videoWidth, NumberOfFrames], 'single');\n            for cf = 1:NumberOfFrames\n                y(:,:,cf) = single(mo(cf).cdata(:,:,1)) * 0.0039216; % 1/255 = 0.0039216\n            end\n            clear mo\n        end\n    end\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Create transform matrices, etc.\n%%%%\ndecLevel           = 0;    %% dec. levels of the dyadic wavelet 2D transform for blocks (0 means full decomposition, higher values decrease the dec. number)\ndecLevel3          = 0;    %% dec. 
level for the wavelet transform in the 3rd dimension\n\n[Tfor, Tinv]       = getTransfMatrix(N1, transform_2D_HT_name, decLevel); %% get (normalized) forward and inverse transform matrices\n[TforW, TinvW]     = getTransfMatrix(N1_wiener, transform_2D_Wiener_name); %% get (normalized) forward and inverse transform matrices\nthr_mask           = ones(N1); %% N1xN1 mask of threshold scaling coeff. --- by default there is no scaling, however the use of different thresholds for different wavelet decompoistion subbands can be done with this matrix\n\nif (strcmp(transform_3rd_dim_name, 'haar') == 1 || strcmp(transform_3rd_dim_name(end-2:end), '1.1') == 1),\n    %%% Fast internal transform is used, no need to generate transform\n    %%% matrices.\n    hadper_trans_single_den         = {};\n    inverse_hadper_trans_single_den = {};\nelse\n    %%% Create transform matrices. The transforms are later computed by\n    %%% matrix multiplication with them\n    for hh = [1 2 4 8 16 32];\n        [Tfor3rd, Tinv3rd]   = getTransfMatrix(hh, transform_3rd_dim_name, decLevel3);\n        hadper_trans_single_den{hh}         = single(Tfor3rd);\n        inverse_hadper_trans_single_den{hh} = single(Tinv3rd');\n    end\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% 2D Kaiser windows that scale the reconstructed blocks\n%%%%\nif beta_wiener==2 & beta==2 & N1_wiener==7 & N1==8 % hardcode the window function so that the signal processing toolbox is not needed by default\n    Wwin2D = [ 0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.3846    0.5974    0.7688    0.8644    0.8644    0.7688    0.5974    0.3846;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.4325    0.6717    0.8644    0.9718    0.9718    0.8644    0.6717    0.4325;\n        0.3846    0.5974    0.7688    0.8644    
0.8644    0.7688    0.5974    0.3846;\n        0.2989    0.4642    0.5974    0.6717    0.6717    0.5974    0.4642    0.2989;\n        0.1924    0.2989    0.3846    0.4325    0.4325    0.3846    0.2989    0.1924 ];\n    Wwin2D_wiener = [ 0.1924    0.3151    0.4055    0.4387    0.4055    0.3151    0.1924;\n        0.3151    0.5161    0.6640    0.7184    0.6640    0.5161    0.3151;\n        0.4055    0.6640    0.8544    0.9243    0.8544    0.6640    0.4055;\n        0.4387    0.7184    0.9243    1.0000    0.9243    0.7184    0.4387;\n        0.4055    0.6640    0.8544    0.9243    0.8544    0.6640    0.4055;\n        0.3151    0.5161    0.6640    0.7184    0.6640    0.5161    0.3151;\n        0.1924    0.3151    0.4055    0.4387    0.4055    0.3151    0.1924 ];\nelse\n    Wwin2D           = kaiser(N1, beta) * kaiser(N1, beta)'; % Kaiser window used in the aggregation of the HT part\n    Wwin2D_wiener    = kaiser(N1_wiener, beta_wiener) * kaiser(N1_wiener, beta_wiener)'; % Kaiser window used in the aggregation of the Wiener filt. 
part\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Read an image, generate noise and add it to the image\n%%%%\n\nl2normLumChrom = ones(NumberOfFrames,1); %%% NumberOfFrames == nSl !\n\nif dump_information == 1,\n    fprintf('Video: %s (%dx%dx%d), sigma: %.1f\\n', Xnoisy_name, videoHeight, videoWidth, NumberOfFrames, sigma);\nend\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%% Initial estimate by hard-thresholding filtering\ntic;\ny_hat = bm3d_thr_video(z, hadper_trans_single_den, Nstep, N1, N2, 0,...\n    lambda_thr3D, tau_match*N1*N1/(255*255), (Ns-1)/2, sigma/255, thrToIncStep, single(Tfor), single(Tinv)', inverse_hadper_trans_single_den, single(thr_mask), 'unused arg', dsub*dsub/255, l2normLumChrom, Wwin2D, (Npr-1)/2, stepFS, denoiseFrames, Nb );\nestimate_elapsed_time = toc;\n\nif exist('Xorig', 'var') == 1,\n    PSNR_INITIAL_ESTIMATE = 10*log10(1/mean((double(y(:))-double(y_hat(:))).^2));\n    PSNR_NOISE = 10*log10(1/mean((double(y(:))-double(z(:))).^2));\n    ISNR_INITIAL_ESTIMATE = PSNR_INITIAL_ESTIMATE - PSNR_NOISE;\n\n    if dump_information == 1,    \n        fprintf('BASIC ESTIMATE (time: %.1f sec), PSNR: %.3f dB, ISNR: %.3f dB\\n', ...\n            estimate_elapsed_time, PSNR_INITIAL_ESTIMATE, ISNR_INITIAL_ESTIMATE);\n    end\nend\n      \n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% %%%% Final estimate by Wiener filtering (using the hard-thresholding\n% initial estimate)\ntic;\ny_hat_wi = bm3d_wiener_video(z, y_hat, hadper_trans_single_den, Nstep_wiener, N1_wiener, N2_wiener, ...\n    'unused_arg', tau_match_wiener*N1_wiener*N1_wiener/(255*255), (Ns_wiener-1)/2, sigma/255, 'unused arg', single(TforW), single(TinvW)', inverse_hadper_trans_single_den, 'unused arg', dsub_wiener*dsub_wiener/255, l2normLumChrom, Wwin2D_wiener, (Npr_wiener-1)/2, stepFSW, denoiseFramesW, Nb_wiener );\n\n% In case the input noisy video is clipped in [0,1], then 
apply declipping  \nif isCharacterName\n    if exist('Xorig', 'var') == 1\n        if ~strcmp(Xorig, Xnoisy_name)\n            [y_hat_wi] = ClipComp16b(sigma/255, y_hat_wi);\n        end\n    else\n        [y_hat_wi] = ClipComp16b(sigma/255, y_hat_wi);\n    end\nend\n\nwiener_elapsed_time = toc;\n\n\n\nPSNR_FINAL_ESTIMATE = 0;\nif exist('Xorig', 'var') == 1,\n    PSNR_FINAL_ESTIMATE = 10*log10(1/mean((double(y(:))-double(y_hat_wi(:))).^2)); \n    ISNR_FINAL_ESTIMATE = PSNR_FINAL_ESTIMATE - 10*log10(1/mean((double(y(:))-double(z(:))).^2));\nend\n\nif dump_information == 1,\n\n    text_psnr = '';\n    if exist('Xorig', 'var') == 1\n\n        \n        %%%% Un-comment the following to print the PSNR of each frame\n        %\n        %     PSNRs = zeros(NumberOfFrames,1);\n        %     for ii = [1:NumberOfFrames],\n        %         PSNRs(ii) = 10*log10(1/mean2((y(:,:,ii)-y_hat_wi(:,:,ii)).^2));\n        %         fprintf(['Frame: ' sprintf('%d',ii) ', PSNR: ' sprintf('%.2f',PSNRs(ii)) '\\n']);\n        %     end\n        %\n\n        fprintf('FINAL ESTIMATE, PSNR: %.3f dB, ISNR: %.3f dB\\n', ...\n             PSNR_FINAL_ESTIMATE, ISNR_FINAL_ESTIMATE);\n\n        figure, imshow(double(z(:,:,ceil(NumberOfFrames/2)))); % show the central frame\n        title(sprintf('Noisy frame #%d',ceil(NumberOfFrames/2)));           \n        \n        figure, imshow(double(y_hat_wi(:,:,ceil(NumberOfFrames/2)))); % show the central frame\n        title(sprintf('Denoised frame #%d',ceil(NumberOfFrames/2)));\n        \n        text_psnr = sprintf('_PSNR%.2f', PSNR_FINAL_ESTIMATE);\n    end\n    \n    fprintf('The denoising took: %.1f sec (%.4f sec/frame). 
', ...\n        wiener_elapsed_time+estimate_elapsed_time, (wiener_elapsed_time+estimate_elapsed_time)/NumberOfFrames);\n\n    \n    text_vid = 'Denoised';\n    FRATE = 30; % default value\n    if isCharacterName,\n        text_vid = Xnoisy_name(1:end-4);\n        ainfo = aviinfo(Xnoisy_name);\n        FRATE = ainfo.FramesPerSecond;\n    end\n\n    avi_filename = sprintf('%s%s_%s_BM3D.avi', text_vid, text_psnr, bm3dProfile);\n    \n    if exist(avi_filename, 'file') ~= 0,\n        delete(avi_filename);\n    end\n    mov = avifile(avi_filename, 'Colormap', gray(256), 'compression', 'None', 'fps', FRATE);\n    for ii = [1:NumberOfFrames],\n        mov = addframe(mov, uint8(round(255*double(y_hat_wi(:,:,ii)))));\n    end\n    mov = close(mov);\n    fprintf('The denoised video written to: %s.\\n\\n', avi_filename);\n    \nend\n\nreturn;\n\n\n\n\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n% Some auxiliary functions \n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n\n\nfunction [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n% Create forward and inverse transform matrices, which allow for perfect\n% reconstruction. 
The forward transform matrix is normalized so that the \n% l2-norm of each basis element is 1.\n%\n% [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)\n%\n%  INPUTS:\n%\n%   N               --> Size of the transform (for wavelets, must be 2^K)\n%\n%   transform_type  --> 'dct', 'dst', 'hadamard', or anything that is \n%                       listed by 'help wfilters' (bi-orthogonal wavelets)\n%                       'DCrand' -- an orthonormal transform with a DC and all\n%                       the other basis elements of random nature\n%\n%   dec_levels      --> If a wavelet transform is generated, this is the\n%                       desired decomposition level. Must be in the\n%                       range [0, log2(N)-1], where \"0\" implies\n%                       full decomposition.\n%\n%  OUTPUTS:\n%\n%   Tforward        --> (N x N) Forward transform matrix\n%\n%   Tinverse        --> (N x N) Inverse transform matrix\n%\n\nif exist('dec_levels', 'var') ~= 1,\n    dec_levels = 0;\nend\n\nif N == 1,\n    Tforward = 1;\nelseif strcmp(transform_type, 'hadamard') == 1,\n    Tforward    = hadamard(N);\nelseif (N == 8) & strcmp(transform_type, 'bior1.5')==1 % hardcoded transform so that the wavelet toolbox is not needed to generate it\n    Tforward =  [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.219417649252501   0.449283757993216   0.449283757993216   0.219417649252501  -0.219417649252501  -0.449283757993216  -0.449283757993216  -0.219417649252501;\n       0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846  -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284;\n      -0.083506045090284   0.083506045090284  -0.083506045090284   0.083506045090284   0.569359398342846   0.402347308162278  -0.402347308162278  -0.569359398342846;\n       0.707106781186547  
-0.707106781186547                   0                   0                   0                   0                   0                   0;\n                       0                   0   0.707106781186547  -0.707106781186547                   0                   0                   0                   0;\n                       0                   0                   0                   0   0.707106781186547  -0.707106781186547                   0                   0;\n                       0                   0                   0                   0                   0                   0   0.707106781186547  -0.707106781186547];   \nelseif (N == 8) & strcmp(transform_type, 'dct')==1 % hardcoded transform so that the signal processing toolbox is not needed to generate it\n    Tforward = [ 0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274   0.353553390593274;\n       0.490392640201615   0.415734806151273   0.277785116509801   0.097545161008064  -0.097545161008064  -0.277785116509801  -0.415734806151273  -0.490392640201615;\n       0.461939766255643   0.191341716182545  -0.191341716182545  -0.461939766255643  -0.461939766255643  -0.191341716182545   0.191341716182545   0.461939766255643;\n       0.415734806151273  -0.097545161008064  -0.490392640201615  -0.277785116509801   0.277785116509801   0.490392640201615   0.097545161008064  -0.415734806151273;\n       0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274   0.353553390593274  -0.353553390593274  -0.353553390593274   0.353553390593274;\n       0.277785116509801  -0.490392640201615   0.097545161008064   0.415734806151273  -0.415734806151273  -0.097545161008064   0.490392640201615  -0.277785116509801;\n       0.191341716182545  -0.461939766255643   0.461939766255643  -0.191341716182545  -0.191341716182545   0.461939766255643  -0.461939766255643   0.191341716182545;\n       0.097545161008064  
-0.277785116509801   0.415734806151273  -0.490392640201615   0.490392640201615  -0.415734806151273   0.277785116509801  -0.097545161008064];\nelseif (N == 8) & strcmp(transform_type, 'dst')==1 % hardcoded transform so that the PDE toolbox is not needed to generate it\n    Tforward = [ 0.161229841765317   0.303012985114696   0.408248290463863   0.464242826880013   0.464242826880013   0.408248290463863   0.303012985114696   0.161229841765317;\n       0.303012985114696   0.464242826880013   0.408248290463863   0.161229841765317  -0.161229841765317  -0.408248290463863  -0.464242826880013  -0.303012985114696;\n       0.408248290463863   0.408248290463863                   0  -0.408248290463863  -0.408248290463863                   0   0.408248290463863   0.408248290463863;\n       0.464242826880013   0.161229841765317  -0.408248290463863  -0.303012985114696   0.303012985114696   0.408248290463863  -0.161229841765317  -0.464242826880013;\n       0.464242826880013  -0.161229841765317  -0.408248290463863   0.303012985114696   0.303012985114696  -0.408248290463863  -0.161229841765317   0.464242826880013;\n       0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863                   0   0.408248290463863  -0.408248290463863;\n       0.303012985114696  -0.464242826880013   0.408248290463863  -0.161229841765317  -0.161229841765317   0.408248290463863  -0.464242826880013   0.303012985114696;\n       0.161229841765317  -0.303012985114696   0.408248290463863  -0.464242826880013   0.464242826880013  -0.408248290463863   0.303012985114696  -0.161229841765317];\nelseif (N == 7) & strcmp(transform_type, 'dct')==1 % hardcoded transform so that the signal processing toolbox is not needed to generate it\n    Tforward =[ 0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227   0.377964473009227;\n       0.521120889169602   0.417906505941275   0.231920613924330                   0  
-0.231920613924330  -0.417906505941275  -0.521120889169602;\n       0.481588117120063   0.118942442321354  -0.333269317528993  -0.534522483824849  -0.333269317528993   0.118942442321354   0.481588117120063;\n       0.417906505941275  -0.231920613924330  -0.521120889169602                   0   0.521120889169602   0.231920613924330  -0.417906505941275;\n       0.333269317528993  -0.481588117120063  -0.118942442321354   0.534522483824849  -0.118942442321354  -0.481588117120063   0.333269317528993;\n       0.231920613924330  -0.521120889169602   0.417906505941275                   0  -0.417906505941275   0.521120889169602  -0.231920613924330;\n       0.118942442321354  -0.333269317528993   0.481588117120063  -0.534522483824849   0.481588117120063  -0.333269317528993   0.118942442321354];   \nelseif strcmp(transform_type, 'dct') == 1,\n    Tforward    = dct(eye(N));\nelseif strcmp(transform_type, 'dst') == 1,\n    Tforward    = dst(eye(N));\nelseif strcmp(transform_type, 'DCrand') == 1,\n    x = randn(N); x(1:end,1) = 1; [Q,R] = qr(x); \n    if (Q(1) < 0), \n        Q = -Q; \n    end;\n    Tforward = Q';\nelse %% a wavelet decomposition supported by 'wavedec'\n    %%% Set periodic boundary conditions, to preserve bi-orthogonality\n    dwtmode('per','nodisp');  \n    \n    Tforward = zeros(N,N);\n    for i = 1:N\n        Tforward(:,i)=wavedec(circshift([1 zeros(1,N-1)],[dec_levels i-1]), log2(N), transform_type);  %% construct transform matrix\n    end\nend\n\n%%% Normalize the basis elements\nTforward = (Tforward' * diag(sqrt(1./sum(Tforward.^2,2))))'; \n\n%%% Compute the inverse transform matrix\nTinverse = inv(Tforward);\n\nreturn;"
  },
  {
    "path": "BM3D/main.m",
    "content": "clear;clc;\n \npauseTime = 1;\n\ndata_path = \"..\\Set12\";\next = [\"*.jpg\", \"*.png\", \"*.jpeg\"];\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(data_path,ext(i))));\nend\n\nnoise_leval = [10,15,20,25,30,35,40,45,50,55,60,65,70];\n\nfor i = 1:length(noise_leval)\n    PSNRs = [];\n    SSIMs = [];\n    sigma = noise_leval(i);\n    for j = 1:length(filePaths)\n        % NOTE(review): dir() returns bare file names; imread below only works if data_path is on the MATLAB path -- consider imread(fullfile(data_path, filePaths(j).name))\n        y = imread(filePaths(j).name);\n        if length(size(y)) > 2\n            y = rgb2gray(y);\n        end\n        y = im2double(y);\n        z = y + (sigma/255)*randn(size(y));\n        % denoise the noisy image with BM3D\n        \n        [PSNR,SSIM,y_est] = BM3D(y, z, sigma, 'np', 0);\n        PSNRs(j) = PSNR;\n        SSIMs(j) = SSIM;\n    \n        imshow(cat(2,im2uint8(y),im2uint8(z),im2uint8(y_est)));\n        title([num2str(sigma),'   ', filePaths(j).name,'    ',num2str(PSNR,'%2.2f'),'dB','    ',num2str(SSIMs(j),'%2.4f')])\n        drawnow;\n        pause(pauseTime)\n    end\n    disp([\"sigma:\", sigma, \" psnr:\", mean(PSNRs), \"  ssim:\", mean(SSIMs)]);\nend\n\n\n\n"
  },
  {
    "path": "DnCNN/Demo_FDnCNN_Color.m",
    "content": "% This is the testing demo of Flexible DnCNN (FDnCNN) for denoising noisy color images corrupted by\n% AWGN.\n%\n% To run the code, you should install Matconvnet first. Alternatively, you can use the\n% function `vl_ffdnet_matlab` to perform denoising without Matconvnet.\n%\n% \"Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising\"\n% \"FFDNet: Toward a Fast and Flexible Solution for CNN based Image Denoising\"\n%\n% Denoising\" 2018/05\n% If you have any question, please feel free to contact with me.\n% Kai Zhang (e-mail: cskaizhang@gmail.com)\n\n% clear; clc;\n\nformat compact;\nglobal sigmas; % input noise level or input noise level map\naddpath(fullfile('utilities'));\n\nfolderModel = 'model';\nfolderTest  = 'testsets';\nfolderResult= 'results';\nimageSets   = {'CBSD68','Kodak24','McMaster'}; % testing datasets % see https://github.com/cszn/FFDNet/tree/master/testsets\nsetTestCur  = imageSets{1};      % current testing dataset\n\n\nshowResult  = 1;\nuseGPU      = 1;\npauseTime   = 0;\n\nimageNoiseSigma = 25;  % image noise level\ninputNoiseSigma = 25;  % input noise level\n\nfolderResultCur       =  fullfile(folderResult, [setTestCur,'_',num2str(imageNoiseSigma(1)),'_',num2str(inputNoiseSigma(1))]);\nif ~isdir(folderResultCur)\n    mkdir(folderResultCur)\nend\n\nload(fullfile('model','FDnCNN_color.mat'));\nnet = vl_simplenn_tidy(net);\n\n% for i = 1:size(net.layers,2)\n%     net.layers{i}.precious = 1;\n% end\n\nif useGPU\n    net = vl_simplenn_move(net, 'gpu') ;\nend\n\n% read images\next         =  {'*.jpg','*.png','*.bmp','*.tif'};\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(folderTest,setTestCur,ext{i})));\nend\n\n% PSNR and SSIM\nPSNRs = zeros(1,length(filePaths));\nSSIMs = zeros(1,length(filePaths));\n\nfor i = 1:length(filePaths)\n    \n    % read images\n    label   = imread(fullfile(folderTest,setTestCur,filePaths(i).name));\n    [w,h,c] = size(label);\n    \n    
if c == 3\n        [~,nameCur,extCur] = fileparts(filePaths(i).name);\n        label = im2double(label);\n        \n        % add noise\n        randn('seed',0);\n        noise = bsxfun(@times,randn(size(label)),permute(imageNoiseSigma/255,[3 4 1 2]));\n        input = single(label + noise);\n\n        % tic;\n        if useGPU\n            input = gpuArray(input);\n        end\n        \n        % set noise level map\n        sigmas = inputNoiseSigma/255; % see \"vl_simplenn.m\".\n        \n        % perform denoising\n        res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test'); % matconvnet default\n        % res    = vl_ffdnet_concise(net, input);    % concise version of vl_simplenn for testing FFDNet\n        % res    = vl_ffdnet_matlab(net, input); % use this if you did  not install matconvnet; very slow\n\n        output = res(end).x;\n        \n        if useGPU\n            output = gather(output);\n            input  = gather(input);\n        end\n        %toc;\n        \n        % calculate PSNR, SSIM and save results\n        [PSNRCur, SSIMCur] = Cal_PSNRSSIM(im2uint8(label),im2uint8(output),0,0);\n        if showResult\n            imshow(cat(2,im2uint8(input),im2uint8(label),im2uint8(output)));\n            title([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n            %imwrite(im2uint8(output), fullfile(folderResultCur, [nameCur, '_' num2str(imageNoiseSigma(1),'%02d'),'_' num2str(inputNoiseSigma(1),'%02d'),'_PSNR_',num2str(PSNRCur*100,'%4.0f'), extCur] ));\n            drawnow;\n            pause(pauseTime)\n        end\n        disp([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n        PSNRs(i) = PSNRCur;\n        SSIMs(i) = SSIMCur;\n        \n    end\nend\n\ndisp([mean(PSNRs),mean(SSIMs)]);\n\n\n\n\n"
  },
  {
    "path": "DnCNN/Demo_FDnCNN_Color_Clip.m",
    "content": "% This is the testing demo of Flexible DnCNN (FDnCNN) for denoising noisy color images corrupted by\n% AWGN with clipping setting. The noisy input is 8-bit quantized.\n%\n% To run the code, you should install Matconvnet first. Alternatively, you can use the\n% function `vl_ffdnet_matlab` to perform denoising without Matconvnet.\n%\n% \"Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising\"\n% \"FFDNet: Toward a Fast and Flexible Solution for CNN based Image Denoising\"\n%\n% Denoising\" 2018/05\n% If you have any question, please feel free to contact with me.\n% Kai Zhang (e-mail: cskaizhang@gmail.com)\n\n% clear; clc;\n\nformat compact;\nglobal sigmas; % input noise level or input noise level map\naddpath(fullfile('utilities'));\n\nfolderModel = 'model';\nfolderTest  = 'testsets';\nfolderResult= 'results';\nimageSets   = {'CBSD68','Kodak24','McMaster'}; % testing datasets\nsetTestCur  = imageSets{1};      % current testing dataset\n\nshowResult  = 1;\nuseGPU      = 1;\npauseTime   = 0;\n\nimageNoiseSigma = 25;  % image noise level, 25.5 is the default setting of imnoise( ,'gaussian')\ninputNoiseSigma = 25;  % input noise level\n\nfolderResultCur       =  fullfile(folderResult, [setTestCur,'_Clip_',num2str(imageNoiseSigma(1)),'_',num2str(inputNoiseSigma(1))]);\nif ~isdir(folderResultCur)\n    mkdir(folderResultCur)\nend\n\nload(fullfile('model','FDnCNN_Clip_color.mat'));\nnet = vl_simplenn_tidy(net);\n\n% for i = 1:size(net.layers,2)\n%     net.layers{i}.precious = 1;\n% end\n\nif useGPU\n    net = vl_simplenn_move(net, 'gpu') ;\nend\n\n% read images\next         =  {'*.jpg','*.png','*.bmp','*.tif'};\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(folderTest,setTestCur,ext{i})));\nend\n\n% PSNR and SSIM\nPSNRs = zeros(1,length(filePaths));\nSSIMs = zeros(1,length(filePaths));\n\nfor i = 1:length(filePaths)\n    \n    % read images\n    label   = 
imread(fullfile(folderTest,setTestCur,filePaths(i).name));\n    [w,h,c] = size(label);\n    \n    if c == 3\n        [~,nameCur,extCur] = fileparts(filePaths(i).name);\n        label = im2single(label);\n        \n        % add noise\n        randn('seed',0);\n        %input = imnoise(label,'gaussian'); % corresponds to imageNoiseSigma = 25.5;\n        input = imnoise(label,'gaussian',0,(imageNoiseSigma/255)^2);\n        \n        % tic;\n        if useGPU\n            input = gpuArray(input);\n        end\n        \n        % set noise level map\n        sigmas = inputNoiseSigma/255; % see \"vl_simplenn.m\".\n        \n        % perform denoising\n        res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test'); % matconvnet default\n        %res    = vl_ffdnet_concise(net, input);    % concise version of vl_simplenn for testing FFDNet\n        %res    = vl_ffdnet_matlab(net, input); % use this if you did  not install matconvnet; very slow . note: you should also comment net = vl_simplenn_tidy(net); and if useGPU net = vl_simplenn_move(net, 'gpu') ; end\n        \n        output = res(end).x;\n        \n        \n        if useGPU\n            output = gather(output);\n            input  = gather(input);\n        end\n        %toc;\n        \n        % calculate PSNR, SSIM and save results\n        [PSNRCur, SSIMCur] = Cal_PSNRSSIM(im2uint8(label),im2uint8(output),0,0);\n        if showResult\n            imshow(cat(2,im2uint8(input),im2uint8(label),im2uint8(output)));\n            title([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n            % imwrite(im2uint8(output), fullfile(folderResultCur, [nameCur, '_' num2str(imageNoiseSigma(1),'%02d'),'_' num2str(inputNoiseSigma(1),'%02d'),'_PSNR_',num2str(PSNRCur*100,'%4.0f'), extCur] ));\n            % imwrite(im2uint8(input), fullfile(folderResultCur, [nameCur, '_' num2str(imageNoiseSigma(1),'%02d'),'_' num2str(inputNoiseSigma(1),'%02d'), extCur] ));\n     
       drawnow;\n            pause(pauseTime)\n        end\n        disp([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n        PSNRs(i) = PSNRCur;\n        SSIMs(i) = SSIMCur;\n        \n    end\nend\n\ndisp([mean(PSNRs),mean(SSIMs)]);\n\n\n\n\n"
  },
  {
    "path": "DnCNN/Demo_FDnCNN_Gray.m",
    "content": "% This is the testing demo of Flexible DnCNN (FDnCNN) for denoising noisy grayscale images corrupted by\n% AWGN.\n%\n% To run the code, you should install Matconvnet first. Alternatively, you can use the\n% function `vl_ffdnet_matlab` to perform denoising without Matconvnet.\n%\n% \"Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising\"\n% \"FFDNet: Toward a Fast and Flexible Solution for CNN based Image Denoising\"\n%\n%  2018/05\n% If you have any question, please feel free to contact with me.\n% Kai Zhang (e-mail: cskaizhang@gmail.com)\n\n%clear; clc;\nformat compact;\nglobal sigmas; % input noise level or input noise level map\naddpath(fullfile('utilities'));\n\nfolderModel = 'model';\nfolderTest  = 'testsets';\nfolderResult= 'results';\nimageSets   = {'BSD68','Set12'}; % testing datasets\nsetTestCur  = imageSets{2};      % current testing dataset\n\nshowResult  = 1;\nuseGPU      = 1; % CPU or GPU. For single-threaded (ST) CPU computation, use \"matlab -singleCompThread\" to start matlab.\npauseTime   = 0;\n\nimageNoiseSigma = 50;  % image noise level\ninputNoiseSigma = 50;  % input noise level\n\nfolderResultCur       =  fullfile(folderResult, [setTestCur,'_',num2str(imageNoiseSigma),'_',num2str(inputNoiseSigma)]);\nif ~isfolder(folderResultCur)\n    mkdir(folderResultCur)\nend\n\nload(fullfile('model','FDnCNN_gray.mat'));\nnet = vl_simplenn_tidy(net);\n\n% for i = 1:size(net.layers,2)\n%     net.layers{i}.precious = 1;\n% end\n\nif useGPU\n    net = vl_simplenn_move(net, 'gpu') ;\nend\n\n% read images\next         =  {'*.jpg','*.png','*.bmp'};\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(folderTest,setTestCur,ext{i})));\nend\n\n% PSNR and SSIM\nPSNRs = zeros(1,length(filePaths));\nSSIMs = zeros(1,length(filePaths));\n\nfor i = 1:length(filePaths)\n    \n    % read images\n    label = imread(fullfile(folderTest,setTestCur,filePaths(i).name));\n    [w,h,~]=size(label);\n    
if size(label,3)==3\n        label = rgb2gray(label);\n    end\n    \n    [~,nameCur,extCur] = fileparts(filePaths(i).name);\n    label = im2double(label);\n    \n    % add noise\n    randn('seed',0);\n    noise = imageNoiseSigma/255.*randn(size(label));\n    input = single(label + noise);\n    \n    % tic;\n    if useGPU\n        input = gpuArray(input);\n    end\n    \n    % set noise level map\n    sigmas = inputNoiseSigma/255; % see \"vl_simplenn.m\".\n    \n    % perform denoising\n    res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test'); % matconvnet default\n    %res    = vl_ffdnet_concise(net, input);    % concise version of vl_simplenn for testing FFDNet\n    %res    = vl_ffdnet_matlab(net, input); % use this if you did  not install matconvnet; very slow . note: you should also comment net = vl_simplenn_tidy(net); and if useGPU net = vl_simplenn_move(net, 'gpu') ; end\n    \n    output = res(end).x;\n    \n    if useGPU\n        output = gather(output);\n        input  = gather(input);\n    end\n    % toc;\n    % calculate PSNR, SSIM and save results\n    [PSNRCur, SSIMCur] = Cal_PSNRSSIM(im2uint8(label),im2uint8(output),0,0);\n    if showResult\n        imshow(cat(2,im2uint8(input),im2uint8(label),im2uint8(output)));\n        title([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n        %imwrite(im2uint8(output), fullfile(folderResultCur, [nameCur, '_' num2str(imageNoiseSigma,'%02d'),'_' num2str(inputNoiseSigma,'%02d'),'_PSNR_',num2str(PSNRCur*100,'%4.0f'), extCur] ));\n        drawnow;\n        pause(pauseTime)\n    end\n    disp([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n    PSNRs(i) = PSNRCur;\n    SSIMs(i) = SSIMCur;\nend\n\ndisp([mean(PSNRs),mean(SSIMs)]);\n\n\n\n\n"
  },
  {
    "path": "DnCNN/Demo_FDnCNN_Gray_Clip.m",
    "content": "% This is the testing demo of Flexible DnCNN (FDnCNN) for denoising noisy grayscale images corrupted by\n% AWGN with clipping setting. The noisy input is 8-bit quantized.\n%\n% \"Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising\"\n% \"FFDNet: Toward a Fast and Flexible Solution for CNN based Image Denoising\"\n%\n%  2018/05\n% If you have any question, please feel free to contact with me.\n% Kai Zhang (e-mail: cskaizhang@gmail.com)\n\n%clear; clc;\nformat compact;\nglobal sigmas; % input noise level or input noise level map\naddpath(fullfile('utilities'));\n\nfolderModel = 'models';\nfolderTest  = 'testsets';\nfolderResult= 'results';\nimageSets   = {'BSD68','Set12'}; % testing datasets\nsetTestCur  = imageSets{2};      % current testing dataset\n\nshowResult  = 1;\nuseGPU      = 1; % CPU or GPU. For single-threaded (ST) CPU computation, use \"matlab -singleCompThread\" to start matlab.\npauseTime   = 0;\n\nimageNoiseSigma = 50;  % image noise level, 25.5 is the default setting of imnoise( ,'gaussian')\ninputNoiseSigma = 50;  % input noise level\n\nfolderResultCur       =  fullfile(folderResult, [setTestCur,'_Clip_',num2str(imageNoiseSigma),'_',num2str(inputNoiseSigma)]);\nif ~isdir(folderResultCur)\n    mkdir(folderResultCur)\nend\n\nload(fullfile('model','FDnCNN_Clip_gray.mat'));\nnet = vl_simplenn_tidy(net);\n\n% for i = 1:size(net.layers,2)\n%     net.layers{i}.precious = 1;\n% end\n\nif useGPU\n    net = vl_simplenn_move(net, 'gpu') ;\nend\n\n% read images\next         =  {'*.jpg','*.png','*.bmp'};\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(folderTest,setTestCur,ext{i})));\nend\n\n% PSNR and SSIM\nPSNRs = zeros(1,length(filePaths));\nSSIMs = zeros(1,length(filePaths));\n\nfor i = 1:length(filePaths)\n    \n    % read images\n    label = imread(fullfile(folderTest,setTestCur,filePaths(i).name));\n    [w,h,~]=size(label);\n    if size(label,3)==3\n        label = 
rgb2gray(label);\n    end\n    \n    [~,nameCur,extCur] = fileparts(filePaths(i).name);\n    label = im2single(label);\n    \n    % add noise\n    randn('seed',0);\n    %input = imnoise(label,'gaussian'); % corresponds to imageNoiseSigma = 25.5;\n    input = imnoise(label,'gaussian',0,(imageNoiseSigma/255)^2);\n    \n    % tic;\n    if useGPU\n        input = gpuArray(input);\n    end\n    \n    % set noise level map\n    sigmas = inputNoiseSigma/255; % see \"vl_simplenn.m\".\n    \n    % denoising\n    res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test');\n    output = res(end).x;\n    \n\n    if useGPU\n        output = gather(output);\n        input  = gather(input);\n    end\n    % toc;\n    % calculate PSNR, SSIM and save results\n    [PSNRCur, SSIMCur] = Cal_PSNRSSIM(im2uint8(label),im2uint8(output),0,0);\n    if showResult\n        imshow(cat(2,im2uint8(input),im2uint8(label),im2uint8(output)));\n        title([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n        %imwrite(im2uint8(output), fullfile(folderResultCur, [nameCur, '_' num2str(imageNoiseSigma,'%02d'),'_' num2str(inputNoiseSigma,'%02d'),'_PSNR_',num2str(PSNRCur*100,'%4.0f'), extCur] ));\n        %imwrite(im2uint8(input), fullfile(folderResultCur, [nameCur, '_' num2str(imageNoiseSigma,'%02d'),'_' num2str(inputNoiseSigma,'%02d'), extCur] ));\n        drawnow;\n        pause(pauseTime)\n    end\n    disp([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n    PSNRs(i) = PSNRCur;\n    SSIMs(i) = SSIMCur;\nend\n\ndisp([mean(PSNRs),mean(SSIMs)]);\n\n\n\n\n"
  },
  {
    "path": "DnCNN/Demo_test_CDnCNN_Specific.m",
    "content": "% This is the testing demo of CDnCNN for denoising noisy color images corrupted by\n% AWGN.\n\n% clear; clc;\naddpath('utilities');\nfolderTest  = fullfile('testsets','CBSD68'); %%% test dataset\nfolderModel = 'model';\n\nshowResult  = 1;\nuseGPU      = 1;\npauseTime   = 0;\n\n% image noise level\nnoiseSigma  = 25;  \n% model noise level\nmodelSigma  = 25; % from {5, 10, 15, 25, 35, 50}\nload(fullfile(folderModel,'specifics_color',['color_sigma=',num2str(modelSigma,'%02d'),'.mat']));\n\nnet = vl_simplenn_tidy(net);\n\n% for i = 1:size(net.layers,2)\n%     net.layers{i}.precious = 1;\n% end\n\n% move to gpu\nif useGPU\n    net = vl_simplenn_move(net, 'gpu') ;\nend\n\n% read images\next         =  {'*.jpg','*.png','*.bmp'};\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(folderTest,ext{i})));\nend\n\n%%% PSNR and SSIM\nPSNRs = zeros(1,length(filePaths));\n\nfor i = 1:length(filePaths)\n    \n    % read current image\n    label = imread(fullfile(folderTest,filePaths(i).name));\n    [~,nameCur,extCur] = fileparts(filePaths(i).name);\n    label = im2double(label);\n    \n    % add Gaussian noise\n    randn('seed',0);\n    input = single(label + noiseSigma/255*randn(size(label)));\n    \n    % convert to GPU\n    if useGPU\n        input = gpuArray(input);\n    end\n    \n    res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test');\n    %res = vl_ffdnet_matlab(net, input); %%% use this if you did not install matconvnet.\n    output = input - res(end).x;\n    \n    % convert to CPU\n    if useGPU\n        output = gather(output);\n        input  = gather(input);\n    end\n    \n    % calculate PSNR\n    [PSNRCur] = Cal_PSNRSSIM(im2uint8(label),im2uint8(output),0,0);\n    if showResult\n        imshow(cat(2,im2uint8(label),im2uint8(input),im2uint8(output)));\n        title([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB'])\n        drawnow;\n        pause(pauseTime)\n    end\n    PSNRs(i) 
= PSNRCur;\n    \nend\n\ndisp(mean(PSNRs));\n\n\n"
  },
  {
    "path": "DnCNN/Demo_test_DnCNN.m",
    "content": "\n%%% This is the testing demo for gray image (Gaussian) denoising.\n%%% Training data: 400 images of size 180X180\n% If vl_nnconv fails with a 'SCRIPT vl_nnconv cannot be executed' error, run\n% vl_setupnn (in matconvnet/matlab) once to set up MatConvNet. [reconstructed from mis-encoded comment]\n\n\n% clear; clc;\naddpath('utilities');\nfolderTest  = fullfile('testsets','Set12'); %%% test dataset\n%folderTest  = 'testsets\\BSD68';\nfolderModel = 'model';\nshowResult  = 1;\nuseGPU      = 0;\npauseTime   = 1;\n\n%%% load [specific] Gaussian denoising model\n\n\nsigm = {10,15,20,25,30,35,40,45,50,55,60,65,70};\nfor index=1:length(sigm)\n    noiseSigma  = sigm{index};  %%% image noise level\n    modelSigma  = min(75,max(10,round(noiseSigma/5)*5)); %%% model noise level\n    disp(noiseSigma)\n    load(fullfile(folderModel,'specifics',['sigma=',num2str(modelSigma,'%02d'),'.mat']));\n\n    %%% load [blind] Gaussian denoising model %%% for sigma in [0,55]\n\n    % load(fullfile(folderModel,'GD_Gray_Blind.mat'));\n\n\n    %%%\n    net = vl_simplenn_tidy(net);\n\n    % for i = 1:size(net.layers,2)\n    %     net.layers{i}.precious = 1;\n    % end\n\n    %%% move to gpu\n    if useGPU\n        net = vl_simplenn_move(net, 'gpu') ;\n    end\n\n    %%% read images\n    ext         =  {'*.jpg','*.png','*.bmp'};\n    filePaths   =  [];\n    for i = 1 : length(ext)  % length(ext)\n        filePaths = cat(1,filePaths, dir(fullfile(folderTest,ext{i})));\n    end\n\n    %%% PSNR and SSIM\n    PSNRs = zeros(1,length(filePaths));\n    SSIMs = zeros(1,length(filePaths));\n    % disp(filePaths);\n    for i = 1:length(filePaths)\n\n        %%% read images\n        label = imread(fullfile(folderTest,filePaths(i).name));\n        [~,nameCur,extCur] = fileparts(filePaths(i).name);\n        label = im2double(label);\n\n        randn('seed',0);\n        input = single(label + noiseSigma/255*randn(size(label)));\n\n        %%% convert to GPU\n        if useGPU\n            input = gpuArray(input);\n        end\n\n        res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test');\n  
      %res = simplenn_matlab(net, input); %%% use this if you did not install matconvnet.\n        output = input - res(end).x;\n\n        %%% convert to CPU\n        if useGPU\n            output = gather(output);\n            input  = gather(input);\n        end\n\n        %%% calculate PSNR and SSIM\n        [PSNRCur, SSIMCur] = Cal_PSNRSSIM(im2uint8(label),im2uint8(output),0,0);\n        if showResult\n            imshow(cat(2,im2uint8(label),im2uint8(input),im2uint8(output)));\n            title([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB','    ',num2str(SSIMCur,'%2.4f')])\n            drawnow;\n            pause(pauseTime)\n        end\n        PSNRs(i) = PSNRCur;\n        SSIMs(i) = SSIMCur;\n    end\n\n    disp([mean(PSNRs),mean(SSIMs)]);\n    disp('==========');\nend\n\n\n"
  },
  {
    "path": "DnCNN/Demo_test_DnCNN3.m",
    "content": "\n%%% This is the testing demo for learning a single model for three tasks, including Gaussian denoing, SISR, JPEG image deblocking.\n\n% clear; clc;\n\naddpath('utilities');\n\n%%% testing set\ntasks       = {'GD','SR','DB'}; %%% three tasks\nimageSets   = {'BSD68','Set5','Set14','BSD100','Urben100','classic5','LIVE1'}; %%% testing dataset\n\n%%% setting\ntaskTest    = tasks([1 2 3]); %%% choose the tasks for evaluation\nsetTest     = {imageSets([1]),imageSets([2:5]),imageSets([6 7])}; %%% select the datasets for each tasks\nshowResult  = [1 1 1]; %%% save the restored images\npauseTime   = 1;\nfolderModel = 'model';\nuseGPU      = 1; % 1 or 0, true or false\n\nfolderTest  = 'testsets';\nfolderResult= 'results';\n\nif ~exist(folderResult,'file')\n    mkdir(folderResult);\nend\n\n%%% task GD = Gaussian Denoising\nsigma   = 25;\n\n%%% task SR = Single Image Super-Resolution\nscale   = 3;\n\n%%% task DB = DeBlocking\nQ       = 20;\n\n%%% load DnCNN-3 model\nload(fullfile(folderModel,'DnCNN3.mat'));\n\n%net = vl_simplenn_tidy(net);\n% for i = 1:size(net.layers,2)\n%     net.layers{i}.precious = 1;\n% end\nif useGPU\n    net = vl_simplenn_move(net, 'gpu') ;\nend\n\n%%% input (single); output (single); label (ground-truth, uint8)\n%%% input_RGB (uint8); output_RGB (uint8); label_RGB (ground-truth, uint8)\n\n%%%-------------------------------------------------------------------------------------\n%%% Gaussian Denoising (GD)\n%%%-------------------------------------------------------------------------------------\n\nif ismember('GD',taskTest)\n    taskTestCur = 'GD';\n    for n_set = 1 : numel(setTest{1})\n        %%% read images\n        setTestCur = cell2mat(setTest{1}(n_set));\n        disp('-----------------------------------------------');\n        disp(['----',setTestCur,'------Gaussian Denoising-----']);\n        disp('-----------------------------------------------');\n        \n        folderTestCur = fullfile(folderTest,setTestCur);\n        ext  
               =  {'*.jpg','*.png','*.bmp'};\n        filepaths           =  [];\n        for i = 1 : length(ext)\n            filepaths = cat(1,filepaths,dir(fullfile(folderTestCur, ext{i})));\n        end\n        \n        eval(['PSNR_',taskTestCur,'_',setTestCur,'_s',num2str(sigma),' = zeros(length(filepaths),1);']);\n        eval(['SSIM_',taskTestCur,'_',setTestCur,'_s',num2str(sigma),' = zeros(length(filepaths),1);']);\n        \n        %%% folder to store results\n        folderResultCur = fullfile(folderResult, [taskTestCur,'_',setTestCur,'_s',num2str(sigma)]);\n        if ~exist(folderResultCur,'file')\n            mkdir(folderResultCur);\n        end\n        \n        for i = 1 : length(filepaths)\n            label  = imread(fullfile(folderTestCur,filepaths(i).name));\n            [~,imageName,ext] = fileparts(filepaths(i).name);\n            chanel = size(label,3);\n            if chanel == 3\n                %%% label (uint8)\n                label = rgb2gray(label);\n            end\n            %%% input (single)\n            randn('seed',0);\n            input = single(im2double(label) + sigma/255*randn(size(label)));\n            \n            if useGPU\n                input = gpuArray(input);\n            end\n            \n            res = vl_simplenn(net, input,[],[],'conserveMemory',true,'mode','test');\n            im = res(end).x;\n            \n            %%% output (single)\n            output = gather(input - im);\n            \n            [PSNR_Cur,SSIM_Cur] = Cal_PSNRSSIM(label,im2uint8(output),0,0);\n            disp(['Denoising     ',num2str(PSNR_Cur,'%2.2f'),'dB','    ',filepaths(i).name]);\n            eval(['PSNR_',taskTestCur,'_',setTestCur,'_s',num2str(sigma),'(',num2str(i),') = PSNR_Cur;']);\n            eval(['SSIM_',taskTestCur,'_',setTestCur,'_s',num2str(sigma),'(',num2str(i),') = SSIM_Cur;']);\n            if showResult(1)\n                
imshow(cat(1,cat(2,im2uint8(input),im2uint8(output)),cat(2,im2uint8(abs(input-output)*10),label)));\n                drawnow;\n                title(['Denoising     ',filepaths(i).name,'    ',num2str(PSNR_Cur,'%2.2f'),'dB'],'FontSize',12)\n                pause(pauseTime)\n                %pause()\n                \n                %%% save results\n                imwrite(output,fullfile(folderResultCur,[imageName,'_s',num2str(sigma),'.png']));\n                \n            end\n            \n        end\n        disp(['Average PSNR is ',num2str(mean(eval(['PSNR_',taskTestCur,'_',setTestCur,'_s',num2str(sigma)])),'%2.2f'),'dB']);\n        disp(['Average SSIM is ',num2str(mean(eval(['SSIM_',taskTestCur,'_',setTestCur,'_s',num2str(sigma)])),'%2.4f')]);\n        \n        %%% save PSNR and SSIM metrics\n        save(fullfile(folderResultCur,['PSNR_',taskTestCur,'_',setTestCur,'_s',num2str(sigma),'.mat']),['PSNR_',taskTestCur,'_',setTestCur,'_s',num2str(sigma)])\n        save(fullfile(folderResultCur,['SSIM_',taskTestCur,'_',setTestCur,'_s',num2str(sigma),'.mat']),['SSIM_',taskTestCur,'_',setTestCur,'_s',num2str(sigma)])\n        \n    end\nend\n\n%%%-------------------------------------------------------------------------------------\n%%% Single Image Super-Resolution (SR)\n%%%-------------------------------------------------------------------------------------\n\nif ismember('SR',taskTest)\n    taskTestCur = 'SR';\n    for n_set = 1 : numel(setTest{2})\n        %%% read images\n        setTestCur = cell2mat(setTest{2}(n_set));\n        disp('--------------------------------------------');\n        disp(['----',setTestCur,'-----Super-Resolution-----']);\n        disp('--------------------------------------------');\n        folderTestCur = fullfile(folderTest,setTestCur);\n        ext                 =  {'*.jpg','*.png','*.bmp'};\n        filepaths           =  [];\n        for i = 1 : length(ext)\n            filepaths = cat(1,filepaths,dir(fullfile(folderTestCur, 
ext{i})));\n        end\n        eval(['PSNR_',taskTestCur,'_',setTestCur,'_x',num2str(scale),' = zeros(length(filepaths),1);']);\n        eval(['SSIM_',taskTestCur,'_',setTestCur,'_x',num2str(scale),' = zeros(length(filepaths),1);']);\n        \n        if fix(scale) == scale\n            crop = scale;\n        else\n            crop = scale*10;\n        end\n        \n        %%% folder to store results\n        folderResultCur = fullfile(folderResult, [taskTestCur,'_',setTestCur,'_x',num2str(scale)]);\n        if ~exist(folderResultCur,'file')\n            mkdir(folderResultCur);\n        end\n        \n        for i = 1 : length(filepaths)\n            \n            HR  = imread(fullfile(folderTestCur,filepaths(i).name));\n            [~,imageName,ext] = fileparts(filepaths(i).name);\n            HR  = modcrop(HR, crop);\n            %%% label_RGB (uint8)\n            label_RGB = HR;\n            chanel = size(HR,3);\n            %%% LR (uint8)\n            LR = imresize(HR,1/scale,'bicubic');\n            if chanel == 3\n                %%% label (single)\n                HR_ycc = single(rgb2ycbcr(im2double(HR)));\n                label  = HR_ycc(:,:,1);\n                %%% input (single)\n                HR_bic     = imresize(im2double(LR),scale,'bicubic');\n                LR_bic_ycc = rgb2ycbcr(HR_bic);\n                input      = im2single(LR_bic_ycc(:,:,1));\n                %%% input_RGB (uint8)\n                input_RGB  = im2uint8(HR_bic);\n            else\n                %%% label (single)\n                label  = im2single(HR);\n                HR_bic = imresize(LR,scale,'bicubic');\n                %%% input (single)\n                input  = im2single(HR_bic);\n                %%% input_RGB (uint8)\n                input_RGB = HR_bic;\n            end\n            \n            if useGPU\n                input = gpuArray(input);\n            end\n            res = vl_simplenn(net, input,[],[],'conserveMemory',true,'mode','test');\n           
 im = res(end).x;\n            \n            %%% output (single)\n            output = gather(input - im);\n            if chanel == 3\n                %%% output_RGB (uint8)\n                LR_bic_ycc(:,:,1) = double(output);\n                output_RGB = im2uint8(ycbcr2rgb(LR_bic_ycc));\n            else\n                %%% output_RGB (uint8)\n                output_RGB = im2uint8(output);\n            end\n            \n            [PSNR_Cur,SSIM_Cur] = Cal_PSNRSSIM(label*255,output*255,ceil(scale),ceil(scale)); %%% single\n            disp(['Single Image Super-Resolution     ',num2str(PSNR_Cur,'%2.2f'),'dB','    ',filepaths(i).name]);\n            eval(['PSNR_SR_',setTestCur,'_x',num2str(scale),'(',num2str(i),') = PSNR_Cur;']);\n            eval(['SSIM_SR_',setTestCur,'_x',num2str(scale),'(',num2str(i),') = SSIM_Cur;']);\n            if showResult(2)\n                imshow(cat(1,cat(2,input_RGB,output_RGB),cat(2,(output_RGB-input_RGB),label_RGB)));\n                drawnow;\n                title(['Single Image Super-Resolution     ',filepaths(i).name,'    ',num2str(PSNR_Cur,'%2.2f'),'dB'],'FontSize',12)\n                pause(pauseTime)\n                % pause()\n                \n                %%% save results\n                imwrite(output_RGB,fullfile(folderResultCur,[imageName,'_x',num2str(scale),'.png']));\n                \n            end\n            \n        end\n        disp(['Average PSNR is ',num2str(mean(eval(['PSNR_',taskTestCur,'_',setTestCur,'_x',num2str(scale)])),'%2.2f'),'dB']);\n        disp(['Average SSIM is ',num2str(mean(eval(['SSIM_',taskTestCur,'_',setTestCur,'_x',num2str(scale)])),'%2.4f')]);\n        \n        %%% save PSNR and SSIM metrics\n        save(fullfile(folderResultCur,['PSNR_',taskTestCur,'_',setTestCur,'_x',num2str(scale),'.mat']),['PSNR_',taskTestCur,'_',setTestCur,'_x',num2str(scale)])\n        
save(fullfile(folderResultCur,['SSIM_',taskTestCur,'_',setTestCur,'_x',num2str(scale),'.mat']),['SSIM_',taskTestCur,'_',setTestCur,'_x',num2str(scale)])\n        \n    end\nend\n\n%%%-------------------------------------------------------------------------------------\n%%% JPEG Image Deblocking (DB)\n%%%-------------------------------------------------------------------------------------\n\nif ismember('DB',taskTest)\n    taskTestCur = 'DB';\n    for n_set = 1 : numel(setTest{3})\n        %%% read image names\n        setTestCur = cell2mat(setTest{3}(n_set));\n        disp('---------------------------------------');\n        disp(['----',setTestCur,'------Deblocking-----']);\n        disp('---------------------------------------');\n        folderTestCur = fullfile(folderTest,setTestCur);\n        ext                 =  {'*.jpg','*.png','*.bmp'};\n        filepaths           =  [];\n        for i = 1 : length(ext)\n            filepaths = cat(1,filepaths,dir(fullfile(folderTestCur, ext{i})));\n        end\n        \n        %%% to store PSNR and SSIM results\n        eval(['PSNR_',taskTestCur,'_',setTestCur,'_q',num2str(Q),' = zeros(length(filepaths),1);']);\n        eval(['SSIM_',taskTestCur,'_',setTestCur,'_q',num2str(Q),' = zeros(length(filepaths),1);']);\n        \n        %%% to store results\n        folderResultCur = fullfile(folderResult, [taskTestCur,'_',setTestCur,'_q',num2str(Q)]);\n        if ~exist(folderResultCur,'file')\n            mkdir(folderResultCur);\n        end\n        \n        for i = 1 : length(filepaths)\n            label  = imread(fullfile(folderTestCur,filepaths(i).name));\n            [~,imageName,ext] = fileparts(filepaths(i).name);\n            chanel = size(label,3);\n            if chanel == 3\n                %%% label (uint8)\n                label = rgb2ycbcr(label);\n                label = label(:,:,1);\n            end\n            %%% input (single)\n            imwrite(label,'test.jpg','jpg','quality',Q);\n            
input = im2single(imread('test.jpg'));\n            \n            if useGPU\n                input = gpuArray(input);\n            end\n            res = vl_simplenn(net, input,[],[],'conserveMemory',true,'mode','test');\n            im = res(end).x;\n            \n            %%% output (single)\n            output = gather(input - im);\n            \n            [PSNR_Cur,SSIM_Cur] = Cal_PSNRSSIM(label,im2uint8(output),0,0);\n            disp(['Deblocking     ',num2str(PSNR_Cur,'%2.2f'),'dB','    ',filepaths(i).name]);\n            eval(['PSNR_',taskTestCur,'_',setTestCur,'_q',num2str(Q),'(',num2str(i),') = PSNR_Cur;']);\n            eval(['SSIM_',taskTestCur,'_',setTestCur,'_q',num2str(Q),'(',num2str(i),') = SSIM_Cur;']);\n            \n            if showResult(3)\n                imshow(cat(1,cat(2,im2uint8(input),im2uint8(output)),cat(2,im2uint8(abs(input-output)*10),label)));\n                drawnow;\n                title(['Deblocking     ',filepaths(i).name,'    ',num2str(PSNR_Cur,'%2.2f'),'dB'],'FontSize',12)\n                pause(pauseTime)\n                \n                %%% save results\n                imwrite(output,fullfile(folderResultCur,[imageName,'_q',num2str(Q),'.png']));\n                \n            end\n            \n        end\n        disp(['Average PSNR is ',num2str(mean(eval(['PSNR_',taskTestCur,'_',setTestCur,'_q',num2str(Q)])),'%2.2f'),'dB']);\n        disp(['Average SSIM is ',num2str(mean(eval(['SSIM_',taskTestCur,'_',setTestCur,'_q',num2str(Q)])),'%2.4f')]);\n        \n        %%% save PSNR and SSIM metrics\n        save(fullfile(folderResultCur,['PSNR_',taskTestCur,'_',setTestCur,'_q',num2str(Q),'.mat']),['PSNR_',taskTestCur,'_',setTestCur,'_q',num2str(Q)])\n        save(fullfile(folderResultCur,['SSIM_',taskTestCur,'_',setTestCur,'_q',num2str(Q),'.mat']),['SSIM_',taskTestCur,'_',setTestCur,'_q',num2str(Q)])\n        \n    end\nend\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "DnCNN/Demo_test_DnCNN_C.m",
    "content": "\n%%% This is the testing code demo for color image (Gaussian) denoising.\n%%% The model is trained with 1) noise levels in [0 55]; 2) 432 training images.\n\n\n% clear; clc;\naddpath('utilities');\nfolderTest  = 'testsets\\CBSD68'; %%% test dataset\nfolderModel = 'model';\nnoiseSigma  = 45;  %%% image noise level\nshowResult  = 1;\nuseGPU      = 1;\npauseTime   = 1;\n\n\n%%% load blind Gaussian denoising model (color image)\nload(fullfile(folderModel,'GD_Color_Blind.mat')); %%% for sigma in [0,55]\n\n%%%\n% net = vl_simplenn_tidy(net);\n\n% for i = 1:size(net.layers,2)\n%     net.layers{i}.precious = 1;\n% end\n\n%%% move to gpu\nif useGPU\n    net = vl_simplenn_move(net, 'gpu') ;\nend\n\n%%% read images\next         =  {'*.jpg','*.png','*.bmp'};\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(folderTest,ext{i})));\nend\n\n%%% PSNR and SSIM\nPSNRs = zeros(1,length(filePaths));\n\nfor i = 1:length(filePaths)\n    \n    %%% read current image\n    label = imread(fullfile(folderTest,filePaths(i).name));\n    [~,nameCur,extCur] = fileparts(filePaths(i).name);\n    label = im2double(label);\n    \n    %%% add Gaussian noise\n    randn('seed',0);\n    input = single(label + noiseSigma/255*randn(size(label)));\n    \n    %%% convert to GPU\n    if useGPU\n        input = gpuArray(input);\n    end\n    \n    res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test');\n    %res = simplenn_matlab(net, input); %%% use this if you did not install matconvnet.\n    output = input - res(end).x;\n    \n    %%% convert to CPU\n    if useGPU\n        output = gather(output);\n        input  = gather(input);\n    end\n    \n    %%% calculate PSNR\n    [PSNRCur] = Cal_PSNRSSIM(im2uint8(label),im2uint8(output),0,0);\n    if showResult\n        imshow(cat(2,im2uint8(label),im2uint8(input),im2uint8(output)));\n        title([filePaths(i).name,'    ',num2str(PSNRCur,'%2.2f'),'dB'])\n        drawnow;\n        
pause(pauseTime)\n    end\n    PSNRs(i) = PSNRCur;\n    \nend\n\ndisp(mean(PSNRs));\n\n\n"
  },
  {
    "path": "DnCNN/model/README.txt",
    "content": "## Beyond a Gaussian Denoiser: Residual Learning of Deep CNN for Image Denoising\n\n\n### Main Contents\n\n**demos**:  `Demo_test_DnCNN-.m`.\n\n**model**:  including the trained models for Gaussian denoising; a single model for Gaussian denoising, single image super-resolution (SISR) and deblocking.\n\n**testsets**:  BSD68 and Set10 for Gaussian denoising evaluation; Set5, Set14, BSD100 and Urban100 datasets for SISR evaluation; Classic5 and LIVE1 for JPEG image deblocking evaluation.\n\n\n\nTo run the testing demos `Demo_test_DnCNN-.m`, you should first [install](http://www.vlfeat.org/matconvnet/install/) [MatConvNet](http://www.vlfeat.org/matconvnet/).\n\nNote: If you did not install MatConvNet, just replace `res    = vl_simplenn(net,input,[],[],'conserveMemory',true,'mode','test')` with `res = simplenn_matlab(net, input)`.\n\nFor the training code, feel free to contact: cskaizhang@gmail.com\n\n\n\n### Results\n\n#### Gaussian Denoising\n\nThe average PSNR(dB) results of different methods on the BSD6868 dataset.\n\n|  Noise Level | BM3D | WNNM  | EPLL | MLP |  CSF |TNRD  | DnCNN-S | DnCNN-B |\n|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|\n| 15  |  31.07  |   31.37   | 31.21  |   -   |  31.24 |  31.42 | **31.73** | **31.61**  |\n| 25  |  28.57  |   28.83   | 28.68  | 28.96 |  28.74 |  28.92 | **29.23** | **29.16**  |\n| 50  |  25.62  |   25.87   | 25.67  | 26.03 |    -   |  25.97 | **26.23** | **26.23**  |\n\n#### Gaussian Denoising, Single ImageSuper-Resolution and JPEG Image Deblocking via a Single (DnCNN-3) Model \n\nAverage PSNR(dB)/SSIM results of different methods for Gaussian denoising with noise level 15, 25 and 50 on BSD68 dataset, single image super-resolution with \nupscaling factors 2, 3 and 40 on Set5, Set14, BSD100 and Urban100 datasets, JPEG image deblocking with quality factors 10, 20, 30 and 40 on Classic5 and LIVE11 datasets.\n\n###### Gaussian Denoising\n|  Dataset    | Noise Level | 
BM3D | TNRD | DnCNN-3 |\n|:---------:|:---------:|:---------:|:---------:|:---------:|\n|       |  15  | 31.08 / 0.8722 | 31.42 / 0.8826 | 31.46 / 0.8826 |\n| BSD68 |  25  | 28.57 / 0.8017 | 28.92 / 0.8157 | 29.02 / 0.8190 |\n|       |  50  | 25.62 / 0.6869 | 25.97 / 0.7029 | 26.10 / 0.7076 |\n###### Single Image Super-Resolution\n| Dataset | Upscaling Factor | TNRD | VDSR |DnCNN-3|\n|:---------:|:---------:|:---------:|:---------:|:---------:|\n|        | 2 | 36.86 / 0.9556 | 37.56 / 0.9591 | 37.58 / 0.9590 |\n|Set5    | 3 | 33.18 / 0.9152 | 33.67 / 0.9220 | 33.75 / 0.9222 |\n|        | 4 | 30.85 / 0.8732 | 31.35 / 0.8845 | 31.40 / 0.8845 |\n||\n|        | 2 | 32.51 / 0.9069 | 33.02 / 0.9128 | 33.03 / 0.9128 |\n|Set14   | 3 | 29.43 / 0.8232 | 29.77 / 0.8318 | 29.81 / 0.8321 |\n|        | 4 | 27.66 / 0.7563 | 27.99 / 0.7659 | 28.04 / 0.7672 |\n||\n|        | 2 | 31.40 / 0.8878 | 31.89 / 0.8961 | 31.90 / 0.8961 |\n|BSD100  | 3 | 28.50 / 0.7881 | 28.82 / 0.7980 | 28.85 / 0.7981 |\n|        | 4 | 27.00 / 0.7140 | 27.28 / 0.7256 | 27.29 / 0.7253 |\n||\n|        | 2 | 29.70 / 0.8994 | 30.76 / 0.9143 | 30.74 / 0.9139 |\n|Urban100| 3 | 26.42 / 0.8076 | 27.13 / 0.8283 | 27.15 / 0.8276 |\n|        | 4 | 24.61 / 0.7291 | 25.17 / 0.7528 | 25.20 / 0.7521 |\n###### JPEG Image Deblocking\n|  Dataset | Quality Factor | AR-CNN | TNRD | DnCNN-3 |\n|:---------:|:---------:|:---------:|:---------:|:---------:|\n|Classic5| 10 | 29.03 / 0.7929 | 29.28 / 0.7992 | 29.40 / 0.8026 |\n|        | 20 | 31.15 / 0.8517 | 31.47 / 0.8576 | 31.63 / 0.8610 |\n|        | 30 | 32.51 / 0.8806 | 32.78 / 0.8837 | 32.91 / 0.8861 |\n|        | 40 | 33.34 / 0.8953 |       -        | 33.77 / 0.9003 |\n||\n|  LIVE1 | 10 | 28.96 / 0.8076 | 29.15 / 0.8111 | 29.19 / 0.8123 |\n|        | 20 | 31.29 / 0.8733 | 31.46 / 0.8769 | 31.59 / 0.8802 |\n|        | 30 | 32.67 / 0.9043 | 32.84 / 0.9059 | 32.98 / 0.9090 |\n|        | 40 | 33.63 / 0.9198 |       -        | 33.96 / 0.9247 |"
  },
  {
    "path": "DnCNN/model/specifics_color/Add (color) specific models.md",
    "content": "\n"
  },
  {
    "path": "DnCNN/utilities/Cal_PSNRSSIM.m",
    "content": "function [psnr_cur, ssim_cur] = Cal_PSNRSSIM(A,B,row,col)\n\n\n[n,m,ch]=size(B);\nA = A(row+1:n-row,col+1:m-col,:);\nB = B(row+1:n-row,col+1:m-col,:);\nA=double(A); % Ground-truth\nB=double(B); %\n\ne=A(:)-B(:);\nmse=mean(e.^2);\npsnr_cur=10*log10(255^2/mse);\n\nif ch==1\n    [ssim_cur, ~] = ssim_index(A, B);\nelse\n    ssim_cur = (ssim_index(A(:,:,1), B(:,:,1)) + ssim_index(A(:,:,2), B(:,:,2)) + ssim_index(A(:,:,3), B(:,:,3)))/3;\nend\n\n\nfunction [mssim, ssim_map] = ssim_index(img1, img2, K, window, L)\n\n%========================================================================\n%SSIM Index, Version 1.0\n%Copyright(c) 2003 Zhou Wang\n%All Rights Reserved.\n%\n%The author is with Howard Hughes Medical Institute, and Laboratory\n%for Computational Vision at Center for Neural Science and Courant\n%Institute of Mathematical Sciences, New York University.\n%\n%----------------------------------------------------------------------\n%Permission to use, copy, or modify this software and its documentation\n%for educational and research purposes only and without fee is hereby\n%granted, provided that this copyright notice and the original authors'\n%names appear on all copies and supporting documentation. This program\n%shall not be used, rewritten, or adapted as the basis of a commercial\n%software or hardware product without first obtaining permission of the\n%authors. The authors make no representations about the suitability of\n%this software for any purpose. It is provided \"as is\" without express\n%or implied warranty.\n%----------------------------------------------------------------------\n%\n%This is an implementation of the algorithm for calculating the\n%Structural SIMilarity (SSIM) index between two images. Please refer\n%to the following paper:\n%\n%Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, \"Image\n%quality assessment: From error measurement to structural similarity\"\n%IEEE Transactios on Image Processing, vol. 13, no. 
1, Jan. 2004.\n%\n%Kindly report any suggestions or corrections to zhouwang@ieee.org\n%\n%----------------------------------------------------------------------\n%\n%Input : (1) img1: the first image being compared\n%        (2) img2: the second image being compared\n%        (3) K: constants in the SSIM index formula (see the above\n%            reference). defualt value: K = [0.01 0.03]\n%        (4) window: local window for statistics (see the above\n%            reference). default widnow is Gaussian given by\n%            window = fspecial('gaussian', 11, 1.5);\n%        (5) L: dynamic range of the images. default: L = 255\n%\n%Output: (1) mssim: the mean SSIM index value between 2 images.\n%            If one of the images being compared is regarded as\n%            perfect quality, then mssim can be considered as the\n%            quality measure of the other image.\n%            If img1 = img2, then mssim = 1.\n%        (2) ssim_map: the SSIM index map of the test image. The map\n%            has a smaller size than the input images. The actual size:\n%            size(img1) - size(window) + 1.\n%\n%Default Usage:\n%   Given 2 test images img1 and img2, whose dynamic range is 0-255\n%\n%   [mssim ssim_map] = ssim_index(img1, img2);\n%\n%Advanced Usage:\n%   User defined parameters. 
For example\n%\n%   K = [0.05 0.05];\n%   window = ones(8);\n%   L = 100;\n%   [mssim ssim_map] = ssim_index(img1, img2, K, window, L);\n%\n%See the results:\n%\n%   mssim                        %Gives the mssim value\n%   imshow(max(0, ssim_map).^4)  %Shows the SSIM index map\n%\n%========================================================================\n\n\nif (nargin < 2 || nargin > 5)\n    ssim_index = -Inf;\n    ssim_map = -Inf;\n    return;\nend\n\nif (size(img1) ~= size(img2))\n    ssim_index = -Inf;\n    ssim_map = -Inf;\n    return;\nend\n\n[M N] = size(img1);\n\nif (nargin == 2)\n    if ((M < 11) || (N < 11))\n        ssim_index = -Inf;\n        ssim_map = -Inf;\n        return\n    end\n    window = fspecial('gaussian', 11, 1.5);\t%\n    K(1) = 0.01;\t\t\t\t\t\t\t\t      % default settings\n    K(2) = 0.03;\t\t\t\t\t\t\t\t      %\n    L = 255;                                  %\nend\n\nif (nargin == 3)\n    if ((M < 11) || (N < 11))\n        ssim_index = -Inf;\n        ssim_map = -Inf;\n        return\n    end\n    window = fspecial('gaussian', 11, 1.5);\n    L = 255;\n    if (length(K) == 2)\n        if (K(1) < 0 || K(2) < 0)\n            ssim_index = -Inf;\n            ssim_map = -Inf;\n            return;\n        end\n    else\n        ssim_index = -Inf;\n        ssim_map = -Inf;\n        return;\n    end\nend\n\nif (nargin == 4)\n    [H W] = size(window);\n    if ((H*W) < 4 || (H > M) || (W > N))\n        ssim_index = -Inf;\n        ssim_map = -Inf;\n        return\n    end\n    L = 255;\n    if (length(K) == 2)\n        if (K(1) < 0 || K(2) < 0)\n            ssim_index = -Inf;\n            ssim_map = -Inf;\n            return;\n        end\n    else\n        ssim_index = -Inf;\n        ssim_map = -Inf;\n        return;\n    end\nend\n\nif (nargin == 5)\n    [H W] = size(window);\n    if ((H*W) < 4 || (H > M) || (W > N))\n        ssim_index = -Inf;\n        ssim_map = -Inf;\n        return\n    end\n    if (length(K) == 2)\n        if (K(1) < 0 || 
K(2) < 0)\n            ssim_index = -Inf;\n            ssim_map = -Inf;\n            return;\n        end\n    else\n        ssim_index = -Inf;\n        ssim_map = -Inf;\n        return;\n    end\nend\n\nC1 = (K(1)*L)^2;\nC2 = (K(2)*L)^2;\nwindow = window/sum(sum(window));\nimg1 = double(img1);\nimg2 = double(img2);\n\nmu1   = filter2(window, img1, 'valid');\nmu2   = filter2(window, img2, 'valid');\nmu1_sq = mu1.*mu1;\nmu2_sq = mu2.*mu2;\nmu1_mu2 = mu1.*mu2;\nsigma1_sq = filter2(window, img1.*img1, 'valid') - mu1_sq;\nsigma2_sq = filter2(window, img2.*img2, 'valid') - mu2_sq;\nsigma12 = filter2(window, img1.*img2, 'valid') - mu1_mu2;\n\nif (C1 > 0 & C2 > 0)\n    ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2));\nelse\n    numerator1 = 2*mu1_mu2 + C1;\n    numerator2 = 2*sigma12 + C2;\n    denominator1 = mu1_sq + mu2_sq + C1;\n    denominator2 = sigma1_sq + sigma2_sq + C2;\n    ssim_map = ones(size(mu1));\n    index = (denominator1.*denominator2 > 0);\n    ssim_map(index) = (numerator1(index).*numerator2(index))./(denominator1(index).*denominator2(index));\n    index = (denominator1 ~= 0) & (denominator2 == 0);\n    ssim_map(index) = numerator1(index)./denominator1(index);\nend\n\nmssim = mean2(ssim_map);\n\nreturn\n\n\n\n\n\n\n\n"
  },
  {
    "path": "DnCNN/utilities/Merge_Bnorm_Demo.m",
    "content": "\n\n\n\n\nload('sigma=25_Bnorm.mat');\n\n[net] = vl_simplenn_mergebnorm(net);\n\nsave sigma=25 net;\n\n\n"
  },
  {
    "path": "DnCNN/utilities/data_augmentation.m",
    "content": "function image = data_augmentation(image, mode)\n\nif mode == 1\n    return;\nend\n\nif mode == 2 % flipped\n    image = flipud(image);\n    return;\nend\n\nif mode == 3 % rotation 90\n    image = rot90(image,1);\n    return;\nend\n\nif mode == 4 % rotation 90 & flipped\n    image = rot90(image,1);\n    image = flipud(image);\n    return;\nend\n\nif mode == 5 % rotation 180\n    image = rot90(image,2);\n    return;\nend\n\nif mode == 6 % rotation 180 & flipped\n    image = rot90(image,2);\n    image = flipud(image);\n    return;\nend\n\nif mode == 7 % rotation 270\n    image = rot90(image,3);\n    return;\nend\n\nif mode == 8 % rotation 270 & flipped\n    image = rot90(image,3);\n    image = flipud(image);\n    return;\nend\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "DnCNN/utilities/modcrop.m",
    "content": "function imgs = modcrop(imgs, modulo)\nif size(imgs,3)==1\n    sz = size(imgs);\n    sz = sz - mod(sz, modulo);\n    imgs = imgs(1:sz(1), 1:sz(2));\nelse\n    tmpsz = size(imgs);\n    sz = tmpsz(1:2);\n    sz = sz - mod(sz, modulo);\n    imgs = imgs(1:sz(1), 1:sz(2),:);\nend\n\n"
  },
  {
    "path": "DnCNN/utilities/shave.m",
    "content": "function I = shave(I, border)\nI = I(1+border(1):end-border(1), ...\n      1+border(2):end-border(2), :, :);\n"
  },
  {
    "path": "DnCNN/utilities/simplenn_matlab.m",
    "content": "function res = simplenn_matlab(net, input)\n\n%% If you did not install the matconvnet package, you can use this for testing.\n\nn = numel(net.layers);\n\nres = struct('x', cell(1,n+1));\nres(1).x = input;\n\nfor ilayer = 1 : n\n    l = net.layers{ilayer};\n    switch l.type\n        case 'conv'\n            for noutmaps = 1 : size(l.weights{1},4)\n                z = zeros(size(res(ilayer).x,1),size(res(ilayer).x,2),'single');\n                for ninmaps = 1 : size(res(ilayer).x,3)\n                    z = z + convn(res(ilayer).x(:,:,ninmaps), l.weights{1}(:,:,ninmaps,noutmaps),'same');\n                end\n                res(ilayer+1).x(:,:,noutmaps) = z + l.weights{2}(noutmaps);\n            end\n        case 'relu'\n            res(ilayer+1).x = max(res(ilayer).x,0);\n    end\n    res(ilayer).x = [];\nend\n\nend\n"
  },
  {
    "path": "DnCNN/utilities/vl_ffdnet_concise.m",
    "content": "function res = vl_ffdnet_concise(net, x)\n\nglobal sigmas;\nn = numel(net.layers);\nres = struct('x', cell(1,n+1));\nres(1).x = x ;\ncudnn = {'CuDNN'} ;\n%cudnn = {'NoCuDNN'} ;\n\nfor i=1:n\n    l = net.layers{i} ;\n    switch l.type\n        case 'conv'\n            res(i+1).x = vl_nnconv(res(i).x, l.weights{1}, l.weights{2}, ...\n                'pad', l.pad, ...\n                'stride', l.stride, ...\n                'dilate', l.dilate, ...\n                l.opts{:}, ...\n                cudnn{:}) ;\n            \n        case 'concat'\n            if size(sigmas,1)~=size(res(i).x,1)\n                sigmaMap   = bsxfun(@times,ones(size(res(i).x,1),size(res(i).x,2),1,size(res(i).x,4),'single'),permute(sigmas,[3 4 1 2]));\n                res(i+1).x = cat(3,res(i).x,sigmaMap);\n            else\n                res(i+1).x = cat(3,res(i).x,sigmaMap);\n            end\n\n        case 'SubP'\n            res(i+1).x = vl_nnSubP(res(i).x, [],'scale',l.scale);\n  \n        case 'relu'\n            res(i+1).x = max(res(i).x,0) ;\n    end\n        res(i).x = [] ;\nend\n\n\n"
  },
  {
    "path": "DnCNN/utilities/vl_ffdnet_matlab.m",
    "content": "function res = vl_ffdnet_matlab(net, input)\n\n%% If you did not install the matconvnet package, you can use this for testing.\n\nglobal sigmas;\nn = numel(net.layers);\nres = struct('x', cell(1,n+1));\nres(1).x = input;\n\nfor i = 1 : n\n    l = net.layers{i};\n    switch l.type\n        \n        case 'conv'\n            disp(['Processing ... ',int2str(i),'/',int2str(n)]);\n            for noutmaps = 1 : size(l.weights{1},4)\n                z = zeros(size(res(i).x,1),size(res(i).x,2),'single');\n                for ninmaps = 1 : size(res(i).x,3)\n                    z = z + convn(res(i).x(:,:,ninmaps), rot90(l.weights{1}(:,:,ninmaps,noutmaps),2),'same'); % 180 degree rotation for kernel\n                end\n                res(i+1).x(:,:,noutmaps) = z + l.weights{2}(noutmaps);\n            end\n            \n        case 'relu'\n            res(i+1).x = max(res(i).x,0);\n            \n        case 'concat'\n            if size(sigmas,1)~=size(res(i).x,1)\n                sigmaMap   = bsxfun(@times,ones(size(res(i).x,1),size(res(i).x,2),1,size(res(i).x,4),'single'),permute(sigmas,[3 4 1 2]));\n                res(i+1).x = cat(3,res(i).x,sigmaMap);\n            else\n                res(i+1).x = cat(3,res(i).x,sigmaMap);\n            end\n            \n        case 'SubP'\n            res(i+1).x = vl_nnSubP(res(i).x, [],'scale',l.scale);\n            \n    end\n    res(i).x = [];\nend\n\nend\n"
  },
  {
    "path": "DnCNN/utilities/vl_simplenn.m",
    "content": "function res = vl_simplenn(net, x, dzdy, res, varargin)\n%VL_SIMPLENN  Evaluate a SimpleNN network.\n%   RES = VL_SIMPLENN(NET, X) evaluates the convnet NET on data X.\n%   RES = VL_SIMPLENN(NET, X, DZDY) evaluates the convnent NET and its\n%   derivative on data X and output derivative DZDY (foward+bacwkard pass).\n%   RES = VL_SIMPLENN(NET, X, [], RES) evaluates the NET on X reusing the\n%   structure RES.\n%   RES = VL_SIMPLENN(NET, X, DZDY, RES) evaluates the NET on X and its\n%   derivatives reusing the structure RES.\n%\n%   This function process networks using the SimpleNN wrapper\n%   format. Such networks are 'simple' in the sense that they consist\n%   of a linear sequence of computational layers. You can use the\n%   `dagnn.DagNN` wrapper for more complex topologies, or write your\n%   own wrapper around MatConvNet computational blocks for even\n%   greater flexibility.\n\n% Copyright (C) 2014-15 Andrea Vedaldi.\n% All rights reserved.\n%\n% This file is part of the VLFeat library and is made available under\n% the terms of the BSD license (see the COPYING file).\nglobal sigmas;\nopts.conserveMemory = false ;\nopts.sync = false ;\nopts.mode = 'normal' ;\nopts.accumulate = false ;\nopts.cudnn = true ;\nopts.backPropDepth = +inf ;\nopts.skipForward = false ;\nopts.parameterServer = [] ;\nopts.holdOn = false ;\nopts = vl_argparse(opts, varargin);\n\nn = numel(net.layers) ;\nassert(opts.backPropDepth > 0, 'Invalid `backPropDepth` value (!>0)');\nbackPropLim = max(n - opts.backPropDepth + 1, 1);\n\nif (nargin <= 2) || isempty(dzdy)\n    doder = false ;\n    if opts.skipForward\n        error('simplenn:skipForwardNoBackwPass', ...\n            '`skipForward` valid only when backward pass is computed.');\n    end\nelse\n    doder = true ;\nend\n\nif opts.cudnn\n    cudnn = {'CuDNN'} ;\n    bnormCudnn = {'NoCuDNN'} ; % ours seems slighty faster\nelse\n    cudnn = {'NoCuDNN'} ;\n    bnormCudnn = {'NoCuDNN'} ;\nend\n\nswitch lower(opts.mode)\n    
case 'normal'\n        testMode = false ;\n    case 'test'\n        testMode = true ;\n    otherwise\n        error('Unknown mode ''%s''.', opts. mode) ;\nend\n\ngpuMode = isa(x, 'gpuArray') ;\n\nif nargin <= 3 || isempty(res)\n    if opts.skipForward\n        error('simplenn:skipForwardEmptyRes', ...\n            'RES structure must be provided for `skipForward`.');\n    end\n    res = struct(...\n        'x', cell(1,n+1), ...\n        'dzdx', cell(1,n+1), ...\n        'dzdw', cell(1,n+1), ...\n        'aux', cell(1,n+1), ...\n        'stats', cell(1,n+1), ...\n        'time', num2cell(zeros(1,n+1)), ...\n        'backwardTime', num2cell(zeros(1,n+1))) ;\nend\n\nif ~opts.skipForward\n    res(1).x = x ;\nend\n\n% -------------------------------------------------------------------------\n%                                                              Forward pass\n% -------------------------------------------------------------------------\n\nfor i=1:n\n    if opts.skipForward, break; end;\n    l = net.layers{i} ;\n    %res(i).time = tic ;\n    switch l.type\n        case 'conv'\n            res(i+1).x = vl_nnconv(res(i).x, l.weights{1}, l.weights{2}, ...\n                'pad', l.pad, ...\n                'stride', l.stride, ...\n                'dilate', l.dilate, ...\n                l.opts{:}, ...\n                cudnn{:}) ;\n            \n        case 'concat'\n            if size(sigmas,1)~=size(res(i).x,1)\n                sigmaMap   = bsxfun(@times,ones(size(res(i).x,1),size(res(i).x,2),1,size(res(i).x,4)),permute(sigmas,[3 4 1 2])) ;\n                res(i+1).x = vl_nnconcat({res(i).x,sigmaMap}) ;\n            else\n                res(i+1).x = vl_nnconcat({res(i).x,sigmas}) ;\n            end\n            \n        case 'SubP'\n            res(i+1).x = vl_nnSubP(res(i).x, [],'scale',l.scale) ;\n        case 'relu'\n            leak = {} ; \n            res(i+1).x = vl_nnrelu(res(i).x,[],leak{:}) ;\n    end\n    \n    % optionally forget intermediate 
results\n    needsBProp = doder && i >= backPropLim;\n    forget = opts.conserveMemory && ~needsBProp ;\n    if i > 1\n        lp = net.layers{i-1} ;\n        % forget RELU input, even for BPROP\n        forget = forget && (~needsBProp || (strcmp(l.type, 'relu') && ~lp.precious)) ;\n        forget = forget && ~(strcmp(lp.type, 'loss') || strcmp(lp.type, 'softmaxloss')) ;\n        forget = forget && ~lp.precious ;\n    end\n    if forget\n        res(i).x = [] ;\n    end\n    \n    if gpuMode && opts.sync\n        wait(gpuDevice) ;\n    end\n    %res(i).time = toc(res(i).time) ;\nend\n"
  },
  {
    "path": "DnCNN/utilities/vl_simplenn_mergebnorm.m",
    "content": "function [net1] = vl_simplenn_mergebnorm(net)\n\n%% merge bnorm parameters into adjacent Conv layer\n\nfor i = 1:numel(net.layers)\n    if strcmp(net.layers{i}.type, 'conv')\n        net.layers{i}.weightDecay(2) = 1;\n    end\nend\n\nfor i = 1:numel(net.layers)\n    if strcmp(net.layers{i}.type, 'bnorm')\n        ws = net.layers{i}.weights{1};\n        bs = net.layers{i}.weights{2};\n        mu_sigmas = net.layers{i}.weights{3};\n        for j = 1:numel(ws)\n            net.layers{i-1}.weights{1}(:,:,:,j) =single(double(net.layers{i-1}.weights{1}(:,:,:,j))*double(ws(j))/(double(mu_sigmas(j,2))));\n            net.layers{i-1}.weights{2}(j) =single(double(bs(j)) - double(ws(j))*double(mu_sigmas(j,1))/(double(mu_sigmas(j,2))));\n        end\n        net.layers{i-1}.learningRate(2) = 1;\n    end\nend\n\nnet1 = net;\nnet1.layers = {};\nnet1 = rmfield(net1,'meta');\nfor i = 1:numel(net.layers)\n    if ~strcmp(net.layers{i}.type, 'bnorm')\n        net1.layers{end+1} = net.layers{i};\n    end\nend\n\nnet1.layers = net1.layers(1:end-1);\n\n\nend\n"
  },
  {
    "path": "README.md",
    "content": "### 1. 项目介绍\n\n#### 1.1 项目的背景\n\n该项目是为了研究基于深度卷积神经网络的图像去噪算法，是利用DnCNN模型，但是为了比较该算法的效果，另外实现了四种传统的图像去噪算法（均值滤波、中值滤波、非局部均值滤波NLM和三维块匹配滤波BM3D）作为对照组。\n\n#### 1.2 噪声强度和类型\n\n项目中实现五种算法对噪声强度为10,15,20...60,65,70的高斯白噪声进行处理。\n\n#### 1.3 评价指标\n\n图像去噪后，如何评估算法去噪效果的好坏呢？项目中采用峰值信噪比PSNR和结构相似性SSIM作为评价指标。一般来说，PSNR越大，去噪效果越好。SSIM取值为0到1，越接近1，表示效果越好。\n\n### 2. 数据集介绍\n\n该项目中只是对Set12数据集进行处理，也就是项目中的Set12目录下的12张图片。如果觉得数据量不够充分，可以自行添加其他数据集，在代码中修改一下数据集的目录即可。\n\n### 3. 代码介绍\n\n对于均值滤波、中值滤波、和NLM，MATLAB都已经实现了，所以我们直接调用MATLAB自带的函数就可以。\n\nBM3D和DnCNN的代码都是从别人那儿clone下来，做了一些小的修改。\n\n五种算法都是对Set12数据集进行去噪，去噪的结果并没有保存，只是在运行过程中能看到去噪前和去噪后的图像对比，感兴趣的朋友可以自己将图像保存下来观察。\n\n### 4. 代码运行\n\n五种算法分别在五个不同的目录中，所以你只需要进行对应的目录，运行代码即可。\n\n+ 均值滤波、中值滤波、NLM算法对应的目录分别为avefilter、medainfilter、nlm-image-denoising。每个目录下只有一个.m文件，所以只需要运行对应的文件即可。\n+ BM3D对应的目录是BM3D，运行该目录下的main.m程序即可。\n+ DnCNN对应的目录是DnCNN，运行该目录下的Demo_test_DnCNN.m程序即可，该算法目录中对应的还有好几个代码，都是原项目中有的，我没有动过，感兴趣的朋友可以自己看看。\n\n"
  },
  {
    "path": "avefilter/avefilt.m",
    "content": "clear,clc;\npauseTime = 1;\n\ndata_path = \"..\\Set12\";\next = [\"*.jpg\", \"*.png\", \"*.jpeg\"];\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(data_path,ext(i))));\nend\nnoise_leval = [10,15,20,25,30,35,40,45,50,55,60,65,70];\n\nfor ii = 1:length(noise_leval)\n    PSNRs = zeros(1, length(filePaths));\n    SSIMs = zeros(1, length(filePaths));\n    sigma = noise_leval(ii);\n    for jj = 1:length(filePaths)\n        % ԭͼ\n        originImage = im2double(imread(filePaths(jj).name));\n        % Ӹ˹\n        imageWithNoise = single(originImage + sigma/255*randn(size(originImage)));\n        [rows, cols] = size(originImage);\n        y = imageWithNoise;\n\n        % ֵ˲㷨\n        % ָģߴ\n        boxSize = 3;\n        template = zeros(boxSize);\n        for i = 1:rows-boxSize+1\n            for j = 1:cols-boxSize+1\n                % ȡģ\n                template = imageWithNoise(i:i+(boxSize-1),j:j+(boxSize-1));\n                % þֵģĵֵ\n                s = sum(template(:));\n                y(i+(boxSize-1)/2,j+(boxSize-1)/2) = s/boxSize^2;\n            end\n        end\n\n        % psnrssim\n%         se2 = (y - originImage).^2;\n%         MSE2 = sum(se2(:)) / (rows * cols);\n        PSNRs(jj) = psnr(im2uint8(originImage), im2uint8(y));\n        SSIMs(jj) = ssim(im2uint8(originImage), im2uint8(y));\n        imshow(cat(2,im2uint8(originImage),im2uint8(imageWithNoise),im2uint8(y)));\n        title(['sigma=',num2str(sigma),'  ',filePaths(jj).name,'  psnr=',num2str(PSNRs(jj),'%2.2f'),'dB','  ssim=',num2str(SSIMs(jj),'%2.4f')])\n        drawnow;\n        pause(pauseTime)\n    %     fprintf('˲MSE=%f\\n', MSE2);\n    end\n    disp([\"sigma:\",sigma,\"psnr:\",mean(PSNRs),\"ssim:\", mean(SSIMs)]);\nend\n\n"
  },
  {
    "path": "medianfilter/medianfilt.m",
    "content": "clear,clc;\npauseTime = 1;\n\ndata_path = \"..\\Set12\";\next = [\"*.jpg\", \"*.png\", \"*.jpeg\"];\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(data_path,ext(i))));\nend\nnoise_leval = [10,15,20,25,30,35,40,45,50,55,60,65,70];\n\nfor ii = 1:length(noise_leval)\n    PSNRs = zeros(1, length(filePaths));\n    SSIMs = zeros(1, length(filePaths));\n    sigma = noise_leval(ii);\n    for jj = 1:length(filePaths)\n        % ԭͼ\n        originImage = im2double(imread(filePaths(jj).name));\n        % Ӹ˹\n        imageWithNoise = single(originImage + sigma/255*randn(size(originImage)));\n        [rows, cols] = size(originImage);\n        y = imageWithNoise;\n\n        % ֵ˲㷨\n        % ָģߴ\n        boxSize = 3;\n        template = zeros(boxSize);\n        for i = 1:rows-boxSize+1\n            for j = 1:cols-boxSize+1\n                % ȡģ\n                template = imageWithNoise(i:i+(boxSize-1),j:j+(boxSize-1));\n                % ֵ滻ģĵֵ\n                m = median(template(:));\n                y(i+(boxSize-1)/2,j+(boxSize-1)/2) = m;\n            end\n        end\n\n        % psnrssim\n        PSNRs(jj) = psnr(im2uint8(originImage), im2uint8(y));\n        SSIMs(jj) = ssim(im2uint8(originImage), im2uint8(y));\n        imshow(cat(2,im2uint8(originImage),im2uint8(imageWithNoise),im2uint8(y)));\n        title(['sigma=',num2str(sigma),'  ',filePaths(jj).name,'  psnr=',num2str(PSNRs(jj),'%2.2f'),'dB','  ssim=',num2str(SSIMs(jj),'%2.4f')])\n        drawnow;\n        pause(pauseTime)\n    end\n    disp([\"sigma:\",sigma,\"psnr:\",mean(PSNRs),\"ssim:\", mean(SSIMs)]);\nend\n\n"
  },
  {
    "path": "nlm-image-denoising/NLmeansfilt.m",
    "content": "clear,clc;\npauseTime = 1;\n\ndata_path = \"..\\Set12\";\next = [\"*.jpg\", \"*.png\", \"*.jpeg\"];\nfilePaths   =  [];\nfor i = 1 : length(ext)\n    filePaths = cat(1,filePaths, dir(fullfile(data_path,ext(i))));\nend\nnoise_leval = [10,15,20,25,30,35,40,45,50,55,60,65,70];\n\nfor ii = 1:length(noise_leval)\n    PSNRs = zeros(1, length(filePaths));\n    SSIMs = zeros(1, length(filePaths));\n    sigma = noise_leval(ii);\n    for jj = 1:length(filePaths)\n        disp(['ڴͼƬ', filePaths(jj).name]);\n        % ԭͼ\n        originImage = im2double(imread(filePaths(jj).name));\n        % Ӹ˹\n        imageWithNoise = single(originImage + sigma/255*randn(size(originImage)));\n%         imageWithNoise = imnoise(originImage,'gaussian',0,(sigma/255)^2);\n        % NL-Means˲\n%         y = NLmeans(imageWithNoise,2,7,sigma/100);\n        y = imnlmfilt(imageWithNoise);\n        % psnrssim\n        PSNRs(jj) = psnr(im2uint8(originImage), im2uint8(y));\n        SSIMs(jj) = ssim(im2uint8(originImage), im2uint8(y));\n        imshow(cat(2,im2uint8(originImage),im2uint8(imageWithNoise),im2uint8(y)));\n        title(['sigma=',num2str(sigma),'  ',filePaths(jj).name,'  psnr=',num2str(PSNRs(jj),'%2.2f'),'dB','  ssim=',num2str(SSIMs(jj),'%2.4f')])\n        drawnow;\n        pause(pauseTime)\n    end\n    disp([\"sigma:\",sigma,\"psnr:\",mean(PSNRs),\"ssim:\", mean(SSIMs)]);\nend\n"
  }
]