@manual{lu2016libadmm,
author = {Lu, Canyi},
title = {A Library of {ADMM} for Sparse and Low-rank Optimization},
organization = {National University of Singapore},
month = jun,
year = {2016},
note = {\url{https://github.com/canyilu/LibADMM}}
}
@article{lu2018unified,
author = {Lu, Canyi and Feng, Jiashi and Yan, Shuicheng and Lin, Zhouchen},
title = {A Unified Alternating Direction Method of Multipliers by Majorization Minimization},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
publisher = {IEEE},
year = {2018},
volume = {40},
number = {3},
pages = {527--541},
}
## 4. Version History
- Version 1.0 was released in June 2016.
- Version 1.1 was released in June 2018. Some key differences are listed below:
+ Added a new model for low-rank tensor recovery from Gaussian measurements based on the tensor nuclear norm, and the corresponding function lrtr_Gaussian_tnn.m
+ Updated several functions to improve efficiency, including prox_tnn.m, tprod.m, tran.m, tubalrank.m, and nmodeproduct.m
+ Updated the three example functions: example_sparse_models.m, example_low_rank_matrix_models.m, and example_low_rank_tensor_models.m
+ Removed the test on image data and some unnecessary functions
## References
================================================
FILE: algorithms/comp_loss.m
================================================
function out = comp_loss(E,loss)
% Evaluate the loss term used by the ADMM solvers in this library.
%
% Input:
%   E    - matrix (or vector)
%   loss - 'l1'  : out = ||E||_1      (sum of absolute values of all entries)
%          'l21' : out = ||E||_{2,1}  (sum of the l2 norms of the columns)
%          'l2'  : out = 0.5*||E||_F^2
%
% Output:
%   out  - scalar loss value
switch loss
case 'l1'
out = norm(E(:),1);
case 'l21'
out = 0;
for i = 1 : size(E,2)
out = out + norm(E(:,i));
end
case 'l2'
out = 0.5*norm(E,'fro')^2;
otherwise
% fail fast instead of returning an undefined variable
error('not supported loss function');
end
================================================
FILE: algorithms/elasticnet.m
================================================
function [X,obj,err,iter] = elasticnet(A,B,lambda,opts)
% Solve the elastic net minimization problem by ADMM
%
% min_X ||X||_1+lambda*||X||_F^2, s.t. AX=B
%
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% lambda - >=0, parameter
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% obj - objective function value
% err - residual ||AX-B||_F
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
Z = X;
Y1 = zeros(d,nb);
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
% update X
X = prox_elasticnet(Z-Y2/mu,1/mu,lambda/mu);
% update Z
Z = invAtAI*(-(A'*Y1-Y2)/mu+AtB+X);
% residuals of the two constraints AZ=B and X=Z
dY1 = A*Z-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = norm(X(:),1)+lambda*norm(X,'fro')^2;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = norm(X(:),1)+lambda*norm(X,'fro')^2;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/elasticnetR.m
================================================
function [X,E,obj,err,iter] = elasticnetR(A,B,lambda1,lambda2,opts)
% Solve the elastic net regularized minimization problem by ADMM
%
% min_{X,E} loss(E)+lambda1*||X||_1+lambda2*||X||_F^2, s.t. AX+E=B
% loss(E) = ||E||_1 or 0.5*||E||_F^2
%
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% lambda1 - >=0, parameter
% lambda2 - >=0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% E - d*nb matrix
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1'; % default
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X, E (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
E = zeros(d,nb);
Z = X;
Y1 = E;
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
Zk = Z;
% first super block {X,E}: X and E are updated jointly (both use old Z)
X = prox_elasticnet(Z-Y2/mu,lambda1/mu,lambda2/mu);
if strcmp(loss,'l1')
E = prox_l1(B-A*Z-Y1/mu,1/mu);
elseif strcmp(loss,'l2')
E = mu*(B-A*Z-Y1/mu)/(1+mu);
else
error('not supported loss function');
end
% second super block {Z}
Z = invAtAI*(-A'*(Y1/mu+E)+AtB+Y2/mu+X);
% residuals of the two constraints AZ+E=B and X=Z
dY1 = A*Z+E-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgE chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = comp_loss(E,loss)+lambda1*norm(X(:),1)+lambda2*norm(X,'fro')^2;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = comp_loss(E,loss)+lambda1*norm(X(:),1)+lambda2*norm(X,'fro')^2;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/fusedl1.m
================================================
function [x,obj,err,iter] = fusedl1(A,b,lambda,opts)
% Solve the fused Lasso (Fused L1) minimization problem by ADMM
%
% min_x ||x||_1 + lambda*\sum_{i=2}^p |x_i-x_{i-1}|,
% s.t. Ax=b
%
% ---------------------------------------------
% Input:
% A - d*n matrix
% b - d*1 vector
% lambda - >=0, parameter
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% x - n*1 vector
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 20/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n] = size(A);
% variables: x (primal), z (auxiliary copy of x), Y1/Y2 (dual variables)
x = zeros(n,1);
z = x;
Y1 = zeros(d,1);
Y2 = x;
Atb = A'*b;
I = eye(n);
% precompute (A'*A+I)^{-1}; reused in every z-update
invAtAI = (A'*A+I)\I;
% parameters for "flsa" (from SLEP package)
% NOTE(review): flsa is an external solver and must be on the MATLAB path
tol2 = 1e-10; % the duality gap for termination
max_step = 50; % the maximal number of iterations
x0 = zeros(n-1,1); % the starting point
iter = 0;
for iter = 1 : max_iter
xk = x;
zk = z;
% update x.
% flsa solves min_x 1/2||x-v||_2^2+lambda1*||x||_1+lambda2*\sum_{i=2}^p |x_i-x_{i-1}|
x = flsa(z-Y2/mu,x0,1/mu,lambda/mu,n,max_step,tol2,1,6);
% update z
z = invAtAI*(-A'*Y1/mu+Atb+Y2/mu+x);
% residuals of the two constraints Az=b and x=z
dY1 = A*z-b;
dY2 = x-z;
chgx = max(abs(xk-x));
chgz = max(abs(zk-z));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgx chgz max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = comp_fusedl1(x,1,lambda);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = comp_fusedl1(x,1,lambda);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
function f = comp_fusedl1(x,lambda1,lambda2)
% Fused-lasso objective value:
% f = lambda1*||x||_1 + lambda2*\sum_{i=2}^p |x_i-x_{i-1}|.
% x - p*1 vector
% diff(x) yields the successive differences; empty for a scalar x,
% in which case the total-variation term is 0.
f = lambda1*norm(x,1) + lambda2*sum(abs(diff(x)));
================================================
FILE: algorithms/fusedl1R.m
================================================
function [x,e,obj,err,iter] = fusedl1R(A,b,lambda1,lambda2,opts)
% Solve the fused Lasso regularized minimization problem by ADMM
%
% min_{x,e} loss(e) + lambda1*||x||_1 + lambda2*\sum_{i=2}^p |x_i-x_{i-1}|,
% loss(e) = ||e||_1 or 0.5*||e||_2^2
%
% ---------------------------------------------
% Input:
% A - d*n matrix
% b - d*1 vector
% lambda1 - >=0, parameter
% lambda2 - >=0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(e) = ||e||_1
% 'l2': loss(E) = 0.5*||e||_2^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% x - n*1 vector
% e - d*1 vector
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 20/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1'; % default
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n] = size(A);
% variables: x, e (primal), z (auxiliary copy of x), Y1/Y2 (dual variables)
x = zeros(n,1);
e = zeros(d,1);
z = x;
Y1 = e;
Y2 = x;
Atb = A'*b;
I = eye(n);
% precompute (A'*A+I)^{-1}; reused in every z-update
invAtAI = (A'*A+I)\I;
% parameters for "flsa" (from SLEP package)
% NOTE(review): flsa is an external solver and must be on the MATLAB path
tol2 = 1e-10; % the duality gap for termination
max_step = 50; % the maximal number of iterations
x0 = zeros(n-1,1); % the starting point
iter = 0;
for iter = 1 : max_iter
xk = x;
ek = e;
zk = z;
% first super block {x,e}: x and e are updated jointly (both use old z)
% flsa solves min_x 1/2||x-v||_2^2+lambda1*||x||_1+lambda2*\sum_{i=2}^p |x_i-x_{i-1}|,
x = flsa(z-Y2/mu,x0,lambda1/mu,lambda2/mu,n,max_step,tol2,1,6);
if strcmp(loss,'l1')
e = prox_l1(b-A*z-Y1/mu,1/mu);
elseif strcmp(loss,'l2')
e = mu*(b-A*z-Y1/mu)/(1+mu);
else
error('not supported loss function');
end
% second super block {Z}
z = invAtAI*(-A'*(Y1/mu+e)+Atb+Y2/mu+x);
% residuals of the two constraints Az+e=b and x=z
dY1 = A*z+e-b;
dY2 = x-z;
chgx = max(abs(xk-x));
chge = max(abs(ek-e));
chgz = max(abs(zk-z));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgx chge chgz max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = comp_loss(e,loss)+comp_fusedl1(x,lambda1,lambda2);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = comp_loss(e,loss)+comp_fusedl1(x,lambda1,lambda2);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
function f = comp_fusedl1(x,lambda1,lambda2)
% Fused-lasso objective:
% f = lambda1*||x||_1 + lambda2*\sum_{i=2}^p |x_i-x_{i-1}|.
% x - p*1 vector
tv = sum(abs(diff(x))); % total variation; 0 when x is a scalar
f = lambda1*norm(x,1) + lambda2*tv;
================================================
FILE: algorithms/groupl1.m
================================================
function [X,obj,err,iter] = groupl1(A,B,G,opts)
% Solve the group l1-minimization problem by ADMM
%
% min_X \sum_{i=1}^n\sum_{g in G} ||(x_i)_g||_2, s.t. AX=B
%
% x_i is the i-th column of X
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% G - a cell indicates a partition of 1:na
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% obj - objective function value
% err - residual ||AX-B||_F
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
Z = X;
Y1 = zeros(d,nb);
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
% update X: group-wise proximal step on every column
for i = 1 : nb
X(:,i) = prox_gl1(Z(:,i)-Y2(:,i)/mu,G,1/mu);
end
% update Z
Z = invAtAI*(-(A'*Y1-Y2)/mu+AtB+X);
% residuals of the two constraints AZ=B and X=Z
dY1 = A*Z-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = compute_obj(X,G);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = compute_obj(X,G);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
function obj = compute_obj(X,G)
% Group-lasso objective: sum over all columns of X and all groups g in G
% of the l2 norm of the group segment, i.e. \sum_i \sum_g ||(x_i)_g||_2.
obj = 0;
for j = 1 : length(G)
% l2 norms of the j-th group segment of every column, summed at once
obj = obj + sum(sqrt(sum(X(G{j},:).^2,1)));
end
================================================
FILE: algorithms/groupl1R.m
================================================
function [X,E,obj,err,iter] = groupl1R(A,B,G,lambda,opts)
% Solve the group l1 norm regularized minimization problem by M-ADMM
%
% min_{X,E} loss(E)+lambda*\sum_{i=1}^n\sum_{g in G} ||(x_i)_g||_2, s.t. AX+E=B
% x_i is the i-th column of X
% loss(E) = ||E||_1 or 0.5*||E||_F^2
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% G - a cell indicates a partition of 1:na
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% E - d*nb matrix
% obj - objective function value
% err - residual ||AX+E-B||_F
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X, E (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
E = zeros(d,nb);
Z = X;
Y1 = E;
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
Zk = Z;
% first super block {X,E}: X and E are updated jointly (both use old Z)
for i = 1 : nb
X(:,i) = prox_gl1(Z(:,i)-Y2(:,i)/mu,G,1/mu);
end
if strcmp(loss,'l1')
E = prox_l1(B-A*Z-Y1/mu,1/mu);
elseif strcmp(loss,'l2')
E = mu*(B-A*Z-Y1/mu)/(1+mu);
else
error('not supported loss function');
end
% second super block {Z}
Z = invAtAI*(-A'*(Y1/mu+E)+AtB+Y2/mu+X);
% residuals of the two constraints AZ+E=B and X=Z
dY1 = A*Z+E-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgE chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = comp_loss(E,loss)+lambda*compute_groupl1(X,G);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = comp_loss(E,loss)+lambda*compute_groupl1(X,G);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
function obj = compute_groupl1(X,G)
% Group l1 norm of X: \sum_i \sum_{g in G} ||(x_i)_g||_2,
% where x_i is the i-th column of X.
obj = 0;
for j = 1 : length(G)
idx = G{j};
for i = 1 : size(X,2)
obj = obj + norm(X(idx,i));
end
end
================================================
FILE: algorithms/igc.m
================================================
function [L,S,obj,err,iter] = igc(A,C,lambda,opts)
% Reference: Chen, Yudong, Sujay Sanghavi, and Huan Xu. Improved graph clustering.
% IEEE Transactions on Information Theory 60.10 (2014): 6440-6455.
%
% min_{L,S} ||L||_*+lambda*||C \cdot S||_1, s.t. A=L+S, 0<=L<=1.
%
% ---------------------------------------------
% Input:
% A - d*n matrix
% C - d*n matrix
% lambda - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% L - d*n matrix
% S - d*n matrix
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 19/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
% the entrywise weights must be nonnegative
C = abs(C);
[d,n] = size(A);
% variables: L, S (primal), Z (auxiliary copy of L), Y1/Y2 (dual variables)
L = zeros(d,n);
S = L;
Z = L;
Y1 = L;
Y2 = L;
iter = 0;
for iter = 1 : max_iter
Lk = L;
Sk = S;
Zk = Z;
% first super block {L,S}: L and S are updated jointly (both use old Z)
[L,nuclearnormL] = prox_nuclear(Z-Y2/mu,1/mu);
S = prox_l1(-Z+A-Y1/mu,C*(lambda/mu));
% second super block {Z}: average the two constraint estimates of L
% and project onto the box [0,1]
Z = project_box((-S+A+L+(Y2-Y1)/mu)/2,0,1);
% residuals of the two constraints Z+S=A and L=Z
dY1 = Z+S-A;
dY2 = L-Z;
chgL = max(max(abs(Lk-L)));
chgS = max(max(abs(Sk-S)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgL chgS chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnormL+lambda*sum(sum(C.*abs(S)));
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = nuclearnormL+lambda*sum(sum(C.*abs(S)));
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/ksupport.m
================================================
function [X,err,iter] = ksupport(A,B,k,opts)
% Solve the k support norm minimization problem by ADMM
%
% min_X 0.5*||vec(X)||_ksp^2, s.t. AX=B
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% k - >0, integer, parameter
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% err - residual
% iter - number of iterations
%
% version 1.0 - 27/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
Z = X;
Y1 = zeros(d,nb);
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
% update X: proximal operator of 0.5*||.||_ksp^2 applied to vec(X)
temp = Z-Y2/mu;
temp = prox_ksupport(temp(:),k,1/mu);
X = reshape(temp,na,nb);
% update Z
Z = invAtAI*(-A'*Y1/mu+AtB+Y2/mu+X);
% residuals of the two constraints AZ=B and X=Z
dY1 = A*Z-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/ksupportR.m
================================================
function [X,E,err,iter] = ksupportR(A,B,lambda,k,opts)
% Solve the k support norm regularized minimization problem by M-ADMM
%
% min_{X,E} loss(E)+lambda*0.5*||vec(X)||_ksp^2, s.t. AX+E=B
% loss(E) = ||E||_1 or 0.5*||E||_F^2
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% lambda - >=0, parameter
% k - >0, integer, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% E - d*nb matrix
% err - residual
% iter - number of iterations
%
% version 1.0 - 27/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X, E (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
E = zeros(d,nb);
Z = X;
Y1 = E;
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
Zk = Z;
% first super block {X,E}: X and E are updated jointly (both use old Z)
% X-update: proximal operator of lambda*0.5*||.||_ksp^2 applied to vec(X)
temp = Z-Y2/mu;
temp = prox_ksupport(temp(:),k,lambda/mu);
X = reshape(temp,na,nb);
if strcmp(loss,'l1')
E = prox_l1(B-A*Z-Y1/mu,1/mu);
elseif strcmp(loss,'l2')
E = mu*(B-A*Z-Y1/mu)/(1+mu);
else
error('not supported loss function');
end
% second super block {Z}
Z = invAtAI*(-A'*(Y1/mu+E)+AtB+Y2/mu+X);
% residuals of the two constraints AZ+E=B and X=Z
dY1 = A*Z+E-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgE chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/l1.m
================================================
function [X,obj,err,iter] = l1(A,B,opts)
% Solve the l1-minimization problem by ADMM
%
% min_X ||X||_1, s.t. AX=B
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% obj - objective function value
% err - residual ||AX-B||_F
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
Z = X;
Y1 = zeros(d,nb);
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
% update X: soft-thresholding
X = prox_l1(Z-Y2/mu,1/mu);
% update Z
Z = invAtAI*(-A'*Y1/mu+AtB+Y2/mu+X);
% residuals of the two constraints AZ=B and X=Z
dY1 = A*Z-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = norm(X(:),1);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = norm(X(:),1);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/l1R.m
================================================
function [X,E,obj,err,iter] = l1R(A,B,lambda,opts)
% Solve the l1 norm regularized minimization problem by M-ADMM
%
% min_{X,E} loss(E)+lambda*||X||_1, s.t. AX+E=B
% loss(E) = ||E||_1 or 0.5*||E||_F^2
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% lambda - >=0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - na*nb matrix
% E - d*nb matrix
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
% variables: X, E (primal), Z (auxiliary copy of X), Y1/Y2 (dual variables)
X = zeros(na,nb);
E = zeros(d,nb);
Z = X;
Y1 = E;
Y2 = X;
AtB = A'*B;
I = eye(na);
% precompute (A'*A+I)^{-1}; reused in every Z-update
invAtAI = (A'*A+I)\I;
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
Zk = Z;
% first super block {X,E}: X and E are updated jointly (both use old Z)
X = prox_l1(Z-Y2/mu,lambda/mu);
if strcmp(loss,'l1')
E = prox_l1(B-A*Z-Y1/mu,1/mu);
elseif strcmp(loss,'l2')
E = mu*(B-A*Z-Y1/mu)/(1+mu);
else
error('not supported loss function');
end
% second super block {Z}
Z = invAtAI*(-A'*(Y1/mu+E)+AtB+Y2/mu+X);
% residuals of the two constraints AZ+E=B and X=Z
dY1 = A*Z+E-B;
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residuals
chg = max([chgX chgE chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = comp_loss(E,loss)+lambda*norm(X(:),1);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty (mu) increase
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = comp_loss(E,loss)+lambda*norm(X(:),1);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/latlrr.m
================================================
function [Z,L,obj,err,iter] = latlrr(X,lambda,opts)
% Solve the Latent Low-Rank Representation by M-ADMM
%
% min_{Z,L,E} ||Z||_*+||L||_*+lambda*loss(E),
% s.t., XZ+LX-X=E.
% loss(E) = ||E||_1 or 0.5*||E||_F^2 or ||E||_{2,1}
% ---------------------------------------------
% Input:
% X - d*n matrix
% lambda - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% 'l21': loss(E) = ||E||_{2,1}
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% Z - n*n matrix
% L - d*d matrix
% E - d*n matrix
% obj - objective function value
% err - residual (squared Frobenius norm of XZ+LX-X-E)
% iter - number of iterations
%
% version 1.0 - 19/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding fields of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
% majorization (Lipschitz-type) constants for the linearized updates
eta1 = 1.02*2*norm(X,2)^2; % for Z
eta2 = eta1; % for L
eta3 = 1.02*2; % for E
[d,n] = size(X);
E = zeros(d,n);
Z = zeros(n,n);
L = zeros(d,d);
Y = E; % dual variable for the constraint XZ+LX-X=E
XtX = X'*X;
XXt = X*X';
iter = 0;
for iter = 1 : max_iter
Lk = L;
Ek = E;
Zk = Z;
% first super block {Z}: linearized proximal step on the nuclear norm
[Z,nuclearnormZ] = prox_nuclear(Zk-(X'*(Y/mu+L*X-X-E)+XtX*Z)/eta1,1/(mu*eta1));
% second super block {L,E}: L and E are updated jointly (both use old L,E)
temp = Lk-((Y/mu+X*Z-Ek)*X'+Lk*XXt-XXt)/eta2;
[L,nuclearnormL] = prox_nuclear(temp,1/(mu*eta2));
if strcmp(loss,'l1')
E = prox_l1(Ek+(Y/mu+X*Z+Lk*X-X-Ek)/eta3,lambda/(mu*eta3));
elseif strcmp(loss,'l21')
E = prox_l21(Ek+(Y/mu+X*Z+Lk*X-X-Ek)/eta3,lambda/(mu*eta3));
elseif strcmp(loss,'l2')
E = (Y+mu*(X*Z+Lk*X-X+(eta3-1)*Ek))/(lambda+mu*eta3);
else
error('not supported loss function');
end
% residual of the constraint XZ+LX-X=E
dY = X*Z+L*X-X-E;
chgL = max(max(abs(Lk-L)));
chgE = max(max(abs(Ek-E)));
chgZ = max(max(abs(Zk-Z)));
% stopping criterion: max change of iterates and constraint residual
chg = max([chgL chgE chgZ max(abs(dY(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnormZ+nuclearnormL+lambda*comp_loss(E,loss);
err = norm(dY,'fro')^2;
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent step and penalty (mu) increase
Y = Y + mu*dY;
mu = min(rho*mu,max_mu);
end
% bug fix: the second term was nuclearnormZ (duplicated); the objective is
% ||Z||_* + ||L||_* + lambda*loss(E), matching the in-loop DEBUG objective
obj = nuclearnormZ+nuclearnormL+lambda*comp_loss(E,loss);
err = norm(dY,'fro')^2;
function out = comp_loss(E,loss)
% Evaluate the loss term on E:
% 'l1'  - sum of absolute values of all entries
% 'l21' - sum of the l2 norms of the columns
% 'l2'  - half of the squared Frobenius norm
if strcmp(loss,'l1')
out = norm(E(:),1);
elseif strcmp(loss,'l21')
out = sum(sqrt(sum(E.^2,1)));
elseif strcmp(loss,'l2')
out = 0.5*norm(E,'fro')^2;
end
================================================
FILE: algorithms/lrmc.m
================================================
function [X,obj,err,iter] = lrmc(MM,omega,opts)
% Solve the Low-Rank Matrix Completion (LRMC) problem by ADMM
%
% min_X ||X||_*, s.t. P_Omega(X) = P_Omega(M)
%
% ---------------------------------------------
% Input:
% MM - d*n matrix
% omega - index of the observed entries
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - d*n matrix
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 22/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters; each may be overridden by the matching field of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n] = size(MM);
% M keeps only the observed entries of MM; unobserved entries are zero
M = zeros(d,n);
M(omega) = MM(omega);
X = zeros(d,n);
E = X; % E absorbs the residual on the unobserved entries (X+E=M)
Y = X; % Lagrange multiplier for the constraint X+E=M
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
% update X
[X,nuclearnormX] = prox_nuclear(-(E-M+Y/mu),1/mu);
% update E
E = -(X-M+Y/mu);
E(omega) = 0; % forced to zero on observed entries, enforcing P_Omega(X)=P_Omega(M)
dY = X+E-M;
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chg = max([chgX chgE max(abs(dY(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnormX;
err = norm(dY,'fro');
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent step and penalty update
Y = Y + mu*dY;
mu = min(rho*mu,max_mu);
end
obj = nuclearnormX;
err = norm(dY,'fro');
================================================
FILE: algorithms/lrmcR.m
================================================
function [X,E,obj,err,iter] = lrmcR(M,omega,lambda,opts)
% Solve the Noisy Low-Rank Matrix Completion (LRMC) problem by ADMM
%
% min_{X,E} ||X||_*+lambda*loss(E), s.t. P_Omega(X) + E = M.
% loss(E) = ||E||_1 or 0.5*||E||_F^2 or ||E||_{2,1}
%
% ---------------------------------------------
% Input:
% M - d*n matrix
% omega - index of the observed entries
% lambda - >=0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% 'l21': loss(E) = ||E||_{2,1}
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - d*n matrix
% E - d*n matrix
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 23/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end % fix: opts.tol is documented above but was never read
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n] = size(M);
X = zeros(d,n);
Z = X;  % auxiliary copy of X; P_Omega(Z)+E=M and X=Z are the two constraints
E = X;
Y1 = X; % multiplier for P_Omega(Z)+E=M
Y2 = X; % multiplier for X=Z
omegac = setdiff(1:d*n,omega); % linear indices of the unobserved entries
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
Ek = E;
% first super block {X,E}
[X,nuclearnormX] = prox_nuclear(Z-Y2/mu,1/mu);
temp = M-Y1/mu;
temp(omega) = temp(omega)-Z(omega);
if strcmp(loss,'l1')
E = prox_l1(temp,lambda/mu);
elseif strcmp(loss,'l21')
E = prox_l21(temp,lambda/mu);
elseif strcmp(loss,'l2')
E = temp*(mu/(lambda+mu));
else
error('not supported loss function');
end
% second super block {Z}
Z(omega) = (-E(omega)+M(omega)-(Y1(omega)-Y2(omega))/mu+X(omega))/2;
Z(omegac) = X(omegac)+Y2(omegac)/mu;
dY1 = E-M;
dY1(omega) = dY1(omega)+Z(omega);
dY2 = X-Z;
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chgZ = max(max(abs(Zk-Z)));
chg = max([chgX chgE chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnormX+lambda*comp_loss(E,loss);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty update
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = nuclearnormX+lambda*comp_loss(E,loss);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/lrr.m
================================================
function [X,E,obj,err,iter] = lrr(A,B,lambda,opts)
% Solve the Low-Rank Representation minimization problem by M-ADMM
%
% min_{X,E} ||X||_*+lambda*loss(E), s.t. A=BX+E
% loss(E) = ||E||_1 or 0.5*||E||_F^2 or ||E||_{2,1}
%
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% lambda - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1': loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% 'l21' (default): loss(E) = ||E||_{2,1}
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - nb*na matrix
% E - d*na matrix
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters; each may be overridden by the matching field of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l21';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
X = zeros(nb,na);
E = zeros(d,na);
J = X;  % auxiliary variable for the nuclear norm term, with constraint X=J
Y1 = E; % multiplier for A=BX+E
Y2 = X; % multiplier for X=J
BtB = B'*B;
BtA = B'*A;
I = eye(nb);
invBtBI = (BtB+I)\I; % inverse of (B'B+I), precomputed once for the X-update
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
Jk = J;
% first super block {J,E}
[J,nuclearnormJ] = prox_nuclear(X+Y2/mu,1/mu);
if strcmp(loss,'l1')
E = prox_l1(A-B*X+Y1/mu,lambda/mu);
elseif strcmp(loss,'l21')
E = prox_l21(A-B*X+Y1/mu,lambda/mu);
elseif strcmp(loss,'l2')
E = mu*(A-B*X+Y1/mu)/(lambda+mu);
else
error('not supported loss function');
end
% second super block {X}
X = invBtBI*(B'*(Y1/mu-E)+BtA-Y2/mu+J);
dY1 = A-B*X-E; % residual of the constraint A=BX+E
dY2 = X-J;     % residual of the constraint X=J
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chgJ = max(max(abs(Jk-J)));
chg = max([chgX chgE chgJ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnormJ+lambda*comp_loss(E,loss);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
% dual ascent steps and penalty update
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = nuclearnormJ+lambda*comp_loss(E,loss);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
function out = comp_loss(E,loss)
% Evaluate loss(E) for the losses supported by lrr:
%   'l1'  - entrywise l1 norm
%   'l21' - sum of the l2 norms of the columns
%   'l2'  - half of the squared Frobenius norm
if strcmp(loss,'l1')
    out = norm(E(:),1);
elseif strcmp(loss,'l21')
    out = sum(arrayfun(@(j) norm(E(:,j)), 1:size(E,2)));
elseif strcmp(loss,'l2')
    out = 0.5*norm(E,'fro')^2;
end
================================================
FILE: algorithms/lrsr.m
================================================
function [X,E,obj,err,iter] = lrsr(A,B,lambda1,lambda2,opts)
% Solve the Low-Rank and Sparse Representation (LRSR) minimization problem by M-ADMM
%
% min_{X,E} ||X||_*+lambda1*||X||_1+lambda2*loss(E), s.t. A=BX+E
% loss(E) = ||E||_1 or 0.5*||E||_F^2 or ||E||_{2,1}
% ---------------------------------------------
% Input:
% A - d*na matrix
% B - d*nb matrix
% lambda1 - >0, parameter
% lambda2 - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1': loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% 'l21' (default): loss(E) = ||E||_{2,1}
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - nb*na matrix
% E - d*na matrix
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l21';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,na] = size(A);
[~,nb] = size(B);
X = zeros(nb,na);
E = zeros(d,na);
Z = X;  % auxiliary variable for the nuclear norm term, with constraint X=Z
J = X;  % auxiliary variable for the l1 term, with constraint X=J
Y1 = E; % multiplier for A=BX+E
Y2 = X; % multiplier for X=Z
Y3 = X; % multiplier for X=J
BtB = B'*B;
BtA = B'*A;
I = eye(nb);
invBtBI = (BtB+2*I)\I; % inverse of (B'B+2I), precomputed once for the X-update
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
Ek = E;
Jk = J;
% first super block {Z,J,E}
[Z,nuclearnormZ] = prox_nuclear(X+Y2/mu,1/mu);
J = prox_l1(X+Y3/mu,lambda1/mu);
if strcmp(loss,'l1')
E = prox_l1(A-B*X+Y1/mu,lambda2/mu);
elseif strcmp(loss,'l21')
E = prox_l21(A-B*X+Y1/mu,lambda2/mu);
elseif strcmp(loss,'l2')
E = mu*(A-B*X+Y1/mu)/(lambda2+mu);
else
error('not supported loss function');
end
% second super block {X}
X = invBtBI*(B'*(Y1/mu-E)+BtA-(Y2+Y3)/mu+Z+J);
dY1 = A-B*X-E;
dY2 = X-Z;
dY3 = X-J;
chgX = max(max(abs(Xk-X)));
chgE = max(max(abs(Ek-E)));
chgZ = max(max(abs(Zk-Z)));
chgJ = max(max(abs(Jk-J)));
chg = max([chgX chgE chgZ chgJ max(abs(dY1(:))) max(abs(dY2(:))) max(abs(dY3(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnormZ+lambda1*norm(J(:),1)+lambda2*comp_loss(E,loss);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2+norm(dY3,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
Y3 = Y3 + mu*dY3; % fix: the dual variable for X=J was never updated before
mu = min(rho*mu,max_mu);
end
obj = nuclearnormZ+lambda1*norm(J(:),1)+lambda2*comp_loss(E,loss);
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2+norm(dY3,'fro')^2);
function out = comp_loss(E,normtype)
% Return the value of the chosen penalty evaluated at E.
%   'l1'  - entrywise l1 norm
%   'l21' - sum of the l2 norms of the columns
%   'l2'  - half of the squared Frobenius norm
if strcmp(normtype,'l1')
    out = norm(E(:),1);
elseif strcmp(normtype,'l21')
    out = 0;
    for col = 1 : size(E,2)
        out = out + norm(E(:,col));
    end
elseif strcmp(normtype,'l2')
    out = 0.5*norm(E,'fro')^2;
end
================================================
FILE: algorithms/lrtcR_snn.m
================================================
function [X,err,iter] = lrtcR_snn(M,omega,alpha,opts)
% Solve the Noisy Low-Rank Tensor Completion (LRTC) based on Sum of Nuclear Norm (SNN) problem by M-ADMM
%
% min_{X,E} \sum_i \alpha_i*||X_{i(i)}||_* + loss(E),
% s.t. P_Omega(X) + E = M.
% loss(E) = ||E||_1 or 0.5*||E||_F^2
%
% ---------------------------------------------
% Input:
% M - d1*d2*...dk tensor
% omega - index of the observed entries
% alpha - k*1 vector, parameters
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - d1*d2*...*dk tensor
% err - residual
% iter - number of iterations
%
% version 1.0 - 24/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
dim = size(M);
k = length(dim);
omegac = setdiff(1:prod(dim),omega); % linear indices of the unobserved entries
X = zeros(dim);
Y = cell(k,1); % Y{i}: multiplier for the constraint X=Z{i}
Z = Y;         % Z{i}: auxiliary tensor whose mode-i unfolding is nuclear-norm regularized
E = X;
Y2 = E;        % multiplier for the constraint P_Omega(X)+E=M
for i = 1 : k
Y{i} = X;
Z{i} = X;
end
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
Zk = Z;
% first super block {Z_i,E}
sumtemp = zeros(dim);
for i = 1 : k
% mode-i singular value thresholding: unfold, shrink, fold back
Z{i} = Fold(prox_nuclear(Unfold(X+Y{i}/mu,dim,i), alpha(i)/mu),dim,i);
sumtemp = sumtemp + Z{i} - Y{i}/mu;
end
if strcmp(loss,'l1')
E = prox_l1(-X+M-Y2/mu,1/mu);
elseif strcmp(loss,'l2')
E = (-X+M-Y2/mu)*(mu/(1+mu));
else
error('not supported loss function');
end
% second super block {X}
X(omega) = (sumtemp(omega)-Y2(omega)/mu-E(omega)+M(omega))/(k+1);
X(omegac) = sumtemp(omegac)/k;
chg = max([max(abs(Xk(:)-X(:))), max(abs(Ek(:)-E(:))) ]);
err = 0;
for i = 1 : k
dY = X-Z{i};
err = err+norm(dY(:))^2;
Y{i} = Y{i}+mu*dY;
chg = max([chg,max(abs(dY(:))), max(abs((Zk{i}(:)-Z{i}(:))))]);
end
dY = E-M;
dY(omega) = dY(omega)+X(omega);
chg = max(chg,max(abs(dY(:))));
% NOTE: unlike most solvers in this library, the dual variables here are
% updated before the convergence test, so they advance on the final iteration too
Y2 = Y2 + mu*dY;
err = sqrt(err+norm(dY(:))^2);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', err=' num2str(chg)]);
end
end
if chg < tol
break;
end
mu = min(rho*mu,max_mu);
end
================================================
FILE: algorithms/lrtcR_tnn.m
================================================
function [X,E,obj,err,iter] = lrtcR_tnn(M,omega,lambda,opts)
% Solve the Noisy Low-Rank Tensor Completion (LRTC) problem by ADMM
%
% min_{X,E} ||X||_*+lambda*loss(E), s.t. P_Omega(X) + E = M.
% loss(E) = ||E||_1 or 0.5*||E||_F^2
%
% ---------------------------------------------
% Input:
% M - d1*d2*d3 tensor
% omega - index of the observed entries
% lambda - >=0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - d1*d2*d3 tensor
% E - d1*d2*d3 tensor
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 27/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end % fix: opts.tol is documented above but was never read
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
dim = size(M);
X = zeros(dim);
Z = X;  % auxiliary copy of X; P_Omega(Z)+E=M and X=Z are the two constraints
E = X;
Y1 = X; % multiplier for P_Omega(Z)+E=M
Y2 = X; % multiplier for X=Z
omegac = setdiff(1:prod(dim),omega); % linear indices of the unobserved entries
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
Ek = E;
% first super block {X,E}
[X,tnnX] = prox_tnn(Z-Y2/mu,1/mu);
temp = M-Y1/mu;
temp(omega) = temp(omega)-Z(omega);
if strcmp(loss,'l1')
E = prox_l1(temp,lambda/mu);
elseif strcmp(loss,'l2')
E = temp*(mu/(lambda+mu));
else
error('not supported loss function');
end
% second super block {Z}
Z(omega) = (-E(omega)+M(omega)-(Y1(omega)-Y2(omega))/mu+X(omega))/2;
Z(omegac) = X(omegac)+Y2(omegac)/mu;
dY1 = E-M;
dY1(omega) = dY1(omega)+Z(omega);
dY2 = X-Z;
chgX = max(abs(Xk(:)-X(:)));
chgE = max(abs(Ek(:)-E(:)));
chgZ = max(abs(Zk(:)-Z(:)));
chg = max([chgX chgE chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = tnnX+lambda*comp_loss(E,loss);
err = sqrt(norm(dY1(:))^2+norm(dY2(:))^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = tnnX+lambda*comp_loss(E,loss);
err = sqrt(norm(dY1(:))^2+norm(dY2(:))^2);
================================================
FILE: algorithms/lrtc_snn.m
================================================
function [X,err,iter] = lrtc_snn(M,omega,alpha,opts)
% Solve the Low-Rank Tensor Completion (LRTC) based on Sum of Nuclear Norm (SNN) problem by M-ADMM
%
% min_X \sum_i \alpha_i*||X_{i(i)}||_*, s.t. P_Omega(X) = P_Omega(M)
%
% ---------------------------------------------
% Input:
% M - d1*d2*...*dk tensor
% omega - index of the observed entries
% alpha - k*1 vector, parameters
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - d1*d2*...*dk tensor
% err - residual
% iter - number of iterations
%
% version 1.0 - 24/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
dim = size(M);
k = length(dim);
omegac = setdiff(1:prod(dim),omega); % linear indices of the unobserved entries
X = zeros(dim);
X(omega) = M(omega); % observed entries are fixed; only X(omegac) is optimized below
Y = cell(k,1); % Y{i}: multiplier for the constraint X=Z{i}
Z = Y;         % Z{i}: auxiliary tensor whose mode-i unfolding is nuclear-norm regularized
for i = 1 : k
Y{i} = X;
Z{i} = X;
end
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
% first super block {Z_i}
sumtemp = zeros(1,length(omegac));
for i = 1 : k
% mode-i singular value thresholding: unfold, shrink, fold back
Z{i} = Fold(prox_nuclear(Unfold(X+Y{i}/mu,dim,i), alpha(i)/mu),dim,i);
sumtemp = sumtemp + Z{i}(omegac) - Y{i}(omegac)/mu;
end
% second super block {X}: average of the k auxiliary estimates on the unobserved entries
X(omegac) = sumtemp/k;
chg = max(abs(Xk(:)-X(:)));
err = 0;
for i = 1 : k
dY = X-Z{i};
err = err+norm(dY(:))^2;
Y{i} = Y{i}+mu*dY;
chg = max([chg, max(abs(dY(:))), max(abs(Zk{i}(:)-Z{i}(:)))]);
end
err = sqrt(err);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', err=' num2str(err)]);
end
end
if chg < tol
break;
end
mu = min(rho*mu,max_mu);
end
================================================
FILE: algorithms/lrtc_tnn.m
================================================
function [X,obj,err,iter] = lrtc_tnn(M,omega,opts)
% Solve the Low-Rank Tensor Completion (LRTC) based on Tensor Nuclear Norm (TNN) problem by M-ADMM
%
% min_X ||X||_*, s.t. P_Omega(X) = P_Omega(M)
%
% ---------------------------------------------
% Input:
% M - d1*d2*d3 tensor
% omega - index of the observed entries
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - d1*d2*d3 tensor
% err - residual
% obj - objective function value
% iter - number of iterations
%
% version 1.0 - 25/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
dim = size(M);
% (removed two unused locals of the original: k = length(dim) and the
% O(prod(dim)) setdiff computing omegac -- neither was referenced below)
X = zeros(dim);
X(omega) = M(omega);
E = zeros(dim); % E absorbs the residual on the unobserved entries (X+E=M)
Y = E;          % multiplier for X+E=M
iter = 0;
for iter = 1 : max_iter
Xk = X;
Ek = E;
% update X
[X,tnnX] = prox_tnn(-E+M+Y/mu,1/mu);
% update E
E = M-X+Y/mu;
E(omega) = 0; % forced to zero on observed entries, enforcing P_Omega(X)=P_Omega(M)
dY = M-X-E;
chgX = max(abs(Xk(:)-X(:)));
chgE = max(abs(Ek(:)-E(:)));
chg = max([chgX chgE max(abs(dY(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = tnnX;
err = norm(dY(:));
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y = Y + mu*dY;
mu = min(rho*mu,max_mu);
end
obj = tnnX;
err = norm(dY(:));
================================================
FILE: algorithms/lrtr_Gaussian_tnn.m
================================================
function [X,obj,err,iter] = lrtr_Gaussian_tnn(A,b,Xsize,opts)
% Low tubal rank tensor recovery from Gaussian measurements by tensor
% nuclear norm minimization
%
% min_X ||X||_*, s.t. A*vec(X) = b
%
% ---------------------------------------------
% Input:
% A - m*n matrix
% b - m*1 vector
% Xsize - Structure value in Matlab. The fields
% (Xsize.n1,Xsize.n2,Xsize.n3) give the size of X.
%
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% X - n1*n2*n3 tensor (n=n1*n2*n3)
% obj - objective function value
% err - residual (squared; see note at the end)
% iter - number of iterations
%
% version 1.0 - 09/10/2017
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% References:
% Canyi Lu, Jiashi Feng, Zhouchen Lin, Shuicheng Yan
% Exact Low Tubal Rank Tensor Recovery from Gaussian Measurements
% International Joint Conference on Artificial Intelligence (IJCAI). 2018
tol = 1e-8;
max_iter = 1000;
rho = 1.1;
mu = 1e-6;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
n1 = Xsize.n1;
n2 = Xsize.n2;
n3 = Xsize.n3;
X = zeros(n1,n2,n3);
Z = X; % auxiliary copy of X; A*vec(Z)=b and X=Z are the two constraints
m = length(b);
Y1 = zeros(m,1); % multiplier for A*vec(Z)=b
Y2 = X;          % multiplier for X=Z
% NOTE(review): forms and factors an (n1*n2*n3)^2 matrix; memory-heavy for large tensors
I = eye(n1*n2*n3);
invA = (A'*A+I)\I; % inverse of (A'A+I), precomputed once for the Z-update
iter = 0;
for iter = 1 : max_iter
Xk = X;
Zk = Z;
% update X
[X,Xtnn] = prox_tnn(Z-Y2/mu,1/mu);
% update Z
vecZ = invA*(A'*(-Y1/mu+b)+Y2(:)/mu+X(:));
Z = reshape(vecZ,n1,n2,n3);
dY1 = A*vecZ-b;
dY2 = X-Z;
chgX = max(abs(Xk(:)-X(:)));
chgZ = max(abs(Zk(:)-Z(:)));
chg = max([chgX chgZ max(abs(dY1)) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = Xtnn;
err = norm(dY1)^2+norm(dY2(:))^2;
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = Xtnn;
% NOTE: err here is the squared residual (no sqrt), unlike most other solvers in this library
err = norm(dY1)^2+norm(dY2(:))^2;
================================================
FILE: algorithms/mlap.m
================================================
function [Z,E,obj,err,iter] = mlap(X,lambda,alpha,opts)
% Solve the Multi-task Low-rank Affinity Pursuit (MLAP) minimization problem by M-ADMM
%
% Reference: Cheng, Bin, Guangcan Liu, Jingdong Wang, Zhongyang Huang, and Shuicheng Yan.
% Multi-task low-rank affinity pursuit for image segmentation. ICCV, 2011.
%
% min_{Z_i,E_i} \sum_{i=1}^K (||Z_i||_*+lambda*loss(E_i))+alpha*||Z||_{2,1},
% s.t. X_i=X_i*Z_i+E_i, i=1,...,K.
% loss(E) = ||E||_1 or 0.5*||E||_F^2 or ||E||_{2,1}
%
% ---------------------------------------------
% Input:
% X - d*n*K tensor
% lambda - >0, parameter
% alpha - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1': loss(E) = ||E||_1
% 'l2': loss(E) = 0.5*||E||_F^2
% 'l21' (default): loss(E) = ||E||_{2,1}
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% Z - n*n*K tensor
% E - d*n*K tensor
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l21';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n,K] = size(X);
Z = zeros(n,n,K);
E = zeros(d,n,K);
J = Z;    % J(:,:,i): auxiliary variable for the nuclear norm of Z_i (Z=J)
S = Z;    % S(:,:,i): auxiliary variable in the reconstruction constraint (Z=S)
Y = E;    % multiplier for X_i=X_i*S_i+E_i
W = Z;    % multiplier for Z=J
V = Z;    % multiplier for Z=S
dY = Y;
XmXS = E; % cache of X_i-X_i*S_i
XtX = zeros(n,n,K);
invXtXI = zeros(n,n,K);
I = eye(n);
for i = 1 : K
XtX(:,:,i) = X(:,:,i)'*X(:,:,i);
invXtXI(:,:,i) = (XtX(:,:,i)+I)\I; % inverse of (X_i'X_i+I), precomputed once
end
nuclearnormJ = zeros(K,1);
iter = 0;
for iter = 1 : max_iter
Zk = Z;
Ek = E;
Jk = J;
Sk = S;
% first super block {J,S}
for i = 1 : K
[J(:,:,i),nuclearnormJ(i)] = prox_nuclear(Z(:,:,i)+W(:,:,i)/mu,1/mu);
S(:,:,i) = invXtXI(:,:,i)*(XtX(:,:,i)-X(:,:,i)'*(E(:,:,i)-Y(:,:,i)/mu)+Z(:,:,i)+(V(:,:,i)-W(:,:,i))/mu);
end
% second super block {Z,E}
Z = prox_tensor_l21((J+S-(W+V)/mu)/2,alpha/(2*mu));
for i = 1 : K
XmXS(:,:,i) = X(:,:,i)-X(:,:,i)*S(:,:,i);
end
if strcmp(loss,'l1')
for i = 1 : K
E(:,:,i) = prox_l1(XmXS(:,:,i)+Y(:,:,i)/mu,lambda/mu);
end
elseif strcmp(loss,'l21')
for i = 1 : K
E(:,:,i) = prox_l21(XmXS(:,:,i)+Y(:,:,i)/mu,lambda/mu);
end
elseif strcmp(loss,'l2')
for i = 1 : K
% fix: assign into slice i; the original assigned to E, overwriting the whole d*n*K tensor
E(:,:,i) = (XmXS(:,:,i)+Y(:,:,i)/mu) / (lambda/mu+1);
end
else
error('not supported loss function');
end
dY = XmXS-E;
dW = Z-J;
dV = Z-S;
chgZ = max(abs(Zk(:)-Z(:)));
chgE = max(abs(Ek(:)-E(:)));
chgJ = max(abs(Jk(:)-J(:)));
chgS = max(abs(Sk(:)-S(:)));
chg = max([chgZ chgE chgJ chgS max(abs(dY(:))) max(abs(dW(:))) max(abs(dV(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = sum(nuclearnormJ)+lambda*comp_loss(E,loss)+alpha*comp_loss(Z,'l21');
err = sqrt(norm(dY(:))^2+norm(dW(:))^2+norm(dV(:))^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y = Y + mu*dY;
W = W + mu*dW;
V = V + mu*dV;
mu = min(rho*mu,max_mu);
end
obj = sum(nuclearnormJ)+lambda*comp_loss(E,loss)+alpha*comp_loss(Z,'l21');
err = sqrt(norm(dY(:))^2+norm(dW(:))^2+norm(dV(:))^2);
function X = prox_tensor_l21(B,lambda)
% Proximal operator of the tensor l21-norm, i.e., the sum of the l2 norms
% of all tubes B(i,j,:) of a tensor:
%
%   min_X lambda*\sum_{i=1}^n1\sum_{j=1}^n2 ||X(i,j,:)||_2 + 0.5*||X-B||_F^2
%
% Each tube is shrunk toward zero: tubes with norm <= lambda vanish,
% the rest are scaled by (1 - lambda/||tube||).
[n1,n2,n3] = size(B);
X = zeros(n1,n2,n3);
for row = 1 : n1
    for col = 1 : n2
        tube = B(row,col,:);
        tubenorm = norm(tube(:));
        if tubenorm <= lambda
            continue; % tube is shrunk entirely to zero
        end
        X(row,col,:) = (1-lambda/tubenorm)*tube;
    end
end
================================================
FILE: algorithms/rmsc.m
================================================
function [L,S,obj,err,iter] = rmsc(X,lambda,opts)
% Solve the Robust Multi-view Spectral Clustering (RMSC) problem by M-ADMM
%
% min_{L,S_i} ||L||_*+lambda*\sum_i ||S_i||_1,
% s.t. X_i=L+S_i, i=1,...,m, L>=0, L1=1.
% ---------------------------------------------
% Input:
% X - d*n*m tensor
% lambda - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% L - d*n matrix
% S - d*n*m tensor
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 19/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n,m] = size(X);
L = zeros(d,n);
S = zeros(d,n,m);
Z = L;  % auxiliary variable for the nuclear norm term, with constraint L=Z
Y = S;  % Y(:,:,i): multiplier for X_i=L+S_i
dY = S;
Y2 = L; % multiplier for L=Z
iter = 0;
for iter = 1 : max_iter
Lk = L;
Sk = S;
Zk = Z;
% first super block {Z,S_i}
[Z,nuclearnormZ] = prox_nuclear(L+Y2/mu,1/mu);
for i = 1 : m
S(:,:,i) = prox_l1(-L+X(:,:,i)-Y(:,:,i)/mu,lambda/mu);
end
% second super block {L}
temp = (sum(X-S-Y/mu,3)+Z-Y2/mu)/(m+1);
% presumably projects onto {L : L>=0, L1=1} to enforce the simplex
% constraints of the model -- see project_simplex for the exact semantics
L = project_simplex(temp);
for i = 1 : m
dY(:,:,i) = L+S(:,:,i)-X(:,:,i);
end
dY2 = L-Z;
chgL = max(abs(Lk(:)-L(:)));
chgZ = max(abs(Zk(:)-Z(:)));
chgS = max(abs(Sk(:)-S(:)));
chg = max([chgL chgS chgZ max(abs(dY(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnormZ+lambda*norm(S(:),1);
err = sqrt(norm(dY(:))^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y = Y + mu*dY;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu);
end
obj = nuclearnormZ+lambda*norm(S(:),1);
err = sqrt(norm(dY(:))^2+norm(dY2,'fro')^2);
================================================
FILE: algorithms/rpca.m
================================================
function [L,S,obj,err,iter] = rpca(X,lambda,opts)
% Solve the Robust Principal Component Analysis minimization problem by M-ADMM
%
% min_{L,S} ||L||_*+lambda*loss(S), s.t. X=L+S
% loss(S) = ||S||_1 or ||S||_{2,1}
%
% ---------------------------------------------
% Input:
% X - d*n matrix
% lambda - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(S) = ||S||_1
% 'l21': loss(S) = ||S||_{2,1}
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% L - d*n matrix, low-rank component
% S - d*n matrix, sparse component
% obj - objective function value
% err - residual
% iter - number of iterations
%
% version 1.0 - 19/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters; each may be overridden by the matching field of opts
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n] = size(X);
L = zeros(d,n);
S = zeros(d,n);
Y = zeros(d,n); % Lagrange multiplier for the constraint X=L+S
iter = 0;
for iter = 1 : max_iter
Lpre = L;
Spre = S;
% L-step: singular value thresholding applied to X-S-Y/mu
[L,nuclearnormL] = prox_nuclear(X-S-Y/mu,1/mu);
% S-step: proximal operator of the chosen sparsity-inducing loss
switch loss
case 'l1'
S = prox_l1(X-L-Y/mu,lambda/mu);
case 'l21'
S = prox_l21(X-L-Y/mu,lambda/mu);
otherwise
error('not supported loss function');
end
dY = L+S-X; % residual of the constraint X=L+S
chgL = max(abs(Lpre(:)-L(:)));
chgS = max(abs(Spre(:)-S(:)));
chg = max([chgL chgS max(abs(dY(:)))]);
if DEBUG && (iter == 1 || mod(iter, 10) == 0)
obj = nuclearnormL+lambda*comp_loss(S,loss);
err = norm(dY,'fro');
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
if chg < tol
break;
end
% dual ascent step and penalty update
Y = Y + mu*dY;
mu = min(rho*mu,max_mu);
end
obj = nuclearnormL+lambda*comp_loss(S,loss);
err = norm(dY,'fro');
function out = comp_loss(E,loss)
% Compute loss(E) for the supported loss types.
%   'l1'  - entrywise l1 norm, ||E||_1
%   'l21' - sum of the l2 norms of the columns, ||E||_{2,1}
%   'l2'  - half of the squared Frobenius norm, 0.5*||E||_F^2
switch loss
case 'l1'
out = norm(E(:),1);
case 'l21'
out = 0;
for i = 1 : size(E,2)
out = out + norm(E(:,i));
end
case 'l2'
% added for consistency with the comp_loss copies in the other solver files,
% which all support 'l2'; previously out was left undefined for this loss
out = 0.5*norm(E,'fro')^2;
end
================================================
FILE: algorithms/sparsesc.m
================================================
function [P,obj,err,iter] = sparsesc(L,lambda,k,opts)
% Solve the Sparse Spectral Clustering problem by ADMM
%
% min_P <P,L>+lambda*||P||_1, s.t. 0\preceq P \preceq I, Tr(P)=k
%
% (the feasible set is the Fantope; <P,L> = Tr(P'*L))
%
% Reference: Canyi Lu, Shuicheng Yan, Zhouchen Lin, Convex Sparse Spectral
% Clustering: Single-view to Multi-view, TIP, 2016
%
% ---------------------------------------------
% Input:
% L - n*n normalized Laplacian matrix
% lambda - >=0, sparsity regularization parameter
% k - integer, trace of P (number of clusters)
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% P - n*n matrix
% obj - objective function value
% err - residual ||P-Q||_F of the splitting constraint
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding opts fields below
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
n = size(L,1);
P = zeros(n);
Q = P; % splitting variable: P = Q enforced via the multiplier Y
Y = P;
iter = 0;
for iter = 1 : max_iter
Pk = P;
Qk = Q;
% update P (l1-proximal step; L enters through the linear term <P,L>)
P = prox_l1(Q-(Y+L)/mu,lambda/mu);
% update Q (projection onto the Fantope {0 <= Q <= I, Tr(Q)=k})
temp = P+Y/mu;
temp = (temp+temp')/2; % symmetrize to guard against numerical asymmetry before eig
Q = project_fantope(temp,k);
dY = P-Q; % constraint residual
chgP = max(max(abs(Pk-P)));
chgQ = max(max(abs(Qk-Q)));
chg = max([chgP chgQ max(abs(dY(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = trace(P'*L)+lambda*norm(Q(:),1);
err = norm(dY,'fro');
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y = Y + mu*dY;
mu = min(rho*mu,max_mu); % geometrically increase the penalty, capped at max_mu
end
obj = trace(P'*L)+lambda*norm(Q(:),1);
err = norm(dY,'fro');
================================================
FILE: algorithms/tracelasso.m
================================================
function [x,obj,err,iter] = tracelasso(A,b,opts)
% Solve the trace Lasso minimization problem by ADMM
%
% min_x ||A*Diag(x)||_*, s.t. Ax=b
%
% The nuclear-norm term is split off via Z = A*Diag(x); Y1 and Y2 are the
% multipliers for the constraints Ax=b and A*Diag(x)=Z respectively.
% ---------------------------------------------
% Input:
% A - d*n matrix
% b - d*1 vector
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% x - n*1 vector
% obj - objective function value (nuclear norm of the last Z iterate)
% err - residual sqrt(||Ax-b||^2 + ||A*Diag(x)-Z||_F^2)
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding opts fields below
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n] = size(A);
x = zeros(n,1);
Z = zeros(d,n);
Y1 = zeros(d,1);
Y2 = Z;
% precompute quantities reused every iteration
Atb = A'*b;
AtA = A'*A;
% system matrix of the x-subproblem normal equations:
% d/dx [||Ax-b'||^2 + ||A*Diag(x)-Z'||_F^2] gives (A'A + Diag(diag(A'A))) x = rhs
invAtA = (AtA+diag(diag(AtA)))\eye(n);
iter = 0;
for iter = 1 : max_iter
xk = x;
Zk = Z;
% update x (closed-form solve of the quadratic subproblem)
x = invAtA*(-A'*Y1/mu+Atb+diagAtB(A,-Y2/mu+Z));
% update Z (singular value thresholding)
[Z,nuclearnorm] = prox_nuclear(A*diag(x)+Y2/mu,1/mu);
dY1 = A*x-b; % residual of Ax=b
dY2 = A*diag(x)-Z; % residual of A*Diag(x)=Z
chgx = max(abs(xk-x));
chgZ = max(abs(Zk-Z));
chg = max([chgx chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = nuclearnorm;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu); % geometrically increase the penalty, capped at max_mu
end
obj = nuclearnorm;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
function v = diagAtB(A,B)
% Diagonal of A'*B without forming the full n*n product.
%
% Input:
%   A, B - d*n matrices
% Output:
%   v    - n*1 vector with v(j) = A(:,j)'*B(:,j)
ncols = size(A,2);
v = zeros(ncols,1);
for j = 1 : ncols
    v(j) = A(:,j)'*B(:,j);
end
================================================
FILE: algorithms/tracelassoR.m
================================================
function [x,e,obj,err,iter] = tracelassoR(A,b,lambda,opts)
% Solve the trace Lasso regularized minimization problem by M-ADMM
%
% min_{x,e} loss(e)+lambda*||A*Diag(x)||_*, s.t. Ax+e=b
% loss(e) = ||e||_1 or 0.5*||e||_2^2
%
% The nuclear-norm term is split off via Z = A*Diag(x); Y1 and Y2 are the
% multipliers for Ax+e=b and Z=A*Diag(x). {Z,e} and {x} are updated as two
% super blocks of the majorized ADMM.
% ---------------------------------------------
% Input:
% A - d*n matrix
% b - d*1 vector
% lambda - >0, regularization parameter
% opts - Structure value in Matlab. The fields are
% opts.loss - 'l1' (default): loss(e) = ||e||_1
% 'l2': loss(e) = 0.5*||e||_2^2
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% x - n*1 vector
% e - d*1 vector
% obj - objective function value
% err - residual sqrt(||Ax+e-b||^2 + ||Z-A*Diag(x)||_F^2)
% iter - number of iterations
%
% version 1.0 - 18/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding opts fields below
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
loss = 'l1';
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'loss'); loss = opts.loss; end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
[d,n] = size(A);
x = zeros(n,1);
Z = zeros(d,n);
e = zeros(d,1);
Y1 = e;
Y2 = Z;
% precompute quantities reused every iteration
Atb = A'*b;
AtA = A'*A;
% system matrix of the x-subproblem normal equations (see tracelasso.m)
invAtA = (AtA+diag(diag(AtA)))\eye(n);
iter = 0;
for iter = 1 : max_iter
xk = x;
ek = e;
Zk = Z;
% first super block {Z,e}: Z by singular value thresholding, e by the loss prox
[Z,nuclearnorm] = prox_nuclear(A*diag(x)-Y2/mu,lambda/mu);
if strcmp(loss,'l1')
e = prox_l1(b-A*x-Y1/mu,1/mu);
elseif strcmp(loss,'l2')
% closed form of min_e 0.5*||e||^2 + mu/2*||e-(b-Ax-Y1/mu)||^2
e = mu*(b-A*x-Y1/mu)/(1+mu);
else
error('not supported loss function');
end
% second super block {x}: closed-form solve of the quadratic subproblem
x = invAtA*(-A'*(Y1/mu+e)+Atb+diagAtB(A,Y2/mu+Z));
dY1 = A*x+e-b; % residual of Ax+e=b
dY2 = Z-A*diag(x); % residual of Z=A*Diag(x)
chgx = max(abs(xk-x));
chge = max(abs(ek-e));
chgZ = max(max(abs(Zk-Z)));
chg = max([chgx chge chgZ max(abs(dY1(:))) max(abs(dY2(:)))]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = comp_loss(e,loss)+lambda*nuclearnorm;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y1 = Y1 + mu*dY1;
Y2 = Y2 + mu*dY2;
mu = min(rho*mu,max_mu); % geometrically increase the penalty, capped at max_mu
end
obj = comp_loss(e,loss)+lambda*nuclearnorm;
err = sqrt(norm(dY1,'fro')^2+norm(dY2,'fro')^2);
function v = diagAtB(A,B)
% Diagonal of A'*B without forming the full n*n product.
%
% Input:
%   A, B - d*n matrices
% Output:
%   v    - n*1 vector with v(j) = A(:,j)'*B(:,j)
ncols = size(A,2);
v = zeros(ncols,1);
for j = 1 : ncols
    v(j) = A(:,j)'*B(:,j);
end
================================================
FILE: algorithms/trpca_snn.m
================================================
function [L,E,err,iter] = trpca_snn(X,alpha,opts)
% Solve the Tensor Robust Principal Component Analysis (TRPCA) based on Sum of Nuclear Norm (SNN) problem by M-ADMM
%
% min_{L,E} \sum_i \alpha_i*||L_{(i)}||_* + ||E||_1,
% s.t. X = L + E.
%
% where L_{(i)} denotes the mode-i unfolding of L. One auxiliary copy L{i}
% and one multiplier Y{i} are kept per mode.
% ---------------------------------------------
% Input:
% X - d1*d2*...dk tensor
% alpha - k*1 vector, parameters
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% L - d1*d2*...*dk tensor (the first mode copy L{1}; the copies
% coincide up to tol at convergence)
% E - d1*d2*...*dk tensor
% err - residual
% iter - number of iterations
%
% version 1.0 - 24/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% default parameters, overridden by the corresponding opts fields below
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
dim = size(X);
k = length(dim);
E = zeros(dim);
Y = cell(k,1);
L = Y;
for i = 1 : k
Y{i} = E;
L{i} = E;
end
iter = 0;
for iter = 1 : max_iter
Lk = L;
Ek = E;
% first super block {L_i}: per-mode singular value thresholding on the unfolding
sumtemp = zeros(dim);
for i = 1 : k
L{i} = Fold(prox_nuclear(Unfold(X-E-Y{i}/mu,dim,i), alpha(i)/mu),dim,i);
sumtemp = sumtemp + L{i} + Y{i}/mu;
end
% second super block {E}: soft thresholding against the average of the k copies
E = prox_l1(X-sumtemp/k,1/(mu*k));
chg = max(abs(Ek(:)-E(:)));
err = 0;
for i = 1 : k
dY = L{i}+E-X; % residual of the mode-i constraint
err = err+norm(dY(:))^2;
Y{i} = Y{i}+mu*dY;
chg = max([chg, max(abs(dY(:))), max(abs(Lk{i}(:)-L{i}(:)))]);
end
err = sqrt(err);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', err=' num2str(err)]);
end
end
if chg < tol
break;
end
mu = min(rho*mu,max_mu); % geometrically increase the penalty, capped at max_mu
end
% return the first mode copy as the low-rank estimate
L = L{1};
================================================
FILE: algorithms/trpca_tnn.m
================================================
function [L,S,obj,err,iter] = trpca_tnn(X,lambda,opts)
% Solve the Tensor Robust Principal Component Analysis based on Tensor Nuclear Norm problem by ADMM
%
% min_{L,S} ||L||_*+lambda*||S||_1, s.t. X=L+S
%
% where ||.||_* is the tensor nuclear norm (see prox_tnn.m).
% ---------------------------------------------
% Input:
% X - d1*d2*d3 tensor
% lambda - >0, parameter
% opts - Structure value in Matlab. The fields are
% opts.tol - termination tolerance
% opts.max_iter - maximum number of iterations
% opts.mu - stepsize for dual variable updating in ADMM
% opts.max_mu - maximum stepsize
% opts.rho - rho>=1, ratio used to increase mu
% opts.DEBUG - 0 or 1
%
% Output:
% L - d1*d2*d3 tensor
% S - d1*d2*d3 tensor
% obj - objective function value
% err - residual ||L+S-X||_F
% iter - number of iterations
%
% version 1.0 - 19/06/2016
%
% Written by Canyi Lu (canyilu@gmail.com)
%
% References:
% [1] Canyi Lu, Jiashi Feng, Yudong Chen, Wei Liu, Zhouchen Lin and Shuicheng
% Yan, Tensor Robust Principal Component Analysis with A New Tensor Nuclear
% Norm, arXiv preprint arXiv:1804.03728, 2018
% [2] Canyi Lu, Jiashi Feng, Yudong Chen, Wei Liu, Zhouchen Lin and Shuicheng
% Yan, Tensor Robust Principal Component Analysis: Exact Recovery of Corrupted
% Low-Rank Tensors via Convex Optimization, arXiv preprint arXiv:1804.03728, 2018
%
% default parameters, overridden by the corresponding opts fields below
tol = 1e-8;
max_iter = 500;
rho = 1.1;
mu = 1e-4;
max_mu = 1e10;
DEBUG = 0;
if ~exist('opts', 'var')
opts = [];
end
if isfield(opts, 'tol'); tol = opts.tol; end
if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end
if isfield(opts, 'rho'); rho = opts.rho; end
if isfield(opts, 'mu'); mu = opts.mu; end
if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end
if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end
dim = size(X);
L = zeros(dim);
S = L;
Y = L;
iter = 0;
for iter = 1 : max_iter
Lk = L;
Sk = S;
% update L (tensor singular value thresholding)
[L,tnnL] = prox_tnn(-S+X-Y/mu,1/mu);
% update S (entry-wise soft thresholding)
S = prox_l1(-L+X-Y/mu,lambda/mu);
dY = L+S-X; % constraint residual
chgL = max(abs(Lk(:)-L(:)));
chgS = max(abs(Sk(:)-S(:)));
chg = max([ chgL chgS max(abs(dY(:))) ]);
if DEBUG
if iter == 1 || mod(iter, 10) == 0
obj = tnnL+lambda*norm(S(:),1);
err = norm(dY(:));
disp(['iter ' num2str(iter) ', mu=' num2str(mu) ...
', obj=' num2str(obj) ', err=' num2str(err)]);
end
end
if chg < tol
break;
end
Y = Y + mu*dY;
mu = min(rho*mu,max_mu); % geometrically increase the penalty, capped at max_mu
end
obj = tnnL+lambda*norm(S(:),1);
err = norm(dY(:));
================================================
FILE: example_low_rank_matrix_models.m
================================================
%
% References:
%
% C. Lu. A Library of ADMM for Sparse and Low-rank Optimization. National University of Singapore, June 2016.
% https://github.com/canyilu/LibADMM.
% C. Lu, J. Feng, S. Yan, Z. Lin. A Unified Alternating Direction Method of Multipliers by Majorization
% Minimization. IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, pp. 527-541, 2018
%
addpath(genpath(cd))
clear
%% Examples for testing the low-rank matrix based models
% For detailed description of the sparse models, please refer to the Manual.
%% generate toy data
d = 10;
na = 200;
nb = 100;
A = randn(d,na);
X = randn(na,nb);
B = A*X; % consistent linear system: AX=B has an exact solution
b = B(:,1);
% common ADMM options shared by the examples below
opts.tol = 1e-6;
opts.max_iter = 1000;
opts.rho = 1.2;
opts.mu = 1e-3;
opts.max_mu = 1e10;
opts.DEBUG = 0;
%% RPCA
n1 = 100;
n2 = 200;
r = 10;
L = rand(n1,r)*rand(r,n2); % low-rank part
p = 0.1; % fraction of corrupted entries
m = p*n1*n2;
% pick m entries uniformly at random as the support of the sparse part
temp = rand(n1*n2,1);
[~,I] = sort(temp);
I = I(1:m);
Omega = zeros(n1,n2);
Omega(I) = 1;
E = sign(rand(n1,n2)-0.5); % random +/-1 corruptions
S = Omega.*E; % sparse part, S = P_Omega(E)
Xn = L+S;
lambda = 1/sqrt(max(n1,n2)); % theoretical choice of lambda for RPCA
opts.loss = 'l1';
opts.DEBUG = 1;
tic
[Lhat,Shat,obj,err,iter] = rpca(Xn,lambda,opts);
toc
% relative recovery errors (displayed, not suppressed)
rel_err_L = norm(L-Lhat,'fro')/norm(L,'fro')
rel_err_S = norm(S-Shat,'fro')/norm(S,'fro')
err
iter
%% low rank matrix completion (lrmc) and regularized lrmc
n1 = 100;
n2 = 200;
r = 5;
X = rand(n1,r)*rand(r,n2); % low-rank ground truth
p = 0.6; % observation ratio
omega = find(rand(n1,n2)
#include
#include
#include "mex.h"
using namespace std;
struct mypair
{
double number;
int index;
void setval(double n, int i)
{
number=n;
index=i;
}
};
bool mycompare(mypair l, mypair r)
{
return (l.numberN)){
cout<<"impossible sum constraint!\n"< v(N);
for(i=0;i=v[i-1].number+1)){
j=i;
flag=true;
break;
}
// i0) && (v[j-1].number+gamma<1)) {flag=true;break;}
}
else {
if ((v[i].number+gamma>0) && (v[j-1].number+gamma<1) && (v[j].number+gamma>=1)) {flag=true; break;}
}
else
if (j==N) {
if ((v[i-1].number+gamma<=0) && (v[i].number+gamma>0) && (v[j-1].number+gamma<1)) {flag=true;break;}
}
else {
if ((v[i-1].number+gamma<=0) && (v[i].number+gamma>0) && (v[j-1].number+gamma<1) && (v[j].number+gamma>=1)) {flag=true;break;}
}
}
if(flag) break;
}
// get the solution in original order.
*e=0;
int k;
for(k=0;kM)?N:M;
plhs[0] = mxCreateDoubleMatrix((mwSize)M, (mwSize)N, mxREAL);
plhs[1] = mxCreateDoubleMatrix((mwSize)1, (mwSize)1, mxREAL);
double * y=mxGetPr(prhs[0]);
double s=mxGetScalar(prhs[1]);
double * x=mxGetPr(plhs[0]);
double * e =mxGetPr(plhs[1]);
cappedsimplexprojection(Length, y, s, x, e);
}
/*
* int main(int argc,char * argv[])
* {
*
* int N=6;
*
* double y[6]={0.5377, 1.8339, -2.2588, 0.8622, 0.3188, -1.3077};
* double s=10;
* double d[6]={0.2785, 0.5469, 0.9575, 0.9649, 0.1576, 0.9706};
*
* double x[6];
* double alpha;
*
* cappedsimplexprojection(N, y, s, d, x, &alpha);
*
* cout<n)
error('the sum constraint is infeasible!\n');
end
if k==0;
e=0.5*sum((x-y0).^2);
return;
end
if k==n
x=ones(n,1);
e=0.5*sum((x-y0).^2);
return;
end
[y,idx]=sort(y0,'ascend');
% Test the possiblity of a==b are integers.
if k==round(k)
b=n-k;
if y(b+1)-y(b)>=1
x(idx(b+1:end))=1;
e=0.5*sum((x-y0).^2);
return;
end
end
% Assume a=0.
s=cumsum(y);
y=[y;inf];
for b=1:n
% Hypothesized gamma.
gamma = (k+b-n-s(b)) / b;
if ((y(1)+gamma)>0) && ((y(b)+gamma)<1) && ((y(b+1)+gamma)>=1)
xtmp=[y(1:b)+gamma; ones(n-b,1)];
x(idx)=xtmp;
e=0.5*sum((x-y0).^2);
return;
end
end
% Now a>=1;
for a=1:n
for b=a+1:n
% Hypothesized gamma.
gamma = (k+b-n+s(a)-s(b))/(b-a);
if ((y(a)+gamma)<=0) && ((y(a+1)+gamma)>0) && ((y(b)+gamma)<1) && ((y(b+1)+gamma)>=1)
xtmp=[zeros(a,1); y(a+1:b)+gamma; ones(n-b,1)];
x(idx)=xtmp;
e=0.5*sum((x-y0).^2);
return;
end
end
end
================================================
FILE: proximal_operators/flsa.c
================================================
#include
#include
#include
#include
#include
#include "matrix.h"
#include "flsa.h"
/*
Functions contained in "flsa.h"
1. The algorithm for solving (1) with a given (lambda1, lambda2)
void flsa(double *x, double *z, double *info,
double * v, double *z0,
double lambda1, double lambda2, int n,
int maxStep, double tol, int tau, int flag)
*/
/*
We solve the Fused Lasso Signal Approximator (FLSA) problem:
min_x 1/2 \|x-v\|^2 + lambda1 * \|x\|_1 + lambda2 * \|A x\|_1, (1)
It can be shown that, if x* is the solution to
min_x 1/2 \|x-v\|^2 + lambda2 \|A x\|_1, (2)
then
x**= sgn(x*) max(|x*|-lambda_1, 0) (3)
is the solution to (1).
By some derivation (see the description in sfa.h), (2) can be solved by
x*= v - A^T z*,
where z* is the optimal solution to
min_z 1/2 z^T A AT z - < z, A v>,
subject to \|z\|_{infty} \leq lambda2 (4)
*/
/*
In flsa, we solve (1) corresponding to a given (lambda1, lambda2)
void flsa(double *x, double *z, double *gap,
double * v, double *z0,
double lambda1, double lambda2, int n,
int maxStep, double tol, int flag)
Output parameters:
x: the solution to problem (1)
z: the solution to problem (4)
infor: the information about running the subgradient finding algorithm
infor[0] = gap: the computed gap (either the duality gap
or the summation of the absolute change of the adjacent solutions)
infor[1] = steps: the number of iterations
infor[2] = lambad2_max: the maximal value of lambda2_max
infor[3] = numS: the number of elements in the support set
Input parameters:
v: the input vector to be projected
z0: a guess of the solution of z
lambda1: the regularization parameter
lambda2: the regularization parameter
n: the length of v and x
maxStep: the maximal allowed iteration steps
tol: the tolerance parameter
flag: the flag for initialization and deciding calling sfa
switch (flag)
>0: sfa
<0: sfa_ls
switch ( abs(flag))
case 1, 2, 3, or 4:
z0 is a "good" starting point
(such as the warm-start of the previous solution,
or the user want to test the performance of this starting point;
the starting point shall be further projected to the L_{infty} ball,
to make sure that it is feasible)
case 11, 12, 13, or 14: z0 is a "random" guess, and thus not used
(we shall initialize z with zero if lambda2 is less than 0.5 *zMax
and otherwise initialize z with zero with the solution of the linear system;
this solution is projected to the L_{infty} ball)
*/
/*
We write the wrapper for calling from Matlab
void flsa(double *x, double *z, double *gap,
double * v, double *z0,
double lambda1, double lambda2, int n,
int maxStep, double tol, int flag)
*/
/* MATLAB gateway for the FLSA solver:
 *   [x, z, infor] = flsa(v, z0, lambda1, lambda2, n, maxStep, tol, tau, flag)
 * See the comment block above for the meaning of each argument.
 * NOTE(review): nlhs/nrhs are not validated; calling with fewer than 9 inputs
 * or 3 outputs will fault inside the MEX API — confirm callers always pass all. */
void mexFunction (int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
/*set up input arguments */
double* v= mxGetPr(prhs[0]);
double* z0= mxGetPr(prhs[1]);
double lambda1= mxGetScalar(prhs[2]);
double lambda2= mxGetScalar(prhs[3]);
int n= (int ) mxGetScalar(prhs[4]);
int maxStep= (int) mxGetScalar(prhs[5]);
double tol= mxGetScalar(prhs[6]);
int tau= (int) mxGetScalar(prhs[7]);
int flag= (int) mxGetScalar(prhs[8]);
double *x, *z, *infor;
/* set up output arguments: x is n*1, z is (n-1)*1, infor is 1*4 */
plhs[0] = mxCreateDoubleMatrix( n, 1, mxREAL);
plhs[1] = mxCreateDoubleMatrix( n-1, 1, mxREAL);
plhs[2] = mxCreateDoubleMatrix( 1, 4, mxREAL);
x= mxGetPr(plhs[0]);
z= mxGetPr(plhs[1]);
infor=mxGetPr(plhs[2]);
/* delegate to the solver defined in flsa.h */
flsa(x, z, infor,
v, z0,
lambda1, lambda2, n,
maxStep, tol, tau, flag);
}
================================================
FILE: proximal_operators/flsa.h
================================================
#include
#include
#include
#include
#include
#include "matrix.h"
#include "sfa.h"
/*
Files contained in this header file sfa.h:
1. Algorithms for solving the linear system A A^T z0 = Av (see the description of A from the following context)
void Thomas(double *zMax, double *z0,
double * Av, int nn)
void Rose(double *zMax, double *z0,
double * Av, int nn)
int supportSet(double *x, double *v, double *z,
double *g, int * S, double lambda, int nn)
void dualityGap(double *gap, double *z,
double *g, double *s, double *Av,
double lambda, int nn)
void dualityGap2(double *gap, double *z,
double *g, double *s, double *Av,
double lambda, int nn)
2. The Subgraident Finding Algorithm (SFA) for solving problem (4) (refer to the description of the problem for detail)
int sfa(double *x, double *gap,
double *z, double *z0, double * v, double * Av,
double lambda, int nn, int maxStep,
double *s, double *g,
double tol, int tau, int flag)
int sfa_special(double *x, double *gap,
double *z, double * v, double * Av,
double lambda, int nn, int maxStep,
double *s, double *g,
double tol, int tau)
int sfa_one(double *x, double *gap,
double *z, double * v, double * Av,
double lambda, int nn, int maxStep,
double *s, double *g,
double tol, int tau)
*/
/*
In this file, we solve the Fused Lasso Signal Approximator (FLSA) problem:
min_x 1/2 \|x-v\|^2 + lambda1 * \|x\|_1 + lambda2 * \|A x\|_1, (1)
It can be shown that, if x* is the solution to
min_x 1/2 \|x-v\|^2 + lambda2 \|A x\|_1, (2)
then
x**= sgn(x*) max(|x*|-lambda_1, 0) (3)
is the solution to (1).
By some derivation (see the description in sfa.h), (2) can be solved by
x*= v - A^T z*,
where z* is the optimal solution to
min_z 1/2 z^T A AT z - < z, A v>,
subject to \|z\|_{infty} \leq lambda2 (4)
*/
/*
In flsa, we solve (1) corresponding to a given (lambda1, lambda2)
void flsa(double *x, double *z, double *gap,
double * v, double *z0,
double lambda1, double lambda2, int n,
int maxStep, double tol, int flag)
Output parameters:
x: the solution to problem (1)
z: the solution to problem (4)
infor: the information about running the subgradient finding algorithm
infor[0] = gap: the computed gap (either the duality gap
or the summation of the absolute change of the adjacent solutions)
infor[1] = steps: the number of iterations
infor[2] = lambad2_max: the maximal value of lambda2_max
infor[3] = numS: the number of elements in the support set
Input parameters:
v: the input vector to be projected
z0: a guess of the solution of z
lambda1: the regularization parameter
lambda2: the regularization parameter
n: the length of v and x
maxStep: the maximal allowed iteration steps
tol: the tolerance parameter
tau: the program sfa is checked every tau iterations for termination
flag: the flag for initialization and deciding calling sfa
switch ( flag )
1-4, 11-14: sfa
switch ( flag )
case 1, 2, 3, or 4:
z0 is a "good" starting point
(such as the warm-start of the previous solution,
or the user want to test the performance of this starting point;
the starting point shall be further projected to the L_{infty} ball,
to make sure that it is feasible)
case 11, 12, 13, or 14: z0 is a "random" guess, and thus not used
(we shall initialize z as follows:
if lambda2 >= 0.5 * lambda_2^max, we initialize the solution of the linear system;
if lambda2 < 0.5 * lambda_2^max, we initialize with zero
this solution is projected to the L_{infty} ball)
switch( flag )
5, 15: sfa_special
switch( flag )
5: z0 is a good starting point
15: z0 is a bad starting point, use the solution of the linear system
switch( flag )
6, 16: sfa_one
switch( flag )
6: z0 is a good starting point
16: z0 is a bad starting point, use the solution of the linear system
Revision made on October 31, 2009.
The input variable z0 is not modified after calling sfa. For this sake, we allocate a new variable zz to replace z0.
*/
void flsa(double *x, double *z, double *infor,
double * v, double *z0,
double lambda1, double lambda2, int n,
int maxStep, double tol, int tau, int flag){
int i, nn=n-1, m;
double zMax, temp;
double *Av, *g, *s;
int iterStep, numS;
double gap;
double *zz; /*to replace z0, so that z0 shall not revised after */
Av=(double *) malloc(sizeof(double)*nn);
/*
Compute Av= A*v (n=4, nn=3)
A= [ -1 1 0 0;
0 -1 1 0;
0 0 -1 1]
*/
for (i=0;i= zMax, which leads to a solution with same entry values
2) lambda2 < zMax, which needs to first run sfa, and then perform soft thresholding
*/
/*
First case: lambda2 >= zMax
*/
if (lambda2 >= zMax){
temp=0;
m=n%5;
if (m!=0){
for (i=0;i lambda1)
temp= temp-lambda1;
else
if (temp < -lambda1)
temp= temp+lambda1;
else
temp=0;
m=n%7;
if (m!=0){
for (i=0;i lambda2)
z[i]=lambda2;
else
if (z0[i]<-lambda2)
z[i]=-lambda2;
else
z[i]=z0[i];
}
}
else{
if (lambda2 >= 0.5 * zMax){
for (i=0;i lambda2)
z[i]=lambda2;
else
if (z[i]<-lambda2)
z[i]=-lambda2;
}
}
else{
for (i=0;i=1 && flag<=4){
zz =(double *) malloc(sizeof(double)*nn);
for (i=0;i lambda1)
x[i]-=lambda1;
else
if (x[i]<-lambda1)
x[i]+=lambda1;
else
x[i]=0;
free(Av);
free(g);
free(s);
infor[0]=gap;
infor[1]=iterStep;
infor[2]=zMax;
infor[3]=numS;
}
================================================
FILE: proximal_operators/project_box.m
================================================
function x = project_box(b,l,u)
% Euclidean projection of a point onto a box (element-wise clamping)
%
% min_x ||x-b||_2, s.t. l <= x <= u
%
% b, l, u may be scalars or arrays of compatible sizes; the result clamps
% each entry of b into [l,u].
x = max(min(b,u),l);
================================================
FILE: proximal_operators/project_fantope.m
================================================
function X = project_fantope(Q,k)
% Euclidean projection of a symmetric matrix Q onto the Fantope
%
% min_X ||X-Q||_F, s.t. 0 \preceq X \preceq I, Tr(X)=k.
%
% Since the constraint set is spectral, the projection keeps the
% eigenvectors of Q and projects its eigenvalues onto the capped simplex
% {d : 0 <= d <= 1, sum(d) = k}.
[evec,eval] = eig(Q);
d = cappedsimplexprojection(diag(eval),k);
% (a pure-MATLAB fallback exists: cappedsimplexprojection_matlab)
X = evec*diag(d)*evec';
================================================
FILE: proximal_operators/project_simplex.m
================================================
function X = project_simplex(B)
% Project onto the probability simplex, row-wise
% min_X ||X-B||_F
% s.t Xe=e, X>=0 where e is the constant one vector.
%
% Each row of B is projected independently onto {x : x>=0, sum(x)=1}
% using the sorting-based algorithm (sort, cumulative sums, then a
% per-row shift followed by clipping at zero).
% ---------------------------------------------
% Input:
% B - n*d matrix
%
% Output:
% X - n*d matrix
%
[n,m] = size(B);
A = repmat(1:m,n,1);
B_sort = sort(B,2,'descend');
cum_B = cumsum(B_sort,2);
% candidate shifted values; the number of positives per row gives the
% support size of that row's projection
sigma = B_sort-(cum_B-1)./A;
tmp = sigma>0;
idx = sum(tmp,2);
tmp = B_sort-sigma;
% pick tmp(i,idx(i)) for each row i: tmp(:,idx) is n*n and diag extracts
% the (i,idx(i)) entries
sigma = diag(tmp(:,idx));
sigma = repmat(sigma,1,m);
X = max(B-sigma,0);
================================================
FILE: proximal_operators/prox_elasticnet.m
================================================
function x = prox_elasticnet(b,lambda1,lambda2)
% Proximal operator of the elastic net penalty (lambda1, lambda2 >= 0)
%
% min_x lambda1*||x||_1+0.5*lambda2*||x||_2^2+0.5*||x-b||_2^2
%
% Closed form: soft-threshold b at lambda1, then shrink by 1/(1+lambda2).
soft = sign(b).*max(abs(b)-lambda1,0);
x = soft/(lambda2+1);
================================================
FILE: proximal_operators/prox_gl1.m
================================================
function x = prox_gl1(b,G,lambda)
% Proximal operator of the group l1 (group lasso) norm, lambda >= 0
%
% min_x lambda*\sum_{g in G} ||x_g||_2+0.5*||x-b||_2^2
% ---------------------------------------------
% Input:
%   b      - d*1 vector
%   G      - a cell indicates a partition of 1:d
%
% Output:
%   x      - d*1 vector
%
% Each group is shrunk toward zero by a block soft-threshold: groups whose
% l2 norm is at most lambda are zeroed, the rest are scaled down.
x = zeros(size(b));
for g = 1 : numel(G)
    idx = G{g};
    ng = norm(b(idx));
    if ng <= lambda
        continue;                  % whole group thresholded to zero
    end
    x(idx) = b(idx)*(1-lambda/ng); % block soft-threshold
end
================================================
FILE: proximal_operators/prox_ksupport.m
================================================
function B = prox_ksupport(v,k,lambda)
% The proximal operator of the k support norm of a vector
%
% min_x 0.5*lambda*||x||_{ksp}^2+0.5*||x-v||_2^2
%
% version 1.0 - 27/06/2016
%
% Written by Hanjiang Lai
%
% Reference:
% Lai H, Pan Y, Lu C, et al. Efficient k-support matrix pursuit, ECCV, 2014: 617-631.
%
% NOTE(review): the search below partitions the sorted magnitudes at
% indices (k-r, l): head entries are scaled by 1/(L+1), the middle block
% takes a common value, and the tail is kept — verify details against the
% paper's algorithm before modifying.
L = 1/lambda;
d = length(v);
if k >= d
% trivial case: the k-support norm reduces to the l2 norm
B = L*v/(1+L);
return;
elseif k <= 1
k = 1;
end
% work on magnitudes sorted in descending order; ind maps back to v
[z, ind] = sort(abs(v), 'descend');
z = z*L;
ar = cumsum(z);
z(d+1) = -inf; % sentinel so z(l+1) comparisons are safe at l=d
diff = 0;
err = inf;
found = false;
for r=k-1:-1:0
% find the boundary l for this r and the corresponding partial sum T
[l,T] = bsearch(z,ar,k-r,d,diff,k,r,L);
% optimality conditions for the (r,l) partition
if ( ((L+1)*T >= (l-k+(L+1)*r+L+1)*z(k-r)) && ...
(((k-r-1 == 0) || (L+1)*T < (l-k+(L+1)*r+L+1)*z(k-r-1)) ) )
found = true;
break;
end
diff = diff + z(k-r);
% track the least-violating candidate as a fallback
if k-r-1 == 0
err_tmp = max(0,(l-k+(L+1)*r+L+1)*z(k-r) - (L+1)*T);
else
err_tmp = max(0,(l-k+(L+1)*r+L+1)*z(k-r) -(L+1)*T) + max(0, - (l-k+(L+1)*r+L+1)*z(k-r-1) + (L+1)*T);
end
if err > err_tmp
err_r = r; err_l = l; err_T = T; err = err_tmp;
end
end
if found == false
% no exactly-optimal partition found numerically; use the best candidate
r = err_r; l = err_l; T = err_T;
end
% fprintf('r = %d, l = %d \n',r,l);
% assemble the solution on the sorted magnitudes
p(1:k-r-1) = z(1:k-r-1)/(L+1);
p(k-r:l) = T / (l-k+(L+1)*r+L+1);
p(l+1:d) = z(l+1:d);
p = p';
% [dummy, rev]=sort(ind,'ascend');
rev(ind) = 1:d; % inverse permutation of the sort
p = sign(v) .* p(rev); % restore original order and signs
B = v - 1/L*p;
end
function [l,T] = bsearch(z,array,low,high,diff,k,r,L)
% Binary search used by prox_ksupport: find the largest index l in
% [low,high] such that z(l)*tmp - (array(l)-diff) > 0, and return the
% corresponding partial sum T = array(l)-diff.
if z(low) == 0
l = low;
T = 0;
return;
end
% invariant maintained by the loop:
%z(mid) * tmp - (array(mid) - diff) > 0
%z(mid+1) * tmp - (array(mid+1) - diff) <= 0
while( low < high )
mid = floor( (low + high)/2 ) + 1; % bias up so the loop terminates
tmp = (mid - k + r + 1 + L*(r+1));
if z(mid) * tmp - (array(mid) - diff) > 0
low = mid;
else
high = mid - 1;
end
end
l = low;
T = array(low) - diff;
end
================================================
FILE: proximal_operators/prox_l1.m
================================================
function x = prox_l1(b,lambda)
% Proximal operator of the l1 norm (soft thresholding), lambda >= 0
%
% min_x lambda*||x||_1+0.5*||x-b||_2^2
%
% Applied element-wise: shrink |b| by lambda, keeping the sign; entries
% with |b| <= lambda become zero.
x = sign(b).*max(abs(b)-lambda,0);
================================================
FILE: proximal_operators/prox_l21.m
================================================
function X = prox_l21(B,lambda)
% Proximal operator of the l21 norm of a matrix, lambda >= 0
% (the l21 norm is the sum of the l2 norms of the columns)
%
% min_X lambda*||X||_{2,1}+0.5*||X-B||_2^2
%
% Column-wise block soft-threshold: columns with norm <= lambda are
% zeroed, the rest are scaled toward zero.
X = zeros(size(B));
for j = 1 : size(B,2)
    nb = norm(B(:,j));
    if nb <= lambda
        continue;            % column thresholded to zero
    end
    X(:,j) = (1-lambda/nb)*B(:,j);
end
================================================
FILE: proximal_operators/prox_nuclear.m
================================================
function [X,nuclearnorm] = prox_nuclear(B,lambda)
% Proximal operator of the nuclear norm (singular value thresholding)
%
% min_X lambda*||X||_*+0.5*||X-B||_F^2
%
% Output:
%   X           - the minimizer
%   nuclearnorm - ||X||_* (sum of the thresholded singular values)
[U,S,V] = svd(B,'econ');
sv = diag(S);
keep = sv > lambda;  % singular values are sorted, so keep is a prefix
if ~any(keep)
    X = zeros(size(B));
    nuclearnorm = 0;
    return;
end
shrunk = sv(keep) - lambda;
X = U(:,keep)*diag(shrunk)*V(:,keep)';
nuclearnorm = sum(shrunk);
================================================
FILE: proximal_operators/prox_tnn.m
================================================
function [X,tnn,trank] = prox_tnn(Y,rho)
% The proximal operator of the tensor nuclear norm of a 3 way tensor
%
% min_X rho*||X||_*+0.5*||X-Y||_F^2
%
% Y     -    n1*n2*n3 tensor
%
% X     -    n1*n2*n3 tensor
% tnn   -    tensor nuclear norm of X
% trank -    tensor tubal rank of X
%
% Works in the Fourier domain along the third mode: each frontal slice of
% fft(Y,[],3) is singular-value thresholded. For a real input the FFT
% slices satisfy the conjugate symmetry slice(i) = conj(slice(n3+2-i)),
% so only the first half is computed and the rest is filled by conjugation.
%
% version 2.1 - 14/06/2018
%
% Written by Canyi Lu (canyilu@gmail.com)
%
%
% References:
% Canyi Lu, Tensor-Tensor Product Toolbox. Carnegie Mellon University.
% June, 2018. https://github.com/canyilu/tproduct.
%
% Canyi Lu, Jiashi Feng, Yudong Chen, Wei Liu, Zhouchen Lin and Shuicheng
% Yan, Tensor Robust Principal Component Analysis with A New Tensor Nuclear
% Norm, arXiv preprint arXiv:1804.03728, 2018
%
[n1,n2,n3] = size(Y);
X = zeros(n1,n2,n3);
Y = fft(Y,[],3);
tnn = 0;
trank = 0;
% first frontal slice (always real-frequency; counted once)
[U,S,V] = svd(Y(:,:,1),'econ');
S = diag(S);
r = length(find(S>rho));
if r>=1
S = S(1:r)-rho;
X(:,:,1) = U(:,1:r)*diag(S)*V(:,1:r)';
tnn = tnn+sum(S);
trank = max(trank,r);
end
% i=2,...,halfn3 (each slice also stands for its conjugate, hence *2)
halfn3 = round(n3/2);
for i = 2 : halfn3
[U,S,V] = svd(Y(:,:,i),'econ');
S = diag(S);
r = length(find(S>rho));
if r>=1
S = S(1:r)-rho;
X(:,:,i) = U(:,1:r)*diag(S)*V(:,1:r)';
tnn = tnn+sum(S)*2;
trank = max(trank,r);
end
X(:,:,n3+2-i) = conj(X(:,:,i));
end
% if n3 is even, the Nyquist slice has no conjugate partner (counted once)
if mod(n3,2) == 0
i = halfn3+1;
[U,S,V] = svd(Y(:,:,i),'econ');
S = diag(S);
r = length(find(S>rho));
if r>=1
S = S(1:r)-rho;
X(:,:,i) = U(:,1:r)*diag(S)*V(:,1:r)';
tnn = tnn+sum(S);
trank = max(trank,r);
end
end
% the tensor nuclear norm is the average over the n3 Fourier slices
tnn = tnn/n3;
X = ifft(X,[],3);
================================================
FILE: readme.txt
================================================
LibADMM: A Library of ADMM for Sparse and Low-rank Optimization
This package solves several sparse and low-rank optimization problems by M-ADMM proposed in our work
C. Lu, J. Feng, S. Yan, Z. Lin. A Unified Alternating Direction Method of Multipliers by Majorization Minimization. IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, pp. 527-541, 2018
The folder "LibADMM" contains three subfolders:
1. algorithms: the main solvers.
2. proximal_operators: the proximal operators of several functions used in the subproblems of M-ADMM.
3. tensor_tools: some basic tools for tensors.
Besides the subfolders, we also provide three example scripts, "example_sparse_models.m", "example_low_rank_matrix_models.m", and "example_low_rank_tensor_models.m", which provide examples for all the solvers implemented in this package.
You are also suggested to read the manual at https://canyilu.github.io/publications/2016-software-LibADMM.pdf.
For any problems, please contact Canyi Lu (canyilu@gmail.com).
Version 1.0 (Jun, 2016)
Version 1.1 (Jun, 2018)
- add a new model about low-rank tensor recovery from Gaussian measurements based on tensor nuclear norm and the corresponding function lrtr_Gaussian_tnn.m
- update several functions to improve the efficiency, including prox_tnn.m, tprod.m, tran.m, tubalrank.m, and nmodeproduct.m
- update the three example functions: example_sparse_models.m, example_low_rank_matrix_models.m, and example_low_rank_tensor_models.m
- remove the test on image data and some unnecessary functions
================================================
FILE: tensor_tools/Fold.m
================================================
function [X] = Fold(X, dim, i)
% Fold a mode-i unfolding matrix back into a tensor of size dim.
% Inverse of Unfold: Fold(Unfold(T,dim,i),dim,i) recovers T.
% X   - dim(i) x prod(dim)/dim(i) matrix (mode-i unfolding)
% dim - size vector of the target tensor
% i   - mode index
% rotate dim so mode i is first, reshape, then rotate dimensions back
dim = circshift(dim, [1-i, 1-i]);
X = shiftdim(reshape(X, dim), length(dim)+1-i);
================================================
FILE: tensor_tools/Unfold.m
================================================
function [X] = Unfold( X, dim, i )
% Mode-i unfolding of tensor X (size dim) into a dim(i) x prod(dim)/dim(i)
% matrix; columns follow the cyclic mode ordering i+1,...,N,1,...,i-1
% induced by shiftdim. Inverse operation: Fold.
X = reshape(shiftdim(X,i-1), dim(i), []);
================================================
FILE: tensor_tools/nmodeproduct.m
================================================
function B = nmodeproduct(A,M,n)
% Calculates the n-Mode Product of a Tensor A and a Matrix M
%
% B = nmodeproduct(A,M,n)
%
% B = A (x)_n M .. According to the Definition in De Lathauwer (2000)
%
% with:
% A: (I_1 x I_2 x .. I_n x .. I_N) .. -> n is in [1..N]
% M: (J x I_n)
% B: (I_1 x I_2 x .. J x .. I_N)
%
% note: "(x)_n" is the operator between the tensor and the matrix
%
% v0.001 2009 by Fabian Schneiter
%
% check inputs:
dimvec = size(A);
n = fix(n);
if (length(dimvec) tol);