mssvdd.m
% Input: Traindata = Contains the training data for each modality as a cell array, Traindata{1}, Traindata{2}, ...
%                    (each entry is a matrix of size D{i} x N, features x samples)
% Trainlabel = Contains the training labels of the N training samples (the same labels are used for every modality)
% maxIter = Maximum number of iterations
% numofmodes = Total number of modalities
% Cval = Value of the hyperparameter C
% omega = Regularization term (passed to constraintmssvdd)
% Bta = Controls the importance of the regularization term
% eta = Step size for the gradient update
% D = Dimensionality of the data in the original feature space (recomputed from the data inside the function)
% d = Dimensionality of the data in the lower-dimensional subspace
% Output: Model = Trained model
% Q = Contains the projection matrices for each modality, Q{1}, Q{2}, ...
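%
% Example usage (illustrative sketch only; the data sizes and parameter values below
% are assumptions for demonstration, not values prescribed by this file):
%
%   Traindata{1} = randn(100, 200);   % modality 1: 100-dim features, 200 samples
%   Traindata{2} = randn(50, 200);    % modality 2: 50-dim features, same 200 samples
%   Trainlabel   = ones(200, 1);      % one label per training sample
%   [Model, Q] = mssvdd(Traindata, Trainlabel, 20, 2, 0.1, 1, 0.01, 1e-4, [], 10);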
function [Model,Q]=mssvdd(Traindata,Trainlabel,maxIter,numofmodes,Cval,omega,Bta,eta,D,d)
    % Record the original feature dimensionality of each modality
    % (overwrites the D passed in with the sizes observed in the data)
    for i=1:numofmodes
        D{i}=size(Traindata{i},1);
    end
    % Initialize the projection matrix Q{i} for each modality with PCA
    for i=1:numofmodes
        tempQ = pca(Traindata{i}');            % principal directions of modality i
        Q{i}=tempQ(:,1:d)';                    % keep the first d components (d x D{i})
        reducedData{i} = Q{i} * Traindata{i};  % project modality i to the d-dimensional subspace
    end
    for ii=1:maxIter
        % Concatenate the projected data of all modalities and stack the labels accordingly
        ReducedcombinedTraindata=[];
        combinedTrainLabel=[];
        for i=1:numofmodes
            ReducedcombinedTraindata=[ReducedcombinedTraindata reducedData{i}];
            combinedTrainLabel=[combinedTrainLabel; Trainlabel];
        end
        % Train the data description model on the concatenated projected data (linear kernel, C = Cval)
        Model = svmtrain(combinedTrainLabel, ReducedcombinedTraindata', ['-s 5 -t 0 -c ',num2str(Cval)]);
        % Get BigAlphavector, the vector of alphas for all concatenated samples
        Alphaindex=Model.sv_indices;   % indices where alpha is non-zero
        AlphaValue=Model.sv_coef;      % values of alpha
        BigAlphavector=zeros(size(ReducedcombinedTraindata,2),1);   % start from a vector of zeros
        for qq=1:size(Alphaindex,1)
            BigAlphavector(Alphaindex(qq))=AlphaValue(qq);
        end
        % Split BigAlphavector into Alphavector{i} for the corresponding modality
        j=0; i=1;
        while(j<size(BigAlphavector,1))
            Alphavector{i}= BigAlphavector(j+1:j+size(reducedData{i},2));
            j=j+size(reducedData{i},2);
            i=i+1;
        end
        const_d= constraintmssvdd(omega,Cval,Q,Traindata,Alphavector,numofmodes);   % regularization/constraint term (the type used is selected by omega)
        for M_num=1:numofmodes   % M_num = modality number
            % Compute the gradient and update the matrix Q{M_num}
            Sum1=2*Q{M_num}*Traindata{M_num}*diag(Alphavector{M_num})*Traindata{M_num}';
            loopsum2j=zeros(d,D{M_num});
            for j=1:numofmodes
                modalitySum=Q{j}*Traindata{j}*Alphavector{j};   % contribution of modality j (renamed from "sum" to avoid shadowing the built-in)
                loopsum2j=loopsum2j+modalitySum;
            end
            Sum2=2*loopsum2j*diag(Alphavector{M_num}'*Traindata{M_num}');
            Grad{M_num} = Sum1-Sum2+(Bta*const_d{M_num});
            Q{M_num} = Q{M_num} - eta*Grad{M_num};   % gradient-descent update of the projection matrix
            % Orthogonalize and normalize Q{M_num}, then re-project the training data
            Q{M_num} = OandN_Q(Q{M_num});
            reducedData{M_num} = Q{M_num} * Traindata{M_num};
        end
end
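%
% Illustrative test-time sketch (not part of the original file): assuming the same
% libsvm-style interface used above, test data from each modality (here called
% Testdata, a hypothetical cell array) would be projected with Q{i} and scored
% against the trained model with svmpredict:
%
%   reducedTest = [];
%   for i = 1:numofmodes
%       reducedTest = [reducedTest Q{i} * Testdata{i}];
%   end
%   [~, ~, scores] = svmpredict(ones(size(reducedTest, 2), 1), reducedTest', Model);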