|
File: b/Semantic Features/NotUsed/svr.m
|
|
function [nsv, beta, bias] = svr(X, Y, ker, C, loss, e)
% SVR Support Vector Regression.
%
% Usage:  [nsv, beta, bias] = svr(X, Y, ker, C, loss, e)
%
% Parameters:
%   X    - training inputs (n x d)
%   Y    - training targets (n x 1)
%   ker  - kernel function name (passed through to svkernel)
%   C    - upper bound on the multipliers (non-separable case)
%   loss - loss function: 'eInsensitive' (default) or 'quadratic'
%   e    - insensitivity width (eInsensitive loss only; default 0.0)
%
% Returns:
%   nsv  - number of support vectors
%   beta - difference of Lagrange multipliers
%   bias - bias term
%
% Author: Steve Gunn (srg@ecs.soton.ac.uk)
%
% Note: As of 7/17/2013 this code is broken and has not been used once - ES
% NOTE(review): relies on project helpers svkernel, nobias, svtol and the
% legacy qp() solver; modern MATLAB ships quadprog instead - TODO confirm
% a qp implementation is on the path before reviving this routine.

  if (nargin < 3 || nargin > 6)          % check correct number of arguments
    help svr
  else
    fprintf('Support Vector Regressing ....\n')
    fprintf('______________________________\n')
    n = size(X, 1);

    % Default arguments for the optional parameters.
    if (nargin < 6), e = 0.0;             end
    if (nargin < 5), loss = 'eInsensitive'; end
    if (nargin < 4), C = Inf;             end
    if (nargin < 3), ker = 'linear';      end

    % Construct the kernel matrix (dense, O(n^2) kernel evaluations).
    fprintf('Constructing ...\n');
    H = zeros(n, n);
    for i = 1:n
      for j = 1:n
        H(i, j) = svkernel(ker, X(i, :), X(j, :));
      end
    end

    % Set up the parameters for the optimisation problem.
    switch lower(loss)
      case 'einsensitive'
        Hb  = [H -H; -H H];
        c   = [(e*ones(n, 1) - Y); (e*ones(n, 1) + Y)];
        vlb = zeros(2*n, 1);               % bounds: alphas >= 0
        vub = C*ones(2*n, 1);              %         alphas <= C
        x0  = zeros(2*n, 1);               % start the solver at the origin
        neqcstr = nobias(ker);             % number of equality constraints (1 or 0)
        if neqcstr
          A = [ones(1, n) -ones(1, n)]; b = 0;   % equality constraint Ax = b
        else
          A = []; b = [];
        end
      case 'quadratic'
        Hb  = H + eye(n)/(2*C);
        c   = -Y;
        vlb = -1e30*ones(n, 1);            % effectively unbounded below
        vub =  1e30*ones(n, 1);            % effectively unbounded above
        x0  = zeros(n, 1);                 % start the solver at the origin
        neqcstr = nobias(ker);             % number of equality constraints (1 or 0)
        if neqcstr
          A = ones(1, n); b = 0;           % equality constraint Ax = b
        else
          A = []; b = [];
        end
      otherwise
        % Fail fast: continuing would crash later on undefined Hb/c/A/b.
        error('svr:unknownLoss', 'Error: Unknown Loss Function');
    end

    % Add a small amount of zero-order regularisation to avoid problems
    % when the Hessian is badly conditioned (rank is always <= n).
    % Note that adding too much regularisation will perturb the solution.
    Hb = Hb + 1e-10*eye(size(Hb));

    % Solve the optimisation problem.
    fprintf('Optimising ...\n');
    st = cputime;

    [alpha, lambda, how] = qp(Hb, c, A, b, vlb, vub, x0, neqcstr);

    fprintf('Execution time : %4.1f seconds\n', cputime - st);
    fprintf('Status : %s\n', how);

    switch lower(loss)
      case 'einsensitive'
        % Difference of the paired multipliers (alpha - alpha*).
        beta = alpha(1:n) - alpha(n+1:2*n);
      case 'quadratic'
        beta = alpha;
    end
    fprintf('|w0|^2 : %f\n', beta'*H*beta);
    fprintf('Sum beta : %f\n', sum(beta));

    % Compute the number of support vectors.
    epsilon = svtol(abs(beta));
    svi = find(abs(beta) > epsilon);
    nsv = length(svi);
    fprintf('Support Vectors : %d (%3.1f %%)\n', nsv, 100*nsv/n);

    % Implicit bias, b0.
    bias = 0;

    % Explicit bias, b0.
    if nobias(ker) ~= 0
      switch lower(loss)
        case 'einsensitive'
          % Find bias from the average of support vectors with
          % interpolation error e; such SVs have 0 < alpha < C.
          svii = find(abs(beta) > epsilon & abs(beta) < (C - epsilon));
          if ~isempty(svii)
            bias = (1/length(svii))*sum(Y(svii) - e*sign(beta(svii)) - H(svii, svi)*beta(svi));
          else
            fprintf('No support vectors with interpolation error e - cannot compute bias.\n');
            bias = (max(Y) + min(Y))/2;    % fall back to the midrange of the targets
          end
        case 'quadratic'
          bias = mean(Y - H*beta);
      end
    end
  end