% classification/RUSboost/plotEnsError.m
function plotEnsError( allModels,ist,X,Y,figureName)
%PLOTENSERROR Plot cumulative ensemble accuracy (1 - cumulative loss) for
%each semantic rating in a single figure, one subplot per characteristic.
%(Despite the name, the quantity plotted is accuracy, not error.)
%
%   param allModels
%       a cell array containing a trained ensemble model for each semantic
%       characteristic, in the same order as the labels below.
%   param ist
%       a cell array of index (or logical) vectors selecting which rows of
%       X/Y are evaluated for each semantic characteristic. Sidenote:
%       training error can be plotted the same way by passing the training
%       index sets instead of the testing index sets.
%   param X
%       input data matrix of image features calculated from nodules in
%       the LIDC (one row per observation).
%   param Y
%       cell matrix containing semantic ratings from the LIDC, one column
%       per characteristic.
%   param figureName
%       A string used as the figure window name. Example: 'Testing Error'

%% Setup

    %Semantic labels, one per subplot title.
    label = {'Subtlety', 'Sphericity', 'Margin', 'Lobulation',... 
              'Spiculation', 'Texture', 'Malignancy'};

    %numel is robust to both row and column cell arrays;
    %size(allModels,2) assumed a row orientation.
    n = numel(allModels);

    %A 2-row by 4-column grid comfortably holds the 7 characteristics.
    subplotX = 4;
    subplotY = 2;

    figure('name',figureName);    
%% Plot cumulative accuracy of all models
    for i=1:n

        %Select the observations belonging to this characteristic's
        %evaluation set.
        testingY = Y(:,i);
        testingX = X(ist{i},:);
        testingY = testingY(ist{i});

        subplot(subplotY,subplotX,i);
        %This is for if you do kfold cross-validation
        %L = kfoldLoss(allModels{i},'mode','average');
        %'cumulative' returns the loss after 1..T weak learners, so the
        %curve shows how accuracy evolves as learners are added.
        L = loss(allModels{i},testingX,testingY,'mode','cumulative');
        L = 1-L;
        plot(L);
        title(label{i});
        grid on;
        xlabel('Number of iterations');
        ylabel('Test accuracy');
    end

end