# Section_4_2.R
# SECTION 4.2: Medical Data Classification with Structural Information
# ======================================== #
# Load the necessary packages #
# ======================================== #
library(SQUIC)
#install.packages("Matrix",repos = "http://cran.us.r-project.org")
library(Matrix)
#install.packages("devtools",repos = "http://cran.us.r-project.org")
library(devtools)
#devtools::install_github('ramhiser/datamicroarray')
library(datamicroarray)
#install.packages("caret",dependencies = TRUE, repos = "http://cran.us.r-project.org")
library(caret)
#install.packages("ggplot2",repos = "http://cran.us.r-project.org")
library(ggplot2)
# install.packages("emstreeR",repos = "http://cran.us.r-project.org")
library(emstreeR)
# ======================================== #
# ======================================== #
# Linear DA Function #
# ======================================== #
Linear_DA <- function(Data_Train, labels_train,
Data_Test, labels_test, Theta_est)
# Linear discriminant analysis with a user-supplied precision matrix.
# Data_Train   = training data, p (dimensions) x n (samples)
# labels_train = integer class labels (1..K) of the training samples
# Data_Test    = test data, p (dimensions) x n (samples)
# labels_test  = integer class labels of the test samples
# Theta_est    = estimated precision (inverse covariance) matrix, p x p
# Returns a list with the caret confusion-matrix summary (Results)
# and the vector of predicted class labels (predicted).
{
# Initialize
N_train <- length(labels_train)
N_test <- length(labels_test)
N_classes <- max(labels_train)
rho_k <- matrix(0, nrow = N_test, ncol = N_classes)
for (k in seq_len(N_classes)) {
in_class <- which(labels_train == k)
# prior probability for class k
prior <- length(in_class) / N_train
# sample mean for class k; drop = FALSE keeps a one-column matrix when
# the class has a single training sample (the original subsetting
# collapsed to a vector there and rowMeans() failed)
mu <- rowMeans(Data_Train[, in_class, drop = FALSE])
# Linear discriminant: rho_k(x) = (x - mu/2)' Theta mu + log(prior).
# Compute Theta %*% mu once and take a matrix-vector product; the
# original diag(t(X) %*% Theta %*% mu_mat) built a full
# N_test x N_test matrix only to read its diagonal.
v <- as.numeric(Theta_est %*% mu)
x_minus_mu <- Data_Test - 0.5 * mu      # mu recycles down the columns
rho_k[, k] <- as.numeric(crossprod(x_minus_mu, v)) + log(prior)
}
# predicted class = argmax of the discriminant scores per test sample
# (vectorized; replaces element-by-element growth of `predicted`)
predicted <- apply(rho_k, 1, which.max)
# Evaluate accuracy of labelling assignment
Results <- confusionMatrix(factor(predicted), factor(labels_test))
return(list(Results = Results, predicted = predicted))
}
# Microarray datasets under consideration
datasets <- c("burczynski", "yeoh", "shipp", "alon")
# Optimal scalar tuning parameter for each dataset
lambdas <- c(0.7, 0.8, 0.8, 0.8)
# Parameters for the estimation of the matrix tuning parameter
eta <- 0.1
lambda_bias <- 0.95
# Accumulators for the accuracy and timing results
ACC_Bias <- vector()
ACC_No_Bias <- vector()
time_Bias <- vector()
time_No_Bias <- vector()
# Load the data (n x p) shipped with the datamicroarray package
N_data <- length(datasets)
data(list = datasets, package = "datamicroarray")
# Main experiment loop: for each dataset, estimate the precision matrix
# with SQUIC twice (scalar tuning parameter vs. MST-based matrix tuning
# parameter) and record the LDA accuracy and SQUIC runtime for both.
for (i in 1:N_data){
set.seed(1991)
curr_dataset = datasets[i];
# Pull the loaded dataset object by name; $x holds the n x p data,
# $y the class labels
Data = eval(as.symbol(curr_dataset))$x;
Data = data.matrix(Data, rownames.force = NA);
labels = as.numeric(eval(as.symbol(curr_dataset))$y);
# Split data in test & train (70% / 30%)
trainIndex = createDataPartition(labels, p = .7, list = FALSE)
trainIndex = as.numeric(trainIndex)
# Transpose so the data is stored p (dimensions) x n (samples),
# as Linear_DA and SQUIC expect
Data_Train = t(Data[ trainIndex,])
Data_Test = t(Data[-trainIndex,])
labels_train = labels[trainIndex]
labels_test = labels[-trainIndex]
# Normalizing data: scale each dimension by its biased (1/N) training
# standard deviation via a diagonal matrix D
N_train = dim(Data_Train)[2];
a = (apply(Data_Train,1,var))*(N_train-1)/N_train;
a = 1/sqrt(a);
D = Matrix::Diagonal(x=a);
iD = Matrix::Diagonal(x=1/a);
Data_Train_scale = D%*%Data_Train;
# --- No graphical bias: SQUIC with a scalar tuning parameter ---
lambda_no_bias = lambdas[i];
res_Null = SQUIC(Y = Data_Train_scale, lambda = lambda_no_bias, M = NULL, verbose = 1)
time_No_Bias[i] = res_Null$info_time_total;
# Undo the scaling: Theta = D X D maps the estimate back to the
# original (unscaled) variables
Theta_Null = D %*% res_Null$X %*% D;
# Linear Discriminant Analysis
LDA_Null = Linear_DA(Data_Train, labels_train,
Data_Test, labels_test, Theta_Null)
ACC_No_Bias[i] = LDA_Null$Results$overall[1];
# --- Graphical bias: minimum spanning tree over the p dimensions ---
# A_tree = eval(as.name(paste(as.symbol(curr_dataset), '_tree', sep="")));
p = nrow(Data_Train_scale);
mst = ComputeMST(as.matrix(Data_Train_scale));
# Sparse adjacency of the MST, symmetrized with ones on the diagonal.
# NOTE(review): an MST on p nodes has p-1 edges; x = rep(1, p) assumes
# ComputeMST returns p rows (to/from) — confirm against emstreeR output.
G = sparseMatrix(dims = c(p,p), i=mst$to, j=mst$from , x=rep(1, p));
G = forceSymmetric(G,uplo="L") + Diagonal(nrow(G));
# Tree Bias: matrix tuning parameter M = eta * G
M_tree = eta*G;
res_tree = SQUIC(Y = Data_Train_scale, lambda = lambda_bias, M = M_tree, verbose = 1)
time_Bias[i] = res_tree$info_time_total;
Theta_tree = D %*% res_tree$X %*% D;
# Linear Discriminant Analysis
LDA_tree = Linear_DA(Data_Train, labels_train,
Data_Test, labels_test, Theta_tree)
ACC_Bias[i] = LDA_tree$Results$overall[1];
}
# Collect the timing and accuracy results for all datasets, write the
# two bar-plot figures (4a and 4b), and print a summary table.
times_all <- cbind(time_No_Bias, time_Bias)
ACC_all <- cbind(ACC_No_Bias, ACC_Bias)
rownames(times_all) <- datasets
rownames(ACC_all) <- datasets
# Figure 4a: timings for the LDA experiments
pdf("Figure4a.pdf")
barplot(
t(times_all),
beside = TRUE,
font.axis = 2,
xlab = "Dataset",
ylab = "Time (sec)",
col = c("chartreuse4", "brown4"),
ylim = c(0, max(times_all) + 20),
legend = c("Scalar Tuning Parameter", "Matrix Tuning Parameter"),
args.legend = list(x = "top", inset = c(-0.0, -0.00), bty = "n", ncol = 2)
)
dev.off()
# Figure 4b: accuracy for the LDA experiments
pdf("Figure4b.pdf")
barplot(
t(ACC_all),
beside = TRUE,
font.axis = 2,
xlab = "Dataset",
ylab = "ACC",
col = c("chartreuse4", "brown4"),
ylim = c(0.5, 1),
xpd = FALSE,
legend = c("Scalar Tuning Parameter", "Matrix Tuning Parameter"),
args.legend = list(x = "top", inset = c(-0.0, -0.00), bty = "n", ncol = 2)
)
dev.off()
# Console summary of tuning parameters, timings, and accuracies
print('===================================')
print('Sec 4.2: LDA with SQUIC')
print('===================================')
LDA_Table <- data.frame(datasets, lambdas, times_all, ACC_all)
print(LDA_Table, digits = 3)