######## R program by: Andrada E. Ivanescu ########
######## R program for Sec 4.3. Functional predictors sampled with moderate sparsity ########

library(SemiPar)
library(mgcv)
library(refund)

seed.start = 500
B = 1              # number of simulated datasets
by = 0.1           # spacing (step) of the grids on which the functions are observed
t = seq(0.1, 6, by=by)   # grid on which response curves are observed
s = seq(0.1, 5, by=by)   # grid on which X1(s) predictor curves are observed
r = seq(0.1, 7, by=by)   # grid on which X2(r) predictor curves are observed
n = 200            # number of curves
VarE  = 1
VarX1 = 0.2^2
VarX2 = 0.2^2

# keep track of the seeds for reproducibility
seeds = matrix(seed.start + 0:(B-1), nrow=B, ncol=1)

###---------------------- intercept --------------------------------###
trueBeta0 <- matrix(2*exp(-1*(t-2.5)^2), ncol=1)
trueBeta0 <- matrix(trueBeta0, nrow=1)
trueBeta0_mat <- matrix(trueBeta0, nrow=n, ncol=length(t), byrow=TRUE)

###--------------------- bivariate beta parameters -----------------###
trueBeta1 <- matrix(0, nrow=length(s), ncol=length(t))
trueBeta2 <- matrix(0, nrow=length(r), ncol=length(t))
for(s1 in 1:length(s)){
  for(t1 in 1:length(t)){
    trueBeta1[s1,t1] <- sin(2*s[s1]*pi/10)*cos(2*t[t1]*pi/6)
  }
}
for(t1 in 1:length(t)){
  for(r1 in 1:length(r)){
    trueBeta2[r1,t1] <- sqrt(r[r1]*t[t1])/4.2
  }
}

#############################################################
##############    Initialize matrices to     ###############
#######  store estimates for each simulated dataset  #######
#############################################################
SNR <- matrix(nrow=B, ncol=1)
timesX1 <- matrix(nrow=B, ncol=1)
timesX2 <- matrix(nrow=B, ncol=1)
m100_hatBeta0 <- matrix(nrow=B, ncol=length(t))
m100_hatBeta1_sparse <- array(dim=c(B, length(s), length(t)))
m100_hatBeta2_sparse <- array(dim=c(B, length(r), length(t)))
betaHat0.mult <- matrix(nrow=B, ncol=length(t))
betaHat1.mult <- array(dim=c(B, length(s), length(t)))
betaHat2.mult <- array(dim=c(B, length(r), length(t)))
ISE_Beta0 <- matrix(nrow=B, ncol=1)
ISE_Beta1 <- matrix(nrow=B, ncol=1)
ISE_Beta2 <- matrix(nrow=B, ncol=1)
ISE_Beta0_pfr <- matrix(nrow=B, ncol=1)
ISE_Beta1_pfr <- matrix(nrow=B, ncol=1)
ISE_Beta2_pfr <- matrix(nrow=B, ncol=1)
ICBeta0 <- matrix(nrow=B, ncol=1)
ICBeta1 <- matrix(nrow=B, ncol=1)
ICBeta2 <- matrix(nrow=B, ncol=1)
ICBeta0_pfr <- matrix(nrow=B, ncol=1)
ICBeta1_pfr <- matrix(nrow=B, ncol=1)
ICBeta2_pfr <- matrix(nrow=B, ncol=1)
covBeta0 <- matrix(0, nrow=B, ncol=length(t))
covBeta1 <- array(0, dim=c(B, length(s), length(t)))
covBeta2 <- array(0, dim=c(B, length(r), length(t)))
IWBeta0 <- matrix(nrow=B, ncol=1)
IWBeta1 <- matrix(nrow=B, ncol=1)
IWBeta2 <- matrix(nrow=B, ncol=1)
IWBeta0_pfr <- matrix(nrow=B, ncol=1)
IWBeta1_pfr <- matrix(nrow=B, ncol=1)
IWBeta2_pfr <- matrix(nrow=B, ncol=1)
IR2 <- matrix(nrow=B, ncol=1)
IR2_pfr <- matrix(nrow=B, ncol=1)
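#####################################################################
## Note (added): the simulated model, as constructed in the "generate
## outcomes" block inside the simulation loop below, is the
## function-on-function regression
##   Y_i(t) = beta0(t) + int X_i1(s) beta1(s,t) ds + int X_i2(r) beta2(r,t) dr + eps_i(t),
## with the true coefficient functions defined above:
##   beta0(t)   = 2*exp(-(t-2.5)^2)
##   beta1(s,t) = sin(2*pi*s/10)*cos(2*pi*t/6)
##   beta2(r,t) = sqrt(r*t)/4.2
## The integrals are approximated by Riemann sums with weight "by" = 0.1.
#####################################################################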
######################################################################
#################  pfr function (Goldsmith et al. 2011)  #############
pfr <- function(Y, covariates=NULL, funcs, kz=30, kb=30, smooth.cov=FALSE,
                family="gaussian", ...)
{
  require(mgcv)
  kb = min(kz, kb)
  n = length(Y)
  t = seq(0, 1, length = dim(funcs)[2])
  p = ifelse(is.null(covariates), 0, dim(covariates)[2])
  ## get the eigen decomposition of the smoothed variance matrix
  if(smooth.cov){
    G2 <- var(funcs)
    M <- length(t)
    diag(G2) = rep(NA, M)
    g2 <- as.vector(G2)
    ## define an N*N grid of knots for bivariate smoothing
    N <- 10
    ## bivariate smoothing using the gamm function
    t1 <- rep(t, each=M)
    t2 <- rep(t, M)
    newdata <- data.frame(t1=t1, t2=t2)
    K.0 <- matrix(predict(gamm(as.vector(g2) ~ te(t1, t2, k=N))$gam, newdata), M, M)  # smooth K.0
    K.0 <- (K.0 + t(K.0))/2
    eigenDecomp <- eigen(K.0)
  } else {
    varFuncs <- var(funcs)
    eigenDecomp <- eigen(varFuncs)
  }
  psi = eigenDecomp$vectors[, 1:kz]
  # set the basis to be used for beta(t)
  num = kb - 2
  qtiles <- seq(0, 1, length = num + 2)[-c(1, num + 2)]
  knots <- quantile(t, qtiles)
  phi = cbind(1, t, sapply(knots, function(k) ((t - k > 0) * (t - k))))
  ## set up the design and penalty matrix
  C = funcs %*% eigenDecomp$vectors[, 1:kz]
  J = t(eigenDecomp$vectors[, 1:kz]) %*% phi
  CJ = C %*% J
  X = cbind(1, covariates, CJ)
  D = diag(c(rep(0, 1 + p + 2), rep(1, kb - 2)))
  ## fit the model
  gam = gam(Y ~ X - 1, paraPen=list(X=list(D)), family=family, method="REML", ...)
  ## get the coefficient and betaHat estimates
  coefs = gam$coef
  fitted.vals <- as.matrix(X[, 1:length(coefs)]) %*% coefs
  beta.covariates = coefs[1:(p+1)]
  betaHat <- phi %*% coefs[-(1:(p+1))]
  ## get the covariance matrix of the estimated functional coefficient
  varBeta = gam$Vp[-(1:(1+p)), -(1:(1+p))]
  varBetaHat = phi %*% varBeta %*% t(phi)
  ## construct upper and lower bounds for betaHat
  Bounds = cbind(betaHat + 1.96*(sqrt(diag(varBetaHat))),
                 betaHat - 1.96*(sqrt(diag(varBetaHat))))
  ret <- list(gam, fitted.vals, betaHat, beta.covariates, X, phi, psi, varBetaHat, Bounds)
  names(ret) <- c("gam", "fitted.vals", "betaHat", "beta.covariates", "X", "phi", "psi",
                  "varBetaHat", "Bounds")
  ret
}

######################################################################
####------------------- Start B simulations ----------------------####
######################################################################
for(b in 1:B){

set.seed(seeds[b])

###--------------------- X1 funct pred ----------------------###
###----------------------- mean 0 ---------------------------###
###----------------- adapted from Goldsmith -----------------###
funcs1.true <- matrix(0, nrow=n, ncol=length(s))
for(i2 in 1:n){
  for(j2 in 1:2){
    e = rnorm(2, 0, 1/j2^(2))
    funcs1.true[i2,] = funcs1.true[i2,] + e[1]*cos((2*pi/10)*s*j2)
    funcs1.true[i2,] = funcs1.true[i2,] + e[2]*sin((2*pi/10)*s*j2)
  }
}
X1 = funcs1.true
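## Note (added): each X1 curve above is a mean-zero random Fourier sum with two
## sine/cosine harmonics on a period-10 scale and coefficient standard deviations
## 1/j^2, so the higher-frequency component contributes less variation.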
###--------------------- X2 funct pred ----------------------###
###----------------------- mean 0 ---------------------------###
###-------- adapted from Brownian Bridge (BB) ---------------###
###------- for BB, see Shorack (2000), p. 304 ---------------###
K <- 4
M <- length(r)
N <- n
Un <- matrix(0, nrow=K, ncol=N)
for(i1 in 1:N){ Un[,i1] <- rnorm(K, mean=0, sd=1) }   # loop index renamed so it does not shadow the sample size n
U <- matrix(0, nrow=K, ncol=N)
for(k in 1:K){ U[k,] <- (Un[k,])/(k*pi) }
SIB <- matrix(0, nrow=K, ncol=M)
for(j in 1:K){ SIB[j,] <- 2*(sqrt(2))*sin(1*pi*j*r/7) }
BB <- t(SIB) %*% U
funcs2.true = t(BB)
X2 = funcs2.true

####################### generate outcomes #########################
L1 <- matrix(by, ncol=length(s), nrow=n)
L2 <- matrix(by, ncol=length(r), nrow=n)
LX1 <- L1*X1
LX2 <- L2*X2
eta <- LX1 %*% trueBeta1 + LX2 %*% trueBeta2
#################################################################################
errors = matrix(scale(rnorm(n*length(t), mean=0, sd=sqrt(VarE))), nrow=n)
outcomes <- trueBeta0_mat + eta + errors
SNR[b] <- sd(as.vector(trueBeta0_mat + eta))/sd(as.vector(errors))

################## X1 and X2 are observed with noise: V1 and V2 ################
V1 = funcs1.true + matrix(rnorm(length(X1), mean=0, sd=sqrt(VarX1)), nrow=n, ncol=length(s))
V2 = funcs2.true + matrix(rnorm(length(X2), mean=0, sd=sqrt(VarX2)), nrow=n, ncol=length(r))

######### R code: smoothing approach used in Goldsmith et al. (2011)
######### to reconstruct the trajectories for each functional predictor
########## SPARSE
NGRIDPTS = 12        # number of sampled points for each function
N.fit = length(s)    # number of points in the grid for the estimated functions
## sparse data
N = NGRIDPTS
M = n
J = 1
tstart = 0.1
tlength = 5
N.smooth.single = 20
N.smooth = 10

unique.average <- function(index, y) {
  n <- sum(!is.na(y))
  y.new <- rep(NA, n)
  index.new <- rep(NA, n)
  count <- rep(0, n)
  y.new[1] <- y[1]
  index.new[1] <- index[1]
  count[1] <- 1
  current <- 1
  if(n > 1) {
    for(i in 2:n) {
      if(index[i] == index[i-1]) {
        count[i] <- count[i-1] + 1
        y.new[current] <- ( y.new[current]*count[i-1] + y[i] )/count[i]
        count[i-1] <- count[i]
      } else {
        current <- current + 1
        y.new[current] <- y[i]
        count[i] <- 1
        index.new[current] <- index[i]
      }
    }
  }
  return(list(index=index.new, y=y.new))
}

sampled_s = t(sapply(1:n, function(u) sort(sample(1:length(s), size=NGRIDPTS))))
tx = t(sapply(1:n, function(u) s[sampled_s[u,]]))
y  = t(sapply(1:n, function(u) V1[u, sampled_s[u,]]))
t.knot <- seq(tstart, tstart + tlength - by, length=N.smooth.single)
t.fit <- s
N.row <- dim(y)[1]
y1 <- matrix(0, M, N)
index <- matrix(0, M, N)
t.fit.matrix <- matrix(rep(t.fit, each=N), nrow=N)
for(m in 1:M) {
  row.ind <- m
  index.temp <- apply((t.fit.matrix - tx[row.ind,])^2, 1, which.min)
  fit <- unique.average(index.temp, y[row.ind,])
  y1[row.ind,] <- fit$y
  index[row.ind,] <- fit$index
}
n.grid <- apply(!is.na(y1), 1, sum)
library(SemiPar)
mu.fit <- rep(0, N.fit)
mu.obs <- matrix(0, M, N)
### divide the interval into "N.fit" bins to reduce the computational burden for smoothing
mu.sum <- rep(0, N.fit)
mu.count <- rep(0, N.fit)
mu.mean <- rep(0, N.fit)
for (m in 1:M) {
  row.ind <- m
  row.len <- n.grid[row.ind]
  mu.count[ index[row.ind, 1:row.len] ] <- mu.count[ index[row.ind, 1:row.len] ] + 1
  mu.sum[ index[row.ind, 1:row.len] ]   <- mu.sum[ index[row.ind, 1:row.len] ] + y1[row.ind, 1:row.len]
}
mu.mean <- ifelse(mu.count==0, NA, mu.sum/mu.count)
temp.data <- data.frame(y2 = mu.mean, x2 = t.fit)
attach(temp.data, pos=3)
fit.mu <- spm(y2 ~ f(x2, knots=t.knot), omit.missing=TRUE)
newx <- data.frame(x2 = t.fit)
mu.fit <- predict(fit.mu, newx)
detach(pos=3)
mu.obs <- matrix(mu.fit[index], ncol=N)
resd <- matrix(0, nrow=M, ncol=N)
resd <- y - mu.obs
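## Note (added): the block below estimates the covariance of the de-meaned sparse
## observations by the method of moments, accumulating cross-products only over
## the grid-cell pairs actually observed for each curve; cells that are never
## observed jointly remain NA and are skipped (omit.missing=TRUE) by the bivariate
## spm() smoother that follows.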
### divide the interval into "N.fit" bins to reduce the computational burden for smoothing
cov.sum <- matrix(0, N.fit, N.fit)
cov.count <- matrix(0, N.fit, N.fit)
cov.mean <- matrix(0, N.fit, N.fit)
for (m in 1:M) {
  row.ind <- m
  len.ind <- n.grid[row.ind]
  temp.ind <- index[row.ind, 1:len.ind]
  cov.count[temp.ind, temp.ind] <- cov.count[temp.ind, temp.ind] + 1
  cov.sum[temp.ind, temp.ind]   <- cov.sum[temp.ind, temp.ind] +
                                   resd[row.ind, 1:len.ind] %*% t(resd[row.ind, 1:len.ind])
}
cov.mean <- ifelse(cov.count==0, NA, cov.sum/cov.count)
cov.mean.nodiag <- cov.mean
diag(cov.mean.nodiag) <- rep(NA, N.fit)
resd.pair <- cbind(as.vector(cov.mean.nodiag), rep(t.fit, each=N.fit), rep(t.fit, N.fit))
resd.pair.diag <- cbind(diag(cov.mean), t.fit)
G <- matrix(0, N.fit, N.fit)
### the "SemiPar" package contains functions for semiparametric smoothing
### using linear mixed models
t.smooth <- seq(tstart, tstart + tlength - by, length=N.smooth)
myknots.t <- data.frame(x1t=rep(t.smooth, each=N.smooth), x2t=rep(t.smooth, N.smooth))
data.gt <- data.frame(gt=resd.pair[,1], x1t=resd.pair[,2], x2t=resd.pair[,3])
attach(data.gt, pos=4)
fit.t <- spm(gt ~ f(x1t, x2t, knots=myknots.t), omit.missing=TRUE)
newdata.t <- data.frame(x1t=rep(t.fit, each=N.fit), x2t=rep(t.fit, N.fit))
pred.t <- predict(fit.t, newdata.t)
detach(pos=4)
s.g <- matrix(pred.t, N.fit)
G <- (s.g + t(s.g))/2
eigenDecomp <- eigen(G)
par(mfrow=c(2,3))
for(i in 1:6) { plot(eigenDecomp$vectors[,i], type="l") }
temp.data <- data.frame(var.y = as.vector(resd^2), var.x = as.vector(tx))
attach(temp.data, pos=6)
fit.var <- spm(var.y ~ f(var.x, knots=t.knot), omit.missing=TRUE)
newx <- data.frame(var.x = t.fit)
var.fit <- predict(fit.var, newx)
detach(pos=6)
var.noise <- mean(var.fit - diag(matrix(pred.t, N.fit)))
var.noise
###############################################
if(var.noise < 0){
  timesX1[b] <- var.noise
  var.noise = .002
}
###############################################
fpca1.value <- eigenDecomp$values * tlength / N.fit
fpca1.value <- ifelse(fpca1.value >= 0, fpca1.value, 0)
K1 = min(35, N)
fpca1.vectors <- eigenDecomp$vectors[, 1:K1] * sqrt(N.fit/tlength)
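## Note (added): the eigenvalues of the smoothed covariance are rescaled by
## tlength/N.fit and the eigenvectors by sqrt(N.fit/tlength) so that they
## approximate eigenvalues/eigenfunctions of the covariance operator on the
## continuous domain (Riemann-sum normalization); K1 = min(35, N) components
## are retained.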
### The eigenfunctions are unique only up to a change of sign.
### Select the signs of the eigenfunctions so that their integral over the domain
### is non-negative
for(i2 in 1:K1) {
  v2 <- fpca1.vectors[,i2]
  tempsign <- sum(v2)
  fpca1.vectors[,i2] <- ifelse(tempsign < 0, -1, 1) * v2
}
lam1 = fpca1.value[1:K1]
phi1 = fpca1.vectors
###
### Estimate principal component scores and predict the individual functions
###
library(MASS)
index <- matrix(0, M, N)
mu.hat <- matrix(0, M, N)
y.center <- matrix(0, M, N)
phi1.hat <- array(0, dim=c(M, N, K1))
t.fit.matrix <- matrix(rep(t.fit, each=N), nrow=N)
for(m in 1:M) {
  row.ind <- m
  index <- apply((t.fit.matrix - tx[row.ind,])^2, 1, which.min)
  y.center[row.ind,] <- y[row.ind,] - mu.fit[index]
  phi1.hat[row.ind, , ] <- phi1[index, ]
}
est.si1 <- matrix(0, nrow=M, ncol=K1)
est.si1.sd <- matrix(0, nrow=M, ncol=K1)
y.hat.center <- matrix(0, M, N.fit)
y.hat <- matrix(0, M, N.fit)
y.hat.subject <- matrix(0, M, N.fit)
y.hat.sd <- matrix(0, M, N.fit)
y.hat.subject.sd <- matrix(0, M, N.fit)
for(m in 1:M) {
  cov.y <- matrix(0, N, N)
  cov.xi.y <- matrix(0, K1, N)
  ind1 <- m
  cov.y[1:N, 1:N] <- phi1.hat[ind1,,] %*% diag(lam1) %*% t(phi1.hat[ind1,,]) +
                     diag(rep(var.noise, N))
  ind1 <- m
  cov.xi.y[1:K1, 1:N] <- diag(lam1) %*% t(phi1.hat[ind1,,])
  temp.mat <- cov.xi.y %*% ginv(cov.y)
  score <- temp.mat %*% as.vector(t(y.center[m,]))
  var.score <- diag(c(lam1)) - temp.mat %*% t(cov.xi.y)
  est.si1[m,] <- score[1:K1]
  y.hat.subject[m,] <- mu.fit + phi1 %*% est.si1[m,]
  y.hat.subject.sd[m,] <- sqrt(diag(phi1 %*% var.score[1:K1, 1:K1] %*% t(phi1)))
  row.ind <- m
  y.hat.center[row.ind,] <- phi1 %*% est.si1[m,]
  y.hat[row.ind,] <- y.hat.center[row.ind,] + mu.fit
  temp1 <- cbind(phi1)
  temp2 <- var.score[c(1:K1), c(1:K1)]
  y.hat.sd[row.ind,] <- sqrt(diag(temp1 %*% temp2 %*% t(temp1)))
}
SX1 <- y.hat
###############################################
# end sparse data estimation procedure for X1
###############################################

################## sparse data plot
par(mfrow=c(1,2))
## X1
range_ylim = c(-2.5, 1.5)
power10 = floor(range_ylim[1])
axisx_range = seq(s[1]-0.1, s[length(s)], by=1)
axisx_label = axisx_range
axisy_range = seq(from=range_ylim[1], to=range_ylim[2], length.out=4)
axisy_label = round(axisy_range, 1)
for(j in 2:3){
  plot(s, SX1[j,], type="l", col="red", ylim=c(-2.5,1.5), ylab="", main="",
       lwd=2, axes=FALSE, cex.lab=1.5)
  axis(1, at=axisx_range, col="grey35", labels=axisx_label, cex=1.2, font=2.5, cex.axis=1.3)
  axis(2, at=axisy_range, col="grey35", labels=axisy_label, cex=1.2, font=2.5, cex.axis=1.3)
  lines(tx[j,], y[j,], type="p", col="red", pch=21, cex=1)
  lines(s, X1[j,], type="l", col="black", lty=3, lwd=2)
}
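## Note (added): SX1 holds the X1 curves reconstructed on the full grid s from the
## NGRIDPTS = 12 noisy observations per curve, with FPCA scores obtained from the
## best-linear-prediction formula used in the loop above; the plot compares the
## reconstruction (red), the sparse observations (points) and the true curve
## (dotted). SX1 is used in place of the unobserved X1 when the regression models
## are fit further below.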
###############################################
NGRIDPTS = 25        # number of sampled points for each function
N.fit = length(r)    # number of points in the grid for the estimated functions
## sparse data
N = NGRIDPTS
M = n
J = 1
tstart = 0.1
tlength = 7
N.smooth.single = 20
N.smooth = 20

unique.average <- function(index, y) {
  n <- sum(!is.na(y))
  y.new <- rep(NA, n)
  index.new <- rep(NA, n)
  count <- rep(0, n)
  y.new[1] <- y[1]
  index.new[1] <- index[1]
  count[1] <- 1
  current <- 1
  if(n > 1) {
    for(i in 2:n) {
      if(index[i] == index[i-1]) {
        count[i] <- count[i-1] + 1
        y.new[current] <- ( y.new[current]*count[i-1] + y[i] )/count[i]
        count[i-1] <- count[i]
      } else {
        current <- current + 1
        y.new[current] <- y[i]
        count[i] <- 1
        index.new[current] <- index[i]
      }
    }
  }
  return(list(index=index.new, y=y.new))
}

sampled_r = t(sapply(1:n, function(u) sort(sample(1:length(r), size=NGRIDPTS))))
tx = t(sapply(1:n, function(u) r[sampled_r[u,]]))
y  = t(sapply(1:n, function(u) V2[u, sampled_r[u,]]))
t.knot <- seq(tstart, tstart + tlength - by, length=N.smooth.single)
t.fit <- r
N.row <- dim(y)[1]
y1 <- matrix(0, M, N)
index <- matrix(0, M, N)
t.fit.matrix <- matrix(rep(t.fit, each=N), nrow=N)
for(m in 1:M) {
  row.ind <- m
  index.temp <- apply((t.fit.matrix - tx[row.ind,])^2, 1, which.min)
  fit <- unique.average(index.temp, y[row.ind,])
  y1[row.ind,] <- fit$y
  index[row.ind,] <- fit$index
}
n.grid <- apply(!is.na(y1), 1, sum)
library(SemiPar)
mu.fit <- rep(0, N.fit)
mu.obs <- matrix(0, M, N)
### divide the interval into "N.fit" bins to reduce the computational burden for smoothing
mu.sum <- rep(0, N.fit)
mu.count <- rep(0, N.fit)
mu.mean <- rep(0, N.fit)
for (m in 1:M) {
  row.ind <- m
  row.len <- n.grid[row.ind]
  mu.count[ index[row.ind, 1:row.len] ] <- mu.count[ index[row.ind, 1:row.len] ] + 1
  mu.sum[ index[row.ind, 1:row.len] ]   <- mu.sum[ index[row.ind, 1:row.len] ] + y1[row.ind, 1:row.len]
}
mu.mean <- ifelse(mu.count==0, NA, mu.sum/mu.count)
temp.data <- data.frame(y2 = mu.mean, x2 = t.fit)
attach(temp.data, pos=3)
fit.mu <- spm(y2 ~ f(x2, knots=t.knot), omit.missing=TRUE)
newx <- data.frame(x2 = t.fit)
mu.fit <- predict(fit.mu, newx)
detach(pos=3)
mu.obs <- matrix(mu.fit[index], ncol=N)
resd <- matrix(0, nrow=M, ncol=N)
resd <- y - mu.obs
### divide the interval into "N.fit" bins to reduce the computational burden for smoothing
cov.sum <- matrix(0, N.fit, N.fit)
cov.count <- matrix(0, N.fit, N.fit)
cov.mean <- matrix(0, N.fit, N.fit)
for (m in 1:M) {
  row.ind <- m
  len.ind <- n.grid[row.ind]
  temp.ind <- index[row.ind, 1:len.ind]
  cov.count[temp.ind, temp.ind] <- cov.count[temp.ind, temp.ind] + 1
  cov.sum[temp.ind, temp.ind]   <- cov.sum[temp.ind, temp.ind] +
                                   resd[row.ind, 1:len.ind] %*% t(resd[row.ind, 1:len.ind])
}
cov.mean <- ifelse(cov.count==0, NA, cov.sum/cov.count)
cov.mean.nodiag <- cov.mean
diag(cov.mean.nodiag) <- rep(NA, N.fit)
resd.pair <- cbind(as.vector(cov.mean.nodiag), rep(t.fit, each=N.fit), rep(t.fit, N.fit))
resd.pair.diag <- cbind(diag(cov.mean), t.fit)
G <- matrix(0, N.fit, N.fit)
### the "SemiPar" package contains functions for semiparametric smoothing
### using linear mixed models
t.smooth <- seq(tstart, tstart + tlength - by, length=N.smooth)
myknots.t <- data.frame(x1t=rep(t.smooth, each=N.smooth), x2t=rep(t.smooth, N.smooth))
data.gt <- data.frame(gt=resd.pair[,1], x1t=resd.pair[,2], x2t=resd.pair[,3])
attach(data.gt, pos=4)
fit.t <- spm(gt ~ f(x1t, x2t, knots=myknots.t), omit.missing=TRUE)
newdata.t <- data.frame(x1t=rep(t.fit, each=N.fit), x2t=rep(t.fit, N.fit))
pred.t <- predict(fit.t, newdata.t)
detach(pos=4)
s.g <- matrix(pred.t, N.fit)
G <- (s.g + t(s.g))/2
eigenDecomp <- eigen(G)
par(mfrow=c(2,3))
for(i in 1:6) { plot(eigenDecomp$vectors[,i], type="l") }
temp.data <- data.frame(var.y = as.vector(resd^2), var.x = as.vector(tx))
attach(temp.data, pos=6)
fit.var <- spm(var.y ~ f(var.x, knots=t.knot), omit.missing=TRUE)
newx <- data.frame(var.x = t.fit)
var.fit <- predict(fit.var, newx)
detach(pos=6)
var.noise <- mean(var.fit - diag(matrix(pred.t, N.fit)))
###############################################
if(var.noise < 0){
  timesX2[b] <- var.noise
  var.noise = .002
}
###############################################
fpca1.value <- eigenDecomp$values * tlength / N.fit
fpca1.value <- ifelse(fpca1.value >= 0, fpca1.value, 0)
K1 = min(35, N)
fpca1.vectors <- eigenDecomp$vectors[, 1:K1] * sqrt(N.fit/tlength)
### The eigenfunctions are unique only up to a change of sign.
### Select the signs of the eigenfunctions so that their integral over the domain
### is non-negative
for(i2 in 1:K1) {
  v2 <- fpca1.vectors[,i2]
  tempsign <- sum(v2)
  fpca1.vectors[,i2] <- ifelse(tempsign < 0, -1, 1) * v2
}
lam1 = fpca1.value[1:K1]
phi1 = fpca1.vectors
###
### Estimate principal component scores and predict the individual functions
###
library(MASS)
index <- matrix(0, M, N)
mu.hat <- matrix(0, M, N)
y.center <- matrix(0, M, N)
phi1.hat <- array(0, dim=c(M, N, K1))
t.fit.matrix <- matrix(rep(t.fit, each=N), nrow=N)
for(m in 1:M) {
  row.ind <- m
  index <- apply((t.fit.matrix - tx[row.ind,])^2, 1, which.min)
  y.center[row.ind,] <- y[row.ind,] - mu.fit[index]
  phi1.hat[row.ind, , ] <- phi1[index, ]
}
est.si1 <- matrix(0, nrow=M, ncol=K1)
est.si1.sd <- matrix(0, nrow=M, ncol=K1)
y.hat.center <- matrix(0, M, N.fit)
y.hat <- matrix(0, M, N.fit)
y.hat.subject <- matrix(0, M, N.fit)
y.hat.sd <- matrix(0, M, N.fit)
y.hat.subject.sd <- matrix(0, M, N.fit)
for(m in 1:M) {
  cov.y <- matrix(0, N, N)
  cov.xi.y <- matrix(0, K1, N)
  ind1 <- m
  cov.y[1:N, 1:N] <- phi1.hat[ind1,,] %*% diag(lam1) %*% t(phi1.hat[ind1,,]) +
                     diag(rep(var.noise, N))
  ind1 <- m
  cov.xi.y[1:K1, 1:N] <- diag(lam1) %*% t(phi1.hat[ind1,,])
  temp.mat <- cov.xi.y %*% ginv(cov.y)
  score <- temp.mat %*% as.vector(t(y.center[m,]))
  var.score <- diag(c(lam1)) - temp.mat %*% t(cov.xi.y)
  est.si1[m,] <- score[1:K1]
  y.hat.subject[m,] <- mu.fit + phi1 %*% est.si1[m,]
  y.hat.subject.sd[m,] <- sqrt(diag(phi1 %*% var.score[1:K1, 1:K1] %*% t(phi1)))
  row.ind <- m
  y.hat.center[row.ind,] <- phi1 %*% est.si1[m,]
  y.hat[row.ind,] <- y.hat.center[row.ind,] + mu.fit
  temp1 <- cbind(phi1)
  temp2 <- var.score[c(1:K1), c(1:K1)]
  y.hat.sd[row.ind,] <- sqrt(diag(temp1 %*% temp2 %*% t(temp1)))
}
SX2 <- y.hat
###############################################
# end sparse data estimation procedure for X2
###############################################

################## plot
par(mfrow=c(1,2))
## X2
range_ylim = c(-2.5, 1.5)
power10 = floor(range_ylim[1])
axisx_range = seq(r[1]-0.1, r[length(r)], by=1)
axisx_label = axisx_range
axisy_range = seq(from=range_ylim[1], to=range_ylim[2], length.out=5)
axisy_label = round(axisy_range, 1)
for(j in 2:3){
  plot(r, SX2[j,], type="l", col="red", ylim=c(-2.5,1.5), ylab="", main="",
       lwd=2, axes=FALSE, cex.lab=1.5)
  axis(1, at=axisx_range, col="grey35", labels=axisx_label, cex=1.2, font=2.5, cex.axis=1.3)
  axis(2, at=axisy_range, col="grey35", labels=axisy_label, cex=1.2, font=2.5, cex.axis=1.3)
  lines(tx[j,], y[j,], type="p", col="red", pch=21, cex=1)
  lines(r, X2[j,], type="l", col="black", lty=3, lwd=2)
}

##############################################################################
#############################################################
#########          PLOTS OF Y, X_i1, X_i2          ##########
#############################################################
clrs <- col2rgb("gray")
clrs <- rgb(t(clrs)/255, alpha=.25)
par(mfrow=c(1,3))
plot(t, outcomes[1,], type="l", ylim=range(outcomes), ylab="", main=expression(Y(t)), col=clrs)
for(i in 1:n){ lines(t, outcomes[i,], col=clrs) }
for(i in 1:3){ lines(t, outcomes[i,], col=i+1, lwd=1) }
plot(s, X1[1,], type="l", ylab="", ylim=range(X1), main=expression(X[i1](s)), col=clrs)
for(i in 1:n){ lines(s, X1[i,], col=clrs) }
for(i in 1:3){ lines(s, X1[i,], col=i+1, lwd=1); lines(s, V1[i,], col=i+1, lwd=1, lty=2) }
plot(r, X2[1,], type="l", ylab="", ylim=range(X2), main=expression(X[i2](r)), col=clrs)
for(i in 1:n){ lines(r, X2[i,], col=clrs) }
for(i in 1:3){ lines(r, X2[i,], col=i+1, lwd=1); lines(r, V2[i,], col=i+1, lwd=1, lty=2) }
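################################################################
## Note (added): the next block fits the penalized function-on-function
## regression with mgcv::bam. The response matrix is vectorized (yvec), and each
## functional term int X(s) beta(s,t) ds is represented by a tensor-product
## smooth te(tmat, smat, by = Lmat), where the "by" matrix Lmat carries the
## reconstructed predictor values already multiplied by the quadrature weight
## (L1 = L2 = by = 0.1), so the linear predictor is a Riemann-sum approximation
## of the integral. The intercept beta0(t) is modeled by s(tvec).
## Coefficient surfaces and pointwise confidence bands are then read off the fit
## with PredictMat() applied to each smooth term and the corresponding block of
## the Bayesian covariance matrix m100$Vp.
################################################################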
################################################################
##                            pffr                            ##
################################################################
yvec <- as.vector(t(outcomes))
tngrid = length(t)
sngrid = length(s)
rngrid = length(r)
### fit the model ###
tmat1 <- matrix(t, nrow=n*tngrid, ncol=sngrid, byrow=FALSE)
tmat2 <- matrix(t, nrow=n*tngrid, ncol=rngrid, byrow=FALSE)
smat  <- matrix(s, nrow=n*tngrid, ncol=sngrid, byrow=TRUE)
rmat  <- matrix(r, nrow=n*tngrid, ncol=rngrid, byrow=TRUE)
LSX1 <- L1*SX1
LSX2 <- L2*SX2
Lmat1 <- LSX1[rep(1:n, each=tngrid), ]
Lmat2 <- LSX2[rep(1:n, each=tngrid), ]
tvec <- matrix(t, nrow=n*tngrid, ncol=1, byrow=FALSE)
m100 <- bam(yvec ~ s(tvec, bs="ps", k=12) + te(tmat1, smat, by=Lmat1, bs="ps") +
                   te(tmat2, rmat, by=Lmat2, bs="ps", k=7), method="REML")

################################################################
##                     for METHOD 1 pffr                      ##
################################################################
###### obtain INTERCEPT
term0 <- m100$smooth[[1]]
x = t
predDat <- data.frame(x)
names(predDat) <- c(term0$term)
PX0 <- PredictMat(term0, dat=predDat)
m100_hatBeta0_func <- matrix(PX0 %*% m100$coefficients[term0$first.para:term0$last.para],
                             ncol=length(t))
m100_hatBeta0[b,] <- m100_hatBeta0_func + m100$coefficients[1]

###### obtain BETA1
term1 <- m100$smooth[[2]]
x = rep(t, rep(length(s), length(t)))
predDat <- data.frame(x = rep(t, rep(length(s), length(t))),
                      y = rep(s, length(t)),
                      by = rep(1, length(x)))   # one row per (t,s) grid point
names(predDat) <- c(term1$term, term1$by)
PX <- PredictMat(term1, dat=predDat)
m100_hatBeta1_sparse[b,,] <- matrix(PX %*% m100$coefficients[term1$first.para:term1$last.para],
                                    nrow=length(s), ncol=length(t))

#######################################################################
######################### Pointwise CI for beta0(t) ##################
#######################################################################
Sigma_Beta0 <- m100$Vp[term0$first.para:term0$last.para, term0$first.para:term0$last.para]
PX0 <- as.matrix(PX0)
varBeta0 <- matrix(0, nrow=length(t), ncol=1)
lBoundBeta0 <- matrix(0, nrow=length(t), ncol=1)
uBoundBeta0 <- matrix(0, nrow=length(t), ncol=1)
for(i in 1:(length(t))){
  varBeta0[i] <- t(PX0[i,]) %*% Sigma_Beta0 %*% PX0[i,]
  lBoundBeta0[i] <- as.vector(m100_hatBeta0[b,])[i] - 1.96*sqrt(varBeta0[i])
  uBoundBeta0[i] <- as.vector(m100_hatBeta0[b,])[i] + 1.96*sqrt(varBeta0[i])
}

#######################################################################
######################### Pointwise CI for beta1(s,t) ################
#######################################################################
Sigma_Beta1 <- m100$Vp[term1$first.para:term1$last.para, term1$first.para:term1$last.para]
PX <- as.matrix(PX)
varBeta1 <- matrix(0, nrow=length(s)*length(t), ncol=1)
lBoundBeta1 <- matrix(0, nrow=length(s)*length(t), ncol=1)
uBoundBeta1 <- matrix(0, nrow=length(s)*length(t), ncol=1)
for(i in 1:(length(s)*length(t))){
  varBeta1[i] <- t(PX[i,]) %*% Sigma_Beta1 %*% PX[i,]
  lBoundBeta1[i] <- as.vector(m100_hatBeta1_sparse[b,,])[i] - 1.96*sqrt(varBeta1[i])
  uBoundBeta1[i] <- as.vector(m100_hatBeta1_sparse[b,,])[i] + 1.96*sqrt(varBeta1[i])
}
varBeta1 <- matrix(varBeta1, nrow=length(s), ncol=length(t))
lBoundBeta1 <- matrix(lBoundBeta1, nrow=length(s), ncol=length(t))
uBoundBeta1 <- matrix(uBoundBeta1, nrow=length(s), ncol=length(t))
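## Note (added): the beta2(r,t) surface and its pointwise 95% bounds are extracted
## below in exactly the same way as beta1(s,t): the tensor-product basis of the
## third smooth term is evaluated on the (t,r) grid with PredictMat(), multiplied
## by the corresponding coefficients, with variances taken from the matching block
## of m100$Vp.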
#########---------------- beta2(r,t) ---------------############
term2 <- m100$smooth[[3]]
x = rep(t, rep(length(r), length(t)))
predDat <- data.frame(x = rep(t, rep(length(r), length(t))),
                      y = rep(r, length(t)),
                      by = rep(1, length(x)))   # one row per (t,r) grid point
names(predDat) <- c(term2$term, term2$by)
PX <- PredictMat(term2, dat=predDat)
m100_hatBeta2_sparse[b,,] <- matrix(PX %*% m100$coefficients[term2$first.para:term2$last.para],
                                    nrow=length(r), ncol=length(t))

#######################################################################
######################### Pointwise CI for beta2(r,t) ################
#######################################################################
Sigma_Beta2 <- m100$Vp[term2$first.para:term2$last.para, term2$first.para:term2$last.para]
PX <- as.matrix(PX)
varBeta2 <- matrix(0, nrow=length(r)*length(t), ncol=1)
lBoundBeta2 <- matrix(0, nrow=length(r)*length(t), ncol=1)
uBoundBeta2 <- matrix(0, nrow=length(r)*length(t), ncol=1)
for(i in 1:(length(r)*length(t))){
  varBeta2[i] <- t(PX[i,]) %*% Sigma_Beta2 %*% PX[i,]
  lBoundBeta2[i] <- as.vector(m100_hatBeta2_sparse[b,,])[i] - 1.96*sqrt(varBeta2[i])
  uBoundBeta2[i] <- as.vector(m100_hatBeta2_sparse[b,,])[i] + 1.96*sqrt(varBeta2[i])
}
varBeta2 <- matrix(varBeta2, nrow=length(r), ncol=length(t))
lBoundBeta2 <- matrix(lBoundBeta2, nrow=length(r), ncol=length(t))
uBoundBeta2 <- matrix(uBoundBeta2, nrow=length(r), ncol=length(t))

####################### Coverage and Width ########################
##------------------------- beta0(t) -----------------------------##
for(j in 1:(length(t))){
  if(lBoundBeta0[j] <= trueBeta0[j] && uBoundBeta0[j] >= trueBeta0[j]) covBeta0[b,j] <- 1
}
ICBeta0[b] <- mean(covBeta0[b,])
WI0 <- matrix(0, nrow=1, ncol=length(t))
for(j in 1:(length(t))){ WI0[j] <- uBoundBeta0[j] - lBoundBeta0[j] }
IWBeta0[b] <- mean(WI0)   # mean interval width over all t
##------------------------- beta1(s,t) ----------------------------##
for(i in 1:(length(s))){
  for(j in 1:(length(t))){
    if(lBoundBeta1[i,j] <= trueBeta1[i,j] && uBoundBeta1[i,j] >= trueBeta1[i,j]) covBeta1[b,i,j] <- 1
  }
}
ICBeta1[b] <- mean(covBeta1[b,,])   # mean coverage over all s and t
WI1 <- matrix(0, nrow=length(s), ncol=length(t))   # interval width at each (s,t)
for(i in 1:(length(s))){
  for(j in 1:(length(t))){ WI1[i,j] <- uBoundBeta1[i,j] - lBoundBeta1[i,j] }
}
IWBeta1[b] <- mean(WI1)   # mean interval width over all s and t
##------------------------- beta2(r,t) ----------------------------##
for(i in 1:(length(r))){
  for(j in 1:(length(t))){
    if(lBoundBeta2[i,j] <= trueBeta2[i,j] && uBoundBeta2[i,j] >= trueBeta2[i,j]) covBeta2[b,i,j] <- 1
  }
}
ICBeta2[b] <- mean(covBeta2[b,,])   # mean coverage over all r and t
WI2 <- matrix(0, nrow=length(r), ncol=length(t))   # interval width at each (r,t)
for(i in 1:(length(r))){
  for(j in 1:(length(t))){ WI2[i,j] <- uBoundBeta2[i,j] - lBoundBeta2[i,j] }
}
IWBeta2[b] <- mean(WI2)   # mean interval width over all r and t

###############################################################################
###############################################################################
#################  pfr approach, Goldsmith et al. (2011)  ####################
###############################################################################
###############################################################################
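## Note (added): the comparison method applies the scalar-on-function pfr()
## helper defined at the top of the script separately at each grid point t[i],
## with Y = outcomes[, i] and the stacked functional predictors cbind(LSX1, LSX2).
## The resulting pointwise estimates of beta0, beta1(., t[i]) and beta2(., t[i])
## (and their 95% bounds) are then smoothed across t with gam()/te() below to
## obtain coefficient surfaces comparable to the pffr fit.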
betaHat.MULT_Beta0 <- matrix(nrow=1, ncol=length(t))
betaHat.MULT_Beta0_lb <- matrix(nrow=1, ncol=length(t))
betaHat.MULT_Beta0_ub <- matrix(nrow=1, ncol=length(t))
betaHat.MULT <- matrix(nrow=length(s)+length(r), ncol=length(t))
betaHat.MULT_lb <- matrix(nrow=length(s)+length(r), ncol=length(t))
betaHat.MULT_ub <- matrix(nrow=length(s)+length(r), ncol=length(t))
fitted_pfr <- matrix(nrow=n, ncol=length(t))
for(i in 1:(length(t))){
  fit_pfr <- pfr(Y=outcomes[,i], funcs=cbind(LSX1, LSX2), kz=30, kb=30)
  betaHat.MULT_Beta0[i] <- fit_pfr$beta.covariates
  betaHat.MULT[,i]    <- fit_pfr$betaHat
  betaHat.MULT_ub[,i] <- fit_pfr$Bounds[,1]
  betaHat.MULT_lb[,i] <- fit_pfr$Bounds[,2]
  fitted_pfr[,i] <- fit_pfr$fitted.vals
  # save the gam fit in order to get access to Vp, needed for the CI for beta0
  gam_pfr <- fit_pfr$gam
  betaHat.MULT_Beta0_lb[i] <- fit_pfr$beta.covariates[1] - 1.96*sqrt(gam_pfr$Vp[1,1])
  betaHat.MULT_Beta0_ub[i] <- fit_pfr$beta.covariates[1] + 1.96*sqrt(gam_pfr$Vp[1,1])
} # end loop over t

### smooth Beta0
MULT_Beta0 <- gam(as.vector(betaHat.MULT_Beta0) ~ s(t), method="REML")
term0 <- MULT_Beta0$smooth[[1]]
x = t
predDat <- data.frame(x)
names(predDat) <- c(term0$term)
PX0 <- PredictMat(term0, dat=predDat)
MULT_Beta0_hatBeta0 <- matrix(PX0 %*% MULT_Beta0$coefficients[term0$first.para:term0$last.para],
                              ncol=length(t))
betaHat0.mult[b,] <- MULT_Beta0_hatBeta0 + MULT_Beta0$coefficients[1]

### smooth lb for Beta0
MULT_Beta0 <- gam(as.vector(betaHat.MULT_Beta0_lb) ~ s(t), method="REML")
term0 <- MULT_Beta0$smooth[[1]]
x = t
predDat <- data.frame(x)
names(predDat) <- c(term0$term)
PX0 <- PredictMat(term0, dat=predDat)
MULT_Beta0_hatBeta0_lb <- matrix(PX0 %*% MULT_Beta0$coefficients[term0$first.para:term0$last.para],
                                 ncol=length(t))
betaHat0.mult_lb <- MULT_Beta0_hatBeta0_lb + MULT_Beta0$coefficients[1]

### smooth ub for Beta0
MULT_Beta0 <- gam(as.vector(betaHat.MULT_Beta0_ub) ~ s(t), method="REML")
term0 <- MULT_Beta0$smooth[[1]]
x = t
predDat <- data.frame(x)
names(predDat) <- c(term0$term)
PX0 <- PredictMat(term0, dat=predDat)
MULT_Beta0_hatBeta0_ub <- matrix(PX0 %*% MULT_Beta0$coefficients[term0$first.para:term0$last.para],
                                 ncol=length(t))
betaHat0.mult_ub <- MULT_Beta0_hatBeta0_ub + MULT_Beta0$coefficients[1]

#### Beta1
long_s <- rep(s, length(t))
t_mat <- matrix(0, nrow=length(t), ncol=length(s))
for(i in 1:(length(t))){ t_mat[i,] <- rep(t[i], length(s)) }
long_t <- as.vector(t(t_mat))
MULT_te1 <- gam(as.vector(betaHat.MULT[(1:length(s)),]) ~ te(long_t, long_s), method="REML")
term_pfr_beta1 <- MULT_te1$smooth[[1]]
x = rep(t, rep(length(s), length(t)))
predDat <- data.frame(x = rep(t, rep(length(s), length(t))),
                      y = rep(s, length(t)),
                      by = rep(1, length(x)))
names(predDat) <- c(term_pfr_beta1$term, term_pfr_beta1$by)
PX_pfr_beta1 <- PredictMat(term_pfr_beta1, dat=predDat)
betaHat1.mult[b,,] <- matrix(PX_pfr_beta1 %*% MULT_te1$coefficients[term_pfr_beta1$first.para:term_pfr_beta1$last.para] +
                             MULT_te1$coefficients[1], nrow=length(s), ncol=length(t))

### Beta1 lower bound
MULT_te1 <- gam(as.vector(betaHat.MULT_lb[(1:length(s)),]) ~ te(long_t, long_s), method="REML")
term_pfr_beta1 <- MULT_te1$smooth[[1]]
x = rep(t, rep(length(s), length(t)))
predDat <- data.frame(x = rep(t, rep(length(s), length(t))),
                      y = rep(s, length(t)),
                      by = rep(1, length(x)))
names(predDat) <- c(term_pfr_beta1$term, term_pfr_beta1$by)
PX_pfr_beta1 <- PredictMat(term_pfr_beta1, dat=predDat)
betaHat1.mult_lb <- matrix(PX_pfr_beta1 %*% MULT_te1$coefficients[term_pfr_beta1$first.para:term_pfr_beta1$last.para] +
                           MULT_te1$coefficients[1], nrow=length(s), ncol=length(t))

### Beta1 upper bound
MULT_te1 <- gam(as.vector(betaHat.MULT_ub[(1:length(s)),]) ~ te(long_t, long_s), method="REML")
term_pfr_beta1 <- MULT_te1$smooth[[1]]
x = rep(t, rep(length(s), length(t)))
predDat <- data.frame(x = rep(t, rep(length(s), length(t))),
                      y = rep(s, length(t)),
                      by = rep(1, length(x)))
names(predDat) <- c(term_pfr_beta1$term, term_pfr_beta1$by)
PX_pfr_beta1 <- PredictMat(term_pfr_beta1, dat=predDat)
betaHat1.mult_ub <- matrix(PX_pfr_beta1 %*% MULT_te1$coefficients[term_pfr_beta1$first.para:term_pfr_beta1$last.para] +
                           MULT_te1$coefficients[1], nrow=length(s), ncol=length(t))

#### Beta2
long_r <- rep(r, length(t))
t_mat_r <- matrix(0, nrow=length(t), ncol=length(r))
for(i in 1:(length(t))){ t_mat_r[i,] <- rep(t[i], length(r)) }
long_t_r <- as.vector(t(t_mat_r))
MULT_te2 <- gam(as.vector(betaHat.MULT[(length(s)+1):(length(s)+length(r)),]) ~ te(long_t_r, long_r),
                method="REML")
term_pfr_beta2 <- MULT_te2$smooth[[1]]
x = rep(t, rep(length(r), length(t)))
predDat <- data.frame(x = rep(t, rep(length(r), length(t))),
                      y = rep(r, length(t)),
                      by = rep(1, length(x)))
names(predDat) <- c(term_pfr_beta2$term, term_pfr_beta2$by)
PX_pfr_beta2 <- PredictMat(term_pfr_beta2, dat=predDat)
betaHat2.mult[b,,] <- matrix(PX_pfr_beta2 %*% MULT_te2$coefficients[term_pfr_beta2$first.para:term_pfr_beta2$last.para] +
                             MULT_te2$coefficients[1], nrow=length(r), ncol=length(t))

### Beta2 lower bound
MULT_te2 <- gam(as.vector(betaHat.MULT_lb[(length(s)+1):(length(s)+length(r)),]) ~ te(long_t_r, long_r),
                method="REML")
term_pfr_beta2 <- MULT_te2$smooth[[1]]
x = rep(t, rep(length(r), length(t)))
predDat <- data.frame(x = rep(t, rep(length(r), length(t))),
                      y = rep(r, length(t)),
                      by = rep(1, length(x)))
names(predDat) <- c(term_pfr_beta2$term, term_pfr_beta2$by)
PX_pfr_beta2 <- PredictMat(term_pfr_beta2, dat=predDat)
betaHat2.mult_lb <- matrix(PX_pfr_beta2 %*% MULT_te2$coefficients[term_pfr_beta2$first.para:term_pfr_beta2$last.para] +
                           MULT_te2$coefficients[1], nrow=length(r), ncol=length(t))

### Beta2 upper bound
MULT_te2 <- gam(as.vector(betaHat.MULT_ub[(length(s)+1):(length(s)+length(r)),]) ~ te(long_t_r, long_r),
                method="REML")
term_pfr_beta2 <- MULT_te2$smooth[[1]]
x = rep(t, rep(length(r), length(t)))
predDat <- data.frame(x = rep(t, rep(length(r), length(t))),
                      y = rep(r, length(t)),
                      by = rep(1, length(x)))
names(predDat) <- c(term_pfr_beta2$term, term_pfr_beta2$by)
PX_pfr_beta2 <- PredictMat(term_pfr_beta2, dat=predDat)
betaHat2.mult_ub <- matrix(PX_pfr_beta2 %*% MULT_te2$coefficients[term_pfr_beta2$first.para:term_pfr_beta2$last.para] +
                           MULT_te2$coefficients[1], nrow=length(r), ncol=length(t))

####################### Coverage and Width (pfr) ########################
##------------------------- beta0(t) -----------------------------##
covBeta0_pfr <- matrix(0, nrow=1, ncol=length(t))
for(j in 1:(length(t))){
  if(betaHat0.mult_lb[j] <= trueBeta0[j] && betaHat0.mult_ub[j] >= trueBeta0[j]) covBeta0_pfr[,j] <- 1
}
ICBeta0_pfr[b] <- mean(covBeta0_pfr)
WI0_pfr <- matrix(0, nrow=1, ncol=length(t))
for(j in 1:(length(t))){ WI0_pfr[j] <- betaHat0.mult_ub[j] - betaHat0.mult_lb[j] }
IWBeta0_pfr[b] <- mean(WI0_pfr)   # mean interval width over all t
##------------------------- beta1(s,t) ----------------------------##
covBeta1_pfr <- matrix(0, nrow=length(s), ncol=length(t))
covBeta2_pfr <- matrix(0, nrow=length(r), ncol=length(t))
for(i in 1:(length(s))){
  for(j in 1:(length(t))){
    if(betaHat1.mult_lb[i,j] <= trueBeta1[i,j] && betaHat1.mult_ub[i,j] >= trueBeta1[i,j])
      covBeta1_pfr[i,j] <- 1
  }
}
ICBeta1_pfr[b] <- mean(covBeta1_pfr)   # mean coverage over all s and t
WI1_pfr <- matrix(0, nrow=length(s), ncol=length(t))   # interval width at each (s,t)
for(i in 1:(length(s))){
  for(j in 1:(length(t))){ WI1_pfr[i,j] <- betaHat1.mult_ub[i,j] - betaHat1.mult_lb[i,j] }
}
IWBeta1_pfr[b] <- mean(WI1_pfr)   # mean interval width over all s and t
##------------------------- beta2(r,t) ----------------------------##
for(i in 1:(length(r))){
  for(j in 1:(length(t))){
    if(betaHat2.mult_lb[i,j] <= trueBeta2[i,j] && betaHat2.mult_ub[i,j] >= trueBeta2[i,j])
      covBeta2_pfr[i,j] <- 1
  }
}
ICBeta2_pfr[b] <- mean(covBeta2_pfr)   # mean coverage over all r and t
WI2_pfr <- matrix(0, nrow=length(r), ncol=length(t))   # interval width at each (r,t)
for(i in 1:(length(r))){
  for(j in 1:(length(t))){ WI2_pfr[i,j] <- betaHat2.mult_ub[i,j] - betaHat2.mult_lb[i,j] }
}
IWBeta2_pfr[b] <- mean(WI2_pfr)   # mean interval width over all r and t

######### ISE per simulated dataset (used below for MISE, BIAS^2, VAR) #########
ISE_Beta0[b] <- mean((m100_hatBeta0[b,] - trueBeta0)^2)
ISE_Beta1[b] <- mean((m100_hatBeta1_sparse[b,,] - trueBeta1)^2)
ISE_Beta2[b] <- mean((m100_hatBeta2_sparse[b,,] - trueBeta2)^2)
ISE_Beta0_pfr[b] <- mean((betaHat0.mult[b,] - trueBeta0)^2)
ISE_Beta1_pfr[b] <- mean((betaHat1.mult[b,,] - trueBeta1)^2)
ISE_Beta2_pfr[b] <- mean((betaHat2.mult[b,,] - trueBeta2)^2)

############################ R2 for pffr ######################################
meanoutcomes <- colSums(outcomes)/n
intvec <- rep(meanoutcomes, n)
yvec <- as.vector(t(outcomes))
IR2[b] <- 1 - crossprod(fitted(m100) - yvec)/crossprod(yvec - intvec)

############################ R2 for pfr #######################################
pfrBeta0_mat <- matrix(betaHat0.mult[b,], nrow=n, ncol=length(t), byrow=TRUE)
all_pfr <- pfrBeta0_mat + LSX1 %*% betaHat1.mult[b,,] + LSX2 %*% betaHat2.mult[b,,]
pfr_fitted_outcomes <- all_pfr
yfitted_pfr <- as.vector(t(pfr_fitted_outcomes))
IR2_pfr[b] <- 1 - crossprod(yfitted_pfr - yvec)/crossprod(yvec - intvec)

#################################################################################
#################################################################################
} # end B simulations
#################################################################################
#################################################################################

MIR2 <- mean(IR2)
MIR2_pfr <- mean(IR2_pfr)

########################## Beta 0 ################################
MISE_Beta0 <- mean(ISE_Beta0)   # mean over the B simulations
##### ---------------------- VAR --------------------------#######
E_m100_hatBeta0 <- matrix(0, nrow=1, ncol=length(t))   # mean estimate over the B simulations
for(j in 1:(length(t))){ E_m100_hatBeta0[1,j] <- mean(m100_hatBeta0[,j]) }
IVAR_Beta0 <- matrix(nrow=B, ncol=length(t))
for(b in 1:B){
  for(j in 1:(length(t))){ IVAR_Beta0[b,j] <- (m100_hatBeta0[b,j] - E_m100_hatBeta0[1,j])^2 }
}
MIVAR_Beta0 <- mean(IVAR_Beta0)   # mean over t and B
##### ---------------------- BIAS -------------------------#######
MIBIAS_Beta0 <- mean((E_m100_hatBeta0 - trueBeta0)^2)
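## Note (added): for each coefficient function, the MISE/MIVAR/MIBIAS summaries
## in this part of the script follow the usual bias-variance decomposition of the
## (discretized) integrated squared error: MISE averages the per-dataset ISE over
## the B simulations, MIVAR averages the squared deviation of each estimate from
## the across-simulation mean estimate, and MIBIAS averages the squared difference
## between that mean estimate and the truth, so that MISE = MIVAR + MIBIAS.
## With B = 1, MIVAR is 0 and MISE equals MIBIAS.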
########################## Beta 1 ################################
MISE_Beta1 <- mean(ISE_Beta1)
##### ---------------------- VAR --------------------------#######
E_m100_hatBeta1_sparse <- matrix(0, nrow=length(s), ncol=length(t))
for(i in 1:(length(s))){
  for(j in 1:(length(t))){ E_m100_hatBeta1_sparse[i,j] <- mean(m100_hatBeta1_sparse[,i,j]) }
}
IVAR_Beta1 <- array(0, dim=c(B, length(s), length(t)))
for(b in 1:B){
  for(i in 1:(length(s))){
    for(j in 1:(length(t))){
      IVAR_Beta1[b,i,j] <- (m100_hatBeta1_sparse[b,i,j] - E_m100_hatBeta1_sparse[i,j])^2
    }
  }
}
MIVAR_Beta1 <- mean(IVAR_Beta1)
##### ---------------------- BIAS -------------------------#######
MIBIAS_Beta1 <- mean((E_m100_hatBeta1_sparse - trueBeta1)^2)

########################## Beta 2 ################################
MISE_Beta2 <- mean(ISE_Beta2)
##### ---------------------- VAR --------------------------#######
E_m100_hatBeta2_sparse <- matrix(0, nrow=length(r), ncol=length(t))
for(i in 1:(length(r))){
  for(j in 1:(length(t))){ E_m100_hatBeta2_sparse[i,j] <- mean(m100_hatBeta2_sparse[,i,j]) }
}
IVAR_Beta2 <- array(0, dim=c(B, length(r), length(t)))
for(b in 1:B){
  for(i in 1:(length(r))){
    for(j in 1:(length(t))){
      IVAR_Beta2[b,i,j] <- (m100_hatBeta2_sparse[b,i,j] - E_m100_hatBeta2_sparse[i,j])^2
    }
  }
}
MIVAR_Beta2 <- mean(IVAR_Beta2)
##### ---------------------- BIAS -------------------------#######
MIBIAS_Beta2 <- mean((E_m100_hatBeta2_sparse - trueBeta2)^2)

########################## Beta 0 pfr ################################
MISE_Beta0_pfr <- mean(ISE_Beta0_pfr)   # mean over the B simulations
##### ---------------------- VAR --------------------------#######
E_pfr_hatBeta0 <- matrix(0, nrow=1, ncol=length(t))   # mean estimate over the B simulations
for(j in 1:(length(t))){ E_pfr_hatBeta0[1,j] <- mean(betaHat0.mult[,j]) }
IVAR_Beta0_pfr <- matrix(nrow=B, ncol=length(t))
for(b in 1:B){
  for(j in 1:(length(t))){ IVAR_Beta0_pfr[b,j] <- (betaHat0.mult[b,j] - E_pfr_hatBeta0[1,j])^2 }
}
MIVAR_Beta0_pfr <- mean(IVAR_Beta0_pfr)   # mean over t and B
##### ---------------------- BIAS -------------------------#######
MIBIAS_Beta0_pfr <- mean((E_pfr_hatBeta0 - trueBeta0)^2)

########################## Beta 1 pfr ################################
MISE_Beta1_pfr <- mean(ISE_Beta1_pfr)
##### ---------------------- VAR --------------------------#######
E_pfr_hatBeta1 <- matrix(0, nrow=length(s), ncol=length(t))
for(i in 1:(length(s))){
  for(j in 1:(length(t))){ E_pfr_hatBeta1[i,j] <- mean(betaHat1.mult[,i,j]) }
}
IVAR_Beta1_pfr <- array(0, dim=c(B, length(s), length(t)))
for(b in 1:B){
  for(i in 1:(length(s))){
    for(j in 1:(length(t))){
      IVAR_Beta1_pfr[b,i,j] <- (betaHat1.mult[b,i,j] - E_pfr_hatBeta1[i,j])^2
    }
  }
}
MIVAR_Beta1_pfr <- mean(IVAR_Beta1_pfr)
##### ---------------------- BIAS -------------------------#######
MIBIAS_Beta1_pfr <- mean((E_pfr_hatBeta1 - trueBeta1)^2)

########################## Beta 2 pfr ################################
MISE_Beta2_pfr <- mean(ISE_Beta2_pfr)
##### ---------------------- VAR --------------------------#######
E_pfr_hatBeta2 <- matrix(0, nrow=length(r), ncol=length(t))
for(i in 1:(length(r))){
  for(j in 1:(length(t))){ E_pfr_hatBeta2[i,j] <- mean(betaHat2.mult[,i,j]) }
}
IVAR_Beta2_pfr <- array(0, dim=c(B, length(r), length(t)))
for(b in 1:B){
  for(i in 1:(length(r))){
    for(j in 1:(length(t))){
      IVAR_Beta2_pfr[b,i,j] <- (betaHat2.mult[b,i,j] - E_pfr_hatBeta2[i,j])^2
    }
  }
}
MIVAR_Beta2_pfr <- mean(IVAR_Beta2_pfr)
##### ---------------------- BIAS -------------------------#######
MIBIAS_Beta2_pfr <- mean((E_pfr_hatBeta2 - trueBeta2)^2)

##################################################################
## pointwise coverage proportions across the B simulations
McovBeta0 <- matrix(nrow=1, ncol=length(t))
for(j in 1:(length(t))){ McovBeta0[1,j] <- mean(covBeta0[,j]) }
McovBeta1 <- matrix(nrow=length(s), ncol=length(t))
for(i in 1:(length(s))){
  for(j in 1:(length(t))){ McovBeta1[i,j] <- mean(covBeta1[,i,j]) }
}
McovBeta2 <- matrix(nrow=length(r), ncol=length(t))
for(i in 1:(length(r))){
  for(j in 1:(length(t))){ McovBeta2[i,j] <- mean(covBeta2[,i,j]) }
}
#### pfr (note: covBeta1_pfr and covBeta2_pfr hold the pointwise coverage
#### indicators from the last simulated dataset only)
McovBeta1_pfr <- matrix(nrow=length(s), ncol=length(t))
for(i in 1:(length(s))){
  for(j in 1:(length(t))){ McovBeta1_pfr[i,j] <- mean(covBeta1_pfr[i,j]) }
}
McovBeta2_pfr <- matrix(nrow=length(r), ncol=length(t))
for(i in 1:(length(r))){
  for(j in 1:(length(t))){ McovBeta2_pfr[i,j] <- mean(covBeta2_pfr[i,j]) }
}
##################################################################
IACBeta0 <- mean(ICBeta0)
IACBeta1 <- mean(ICBeta1)
IACBeta2 <- mean(ICBeta2)
IACBeta0_pfr <- mean(ICBeta0_pfr)
IACBeta1_pfr <- mean(ICBeta1_pfr)
IACBeta2_pfr <- mean(ICBeta2_pfr)
IAWBeta0 <- mean(IWBeta0)
IAWBeta1 <- mean(IWBeta1)
IAWBeta2 <- mean(IWBeta2)
IAWBeta0_pfr <- mean(IWBeta0_pfr)
IAWBeta1_pfr <- mean(IWBeta1_pfr)
IAWBeta2_pfr <- mean(IWBeta2_pfr)
##################################################################
Results <- matrix(c(MISE_Beta0, MIVAR_Beta0, MIBIAS_Beta0,
                    MISE_Beta1, MIVAR_Beta1, MIBIAS_Beta1,
                    MISE_Beta2, MIVAR_Beta2, MIBIAS_Beta2,
                    MISE_Beta0_pfr, MIVAR_Beta0_pfr, MIBIAS_Beta0_pfr,
                    MISE_Beta1_pfr, MIVAR_Beta1_pfr, MIBIAS_Beta1_pfr,
                    MISE_Beta2_pfr, MIVAR_Beta2_pfr, MIBIAS_Beta2_pfr),
                  nrow=6, ncol=3, byrow=TRUE)
colnames(Results) <- c("MISE", "MIVAR", "MIBIAS")
rownames(Results) <- c("Beta0", "Beta1", "Beta2", "Beta0_pfr", "Beta1_pfr", "Beta2_pfr")
CI_Results <- matrix(c(IACBeta0, IACBeta1, IACBeta2,
                       IAWBeta0, IAWBeta1, IAWBeta2,
                       IACBeta0_pfr, IACBeta1_pfr, IACBeta2_pfr,
                       IAWBeta0_pfr, IAWBeta1_pfr, IAWBeta2_pfr),
                     nrow=3, ncol=4, byrow=FALSE)
colnames(CI_Results) <- c("IAC", "IAW", "IAC_pfr", "IAW_pfr")
rownames(CI_Results) <- c("Beta0", "Beta1", "Beta2")
##################################################################
##################################################################
round(sqrt(Results)*1000, 2)
round(CI_Results, 2)
##################################################################
##################################################################
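## Note (added): the last two lines print the summary tables. Results reports the
## square roots of MISE, MIVAR and MIBIAS multiplied by 1000 (root integrated
## errors on a x1000 scale) for the pffr-type fit (rows Beta0-Beta2) and the
## pointwise pfr comparison (rows Beta0_pfr-Beta2_pfr). CI_Results reports the
## average empirical coverage (IAC) and average width (IAW) of the pointwise 95%
## confidence bands for both approaches.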