# R. Chris Fraley | Oct 15, 2003

# Estimate statistical power for a linear regression model

#  with two predictors and their (optional) interaction term

 

# Plug in hypothesized values and simulation settings below

# Everything in this section can be set by the user

 

b1 <- .3             # standardized beta weight for first predictor

b2 <- .2             # standardized beta weight for second predictor

b3 <- .1             # standardized beta weight for interaction term

n <- 100             # sample size

trials <- 100        # number of simulation trials; more trials give a more stable power estimate

r <- .3              # correlation between first and second predictor variables
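
# Optional (not part of the original script): uncomment the line below
#  for reproducible results across runs
# set.seed(1234)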

 

#---------------------------------------------

# Nothing below this line needs to be changed by the user

# Initialize vectors to hold the p values from each trial

 

p.vec1<-rep(99,trials)

p.vec2<-rep(99,trials)

p.vec3<-rep(99,trials)

 

# simulation of sampling process

 

for(i in 1:trials){

   # create standardized predictor scores that share a common factor, f1,
   #  so that x1 and x2 correlate about r on average across trials
   #  (the weights sqrt(r) and sqrt(1-r) give this; the construction
   #  assumes r >= 0)

   f1 <- scale(rnorm(n))

   x1 <- scale(sqrt(r)*f1 + sqrt(1-r)*rnorm(n))

   x2 <- scale(sqrt(r)*f1 + sqrt(1-r)*rnorm(n))

  

   # generate interaction term

   x3 <- scale(x1*x2)
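   #  (x3 is the standardized product of x1 and x2, so b3 below is applied to
   #   a unit-variance interaction term)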

  

   # generate error term

   rMat<-cor(cbind(x1,x2,x3))

   bvec<-c(b1,b2,b3)

   varMat<-t(bvec)%*%rMat%*%(bvec)   # variance explained by predictors

   errorWeight <- as.numeric(sqrt(1 - varMat) )
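   #  (errorWeight scales the noise so that the simulated y values below have
   #   a variance of approximately 1, keeping b1, b2, and b3 interpretable as
   #   standardized weights)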

   error<-scale(rnorm(n)) 

  

   # generate y values (as.numeric() drops the matrix structure left over
   #  from scale() so that lm() below fits an ordinary univariate regression)

   y <- as.numeric(b1*x1 + b2*x2 + b3*x3 + errorWeight*error)

  

   # estimate regression parameters and get p values for coefficients

   results<-summary(lm(y~x1+x2+x3))$coeff
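   #  (results is the coefficient table; rows 2-4 are x1, x2, and x3, and
   #   column 4 holds the two-tailed p values)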

   p.vec1[i]<-results[2,4]

   p.vec2[i]<-results[3,4]

   p.vec3[i]<-results[4,4]

 

}

 

# Print the estimated power for each effect (the proportion of trials with p <= .05)

 

cat("Power for a main effect of first predictor","\n")

print(table(p.vec1<=.05)/trials)

 

cat("Power for a main effect of second predictor","\n")

print(table(p.vec2<=.05)/trials)

 

cat("Power for an interaction between two predictors","\n")

print(table(p.vec3<=.05)/trials)
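
#---------------------------------------------

# Optional extension (not part of the original script): a minimal sketch
#  that wraps the simulation above in a function so that power can be
#  estimated for several sample sizes in one call. The function name
#  power.sim and its argument defaults are illustrative choices only.

power.sim <- function(n, b1 = .3, b2 = .2, b3 = .1, r = .3, trials = 100){
   p1 <- p2 <- p3 <- rep(NA, trials)
   for(i in 1:trials){
      f1 <- scale(rnorm(n))
      x1 <- scale(sqrt(r)*f1 + sqrt(1-r)*rnorm(n))
      x2 <- scale(sqrt(r)*f1 + sqrt(1-r)*rnorm(n))
      x3 <- scale(x1*x2)
      bvec <- c(b1, b2, b3)
      rMat <- cor(cbind(x1, x2, x3))
      errorWeight <- as.numeric(sqrt(1 - t(bvec) %*% rMat %*% bvec))
      y <- as.numeric(b1*x1 + b2*x2 + b3*x3 + errorWeight*scale(rnorm(n)))
      pvals <- summary(lm(y ~ x1 + x2 + x3))$coefficients[2:4, 4]
      p1[i] <- pvals[1]
      p2[i] <- pvals[2]
      p3[i] <- pvals[3]
   }
   c(power.x1 = mean(p1 <= .05),
     power.x2 = mean(p2 <= .05),
     power.x3 = mean(p3 <= .05))
}

# Example usage: estimate power at several candidate sample sizes
# sapply(c(100, 200, 400), power.sim)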