Package gregmisc is no longer available; it is replaced by gplots in this document. Package Rassoc is also no longer available, so all of its functions were copied from the archived version.

Some bugs in the copied functions were corrected.

checkpackage<-function(searching, ifnotfound="stop", repos="http://cran.md.tsukuba.ac.jp/"){

  ## Check that every package named in `searching` (character vector) can be loaded.
  ## If a package is missing:
  ##   "install"  - install it from `repos`, then try to load it again;
  ##   "stop"     - return FALSE immediately;
  ##   otherwise  - record the failure and keep checking the rest.
  ## Returns TRUE when all packages loaded, FALSE otherwise.

  res<-TRUE
  for(i in seq_along(searching)){
    ## Try to load the package; logical.return=TRUE makes library()
    ## return FALSE instead of raising an error when it is missing.
    isloaded<-library(searching[i], logical.return=TRUE, character.only=TRUE)
    if(isloaded){
      print(paste(c(searching[i], " is found"), collapse=""))
    }else{
      print(paste(c(searching[i], " is NOT found"), collapse=""))
      if(ifnotfound=="install"){
        install.packages(searching[i], repos=repos)
        isloaded<-library(searching[i], logical.return=TRUE, character.only=TRUE)
        if(!isloaded){
          print(paste(c("could not install ", searching[i]), collapse=""))
          return(FALSE)
        }
      }else if(ifnotfound=="stop"){
        return(FALSE)
      }else{
        ## Any other value of ifnotfound: remember the failure and continue.
        res<-FALSE
      }
    }
  }

  if(ifnotfound=="install"){
    print("all packages were successfully installed!!")
  }
  return(res)
}

# Load (and install if needed) every package used in this document;
# `a` is TRUE when all of them are available.
#a<-checkpackage(c("ape","binom","bnlearn","clinfun","evd","gregmisc","gtools","MCMCpack","phangorn","Rassoc","rmeta"),ifnotfound="install")
a<-checkpackage(c("ape","binom","bnlearn","clinfun","evd","gplots","gtools","MCMCpack","phangorn","rmeta"),ifnotfound="install")
## [1] "ape is found"
## [1] "binom is found"
## [1] "bnlearn is found"
## [1] "clinfun is found"
## [1] "evd is found"
## [1] "gplots is found"
## [1] "gtools is found"
## [1] "MCMCpack is found"
## [1] "phangorn is found"
## [1] "rmeta is found"
## [1] "all packages were successfully installed!!"

The Rassoc package is no longer maintained on CRAN. You can get the archived version from http://cran.r-project.org/web/packages/Rassoc/index.html .

The following functions are copy-pasted from the archived version.

# Rassoc's functions
## This function conducts the allelic based test (ABT) to a given case-control table. ##
## The ABT detects association by comparing the allele frequencies between cases and controls. ##
## Under the null hypothesis of no association, the ABT follows the standard normal distribution N(0,1). ##
## The ABT needs the Hardy-Weinberg Equilibrium (HWE) proportions to hold in the population. ##  
## The ABT calculates the statistic and associated p-value as well as reporting the conclusion of the hypothesis test. ## 
## data is a 2 by 3 contingency table for analysis. ##
## the rows of the data represent disease status and the columns represent genotypes. ## 

ABT <-
function(data){
  DNAME=deparse(substitute(data))
  ## --- Input validation: a finite, non-negative 2 x 3 table. ---
  if(any(is.na(data)==TRUE)||any(abs(data)>10^9))
    stop("data must be a 2 by 3 table without infinite and missing values.")
  if(any(data<0))
    stop("all entries of data must be non-negative.")
  if((dim(data)[1]!=2)||(dim(data)[2]!=3))
    stop("data must be a 2 by 3 table.")
  ## Continuity correction: if any cell is zero, add 0.5 everywhere.
  if(any(data<0.5)){
    warning("At least one cell of the table is zero.")
    data=data+0.5
  }
  ## Genotype counts for cases, controls, and both combined.
  cases=data[1,]
  controls=data[2,]
  totals=cases+controls
  nCase=sum(cases)
  nCtrl=sum(controls)
  nAll=sum(totals)
  ## Estimated risk-allele frequencies (column 3 = homozygote, 2 = heterozygote).
  pCase=(2*cases[3]+cases[2])/(2*nCase)
  pCtrl=(2*controls[3]+controls[2])/(2*nCtrl)
  pNull=(2*totals[3]+totals[2])/(2*nAll)
  ## z statistic: allele-frequency difference over its null standard error.
  diffp=pCase-pCtrl
  nullvar=pNull*(1-pNull)*(1/(2*nCase)+1/(2*nCtrl))
  zstat=diffp/sqrt(nullvar)
  ## Two-sided p-value via z^2 ~ chi-square with 1 df.
  pval=1-pchisq(zstat^2,df=1)
  names(zstat)="statistic"
  structure(list(statistic=zstat,p.value=pval,method="The allelic based test",data.name=DNAME),class="htest")

}



## Example 
## ca=c(139,249,112) 
## co=c(136,244,120) 
## a=rbind(ca,co) 
## ABT(a) 
## "The allelic based test" 
## data: a 
## statistic=-0.4924, p-value=0.6224

## This function conducts the Cochran-Armitage trend test (CATT) to a given case-control table. ## 
## The CATT detects association by comparing the genotype frequencies between cases and controls. ##
## Under the null hypothesis of no association, the CATT follows the standard normal distribution N(0,1). ##
## The CATT calculates the statistic and associated p-value as well as reporting the conclusion of the hypothesis test. ## 
## data is a 2 by 3 contingency table for analysis. ##
## the rows of the data represent disease status and the columns represent genotypes. ## 
## x is the score for the CATT. It can be any real number between 0 and 1. ##
## Specifically, x=0, 0.5 and 1 are optimal for REC, MUL/ADD and DOM models respectively. ##

CATT <-
function(data,x=0.5){
  score=c(0,x,1)
  DNAME=deparse(substitute(data))
  ## --- Input validation: a finite, non-negative 2 x 3 table. ---
  if(any(is.na(data)==TRUE)||any(abs(data)>10^9))
    stop("data must be a 2 by 3 table without infinite and missing values.")
  if(any(data<0))
    stop("all entries of data must be non-negative.")
  if((dim(data)[1]!=2)||(dim(data)[2]!=3))
    stop("data must be a 2 by 3 table.")
  ## Continuity correction: if any cell is zero, add 0.5 everywhere.
  if(any(data<0.5)){
    warning("At least one cell of the table is zero.")
    data=data+0.5
  }
  ## Cochran-Armitage trend statistic with genotype scores (0, x, 1).
  colTot=data[1,]+data[2,]
  nAll=sum(colTot)
  scoreMean=sum(colTot*score)/nAll
  scoreVar=sum(colTot*(score-scoreMean)^2)
  caseFrac=sum(data[1,])/nAll
  zstat=sum(data[1,]*(score-scoreMean))/sqrt(caseFrac*(1-caseFrac)*scoreVar)
  ## Two-sided p-value via z^2 ~ chi-square with 1 df.
  pval=1-pchisq(zstat^2,df=1)
  names(zstat)="statistic"
  structure(list(statistic=zstat,p.value=pval,method="The Cochran-Armitage trend test",data.name=DNAME),class="htest")

}

## Example 
## ca=c(139,249,112) 
## co=c(136,244,120) 
## a=rbind(ca,co) 
## CATT(a,0.5) 
## the Cochran-Armitage trend test
## data:  a 
## statistic = -0.4894, p-value = 0.6245

## This function conducts the genetic model selection (GMS) to a given case-control table. ##
## The GMS calculates the statistic and associated p-value as well as reporting the conclusion of the hypothesis test. ##
## GMS is a two-phase adaptive test. ##
## In the first phase, the underlying genetic model is selected using the Hardy-Weinberg disequilibrium trend test (HWDTT). ##
## In the second phase, the CATT with the selected genetic model is applied to test for association. ##
## CATT(0.5) is used to detect the risk allele. ##
## data is a 2 by 3 contingency table for analysis. ##
## The rows of the data represent disease status and the columns represent genotypes. ## 
## method is the method for calculating the p-values of MAX3. ##
## "boot" represents the parametric bootstrap method. ##
## "bvn" represents the simulation from the bivariate normal distribution. ##
## "asy" represents the asymptotic null distribution method. ##
## m is the number of replicates for "boot" and "bvn". ##
## m can be any positive integer for "asy". ##
## m MUST be a positive integer ##

## Genetic model selection (GMS) test for a 2 by 3 case-control table.
## Phase 1: the Hardy-Weinberg disequilibrium trend test (HWDTT) selects
##   the underlying genetic model (REC / ADD / DOM).
## Phase 2: the CATT optimal for the selected model tests for association.
## method: "asy" (asymptotic), "bvn" (simulation from a bivariate normal)
##   or "boot" (parametric bootstrap); m: number of replicates for bvn/boot.
## NOTE(review): pmvnorm()/rmvnorm() come from the mvtnorm package, which
##   must be attached by the caller -- confirm it is loaded before use.
## Returns an object of class "htest".
GMS <-
function(data,method="asy",m=1){
  DNAME=deparse(substitute(data))
  ## Check if data is a 2 by 3 contingency table. ##
  if(any(is.na(data)==TRUE)||any(abs(data)>10^9)) 
  stop("data must be a 2 by 3 table without infinite and missing values.")
  if(any(data<0))
  stop("all entries of data must be non-negative.")
  if((dim(data)[1]!=2)||(dim(data)[2]!=3))
  stop("data must be a 2 by 3 table.")
  ## Continuity correction: add 0.5 to every cell when any cell is zero. ##
  if((data[1,1]<0.5)||(data[1,2]<0.5)||(data[1,3]<0.5)||(data[2,1]<0.5)||(data[2,2]<0.5)||(data[2,3]<0.5)){
  warning("At least one cell of the table is zero.")
  data=data+matrix(rep(0.5,6),nrow=2)
  }
  ## Check if m is integer. ##
  ## NOTE(review): only fractional m is rejected; negative whole numbers pass -- confirm intended. ##
  if ((trunc(m)-m)<0)
  stop("m MUST be integer")
  ## Define CATT: trend statistic of `tab` for the given genotype scores. ##
  CATT=function(tab,score){
    n=sum(tab)
    nr=apply(tab,2,sum)
    Rbar=sum(nr*score)/n
    s2=sum(nr*(score-Rbar)^2)
    phi=sum(tab[1,])/n
    catt_v=sum(tab[1,]*(score-Rbar))/sqrt(phi*(1-phi)*s2)
    ## Report the statistic of CATT. ##
    return(catt_v)
  }
  ## Build the index function (logical condition -> 0/1 indicator). ##
  index=function(a){
    if(a=="TRUE"){
      b=1
    }
    else{
      b=0
    }
    return(b)
  }
  ## Define the HWDTT (Hardy-Weinberg disequilibrium trend test). ##
  HWDTT=function(tab){
    n=sum(tab)
    r=sum(tab[1,])
    s=sum(tab[2,])
    nn=apply(tab,2,sum)
    pr=tab[1,]/r
    ps=tab[2,]/s
    pn=nn/n
    ## HWD coefficients in cases (deltap) and controls (deltaq). ##
    deltap=pr[3]-(pr[3]+0.5*pr[2])^2
    deltaq=ps[3]-(ps[3]+0.5*ps[2])^2
    u=sqrt(r*s/n)*(deltap-deltaq)
    v1=1-pn[3]-0.5*pn[2]
    v2=pn[3]+0.5*pn[2]
    ## Report the statistic of HWDTT. ##
    return(u/(v1*v2))
  }
  ## Build the GMS function: pick the CATT matching the model chosen
  ## by the sign of catt05 and the HWDTT statistic (threshold c0). ##
  REAL=function(data){
    catt0=CATT(data,c(0,0,1))
    catt05=CATT(data,c(0,0.5,1))
    catt1=CATT(data,c(0,1,1))
    se=HWDTT(data)
    c0=qnorm(0.95)
    a1=catt0*index((catt05>0)&&(se>c0))+catt1*index((catt05>0)&&(se<(-c0)))+catt05*index((catt05>0)&&(abs(se)<=c0))
    a2=-catt1*index((catt05<=0)&&(se>c0))-catt0*index((catt05<=0)&&(se<(-c0)))-catt05*index((catt05<=0)&&(abs(se)<=c0))
    return(a1+a2)  
  }
  ## GMS1: same selection rule applied to a precomputed statistic vector
  ## (catt0, catt1, catt05, se) -- used by the "bvn" replicates. ##
  GMS1=function(data){
    c0=qnorm(0.95)
    catt0=data[1]
    catt1=data[2]
    catt05=data[3]
    se=data[4]
    a1=catt0*index((catt05>0)&&(se>c0))+catt1*index((catt05>0)&&(se<(-c0)))+catt05*index((catt05>0)&&(abs(se)<=c0))
    a2=-catt1*index((catt05<=0)&&(se>c0))-catt0*index((catt05<=0)&&(se<(-c0)))-catt05*index((catt05<=0)&&(abs(se)<=c0))
    return(a1+a2)
  }
  ## GMS2: selection rule applied to a length-6 vector laid out row-wise
  ## as a 2x3 table -- used by the "boot" replicates. ##
  GMS2=function(data){
    c0=qnorm(0.95)
    tab=matrix(data,nrow=2,byrow=TRUE)
    catt0=CATT(tab,c(0,0,1))
    catt05=CATT(tab,c(0,0.5,1))
    catt1=CATT(tab,c(0,1,1))
    se=HWDTT(tab)
    a1=catt0*index((catt05>0)&&(se>c0))+catt1*index((catt05>0)&&(se<(-c0)))+catt05*index((catt05>0)&&(abs(se)<=c0))
    a2=-catt1*index((catt05<=0)&&(se>c0))-catt0*index((catt05<=0)&&(se<(-c0)))-catt05*index((catt05<=0)&&(abs(se)<=c0))
    return(a1+a2)
  }
  ## Caculate the correlations (null correlations among the statistics). ##
  MAF=sum(data[,3])/sum(data)+0.5*(sum(data[,2])/sum(data))
  rho01=sqrt(MAF*(1-MAF)/((1+MAF)*(2-MAF)))
  rho005=sqrt(2*(MAF)/(1+MAF))
  rho051=sqrt(2*(1-MAF)/(2-MAF))
  rho0=sqrt((1-MAF)/(1+MAF))
  rho1=-sqrt(MAF/(2-MAF))
  omega0=sqrt(MAF*(1+MAF)/2)
  omega1=sqrt((1-MAF)*(2-MAF)/2)
  phi0=(rho0-rho1*rho01)/(1-rho01^2)
  phi1=(rho1-rho0*rho01)/(1-rho01^2)
  p=c(sum(data[,1]),sum(data[,2]),sum(data[,3]))/sum(data)
  r=sum(data[1,])
  s=sum(data[2,])
  ## Report the statistic of GMS. ##
  real=REAL(data)
  ## Define the asymptotic formula (uses mvtnorm's pmvnorm). ##
  ASY=function(t){
    c0=qnorm(0.95)
    s1=matrix(c(1,0,rho005,0,1,rho0,rho005,rho0,1),nrow=3)
    s2=matrix(c(1,0,rho051,0,1,-rho1,rho051,-rho1,1),nrow=3)
    l1=pmvnorm(lower=-Inf,upper=c(0,-c0,-t),mean=c(0,0,0),sigma=s1)[1] 
    l2=pmvnorm(lower=-Inf,upper=c(0,-c0,-t),mean=c(0,0,0),sigma=s2)[1]
    a=2*(l1+l2)+1.8*pnorm(min(0,-t))
    return(a)  
  }
  if((method!="boot")&&(method!="bvn")&&(method!="asy"))
  stop("method must be boot, bvn or asy")
  ## Use "asy" to caculate the p-value of GMS. ##
  if(method=="asy"){
    ## Report asymptotic p-value using "asy" method. ##
    b=ASY(real)
    md="The GMS test using the asy method"
  }
  ## Use "bvn" to caculate the p-value of GMS (rmvnorm from mvtnorm). ##
  if(method=="bvn"){
    BV05=function(x,a1,a2){
      return(a1*x[1]+a2*x[2])
    }
    bv01=rmvnorm(m,mean=c(0,0),sigma=matrix(c(1,rho01,rho01,1),nrow=2))
    bv05=apply(bv01,1,BV05,a1=omega0,a2=omega1)
    bvh=apply(bv01,1,BV05,a1=phi0,a2=phi1)
    bv=cbind(bv01,bv05,bvh)
    gmstt1=apply(bv,1,GMS1)
    ## Report empirical p-value using "bvn" method. ##
    b=length(gmstt1[gmstt1>real])/m
    md="The GMS test using the bvn method"   
  }
  ## Use "boot" to caculate the p-value of GMS. ##
  if(method=="boot"){
    ca=rmultinom(m,r,p)
    co=rmultinom(m,s,p)
    caco=rbind(ca,co)
    gmstt2=apply(caco,2,GMS2)
    ## Report empirical p-value using "boot" method. ##
    b=length(gmstt2[gmstt2>real])/m
    md="The GMS test using the boot method"    
  }
  ## print the output. ##
  names(real)="statistic"
  structure(list(statistic=real,p.value=b,method=md,data.name=DNAME),class="htest")

}

## Examples 
## ca=c(139,249,112) 
## co=c(136,244,120) 
## a=rbind(ca,co) 
## GMS(a,"boot",100000.5) 
## Error in GMS(a, "boot", 100000.5) : warning: m MUST be integer 
## GMS(a,"hansi",10000)
## Error in GMS(a, "hansi", 10000) : method must be boot, bvn or asy

## GMS(a,"boot",100000) 
## The GMS test using the boot method
## data:  a 
## statistic = 0.4894, p-value = 0.6658

## GMS(a,"bvn",100000)
## The GMS test using the bvn method
## data:  a 
## statistic = 0.4894, p-value = 0.6598

## GMS(a,"asy",100000)
## The GMS test using the asy method
## data:  a 
## statistic = 0.4894, p-value = 0.6621


## This function conducts the MAX3 to a given case-control table. ##
## MAX3 takes the maximum of the absolute values of the three CATTs respectively optimal for the REC, ADD and DOM models. ##
## The MAX3 calculates the statistic and associated p-value as well as reporting the conclusion of the hypothesis test. ##
## data is a 2 by 3 contingency table for analysis. ##
## The rows of the data represent disease status and the columns represent genotypes. ## 
## method is the method for calculating the p-values of MAX3. ##
## "boot" represents the parametric bootstrap method. ##
## "bvn" represents the simulation from the bivariate normal distribution. ##
## "asy" represents the asymptotic null distribution method. ##
## m is the number of replicates for "boot" and "bvn". ##
## m can be any positive integer for "asy". ##
## m MUST be a positive integer ##

## MAX3 test for a 2 by 3 case-control table: the maximum of the absolute
## values of the three CATTs optimal for the REC, ADD and DOM models.
## method: "boot" (parametric bootstrap), "bvn" (simulation from a
## bivariate normal) or "asy" (asymptotic); m: number of replicates for
## boot/bvn (ignored by "asy" apart from the integer check).
## NOTE(review): the "bvn" branch calls rmvnorm() from the mvtnorm
## package, which must be attached by the caller -- confirm it is loaded.
## Returns an object of class "htest".
MAX3 <-
function(data,method="asy",m=1){
  DNAME=deparse(substitute(data))
  ## Check if data is a 2 by 3 contingency table. ##
  if(any(is.na(data)==TRUE)||any(abs(data)>10^9))
  stop("data must be a 2 by 3 table without infinite and missing values.")
  if(any(data<0))
  stop("all entries of data must be non-negative.")
  if((dim(data)[1]!=2)||(dim(data)[2]!=3))
  stop("data must be a 2 by 3 table.")
  ## Continuity correction: add 0.5 to every cell when any cell is zero. ##
  if((data[1,1]<0.5)||(data[1,2]<0.5)||(data[1,3]<0.5)||(data[2,1]<0.5)||(data[2,2]<0.5)||(data[2,3]<0.5)){
  warning("At least one cell of the table is zero.")
  data=data+matrix(rep(0.5,6),nrow=2)
  }
  ## Check if m is integer. ##
  ## NOTE(review): only fractional m is rejected; negative whole numbers pass -- confirm intended. ##
  if ((trunc(m)-m)<0)
  stop("m MUST be integer")
  ## Caculate the correlations (null correlations among the three CATTs). ##
  p=apply(data,2,sum)/sum(data)
  p0=p[1]
  p1=p[2]
  p2=p[3]
  rho005=p2*(p1+2*p0)/(sqrt(p2*(1-p2))*sqrt((p1+2*p2)*p0+(p1+2*p0)*p2))
  rho105=p0*(p1+2*p2)/(sqrt(p0*(1-p0))*sqrt((p1+2*p2)*p0+(p1+2*p0)*p2))
  rho01=sqrt((p0*p2)/((1-p0)*(1-p2)))
  ## Weights expressing the ADD-model CATT through the REC and DOM ones. ##
  w0=(rho005-rho01*rho105)/(1-rho01^2)
  w1=(rho105-rho01*rho005)/(1-rho01^2)
  r=sum(data[1,])
  s=sum(data[2,])
  ## Define CATT: trend statistic of `tab` for the given genotype scores. ##
  CATT=function(tab,score){
    nr=apply(tab,2,sum)
    n=sum(nr)
    Rbar=sum(nr*score)/n
    s2=sum(nr*(score-Rbar)^2)
    phi=sum(tab[1,])/n
    catt_v=sum(tab[1,]*(score-Rbar))/sqrt(phi*(1-phi)*s2)
    ## Report the statistic of CATT. ##
    return(catt_v)
  }
  ## Report the statistic of MAX3. ##
  maxr=max(abs(CATT(data,c(0,0,1))),abs(CATT(data,c(0,0.5,1))),abs(CATT(data,c(0,1,1))))
  if((method!="boot")&&(method!="bvn")&&(method!="asy"))
  stop("method must be boot, bvn or asy.")
  ## Use "boot" to caculate the p-value of MAX3. ##
  if(method=="boot"){
    ## CATTN: CATT on a length-6 vector laid out row-wise as a 2x3 table. ##
    CATTN=function(data,score){
      tab=matrix(data,nrow=2,byrow=TRUE)
      nr=apply(tab,2,sum)
      n=sum(nr)
      Rbar=sum(nr*score)/n
      s2=sum(nr*(score-Rbar)^2)
      phi=sum(tab[1,])/n
      catt_v=sum(tab[1,]*(score-Rbar))/sqrt(phi*(1-phi)*s2)
      return(catt_v)
    }
    ## Bootstrap case/control tables under the null genotype frequencies p. ##
    ca=rmultinom(m,r,p)
    co=rmultinom(m,s,p)
    caco=rbind(ca,co)
    caco0=apply(caco,2,CATTN,score=c(0,0,1))
    caco05=apply(caco,2,CATTN,score=c(0,0.5,1))
    caco1=apply(caco,2,CATTN,score=c(0,1,1))
    cacos=rbind(abs(caco0),abs(caco05),abs(caco1))
    max1=apply(cacos,2,max)
    ## Report empirical p-value using "boot" method. ##
    pv=length(max1[max1>=maxr])/m
    md="The MAX3 test using the boot method"
   
  }
  ## Use "bvn" to caculate the p-value of MAX3 (rmvnorm from mvtnorm). ##
  if(method=="bvn"){
    BV05=function(x,a1,a2){
      return(a1*x[1]+a2*x[2])
    }
    bv01=rmvnorm(m,mean=c(0,0),sigma=matrix(c(1,rho01,rho01,1),nrow=2))
    bv05=apply(bv01,1,BV05,a1=w0,a2=w1)
    bv=cbind(abs(bv01),abs(bv05))
    max2=apply(bv,1,max)
    ## Report empirical p-value using "bvn" method. ##
    pv=length(max2[max2>=maxr])/m
    md="The MAX3 test using the bvn method"
    
  }
  ## Use "asy" to caculate the p-value of MAX3 (numerical integration). ##
  if(method=="asy"){
    fun1=function(t,z){
      return(pnorm((t-rho01*z)/sqrt(1-rho01^2))*dnorm(z))
    }
    fun2=function(t,z){
      return(pnorm(((t-w0*z)/w1-rho01*z)/sqrt(1-rho01^2))*dnorm(z))
    }
    fun3=function(t,z){
      return(pnorm((-t-rho01*z)/sqrt(1-rho01^2))*dnorm(z))
    }
    ## asy(t) = P(MAX3 <= t) under the null, via integrate(). ##
    asy=function(t){
      z1=2*integrate(function(z){fun1(t,z)},lower=0,upper=t*(1-w1)/w0,subdivisions=1000)$value
      z2=2*integrate(function(z){fun2(t,z)},lower=t*(1-w1)/w0,upper=t,subdivisions=1000)$value
      z3=-2*integrate(function(z){fun3(t,z)},lower=0,upper=t,subdivisions=500)$value
      return(z1+z2+z3)
    }
    ## Report asymptotic p-value using "asy" method. ##
    pv=1-asy(maxr)
    md="The MAX3 test using the asy method"
    
  }
  ## print the output. ##
    names(maxr)="statistic"
    structure(list(statistic=maxr,p.value=pv,method=md,data.name=DNAME),class="htest")

}

## Examples 
## ca=c(139,249,112) 
## co=c(136,244,120) 
## a=rbind(ca,co) 
## MAX3(a,"boot",100000.5) 
## Error in MAX3(a, "boot", 100000.5) : warning: m MUST be integer 
## MAX3(a,"hansi",100000) 
## Error in MAX3(a, "hansi", 1e+05) : method must be boot, bvn or asy.
## MAX3(a,"boot",100000)
## The MAX3 test using the boot method
## data:  a 
## statistic = 0.5993, p-value = 0.7936
## MAX3(a,"bvn",100000)
## The MAX3 test using the bvn method
## data:  a 
## statistic = 0.5993, p-value = 0.792
## MAX3(a,"asy",1)
## The MAX3 test using the asy method
## data:  a 
## statistic = 0.5993, p-value = 0.7933

## This function conducts the maximin efficiency robust test (MERT) to a given case-control table. ##
## The MERT achieves the maximum minimum efficiency. ##
## Under some conditions, the MERT can be written as the weighted average of two normally distributed tests with the minimum correlation. ##
## In case-control association studies, the extreme pair corresponds to the CATTs under the REC and DOM models. ##
## Under the null hypothesis of no association, the MERT follows the standard normal distribution N(0,1). ##
## The MERT calculates the statistic and associated p-value as well as reporting the conclusion of the hypothesis test. ## 
## data is a 2 by 3 contingency table for analysis. ##
## the rows of the data represent disease status and the columns represent genotypes. ##

MERT <-
function(data){
  DNAME=deparse(substitute(data))
  ## --- Input validation: a finite, non-negative 2 x 3 table. ---
  if(any(is.na(data)==TRUE)||any(abs(data)>10^9))
    stop("data must be a 2 by 3 table without infinite and missing values.")
  if(any(data<0))
    stop("all entries of data must be non-negative.")
  if((dim(data)[1]!=2)||(dim(data)[2]!=3))
    stop("data must be a 2 by 3 table.")
  ## Continuity correction: if any cell is zero, add 0.5 everywhere.
  if(any(data<0.5)){
    warning("At least one cell of the table is zero.")
    data=data+0.5
  }
  ## Internal helper: CATT statistic for genotype scores (0, x, 1).
  cattStat=function(tab,x=0.5){
    sc=c(0,x,1)
    colTot=tab[1,]+tab[2,]
    nTot=sum(colTot)
    scMean=sum(colTot*sc)/nTot
    scVar=sum(colTot*(sc-scMean)^2)
    caseFrac=sum(tab[1,])/nTot
    sum(tab[1,]*(sc-scMean))/sqrt(caseFrac*(1-caseFrac)*scVar)
  }
  ## Null correlation between the extreme pair: the REC (x=0) and DOM (x=1) CATTs.
  colTot=data[1,]+data[2,]
  nAll=sum(colTot)
  rho=sqrt((colTot[1]*colTot[3])/((nAll-colTot[1])*(nAll-colTot[3])))
  ## MERT statistic: weighted average of the extreme pair.
  zstat=(cattStat(data,0)+cattStat(data,1))/sqrt(2*(1+rho))
  ## Two-sided p-value via z^2 ~ chi-square with 1 df.
  pval=1-pchisq(zstat^2,df=1)
  names(zstat)="statistic"
  structure(list(statistic=zstat,p.value=pval,method="The maximin efficiency robust test",data.name=DNAME),class="htest")

}

## Example 
## ca=c(139,249,112) 
## co=c(136,244,120) 
## a=rbind(ca,co) 
## MERT(a) 
## The maximin efficiency robust test
## data:  a 
## statistic = -0.4962, p-value = 0.6198

Part I

Chapter 1

Chromosomes

Get familiar with R by drawing a bar plot of the human chromosome sizes.

# Chromosome lengths (unit: number of base pairs).
# Assign a vector of 24 values (chromosomes 1-22, X and Y) to chromLen.
chromLen<-c(247249719,242951149,199501827,191273063,180857866,
170899992,158821424,146274826,140273252,135374737,134452384,
132349534,114142980,106368585,100338915,88827254,78774742,
76117153,63811651,62435964,46944323,49691432,154913754,57772954)
# Draw a bar plot of chromLen; bars are labeled 1:22 plus "X" and "Y"; color black.
barplot(chromLen,names=c(1:22,"X","Y"),col="black")

## Genetic models

Single nucleotide polymorphisms have two alleles.

In the case of autosomal loci, three genotypes MM, Mm and mm exist, where M and m are the two alleles.

Pheno, Geno MM Mm mm Sum
(+) \(n_{10}\) \(n_{11}\) \(n_{12}\) \(n_{1.}\)
(-) \(n_{20}\) \(n_{21}\) \(n_{22}\) \(n_{2.}\)
Sum \(n_{.0}\) \(n_{.1}\) \(n_{.2}\) \(n_{..}\)

\(j\) of \(n_{ij}\) stands for the number of copy of allele m.

The fraction of the (+) phenotype, \(f_i=\frac{n_{1i}}{n_{.i}}, i=0,1,2\), varies among genotypes; these fractions are the penetrances (penetration rates).

Genotype relative risk (GRR) (\(\lambda_i=\frac{f_i}{f_0}\)) is the relative ratio of a genotype compared to a genotype set as standard (e.g. MM being the standard genotype).

When MM is set as the standard, \(\lambda_0=1\).

\(\lambda_1=\lambda_2\) means that GRRs of Mm and mm are equal, representing dominant model.

Recessive model has \(\lambda_1=\lambda_0\).

Additive and multiplicative models are both the models between dominant and recessive models. Additive model is the case with x = 0.5 for the following formula, \[ \lambda_1=x \lambda_2 + (1-x) \lambda_0 \] and multiplicative model is the case with y=0.5 for the following formula, \[ \lambda_1=\lambda_2^y \times \lambda_0^{1-y}, \] or \(\lambda_1=\frac{\lambda_0+\lambda_2}{2}\) (arithmetic mean of \(\lambda_0\) and \(\lambda_2\)) is “additive” and \(\lambda_1=\sqrt{\lambda_2}=\sqrt{\lambda_0\times \lambda_2}\) (geometric mean of \(\lambda_0\) and \(\lambda_2\)) is for “multiplicative” models, respectively.

It is a good idea to expand your knowledge to generalized means, including the arithmetic and geometric means.

Arithmetic \(A=\frac{\sum_{i=1}^n x_i}{n}\), geometric \(G=(\prod_{i=1}^n x_i)^\frac{1}{n}\), harmonic \(H=\frac{n}{\sum_{i=1}^n \frac{1}{x_i}}\) and generalized \(M_p=(\frac{1}{n}\sum_{i=1}^n x_i^p)^\frac{1}{p}\).

The arithmetic, geometric and harmonic means correspond to \(M_{1}\), \(M_{0}\) (as the limit \(p \to 0\)) and \(M_{-1}\) in the generalized form.

With \(x, y \in (-\infty, \infty)\), you can specify all possible parameterized genetic models, including overdominance, where heterozygotes have the highest risk.

Chapter 2

Summarize a data set with representative values.

When you have a data set, it is good to summarize them with representative values.

One of those values is the mean, which is the expected value in the case of stochastic variables.

Assume a variable \(X=\{x_1,x_2,...\}\) with probabilities of each elements \(P=\{p_1,p_2,...\} (\sum_{i} p_i =1)\), its expected value is

\[ E(X)=\sum_{i} x_i p_i . \]

Variance is also useful, \[ V(X)=\sum_{i} p_i (x_i-E(X))^2 \]

Variance is the second moment around the mean.

There are k-th moments around the mean or around zero. In general, k-th moment around c is, \[ \mu_k(c) = \sum_{i} p_i (x_i-c)^k \]

# momentProb(): the k-th moment of a discrete distribution.
# Functions are created with function(); arguments go inside (),
# the body inside {}.
# x: vector of values, p: vector of their probabilities.
# x and p must be supplied; order and center have defaults (1, FALSE),
# which are used when no value is given.
# order: moment order k (1 = the mean); center: if TRUE, take the
# moment about the mean instead of about zero.
momentProb<-function (x, p,order = 1, center = FALSE) 
{
 if(center){
  x <- x - sum(x*p) # the mean is the first raw moment
 }
 sum(x^order*p)
}
# Try the function out
datax<-c(1,2,3)
datap<-c(0.2,0.3,0.5)
momentProb(x=datax,p=datap,order=2,center=TRUE)
## [1] 0.61
momentX<-function (x, order = 1, center = FALSE) 
{
 # Moment of a plain sample: give every observation the equal weight
 # 1/length(x) and delegate to momentProb().
 # length(x) is the number of elements of x; rep(v,L) repeats v L times.
 w <- rep(1, length(x)) / length(x)
 momentProb(x, p = w, order = order, center = center)
}
x<-c(1,0.5,0) # identity values for 2, 1 and 0 shared IBD alleles
pDoho<-c(1/4,1/2,1/4) # probabilities of the IBD counts for a sib pair
momentProb(x,pDoho,order=1,center=FALSE) # expected value
## [1] 0.5
momentProb(x,pDoho,order=2,center=TRUE) # variance
## [1] 0.125
# Sharing fractions i/(2k), i = 0,1,...,2k, when the genome consists of
# k equal-length chromosome pairs (2k chromosomes in total).
sibIdValue<-function(k=1){ 
 seq(0, 2*k)/(2*k) # i/(2k)
}
# Probability that exactly i = 0,1,...,2k of the 2k chromosomes are
# shared: binomial with success probability 0.5.
# See Appendix A.5 for dbinom() and the other distribution functions.
sibIdProb<-function(k=1){ 
 dbinom(x=0:(2*k), size=2*k, prob=0.5)
}
numch<-1:23 # example chromosome counts from 1 to 23
means<-vars<-rep(0,length(numch))
for(i in 1:length(numch)){
 identity<-sibIdValue(numch[i])
 prob<-sibIdProb(numch[i])
 if(i>1){par(new=TRUE)} # overlay the successive plots
 plot(identity,prob,type="b",ylim=c(0,1))
 means[i]<-momentProb(identity,prob,order=1,center=FALSE) # mean
 vars[i]<-momentProb(identity,prob,order=2,center=TRUE) # variance
}

plot(means) # plot the expected values

plot(vars) # plot the variances

# Simulate the genome fraction shared by a sib pair.
# f: vector of genome fractions occupied by the individual chromosomes
#    (should sum to 1); Niter: number of simulation replicates.
# Returns an Niter x 1 matrix of simulated sharing fractions.
# (The original default `f=f` was self-referential and errored whenever
# f was omitted, so f is now simply a required argument.)
SibSim<-function(f,Niter=10000){ 
# 0/1 random matrix: is each chromosome shared (1) or not (0)?
# (rows: replicates, columns: chromosomes)
 rs<-matrix(rbinom(Niter*length(f),1,0.5),nrow=Niter) 
 # Matrix product sums the shared chromosomes' genome fractions
 # within each replicate (a chromosome contributes when its draw is 1).
 rs%*%f 
}
# Chromosomes 1,...,22 and X, two copies each: 46 chromosomes
chs<-rep(chromLen[1:23],2) # chromLen was defined in the R1-1.R block above
f<-chs/sum(chs) # genome fraction occupied by each chromosome
Niter<-10000 # number of replicates
simOut<-SibSim(f,Niter=Niter)
# What if all chromosomes had equal length?
cheven<-rep(1,length(f))
feven<-cheven/sum(cheven)
simOutEven<-SibSim(feven,Niter=Niter)
# Sort the Niter sharing fractions in increasing order and plot them with the x axis on [0,1]
plot(ppoints(Niter,a=0),sort(simOut),type="l",ylim=c(0,1))

plot(ppoints(Niter,a=0),sort(simOutEven),type="l",ylim=c(0,1))

# Simulate crossover counts by testing every possible breakpoint.
# L: sequence length, r: per-interval crossover probability,
# Niter: number of simulation replicates.
# Returns a vector of Niter crossover counts.
RecombSim<-function(L=10000,r=0.001,Niter=1000){ 
 # One Bernoulli(r) draw per breakpoint: Niter rows, L-1 columns.
 draws<-matrix(rbinom((L-1)*Niter,1,r),nrow=Niter)
 # Count the crossovers in each replicate.
 apply(draws,1,sum)
}
# Simulate crossover counts with a Poisson approximation: the number of
# crossovers over L-1 breakpoints with per-interval rate r is Poisson
# with mean (L-1)*r.
# See Appendix A.5 for rpois() (distribution functions and
# pseudo-random number generation).
RecombPois<-function(L=10000,r=0.001,Niter=1000){
 rpois(n=Niter, lambda=(L-1)*r)
}
L<-10000;r<-0.0001;Niter<-1000
NumSim<-RecombSim(L=L,r=r,Niter=Niter)
NumPois<-RecombPois(L=L,r=r,Niter=Niter)
ylim<-c(0,max(NumSim,NumPois))
plot(ppoints(Niter,a=0),sort(NumSim),ylim=ylim,col=gray(6/8)) # per-breakpoint simulation (gray points)
par(new=T)
plot(ppoints(Niter,a=0),sort(NumPois),type="l",ylim=ylim) # Poisson approximation (line)

Niter<-1000 # number of simulation replicates
L<-1000000 # chromosome length
r<-0.0001 # crossover probability per base-pair interval
# Draw the number of crossovers from a Poisson distribution and place them with sample()
crosses<-sort(sample(1:(L-1),rpois(1,(L-1)*r),replace=FALSE))
# Build the vector of distances between adjacent crossovers
A<-c(0,crosses) # chromosome start followed by the crossover positions
B<-c(crosses,L) # crossover positions followed by the chromosome end
C<-B-A # inter-crossover distances
# Draw length(C) values from an exponential distribution with mean mean(C) and plot
rexps<-rexp(length(C),1/mean(C))
# See Appendix A.5 for rexp() (distribution functions and pseudo-random numbers)
# Plot the sorted inter-crossover distances
ylim<-c(0,max(C,rexps))
plot(sort(C),ylim=ylim,cex=0.5,pch=15) # inter-crossover distances in increasing order
par(new=T)
plot(sort(rexps),col="red",ylim=ylim,type="l") # exponential draws in increasing order

# Genetic drift transition probabilities.
# Given a chromosome population of size N containing p mutant copies,
# return the probabilities that the next generation contains
# 0,1,...,N mutant copies (vector of length N+1; element i+1 is the
# probability of i mutants).
# k: maximum number of offspring chromosomes per chromosome (the next
#    generation is drawn without replacement from a pool of k*N
#    chromosomes, k*p of them mutant).
# infty: if TRUE, draw from an infinite pool instead, i.e. binomial
#    sampling with mutant frequency p/N.
# Fixes: the two boundary branches of the infinite-pool case were
# swapped (p==0 must put all mass on 0 mutants, i.e. ret[1], and p==N
# on N mutants, i.e. ret[N+1]); the `n22>=00` typo is now `n22>=0`.
probDrift02NInf<-function(N,p,k,infty=FALSE){ 
 if(infty){ # infinite pool: binomial sampling
  ret<-rep(0,(N+1)) # states 0,1,2,...,N mutant chromosomes
  pr1<-p/N # mutant chromosome frequency
  pr2<-1-pr1 # non-mutant chromosome frequency
  if(pr1==0){ # no mutants now -> none in the next generation
   ret[1]<-1
  }else if(pr2==0){ # no non-mutants now -> all mutants next generation
   ret[N+1]<-1
  }else{
   for(i in 0:N){ # P(i of the N draws are mutant), computed on the log scale
    ret[i+1]<-exp(lgamma(N+1)-lgamma(i+1)-lgamma(N-i+1)+i*log(pr1)+(N-i)*log(pr2))
   }
  }
  ret
 }else{ # finite pool of k*N chromosomes: hypergeometric sampling
  ret<-rep(0,(N+1))
  kN<-k*N
  kp<-k*p
  kNp<-kN-kp
  k1N<-(k-1)*N
  commonLN<-lgamma(kp+1)+lgamma(kNp+1)+lgamma(k1N+1)+lgamma(N+1)-lgamma(kN+1)
  for(i in 0:N){
   if(kp>=i){ # cannot draw more mutants than the pool contains
    n11<-i
    n12<-kp-n11
    n21<-N-n11
    n22<-k1N-n12
    if(n11>=0 && n12>=0 && n21>=0 && n22>=0){
     ret[i+1]<-exp(commonLN-lgamma(n11+1)-lgamma(n12+1)-lgamma(n21+1)-lgamma(n22+1))
    }

   }
  }
  ret
 }

}
# Given x, the probability distribution over mutant counts 0..N in the
# current generation, return the distribution for the next generation.
# k and infty are forwarded to probDrift02NInf().
nextGenExProb2<-function(x,k,infty){
 N<-length(x)-1 # chromosome population size
 out<-rep(0,length(x))
 for(state in seq_along(x)){
  # State `state` corresponds to state-1 mutant chromosomes; weight its
  # transition distribution by the probability x[state] of being in it.
  out<-out+probDrift02NInf(N,state-1,k,infty=infty)*x[state]
 }
 return(out)
}

# Drift simulation over ng generations.
# k: maximum offspring chromosomes per chromosome; initial: initial
# number of mutant chromosomes; np: population size in individuals
# (2*np chromosomes); ng: number of generations; infty: use the
# infinite-pool model when TRUE.
# Returns an ng x (2*np+1) matrix; row i is the probability distribution
# over mutant counts 0..2*np at generation i.
DriftSim3<-function(k=2,initial=1,np=20,ng=100,infty=FALSE){
 states<-matrix(0,nrow=ng,ncol=np*2+1)
 states[1,initial+1]<-1 # generation 1 starts at `initial` mutants with certainty
 for(gen in 2:ng){ # propagate generation by generation
  states[gen,]<-nextGenExProb2(states[gen-1,],k=k,infty=infty)
 }
 return(states)
}
# 20 individuals (40 chromosomes), at most 2 offspring chromosomes each,
# 10 mutant chromosomes initially, 25 generations
out<-DriftSim3(k=2,np=20,initial=10,ng=25,infty=FALSE) # run the simulation
# Show the result as a perspective (bird's-eye) plot.
# theta and phi set the viewpoint; shade adds shading for readability.
persp(out,theta=120,phi=30,shade=0.2,xlab="generation",ylab="No. mutants",zlab="probability")

seq1<-sample(c("A","T","G","C"),100,replace=TRUE) # make a random DNA sequence of length 100
exonpattern1<-c(11:20,41:60,81:90) # mRNA1 has 3 exons; these are their positions
exonpattern2<-c(11:20,41:60,66:77,81:90) # mRNA2 has an extra exon inserted between the 2nd and 3rd exons of mRNA1
seq1[seq1=="T"]<-"U" # in mRNA the DNA base "T" becomes "U"
mRNA1<-seq1[exonpattern1] # extract the mRNA1 sequence
mRNA2<-seq1[exonpattern2]
nsample<-10000;X1<-rnorm(nsample);X2<-rnorm(nsample) # see Appendix A.5 for rnorm()
p12<-X1+X2
cov12<-cov(cbind(X1,X2)) # variance-covariance matrix of X1 and X2
cov12 # display it; the covariances (off-diagonal cells) are small
##            X1         X2
## X1 1.02695975 0.01764682
## X2 0.01764682 1.00678958
vpp_2<-cov(p12,p12) # variance of p12
vpp_2-sum(cov12)  # the variance of p12 equals the sum of the variances and covariances
## [1] 0
plot(X1,X2) # scatter plot of X1 against X2

# make an X3 that is related to X1
X3<-X1+0.1*rnorm(nsample) 
p13<-X1+X3
cov13<-cov(cbind(X1,X3)) # variance-covariance matrix of X1 and X3
cov13 # display it; the covariances (off-diagonal cells) are large
##          X1       X3
## X1 1.026960 1.025437
## X3 1.025437 1.034138
vpp_3<-cov(p13,p13) # variance of p13
vpp_3-sum(cov13)  # the variance of p13 equals the sum of the variances and covariances
## [1] 0
plot(X1,X3) # the plot shows a strong correlation

covXY<-function(x,y){ # covariance of two value vectors
 # Population covariance: mean of the products of deviations from the
 # respective means (division by length(x), not length(x)-1).
 xm<-momentX(x,order=1,center=FALSE)
 ym<-momentX(y,order=1,center=FALSE)
 sum((x-xm)*(y-ym))/length(x)
}
covMat<-function(m){ # covariance for every pair of column vectors of m
 ncols<-length(m[1,])
 out<-matrix(0,nrow=ncols,ncol=ncols)
 for(a in 1:ncols){
  for(b in 1:ncols){
   out[a,b]<-covXY(m[,a],m[,b])
  }
 }
 out
}
N<-1000;M<-100 # number of samples; number of biallelic polymorphisms
af<-runif(M) # allele frequencies; see Appendix A.5 for runif()
r<-rnorm(M) # score per allele copy
d<-rnorm(M) # per-marker deviation from the additive model
Xmat<-matrix(rep(0,N*M),nrow=N)
for(i in 1:M){ # compute everyone's score, marker by marker
 x<-rep(0,N)
 x<-x+rbinom(N,1,af[i])
 x<-x+rbinom(N,1,af[i])
 x[x==1]<-1+d[i] # heterozygote-specific value (deviation from additivity)
 x<-x*r[i] # multiply by the locus effect size
 Xmat[,i]<-x
}
X<-apply(Xmat,1,sum) # each individual's score summed over all polymorphisms
momentX(X,order=2,center=TRUE) # the variance of the total scores ...
## [1] 54.59826
sum(covMat(Xmat)) # ... equals the sum of the variance-covariance matrix entries
## [1] 54.59826
hist(X) # distribution of the individual total scores

hweExact<-function(g=c(813,182,5)){ # counts of the three genotypes
 # Exact Hardy-Weinberg equilibrium test: enumerate every genotype table
 # compatible with the observed allele counts and sum the probabilities
 # of the tables no more probable than the observed one.
 n<-sum(g) # total number of individuals
 nA<-2*g[1]+g[2] # number of A alleles
 na<-2*g[3]+g[2] # number of a alleles
 parity<-g[2]%%2 # heterozygote counts keep the observed parity
 hetMax<-min(nA,na)-parity
 Aa<-seq(from=parity,to=hetMax,by=2) # all attainable heterozygote counts
 AA<-(nA-Aa)/2 # matching AA counts
 aa<-(na-Aa)/2 # matching aa counts
 obs<-(g[2]-parity)/2+1 # the observed table sits at Aa[obs]
 # Probability of each table on the log scale; the n*lgamma(3) and
 # -(AA+aa)*lgamma(3) terms together implement the 2^Aa factor.
 prob<-exp(n*lgamma(2+1)+lgamma(nA+1)+lgamma(na+1)-lgamma(2*n+1)-(AA*lgamma(2+1)+Aa*lgamma(1+1)+aa*lgamma(2+1))+lgamma(n+1)-(lgamma(AA+1)+lgamma(Aa+1)+lgamma(aa+1)))
 p.value<-sum(prob[prob<=prob[obs]]) # sum over tables at most as probable as the observed one
 # Aa: attainable heterozygote counts; prob: probability of each table;
 # obsprob: probability of the observed table; p.value: exact test p-value
 list(Aa=Aa,prob=prob,obsprob=prob[obs],p.value=p.value)
}
xx<-hweExact(c(813,182,5))
xx$p.value # the exact test p-value
## [1] 0.1457905
sum(xx$prob) # the probabilities of all possible tables sum to 1
## [1] 1
# Make a data set in which Ns=5 samples each carry 5-dimensional data
Ns<-5;k<-5;x <- matrix(rnorm(Ns*k), nrow=Ns)
dist(x,method="euclidean") # distance matrix of Euclidean distances
##          1        2        3        4
## 2 1.644269                           
## 3 3.435906 2.490368                  
## 4 5.084121 3.943994 3.695698         
## 5 4.997720 3.447472 3.278464 2.841841
library(ape) # load the ape package, which provides the tree-building function nj()
treu<-nj(dist(x,method="euclidean")) # build a tree with ape's neighbor-joining function nj()
trman<-nj(dist(x,method="manhattan")) # build a tree from Manhattan distances
par(mfcol=c(1,2)) # split the plotting area into 1 row and 2 columns
plot(treu);plot(trman) # show the trees for the two distance methods

par(mfcol=c(1,1)) # restore the single-panel plotting area
# Draw the Hasse diagram of the power set of a k-element set.
# Each node is a subset (encoded as a 0/1 vector of length k); nodes are
# placed on levels by subset size, each level centered horizontally, and
# edges connect subsets that differ by exactly one element.
# Fix: the original wrote `j<-k` to leave the carry loop early, but
# reassigning the index does not exit a `for` loop in R; `break` does.
# (The extra iterations were harmless, so the drawn figure is unchanged.)
powerSetHasse<-function(k=4){
 lev<-k+1
 a<-0:k
 numelem<-choose(k,a) # number of subsets at each level 0..k

 numnode<-2^k
 nodesX<-rep(0,numnode)
 nodesY<-rep(0,numnode)
 vecs<-matrix(rep(0,numnode*k),ncol=k)
 keta<-2^(0:(k-1)) # binary place values

 counter<-rep(0,lev)
 startval<--(numelem-1)/2 # leftmost x position per level (centers each row)
 for(i in 2:numnode){
  # Enumerate subsets by binary counting with carry propagation.
  vecs[i,]<-vecs[i-1,]
  vecs[i,1]<-vecs[i,1]+1
  for(j in 1:(k-1)){
   if(vecs[i,j]==2){
    vecs[i,j]<-0
    vecs[i,j+1]<-vecs[i,j+1]+1
   }else{
    break # no further carry to propagate
   }
  }
  tmpsum<-sum(vecs[i,])
  nodesY[i]<-tmpsum
  nodesX[i]<-counter[tmpsum]+startval[tmpsum+1]
  counter[tmpsum]<-counter[tmpsum]+1
 }

 values<-vecs%*%keta # integer code of each subset (kept for reference)

 edges<-matrix(rep(FALSE,numnode^2),ncol=numnode)
 for(i in 1:(numnode-1)){
  for(j in (i+1):numnode){
   # Hasse-diagram adjacency <=> Hamming distance of exactly 1.
   tmpdist<-sum(abs(vecs[i,]-vecs[j,]))
   if(tmpdist==1){
    edges[i,j]<-TRUE
   }
  }
 }
 Edges<-which(edges,arr.ind=TRUE)
 plot(nodesX,nodesY,pch=15)
 segments(nodesX[Edges[,1]],nodesY[Edges[,1]],nodesX[Edges[,2]],nodesY[Edges[,2]])
}

powerSetHasse(4)

# Hierarchical clustering of the samples in x.
distMatrix<-dist(x,method="manhattan") # use the Manhattan distance
# Ward linkage for the between-cluster distance. The method name "ward"
# was renamed to "ward.D" in R >= 3.1.0 (the old name only emitted a
# deprecation warning and mapped to the same algorithm).
trclust<-hclust(distMatrix,method="ward.D")
plot(trclust)

m<-matrix(rbinom(120,1,0.5),20,6) # random 0/1 matrix: 20 rows, 6 columns
heatmap(m) # cluster and display the rows and columns as a heatmap

cormatrix<-cor(m);rsqmatrix<-cormatrix^2 # squared correlations between columns
image(1:nrow(rsqmatrix),1:ncol(rsqmatrix),rsqmatrix,col=gray((100:0)/100)) # darker = stronger correlation

# Simulate a gene genealogy with recombination (ARG-like).
# n: number of chromosomes per generation (even), g: number of
# generations, l: number of loci per chromosome, r: recombination
# probability between adjacent loci.
# Returns a list of two l x (n*g) matrices:
#   allele  - founder chromosome id carried at each locus,
#   parents - index of the parent chromosome at each locus (0 for founders).
# Fix: the original default `r=r` was self-referential and errored when
# r was omitted; r is now a required argument (all calls pass it).
# NOTE(review): `countinside` restarts at 1 for every parent pair, so
# only the first 2*l slots of `now` are updated (and `now` silently grows
# beyond length n when l > n/2) -- looks suspicious, confirm intended.
ARGsim<-function(n=8,g=5,l=6,r){
 ids<-1:n
 now<-1:n
 parents<-rep(0,n*g*l)
 data<-rep(0,n*g*l)
 for(i in 1:n){
  # Founders: every locus carries the chromosome's own id.
  data[(1+(i-1)*l):(i*l)]<-i
 }
 count<-1+n*l
 for(i in 2:g){
  tmp<-sample(ids,replace=FALSE) # random pairing of the parent chromosomes
  for(j in 1:(n/2)){
   countinside<-1
   for(x in 1:2){
    first<-1
    if(runif(1)>0.5){ # pick which of the two parents to start copying from
     first<-2
    }
    data[count]<-now[tmp[(j-1)*2+first]]
    parents[count]<-tmp[(j-1)*2+first]
    now[countinside]<-data[count]
    count<-count+1
    countinside<-countinside+1
    for(k in 1:(l-1)){
     if(runif(1)<r){ # recombination: switch to the other parent
      first<-first+1
      if(first==3){
       first<-1
      }
     }
     data[count]<-now[tmp[(j-1)*2+first]]
     parents[count]<-tmp[(j-1)*2+first]
     now[countinside]<-data[count]
     count<-count+1
     countinside<-countinside+1
    }
   }
  }
 }

 return(list(allele=matrix(data,nrow=l),parents=matrix(parents,nrow=l)))
}
n<-8 # chromosomes per generation
g<-10 # generations
l<-200 # loci per chromosome
r<-0.05 # recombination probability between adjacent loci
d<-ARGsim(n=n,g=g,l=l,r=r)
mmm<-matrix(d$parents[1,],ncol=g) # parent indices at locus 1, one column per generation

# Plot the parent-child relations for a single locus.
# m: an n x g matrix of parent indices (one column per generation).
# NOTE(review): segments(m[j,i], i-1, j, i) draws from the parent's
# position in generation i-1 to position j in generation i; parent
# index 0 (founders) draws from x=0 -- confirm this is intended.
plotSlice<-function(m){
 points<-which(m>=0,arr.ind=TRUE) # coordinates of every cell (all entries are >= 0)
 plot(points[,1],points[,2])
 for(i in 2:length(m[1,])){
  for(j in 1:length(m[,1])){
   segments(m[j,i],i-1,j,i)
  }
 }

}
locus<-1:l

# Draw the slice plot for each requested locus, one figure after another.
# m: loci x (n*g) parent matrix; locus: locus indices; g: generations.
PlotSerial<-function(m,locus,g){
 for(loc in locus){
  sliceMat<-matrix(m[loc,],ncol=g)
  plotSlice(m=sliceMat)
 }
}
# Draw the slice plots for the requested loci overlaid in one figure.
# m: loci x (n*g) parent matrix; locus: locus indices; g: generations.
PlotOverlap<-function(m,locus,g){
 for(loc in locus){
  sliceMat<-matrix(m[loc,],ncol=g)
  par(new=TRUE) # overlay on the existing plot
  plotSlice(m=sliceMat)
 }
}
PlotSerial(d$parents,locus=locus,g=g)