Stat question

Question: Is it better in terms of Type I and Type II error to discard tied values or to use the midrank correction? Use at least three different sample sizes, two different significance levels, two values of "d", two different distributions, and 10% versus 30% tied values. Use any test in Chapters 2-7 of the text, except for the Wilcoxon rank sum, Siegel-Tukey, and two-sample t-test.

Useful code:

## Power for the sign test vs. the t-test when the underlying distribution is normal
require(coin)
require(rmutil)

powerFun.norm <- function(n, d = 1, p = 0.5, alpha = 0.05, sigma = 1, nsims = 1000) {
  pval.sign <- numeric(nsims)
  pval.t    <- numeric(nsims)
  pval.wsr  <- numeric(nsims)
  for (i in 1:nsims) {
    dat1 <- rnorm(n, mean = 0, sd = sigma)
    dat2 <- rnorm(n, mean = d, sd = sigma)
    diff <- dat2 - dat1
    ts <- length(diff[diff > 0])                          # number of positive differences
    pval.sign[i] <- binom.test(ts, n)$p.value             # sign test
    pval.wsr[i]  <- pvalue(wilcoxsign_test(dat2 ~ dat1))  # Wilcoxon signed-rank test
    pval.t[i]    <- t.test(diff)$p.value                  # paired t-test
  }
  power.sign <- length(pval.sign[pval.sign < alpha]) / nsims
  power.wsr  <- length(pval.wsr[pval.wsr < alpha]) / nsims
  power.t    <- length(pval.t[pval.t < alpha]) / nsims
  return(list(sign = power.sign, WSR = power.wsr, t = power.t))
}
powerFun.norm(10)
powerFun.norm(20)

# Underlying distribution is exponential
powerFun.exp <- function(n, d = 0.5, p = 0.5, alpha = 0.05, sigma = 1, nsims = 1000) {
  pval.sign <- numeric(nsims)
  pval.t    <- numeric(nsims)
  pval.wsr  <- numeric(nsims)
  for (i in 1:nsims) {
    dat1 <- rexp(n, rate = 1)
    dat2 <- rexp(n, rate = d)
    diff <- dat2 - dat1
    ts <- length(diff[diff > 0])
    pval.sign[i] <- binom.test(ts, n)$p.value
    pval.wsr[i]  <- pvalue(wilcoxsign_test(dat2 ~ dat1))
    pval.t[i]    <- t.test(diff, mu = 1)$p.value          # t-test of the differences against mu = 1
  }
  power.sign <- length(pval.sign[pval.sign < alpha]) / nsims
  power.wsr  <- length(pval.wsr[pval.wsr < alpha]) / nsims
  power.t    <- length(pval.t[pval.t < alpha]) / nsims
  return(list(sign = power.sign, WSR = power.wsr, t = power.t))
}
powerFun.exp(10)
powerFun.exp(20)

# Underlying distribution is double exponential (Laplace)
powerFun.dexp <- function(n, d = 1, p = 0.5, alpha = 0.05, sigma = 1, nsims = 1000) {
  pval.sign <- numeric(nsims)
  pval.t    <- numeric(nsims)
  pval.wsr  <- numeric(nsims)
  for (i in 1:nsims) {
    dat1 <- rlaplace(n, m = 0, s = sigma)
    dat2 <- rlaplace(n, m = d, s = sigma)
    diff <- dat2 - dat1
    ts <- length(diff[diff > 0])
    pval.sign[i] <- binom.test(ts, n)$p.value
    pval.wsr[i]  <- pvalue(wilcoxsign_test(dat2 ~ dat1))
    pval.t[i]    <- t.test(diff)$p.value
  }
  power.sign <- length(pval.sign[pval.sign < alpha]) / nsims
  power.wsr  <- length(pval.wsr[pval.wsr < alpha]) / nsims
  power.t    <- length(pval.t[pval.t < alpha]) / nsims
  return(list(sign = power.sign, WSR = power.wsr, t = power.t))
}
powerFun.dexp(10)
powerFun.dexp(20)
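
The useful code above generates continuous data, so it produces no ties; to address the actual question you would need to induce tied values and then handle them two ways. One possible approach (an assumption, not from the text) is to round the simulated values so some paired differences are exactly zero, then compare discarding those differences against retaining them, for example via the zero.method argument of coin's wilcoxsign_test: "Wilcoxon" drops zero differences, while "Pratt" keeps them and is used here only as a stand-in for the text's midrank correction, which may be defined differently. In the sketch below, tieFun.norm and the digits argument are hypothetical names, and digits (or sigma) would have to be tuned until the tied proportion is near 10% or 30%.

## Sketch (assumption, not from the text): Type I error with tied (zero) differences,
## discarding ties vs. keeping them, for the Wilcoxon signed-rank test
require(coin)

tieFun.norm <- function(n, alpha = 0.05, sigma = 1, digits = 0, nsims = 1000) {
  pval.drop <- numeric(nsims)
  pval.keep <- numeric(nsims)
  tie.frac  <- numeric(nsims)
  for (i in 1:nsims) {
    # Same-mean normals, rounded so that some paired differences are exactly zero
    dat1 <- round(rnorm(n, mean = 0, sd = sigma), digits)
    dat2 <- round(rnorm(n, mean = 0, sd = sigma), digits)
    tie.frac[i] <- mean(dat2 - dat1 == 0)
    # zero.method = "Wilcoxon" discards zero differences;
    # zero.method = "Pratt" retains them (stand-in for the midrank-style correction)
    pval.drop[i] <- pvalue(wilcoxsign_test(dat2 ~ dat1, zero.method = "Wilcoxon"))
    pval.keep[i] <- pvalue(wilcoxsign_test(dat2 ~ dat1, zero.method = "Pratt"))
  }
  list(prop.tied  = mean(tie.frac),
       typeI.drop = mean(pval.drop < alpha),
       typeI.keep = mean(pval.keep < alpha))
}
tieFun.norm(20)   # check prop.tied, then adjust digits or sigma toward 0.10 or 0.30

The same pattern extends to power (shift dat2 by d) and to the other sample sizes, alpha levels, and distributions the question asks for.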