blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6ad30b7185303d36297e3d48b164d5398b48225
|
e8a4ff8a3df82b03e323581ee68a378ff501f2b2
|
/vns.R
|
84edc2ff16df6f6fa9b93e489f89f4e5b8fb327e
|
[] |
no_license
|
aldamuhammadsulaiman/OKH
|
04ba54d7f50a984b467fb160d6bad36acc3c3769
|
8f106ca9ba3b4dbd1189b283995e4b9716040975
|
refs/heads/master
| 2020-09-13T06:21:49.609991
| 2019-12-11T12:27:18
| 2019-12-11T12:27:18
| 222,680,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,513
|
r
|
vns.R
|
# Variable Neighbourhood Search over an exam timetable.
#
# Args:
#   sol: two-column matrix; column 1 = exam id, column 2 = assigned timeslot.
#   cm: conflict matrix; cm[i, j] > 0 when exams i and j share students.
#   stud: student count used as the penalty normaliser by the helpers.
#   max_iter: number of search iterations.
#
# Neighbourhoods:
#   k == 1: Kempe-chain swap between the timeslot of a random exam and a
#           randomly chosen conflicting timeslot.
#   k == 2: move one exam out of the previously selected timeslot to the
#           best alternative slot (greedy over all slots).
#
# Side effects: plots the penalty trace and prints elapsed time.
# Returns the (possibly improved) solution matrix.
# NOTE(review): calculatePen, hcPrepare and calculatePen_move are defined
# elsewhere in the project.
vns<- function(sol,cm,stud,max_iter) {
k<-1
start<-Sys.time()
# initial soft-constraint penalty of the incoming solution
pen<-calculatePen(hcPrepare(sol),cm)
iteration_pen<-matrix(0,nrow=max_iter,ncol=1)
for (iter in 1:max_iter) {
if(k==1) {
# pick a random exam and the set of timeslots that hold conflicting exams
rand_exam<-round(runif(1,1,nrow(sol)),0)
sel_ts<-sol[sol[,1]==rand_exam,2]
conf_ts<-getNeighboringTS(sol,rand_exam,cm)
if(length(conf_ts)==0) {
# no conflicting timeslot: fall through to neighbourhood 2 next iteration
k<-k+1
iteration_pen[iter]<-pen
next
}
# random conflicting timeslot and the members of both slots
rand_ts<-conf_ts[round(runif(1,1,length(conf_ts)),0)]
sel_ts_member<-sol[sol[,2]==sel_ts,1]
rand_ts_member<-sol[sol[,2]==rand_ts,1]
kempe_chain<-findKempeChain(sel_ts_member,rand_ts_member,cm)
if(length(kempe_chain)>0) {
# only accept the chain swap when it strictly lowers the penalty
deltapen<-calculatePen_kswap_vns(sol,cm,sel_ts,rand_ts,kempe_chain,stud)
if(deltapen<0) {
sol<-chainSwap(sol,kempe_chain,sel_ts,rand_ts)
k<-1
pen<-pen+deltapen
}
}
else
k<-k+1
}
else if(k==2) {
# NOTE(review): sel_ts carries over from the last k == 1 iteration —
# this neighbourhood moves a random exam out of that same timeslot.
move_candidate<-sol[sol[,2]==sel_ts,1]
move_candidate<-move_candidate[round(runif(1,1,length(move_candidate)),0)]
best_delta<-0
moveto<-sol[sol[,1]==move_candidate,2]
# greedy: evaluate every timeslot and keep the best improving move
for (i in 1:length(unique(sol[,2]))) {
if(i==sel_ts) { next }
curr_delta<-calculatePen_move(sol,cm,sel_ts,i,move_candidate,stud)
if (curr_delta<best_delta) {
best_delta<-curr_delta
moveto<-i
}
}
pen<-pen+best_delta
sol[sol[,1]==move_candidate,2]<-moveto
k<-1
}
iteration_pen[iter]<-pen
}
plot(iteration_pen, type = "l")
print(Sys.time()-start)
return(sol)
}
# Return the unique timeslots of all exams that conflict with `rand_exam`.
#
# Args:
#   sol: two-column matrix; column 1 = exam id, column 2 = assigned timeslot.
#   rand_exam: exam id, used as a row index into the conflict matrix.
#   cm: conflict matrix with named columns; cm[i, j] > 0 means exams i and j
#       share students. Column names are assumed to look like "X<exam id>";
#       substr(., 2, 5) recovers ids of up to four digits (as elsewhere in
#       this file).
#
# Returns:
#   numeric vector of distinct timeslots that contain a conflicting exam,
#   or numeric(0) when there is no conflict.
getNeighboringTS <- function(sol, rand_exam, cm) {
  conflict_row <- cm[rand_exam, ]
  # "X123" -> 123; keeps the original 4-digit-id convention
  crashing_exam <- as.integer(substr(names(conflict_row[which(conflict_row > 0)]), 2, 5))
  if (length(crashing_exam) == 0) {
    return(numeric(0))
  }
  # One timeslot per conflicting exam; vapply avoids growing a vector in a
  # loop and errors loudly if an exam id is missing or duplicated in `sol`.
  crashing_ts <- vapply(crashing_exam,
                        function(exam) sol[sol[, 1] == exam, 2],
                        numeric(1))
  unique(crashing_ts)
}
# Find the Kempe chain between two timeslots: every conflicting pair of exams
# (one from each slot) whose conflict-matrix entry is positive.
#
# Args:
#   sel_ts_member:  exam ids currently in the selected timeslot.
#   rand_ts_member: exam ids currently in the other timeslot.
#   cm: conflict matrix indexed by exam id.
#
# Returns:
#   numeric matrix with columns "sel" and "rand", one row per conflicting
#   pair in sel-outer / rand-inner order; a 0-row matrix when there are no
#   conflicts (or either input is empty).
findKempeChain <- function(sel_ts_member, rand_ts_member, cm) {
  # Collect the pairs in a list and bind once at the end: the original grew a
  # data.frame with rbind() inside the loop, which copies it on every append.
  pairs <- list()
  for (i in seq_along(sel_ts_member)) {   # seq_along: safe when the input is empty
    for (j in seq_along(rand_ts_member)) {
      if (cm[sel_ts_member[i], rand_ts_member[j]] > 0) {
        pairs[[length(pairs) + 1L]] <- c(sel = sel_ts_member[i],
                                         rand = rand_ts_member[j])
      }
    }
  }
  if (length(pairs) == 0) {
    return(matrix(numeric(0), nrow = 0, ncol = 2,
                  dimnames = list(NULL, c("sel", "rand"))))
  }
  do.call(rbind, pairs)
}
# Swap the timeslots of the two sides of a Kempe chain: every exam appearing
# in column 1 of `kempe_chain` moves to `rand_ts`, every exam in column 2
# moves to `sel_ts`.
#
# Args:
#   sol: two-column matrix; column 1 = exam id, column 2 = assigned timeslot.
#   kempe_chain: matrix of conflicting pairs as returned by findKempeChain().
#   sel_ts, rand_ts: the two timeslots being exchanged.
#
# Returns: the updated solution matrix.
# Note: if an exam appears in both chain columns, the second assignment wins
# (it ends up in sel_ts), matching the original loop order. Vectorised `%in%`
# replaces the per-element loops and also handles an empty chain gracefully
# (the original errored on NA subscripts for a 0-row chain).
chainSwap <- function(sol, kempe_chain, sel_ts, rand_ts) {
  sol[sol[, 1] %in% kempe_chain[, 1], 2] <- rand_ts
  sol[sol[, 1] %in% kempe_chain[, 2], 2] <- sel_ts
  sol
}
# Penalty delta of applying a Kempe-chain swap between timeslots `init` and
# `target`, normalised by the student count `stud`.
#
# For each exam on each side of the chain the proximity penalty
# cm[exam, neighbour] * 2^(5 - |slot distance|) is summed over all exams
# within 5 slots of the exam's timeslot, before and after the tentative move;
# the before-sum is negated so each exam contributes (after - before).
# A negative return value therefore means the swap improves the timetable.
#
# Note: `sol` is mutated only inside this function's local copy (R
# copy-on-modify), so the caller's solution is untouched.
calculatePen_kswap_vns <- function(sol,cm,init,target,kempe_chain,stud) {
check_ts<-list(init,target)
# side 1 of the chain moves init -> target, side 2 moves target -> init
kempe_list<-list(unique(kempe_chain[,1]),unique(kempe_chain[,2]))
delta<-0
temp_delta<-0
for (l in 1:2) {
if (l==2) {
# second side of the chain: swap the (from, to) timeslot pair
sw<-check_ts[[1]]
check_ts[[1]]<-check_ts[[2]]
check_ts[[2]]<-sw
}
for (k in 1:length(kempe_list[[l]])) {
temp_delta<-0
for (i in 1:2) {
if (i==2) {
# tentatively move this exam, then negate the "before" sum so the
# loop below accumulates (after - before)
sol[sol[,1]==kempe_list[[l]][[k]],2]<-check_ts[[i]]
temp_delta<-(-temp_delta)
}
# all exams within 5 timeslots on either side (the penalty window)
neighbor<-sol[(sol[,2]<check_ts[[i]] & sol[,2]>(check_ts[[i]]-6)) | (sol[,2]>check_ts[[i]] & sol[,2]<(check_ts[[i]]+6)) ,]
for (j in 1:nrow(neighbor))
temp_delta<-temp_delta+(cm[sol[sol[,1]==kempe_list[[l]][[k]],1],neighbor[j,1]]*2^(5-abs(check_ts[[i]]-neighbor[j,2])))
}
delta<-delta+temp_delta
}
}
return (delta/stud)
}
# Penalty delta of moving a single `exam` from timeslot `init` to `target`,
# normalised by the student count `stud`.
#
# Returns 1 (a positive, i.e. rejected, delta) immediately when the move is
# infeasible — `target` already holds an exam that conflicts with `exam`.
# Otherwise returns (penalty after - penalty before) / stud using the same
# 2^(5 - |slot distance|) proximity weighting as calculatePen_kswap_vns,
# summed over exams within 5 slots of the relevant timeslot.
#
# Note: the tentative reassignment of `sol` is local to this function
# (copy-on-modify); the caller's solution is untouched.
calculatePen_move_vns <- function(sol,cm,init,target,exam,stud) {
# hard-constraint check: any conflict in the target slot vetoes the move
sol_target<-sol[sol[,2]==target,1]
for(i in 1:length(sol_target)) {
if(cm[sol_target[i],exam]>0) {
return (1)
}
}
check_ts<-list(init,target)
delta<-0
for (i in 1:2) {
if (i==2) {
# tentatively apply the move; negate the "before" sum so the loop
# below accumulates (after - before)
sol[sol[,1]==exam,2]<-target
delta<-(-delta)
}
# all exams within 5 timeslots on either side (the penalty window)
neighbor<-sol[(sol[,2]<check_ts[[i]] & sol[,2]>(check_ts[[i]]-6)) | (sol[,2]>check_ts[[i]] & sol[,2]<(check_ts[[i]]+6)) ,]
for (j in 1:nrow(neighbor))
delta<-delta+(cm[sol[sol[,1]==exam,1],neighbor[j,1]]*2^(5-abs(check_ts[[i]]-neighbor[j,2])))
}
return (delta/stud)
}
# calculatePen_swap_vns <- function(sol,cm,init,target,exam,targ_exam,stud) {
# check_ts<-list(init,target)
# exm<-list(exam,targ_exam)
# delta<-0
# tot_delta<-0
# for (k in 1:2) {
# if (k==2) {
# sw<-check_ts[[1]]
# check_ts[[1]]<-check_ts[[2]]
# check_ts[[2]]<-sw
# delta<-0
# }
# for (i in 1:2) {
# if (i==2) {
# sol[sol[,1]==exm[[k]],2]<-target
# delta<-(-delta)
# }
# neighbor<-sol[(sol[,2]<check_ts[[i]] & sol[,2]>(check_ts[[i]]-6)) | (sol[,2]>check_ts[[i]] & sol[,2]<(check_ts[[i]]+6)) ,]
# for (j in 1:nrow(neighbor))
# delta<-delta+(cm[sol[sol[,1]==exm[[k]],1],neighbor[j,1]]*2^(5-abs(check_ts[[i]]-neighbor[j,2])))
# }
# tot_delta<-tot_delta+delta
# }
# return (delta/stud)
# }
# Variable Neighbourhood Search, variant 2, with five neighbourhoods:
#   k = 1..3: move 1, 2 or 3 mutually non-conflicting exams to one feasible
#             timeslot (feasibility via getPossibleMove).
#   k = 4..5: rotate the timeslots of 2 or 3 randomly chosen exams.
#
# Args:
#   sol: two-column matrix; column 1 = exam id, column 2 = assigned timeslot.
#   cm: conflict matrix with "X<id>"-style names; cm[i, j] > 0 on conflict.
#   stud_amount: student count (penalty normaliser passed to helpers).
#   max_iter: number of search iterations.
#
# Side effects: prints elapsed time and plots the cumulative penalty trace.
# Returns the (possibly improved) solution matrix.
# NOTE(review): calculatePen, hcPrepare, getPossibleMove,
# calculatePen_move_sa and calculatePen_swap_sa are defined elsewhere.
vns_2 <- function(sol, cm, stud_amount, max_iter) {
  k <- 1
  start <- Sys.time()
  pen <- calculatePen(hcPrepare(sol), cm)
  # column 1: per-iteration penalty delta; column 2: cumulative trace (filled after the loop)
  iteration_pen <- matrix(10, nrow = max_iter, ncol = 2)
  for (i in 1:max_iter) {
    rand_exam <- round(runif(1, 1, nrow(sol)), 0)
    if (k < 4) {
      if (k == 1) {
        curr_ts <- sol[sol[, 1] == rand_exam, 2]
      }
      else if (k == 2) {
        # add a second exam that does not conflict with the first
        rand_exam2 <- cm[rand_exam, ]
        rand_exam2 <- rand_exam2[which(rand_exam2 == 0)]
        rand_exam2 <- rand_exam2[round(runif(1, 1, length(rand_exam2)), 0)]
        rand_exam2 <- as.integer(substr(names(rand_exam2), 2, 5))
        rand_exam <- c(rand_exam, rand_exam2)
        curr_ts <- c(sol[sol[, 1] == rand_exam[1], 2],
                     sol[sol[, 1] == rand_exam[2], 2])
      }
      else if (k == 3) {
        # add two more exams, the third conflict-free w.r.t. both others
        rand_exam2 <- cm[rand_exam, ]
        rand_exam2 <- rand_exam2[which(rand_exam2 == 0)]
        rand_exam2 <- rand_exam2[round(runif(1, 1, length(rand_exam2)), 0)]
        rand_exam2 <- as.integer(substr(names(rand_exam2), 2, 5))
        rand_exam3 <- cm[c(rand_exam, rand_exam2), ]
        rand_exam3 <- colSums(rand_exam3)
        rand_exam3 <- rand_exam3[which(rand_exam3 == 0)]
        rand_exam3 <- rand_exam3[round(runif(1, 1, length(rand_exam3)), 0)]
        rand_exam3 <- as.integer(substr(names(rand_exam3), 2, 5))
        rand_exam <- c(rand_exam, rand_exam2, rand_exam3)
        curr_ts <- c(sol[sol[, 1] == rand_exam[1], 2],
                     sol[sol[, 1] == rand_exam[2], 2],
                     sol[sol[, 1] == rand_exam[3], 2])
      }
      rand_ts <- getPossibleMove(sol, cm, rand_exam)
      if (length(rand_ts) > 0) {
        rand_ts <- rand_ts[round(runif(1, 1, length(rand_ts)), 0)]
      } else {
        iteration_pen[i, 1] <- 0
        # BUG FIX: was `k+1` — the value was computed and discarded, so the
        # search never advanced to the next neighbourhood on an infeasible move.
        k <- k + 1
        next
      }
      deltapen <- calculatePen_move_sa(sol, cm, curr_ts, rand_ts, rand_exam, stud_amount)
    }
    else {
      # k = 4/5: pick 1 or 2 extra exams from random timeslots for a rotation
      swap_cand <- round(runif(1, 1, length(unique(sol[, 2]))), 0)
      swap_cand <- sol[sol[, 2] == swap_cand, 1]
      swap_cand <- swap_cand[round(runif(1, 1, length(swap_cand)), 0)]
      if (k == 4) { rand_exam <- c(rand_exam, swap_cand) }
      else {
        swap_cand2 <- round(runif(1, 1, length(unique(sol[, 2]))), 0)
        swap_cand2 <- sol[sol[, 2] == swap_cand2, 1]
        swap_cand2 <- swap_cand2[round(runif(1, 1, length(swap_cand2)), 0)]
        rand_exam <- c(rand_exam, swap_cand, swap_cand2)
      }
      deltapen <- calculatePen_swap_sa(sol, cm, rand_exam, stud_amount)
    }
    if (deltapen < 0) {
      iteration_pen[i, 1] <- deltapen
      if (k < 4) {
        # FIX: the inner loop reused `i`, shadowing the outer iteration index;
        # harmless in R's for() but a latent hazard — use a distinct variable.
        for (j in seq_along(rand_exam))
          sol[sol[, 1] == rand_exam[j], 2] <- rand_ts
      }
      else {
        # rotate the timeslots of the 2 or 3 chosen exams
        saved <- sol[sol[, 1] == rand_exam[1], 2]
        if (length(rand_exam) == 2) {
          sol[sol[, 1] == rand_exam[1], 2] <- sol[sol[, 1] == rand_exam[2], 2]
          sol[sol[, 1] == rand_exam[2], 2] <- saved
        } else {
          sol[sol[, 1] == rand_exam[1], 2] <- sol[sol[, 1] == rand_exam[2], 2]
          sol[sol[, 1] == rand_exam[2], 2] <- sol[sol[, 1] == rand_exam[3], 2]
          sol[sol[, 1] == rand_exam[3], 2] <- saved
        }
      }
      k <- 1
    }
    else {
      iteration_pen[i, 1] <- 0
      if (k == 5)
        k <- 1
      else
        k <- k + 1
    }
  }
  print(Sys.time() - start)
  # build the cumulative penalty trace: initial penalty plus running deltas
  for (i in 1:nrow(iteration_pen)) {
    if (i == 1) {
      iteration_pen[i, 2] <- pen
      next
    }
    iteration_pen[i, 2] <- iteration_pen[(i - 1), 2] + iteration_pen[i, 1]
  }
  plot(iteration_pen[, 2], type = "l")
  return(sol)
}
|
09f7631fbb011a0ddb2f593a513f78d2a2abad6f
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlecomputealpha.auto/man/AddressesScopedList.Rd
|
e11a06fc5219a7713000ee4fd39c36fbb6ba61ee
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,062
|
rd
|
AddressesScopedList.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_objects.R
\name{AddressesScopedList}
\alias{AddressesScopedList}
\title{AddressesScopedList Object}
\usage{
AddressesScopedList(AddressesScopedList.warning = NULL,
AddressesScopedList.warning.data = NULL, addresses = NULL,
warning = NULL)
}
\arguments{
\item{AddressesScopedList.warning}{The \link{AddressesScopedList.warning} object or list of objects}
\item{AddressesScopedList.warning.data}{The \link{AddressesScopedList.warning.data} object or list of objects}
\item{addresses}{[Output Only] List of addresses contained in this scope}
\item{warning}{[Output Only] Informational warning which replaces the list of addresses when the list is empty}
}
\value{
AddressesScopedList object
}
\description{
AddressesScopedList Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other AddressesScopedList functions: \code{\link{AddressesScopedList.warning.data}},
\code{\link{AddressesScopedList.warning}}
}
|
a3f3017913cc3a8492508eef2e5061d44a4e6d06
|
8b9821528190c4b4ff791c9d87c5d10851705bdb
|
/man/read_geocarb.Rd
|
ddaf237d1b36fe4248a4a1046e29330476d11790
|
[
"MIT"
] |
permissive
|
jonathan-g/geocarb
|
5eb88b7941c4c4781d4c9a4db8a54076d81eb151
|
a7213a1d31e02b9f50048a98c010160ff0b4ce6e
|
refs/heads/master
| 2021-03-11T04:13:43.362794
| 2020-03-12T02:17:39
| 2020-03-12T02:17:39
| 246,508,618
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 410
|
rd
|
read_geocarb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_geocarb.R
\name{read_geocarb}
\alias{read_geocarb}
\title{read_geocarb}
\usage{
read_geocarb(filename)
}
\arguments{
\item{filename}{The file to read.}
}
\value{
A tibble containing the results of the GEOCARB simulation.
See \link{run_geocarb} for details.
}
\description{
Read a GEOCARB output file.
}
\seealso{
run_geocarb
}
|
15c6a5d7f21658bdf06eac3ba64ad81c624a35fb
|
f6342eaa9aa5e4640305166e4a9bf91753d3a86f
|
/R/ITPimage.R
|
a6376486af3ff7ad1001fbd181c9f9df25acf110
|
[] |
no_license
|
cran/fdatest
|
1edd9c7545dc7ad25bfd5c2bce4e307b9bac5159
|
56b9403f749e3e6442249ca6c6cd04bf1825706a
|
refs/heads/master
| 2022-05-24T11:12:19.389283
| 2022-05-04T07:30:08
| 2022-05-04T07:30:08
| 17,696,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,138
|
r
|
ITPimage.R
|
# Plot Interval Testing Procedure (ITP) results as a four-panel figure:
#   (1) p-value heatmap, (2) colour legend, (3) corrected p-values with the
#   significant components shaded grey, (4) the functional data themselves.
#
# Args:
#   ITP.result: result object; fields read here include $basis ('paFourier',
#     'Fourier' or 'B-spline'), $test ('1pop'/'2pop'), the heatmap matrices,
#     corrected p-values, $data.eval, $labels and (1pop only) $mu.
#   alpha: significance threshold used to shade intervals (default 0.05).
#   abscissa.range: abscissa range of the functional data (default c(0, 1)).
#   nlevel: number of colour levels in the heatmap and legend.
#
# Side effects only: draws on the current graphics device; no return value.
# NOTE(review): variable names are Italian — ascissa = abscissa, ordinata =
# ordinate, colori = colours, matrice = matrix, grafico = plot.
ITPimage <-
function(ITP.result,alpha=0.05,abscissa.range=c(0,1),nlevel=20){
# Case 1: paired-amplitude Fourier basis, two-population test:
# two successive figures (phase, then amplitude), paged with par(ask).
if(ITP.result$basis=='paFourier' & ITP.result$test=='2pop'){
#2 plots: phase and amplitude
par(ask=T)
#phase:
p <- dim(ITP.result$heatmap.matrix_phase)[1]
min.ascissa <- 1-(p-1)/2
max.ascissa <- p+(p-1)/2
ascissa.grafico <- seq(min.ascissa,max.ascissa,length.out=p*4)
ordinata.grafico <- 1:p
# reversed rainbow: low p-values map to warm colours
colori=rainbow(nlevel,start=0.15,end=0.67)
colori <- colori[length(colori):1]
##dev.new()
#layout(rbind(c(1,1,1,1,1,1,1,1,2),c(1,1,1,1,1,1,1,1,2),c(3,3,3,3,3,3,3,3,0),c(4,4,4,4,4,4,4,4,0)))
layout(rbind(1:2,c(3,0),c(4,0)),widths=c(8,1),heights=c(2,1,1))
par(mar=c(4.1, 4.1, 3, .2),cex.main=1.5,cex.lab=1.1,las=0)
#1: heatmap
# keep only the central half of the padded heatmap columns
matrice.quad <- ITP.result$heatmap.matrix_phase[,(p+1):(3*p)]
ascissa.quad <- ascissa.grafico[(p+1):(3*p)]
image(ascissa.quad,ordinata.grafico,t(matrice.quad[p:1,]),col=colori,ylab='Interval length',main='p-value heatmap (phase)',xlab='Abscissa',zlim=c(0,1),asp=1)
# remember the heatmap's x-limits so the panels below line up with it
min.plot <- par("usr")[1]
max.plot <- par("usr")[2]
#2: legend
par(mar=c(4.1, 1, 3, 3),las=1)
image(1,seq(0,1,length.out=nlevel)-0.025*seq(0,1,length.out=nlevel)+0.025*seq(1,0,length.out=nlevel),t(as.matrix(seq(0,1,length.out=nlevel))),col=colori,xaxt='n',yaxt='n',xlab='',ylab='')
axis(4,at=seq(0,1,0.2),padj=0.4)
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
#3: corrected p values
par(mar=c(4.1, 4.1, 3, .2),las=0)
plot(1:p,ITP.result$corrected.pval_phase,pch=16,ylim=c(0,1),xlim=c(min.plot,max.plot),main='Corrected p-values (phase)',ylab='p-value',xlab='Component',xaxs='i')
#gray shaded area
# shade each component whose corrected p-value is below alpha
difference <- which(ITP.result$corrected.pval_phase<alpha)
abscissa.pval <- 1:p
if(length(difference)>0){
for(j in 1:length(difference)){
min.rect <- abscissa.pval[difference[j]] - 0.5
max.rect <- min.rect + 1
rect(min.rect, par("usr")[3], max.rect, par("usr")[4], col = 'gray90',density=-2,border = NA)
}
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
}
for(j in 0:10){
abline(h=j/10,col='lightgray',lty="dotted")
}
# redraw the points above the shading
points(abscissa.pval,ITP.result$corrected.pval_phase,pch=16)
#4: functional data
abscissa.new <- seq(abscissa.range[1],abscissa.range[2],length.out=dim(ITP.result$data.eval)[2])
matplot(abscissa.new,t(ITP.result$data.eval),col=ITP.result$labels,type='l',main='Functional data',xlab='Abscissa',ylab='Value',xaxs='i')
#amplitude
# second figure: identical layout, amplitude matrices/p-values instead of phase
#dev.new()
#layout(rbind(c(1,1,1,1,1,1,1,1,2),c(1,1,1,1,1,1,1,1,2),c(3,3,3,3,3,3,3,3,0),c(4,4,4,4,4,4,4,4,0)))
layout(rbind(1:2,c(3,0),c(4,0)),widths=c(8,1),heights=c(2,1,1))
par(mar=c(4.1, 4.1, 3, .2),cex.main=1.5,cex.lab=1.1,las=0)
#1: heatmap
matrice.quad <- ITP.result$heatmap.matrix_amplitude[,(p+1):(3*p)]
ascissa.quad <- ascissa.grafico[(p+1):(3*p)]
image(ascissa.quad,ordinata.grafico,t(matrice.quad[p:1,]),col=colori,ylab='Interval length',main='p-value heatmap (amplitude)',xlab='Abscissa',zlim=c(0,1),asp=1)
min.plot <- par("usr")[1]
max.plot <- par("usr")[2]
#2: legend
par(mar=c(4.1, 1, 3, 3),las=1)
image(1,seq(0,1,length.out=nlevel)-0.025*seq(0,1,length.out=nlevel)+0.025*seq(1,0,length.out=nlevel),t(as.matrix(seq(0,1,length.out=nlevel))),col=colori,xaxt='n',yaxt='n',xlab='',ylab='')
axis(4,at=seq(0,1,0.2),padj=0.4)
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
#3: corrected p values
par(mar=c(4.1, 4.1, 3, .2),las=0)
plot(1:p,ITP.result$corrected.pval_amplitude,pch=16,ylim=c(0,1),xlim=c(min.plot,max.plot),main='Corrected p-values (amplitude)',ylab='p-value',xlab='Component',xaxs='i')
difference <- which(ITP.result$corrected.pval_amplitude<alpha)
abscissa.pval <- 1:p
if(length(difference)>0){
for(j in 1:length(difference)){
min.rect <- abscissa.pval[difference[j]] - 0.5
max.rect <- min.rect + 1
rect(min.rect, par("usr")[3], max.rect, par("usr")[4], col = 'gray90',density=-2,border = NA)
}
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
}
for(j in 0:10){
abline(h=j/10,col='lightgray',lty="dotted")
}
points(1:p,ITP.result$corrected.pval_amplitude,pch=16)
#4: functional data
abscissa.new <- seq(abscissa.range[1],abscissa.range[2],length.out=dim(ITP.result$data.eval)[2])
matplot(abscissa.new,t(ITP.result$data.eval),col=ITP.result$labels,type='l',main='Functional data',xlab='Abscissa',ylab='Value',xaxs='i')
par(ask=FALSE)
# Case 2: plain Fourier basis — single figure, components on an index axis.
}else if(ITP.result$basis=='Fourier'){
p <- dim(ITP.result$heatmap.matrix)[1]
min.ascissa <- 1-(p-1)/2
max.ascissa <- p+(p-1)/2
ascissa.grafico <- seq(min.ascissa,max.ascissa,length.out=p*4)
ordinata.grafico <- 1:p
colori=rainbow(nlevel,start=0.15,end=0.67)
colori <- colori[length(colori):1]
#dev.new()
#layout(rbind(c(1,1,1,1,1,1,1,1,2),c(1,1,1,1,1,1,1,1,2),c(3,3,3,3,3,3,3,3,0),c(4,4,4,4,4,4,4,4,0)))
layout(rbind(1:2,c(3,0),c(4,0)),widths=c(8,1),heights=c(2,1,1))
par(mar=c(4.1, 4.1, 3, .2),cex.main=1.5,cex.lab=1.1,las=0)
#1: heatmap
matrice.quad <- ITP.result$heatmap.matrix[,(p+1):(3*p)]
ascissa.quad <- ascissa.grafico[(p+1):(3*p)]
image(ascissa.quad,ordinata.grafico,t(matrice.quad[p:1,]),col=colori,ylab='Interval length',main='p-value heatmap',xlab='Abscissa',zlim=c(0,1),asp=1)
min.plot <- par("usr")[1]
max.plot <- par("usr")[2]
#2: legend
par(mar=c(4.1, 1, 3, 3),las=1)
image(1,seq(0,1,length.out=nlevel)-0.025*seq(0,1,length.out=nlevel)+0.025*seq(1,0,length.out=nlevel),t(as.matrix(seq(0,1,length.out=nlevel))),col=colori,xaxt='n',yaxt='n',xlab='',ylab='')
axis(4,at=seq(0,1,0.2),padj=0.4)
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
#3: corrected p values
par(mar=c(4.1, 4.1, 3, .2),las=0)
plot(1:p,ITP.result$corrected.pval,pch=16,ylim=c(0,1),xlim=c(min.plot,max.plot),main='Corrected p-values',ylab='p-value',xlab='Component',xaxs='i')
difference <- which(ITP.result$corrected.pval<alpha)
abscissa.pval <- 1:p
if(length(difference)>0){
for(j in 1:length(difference)){
min.rect <- abscissa.pval[difference[j]] - 0.5
max.rect <- min.rect + 1
rect(min.rect, par("usr")[3], max.rect, par("usr")[4], col = 'gray90',density=-2,border = NA)
}
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
}
for(j in 0:10){
abline(h=j/10,col='lightgray',lty="dotted")
}
points(1:p,ITP.result$corrected.pval,pch=16)
#4: functional data
abscissa.new <- seq(abscissa.range[1],abscissa.range[2],length.out=dim(ITP.result$data.eval)[2])
matplot(abscissa.new,t(ITP.result$data.eval),col=ITP.result$labels,type='l',main='Functional data',xlab='Abscissa',ylab='Value',xaxs='i')
# one-population test: overlay the tested mean function
if(ITP.result$test=='1pop'){
if(length(ITP.result$mu)==1){
abscissa.mu <- abscissa.new
mu <- rep(ITP.result$mu,1000)
}else{
abscissa.mu <- seq(abscissa.range[1],abscissa.range[2],length.out=length(ITP.result$mu))
mu <- ITP.result$mu
}
lines(abscissa.mu,mu,col='gray')
}
# Case 3: B-spline basis — axes expressed on the data's own abscissa,
# and the significant intervals are shaded on the data panel as well.
}else if(ITP.result$basis=='B-spline'){
min.ascissa <- abscissa.range[1]-(abscissa.range[2]-abscissa.range[1])/2
max.ascissa <- abscissa.range[2]+(abscissa.range[2]-abscissa.range[1])/2
p <- dim(ITP.result$heatmap.matrix)[1]
ordinata.grafico <- seq(abscissa.range[1],abscissa.range[2],length.out=p) - abscissa.range[1]
colori=rainbow(nlevel,start=0.15,end=0.67)
colori <- colori[length(colori):1]
#dev.new()
#layout(rbind(c(1,1,1,1,1,1,1,1,2),c(1,1,1,1,1,1,1,1,2),c(3,3,3,3,3,3,3,3,0),c(4,4,4,4,4,4,4,4,0)))
layout(rbind(1:2,c(3,0),c(4,0)),widths=c(8,1),heights=c(2,1,1))
#1: heatmap
par(mar=c(4.1, 4.1, 3, .2),cex.main=1.5,cex.lab=1.1,las=0)
matrice.quad <- ITP.result$heatmap.matrix[,(p+1):(3*p)]
ascissa.quad <- seq(abscissa.range[1],abscissa.range[2],length.out=p*2)
image(ascissa.quad,ordinata.grafico,t(matrice.quad[p:1,]),col=colori,ylab='Interval length',main='p-value heatmap',xlab='Abscissa',zlim=c(0,1),asp=1)
min.plot <- par("usr")[1]
max.plot <- par("usr")[2]
#2: legend
par(mar=c(4.1, 1, 3, 3),las=1)
image(1,seq(0,1,length.out=nlevel)-0.025*seq(0,1,length.out=nlevel)+0.025*seq(1,0,length.out=nlevel),t(as.matrix(seq(0,1,length.out=nlevel))),col=colori,xaxt='n',yaxt='n',xlab='',ylab='')
axis(4,at=seq(0,1,0.2),padj=0.4)
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
#3: corrected p values
par(mar=c(4.1, 4.1, 3, .2),las=0)
abscissa.pval <- seq(abscissa.range[1],abscissa.range[2],length.out=p)
plot(abscissa.pval,ITP.result$corrected.pval,pch=16,ylim=c(0,1),xlim=c(min.plot,max.plot),main='Corrected p-values',ylab='p-value',xlab='Component',xaxs='i')
difference <- which(ITP.result$corrected.pval<alpha)
if(length(difference) >0){
for(j in 1:length(difference)){
# rectangle width = one grid step on the p-value abscissa
min.rect <- abscissa.pval[difference[j]] - (abscissa.pval[2]-abscissa.pval[1])/2
max.rect <- min.rect + (abscissa.pval[2]-abscissa.pval[1])
rect(min.rect, par("usr")[3], max.rect, par("usr")[4], col = 'gray90',density=-2,border = NA)
}
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
}
for(j in 0:10){
abline(h=j/10,col='lightgray',lty="dotted")
}
points(abscissa.pval,ITP.result$corrected.pval,pch=16)
#3: functional data
abscissa.new <- seq(abscissa.range[1],abscissa.range[2],length.out=dim(ITP.result$data.eval)[2])
matplot(abscissa.new,t(ITP.result$data.eval),col=ITP.result$labels,type='l',xlim=c(min.plot,max.plot),main='Functional data',xlab='Abscissa',ylab='Value',xaxs='i')
# repeat the significance shading on the data panel, then redraw the curves
if(length(difference) >0){
for(j in 1:length(difference)){
min.rect <- abscissa.pval[difference[j]] - (abscissa.pval[2]-abscissa.pval[1])/2
max.rect <- min.rect + (abscissa.pval[2]-abscissa.pval[1])
rect(min.rect, par("usr")[3], max.rect, par("usr")[4], col = 'gray90',density=-2,border = NA)
}
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = NULL,border='black')
}
matplot(abscissa.new,t(ITP.result$data.eval),col=ITP.result$labels,type='l',add=TRUE)
# one-population test: overlay the tested mean function
if(ITP.result$test=='1pop'){
if(length(ITP.result$mu)==1){
abscissa.mu <- abscissa.new
mu <- rep(ITP.result$mu,1000)
}else{
abscissa.mu <- seq(abscissa.range[1],abscissa.range[2],length.out=length(ITP.result$mu))
mu <- ITP.result$mu
}
lines(abscissa.mu,mu,col='blue')
}
}
}
|
4499f2816db016b41f310e2cd9c062ce95e1c351
|
5d690f159266b2c0f163e26fcfb9f9e17a0dc541
|
/windfarmGA/man/getRects.Rd
|
02661709ecccb30867b1a98629130ec0e7e54842
|
[] |
no_license
|
albrizre/spatstat.revdep
|
3a83ab87085895712d7109c813dcc8acb55493e9
|
b6fc1e73985b0b7ed57d21cbebb9ca4627183108
|
refs/heads/main
| 2023-03-05T14:47:16.628700
| 2021-02-20T01:05:54
| 2021-02-20T01:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 433
|
rd
|
getRects.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{getRects}
\alias{getRects}
\title{Deprecated use \code{\link[windfarmGA]{get_grids}} instead.}
\usage{
getRects(trimtonOut, Grid)
}
\arguments{
\item{trimtonOut}{Input matrix with binary values.}
\item{Grid}{Grid of the considered area}
}
\description{
getRects is replaced by \code{\link[windfarmGA]{get_grids}}.
}
\keyword{internal}
|
c41735324946b1bd50f7081ea1e425c857d133ca
|
1486b7de41ac7a9882ecd92722d886802d49f9aa
|
/man/drag.Rd
|
b6ba2e7853e656d141b06dfaeafb6e77e5a6e96e
|
[] |
no_license
|
richardsc/ocedata
|
4f155f7ad72ac90a42c6003fb316c5e41b7e5a8b
|
ad804a151f3d50ea41df6f7cfd401e0446ddac6a
|
refs/heads/master
| 2021-01-01T17:58:48.937286
| 2017-07-24T16:20:25
| 2017-07-24T16:20:25
| 98,209,705
| 0
| 0
| null | 2017-07-24T16:08:07
| 2017-07-24T16:08:07
| null |
UTF-8
|
R
| false
| false
| 1,117
|
rd
|
drag.Rd
|
\name{drag}
\docType{data}
\alias{drag}
\title{Wind drag coefficient}
\description{Wind drag coefficients for wind-profile and eddy-covariance
methods, summarized in Figure 3 of Garratt (1977). The data are stored in
a data frame with four columns: \code{U} is wind speed in m/s; \code{Cd} is
for (neutral) drag coefficient; \code{n} is the number of data measurements
represented by the value; and \code{method} is a factor with two levels:
\code{profile} and \code{eddy}, for two methods of inferring \code{Cd}.}
\usage{data(drag)}
\examples{
\dontrun{
library(ocedata)
data(drag)
par(mar=c(3,3,1,1), mgp=c(2, 0.7, 0))
plot(drag$U, drag$Cd, pch=ifelse(drag$method == "profile", 1, 19),
xlab="U [m/s]", ylab=expression(C[D]),
xlim=c(0, 22), ylim=c(0, 3e-3))
legend("topleft", pch=c(1,19), legend=c("profile method", "eddy covariance method"))
}
}
\source{Data digitized from Figure 3 of Garratt (1977) by Dan Kelley.}
\references{J. R. Garratt, 1977. Review of drag coefficients over oceans and
continents. \emph{Monthly Weather Review}, 105:915-927.}
\keyword{datasets}
|
dec3e96aa70e490a9741e18aeb275b56487b21f8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EGRET/tests/tests_imports.R
|
8633ed813167c94f1e33ccd71c0027e3af790905
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,319
|
r
|
tests_imports.R
|
# Retrieval tests for the EGRET package (NWIS / WQP web services).
# All network-dependent tests are skipped on CRAN.
#
# MODERNISATION: testthat deprecated expect_that(x, is_true()); every such
# call is replaced by the equivalent expect_true(x). No behaviour change.
context("EGRET retrieval tests")

test_that("External Daily tests", {
  testthat::skip_on_cran()
  # expected column names of an EGRET Daily data frame
  DailyNames <- c("Date","Q","Julian","Month","MonthSeq","waterYear",
                  "Day","DecYear","Qualifier","i","LogQ","Q7","Q30")
  Daily <- readNWISDaily('01594440',
                         '00060',
                         '1985-01-01',
                         '1985-03-31')
  expect_true(all(names(Daily) %in% DailyNames))
  expect_is(Daily$Date, 'Date')
  expect_is(Daily$Q, 'numeric')
  # suspended-sediment parameter, without unit conversion
  DailySuspSediment <- readNWISDaily('01594440',
                                     '80154',
                                     '1985-01-01',
                                     '1985-03-31', convert=FALSE)
  expect_is(DailySuspSediment$Date, 'Date')
  expect_is(DailySuspSediment$Q, 'numeric')
})

test_that("External NWIS Sample tests", {
  testthat::skip_on_cran()
  SampleNames <- c("Date","ConcLow","ConcHigh","Uncen","ConcAve","Julian","Month",
                   "Day","DecYear","MonthSeq","waterYear","SinDY","CosDY")
  Sample_01075 <- readNWISSample('01594440',
                                 '01075',
                                 '1985-01-01',
                                 '1985-03-31')
  expect_true(all(names(Sample_01075) %in% SampleNames))
  Sample_All2 <- readNWISSample('05114000',
                                c('00915','00931'),
                                '1985-01-01',
                                '1985-03-31')
  expect_true(all(names(Sample_All2) %in% SampleNames))
  # empty date strings request the full period of record
  Sample_Select <- readNWISSample('05114000',
                                  c('00915','00931'),
                                  '', '')
  expect_true(all(names(Sample_Select) %in% SampleNames))
  expect_is(Sample_Select$Date, 'Date')
  expect_is(Sample_Select$ConcAve, 'numeric')
  # the unrestricted query must return at least as many rows
  expect_true(nrow(Sample_Select) > nrow(Sample_All2))
})

test_that("External WQP Sample tests", {
  testthat::skip_on_cran()
  SampleNames <- c("Date","ConcLow","ConcHigh","Uncen","ConcAve","Julian","Month",
                   "Day","DecYear","MonthSeq","waterYear","SinDY","CosDY")
  Sample_All <- readWQPSample('WIDNR_WQX-10032762','Specific conductance', '', '')
  expect_true(all(names(Sample_All) %in% SampleNames))
})

test_that("External INFO tests", {
  testthat::skip_on_cran()
  requiredColumns <- c("shortName", "paramShortName","constitAbbrev",
                       "drainSqKm","paStart","paLong")
  INFO <- readNWISInfo('05114000','00010',interactive=FALSE)
  expect_true(all(requiredColumns %in% names(INFO)))
  nameToUse <- 'Specific conductance'
  pcodeToUse <- '00095'
  INFO_WQP <- readWQPInfo('USGS-04024315',pcodeToUse,interactive=FALSE)
  expect_true(all(requiredColumns %in% names(INFO_WQP)))
  INFO2 <- readWQPInfo('WIDNR_WQX-10032762',nameToUse,interactive=FALSE)
  expect_true(all(requiredColumns %in% names(INFO2)))
})

test_that("User tests", {
  # reads fixture files shipped with the EGRET package; no network needed
  filePath <- system.file("extdata", package="EGRET")
  fileName <- 'ChoptankRiverFlow.txt'
  ChopData <- readDataFromFile(filePath,fileName, separator="\t")
  expect_equal(ncol(ChopData), 2)
  fileNameDaily <- "ChoptankRiverFlow.txt"
  Daily_user <- readUserDaily(filePath,fileNameDaily,separator="\t",verbose=FALSE)
  DailyNames <- c("Date","Q","Julian","Month","MonthSeq","waterYear",
                  "Day","DecYear","Qualifier","i","LogQ","Q7","Q30")
  expect_true(all(names(Daily_user) %in% DailyNames))
  fileNameSample <- 'ChoptankRiverNitrate.csv'
  Sample_user <- readUserSample(filePath,fileNameSample, separator=";",verbose=FALSE)
  SampleNames <- c("Date","ConcLow","ConcHigh","Uncen","ConcAve","Julian","Month",
                   "Day","DecYear","MonthSeq","waterYear","SinDY","CosDY")
  expect_true(all(names(Sample_user) %in% SampleNames))
})

test_that("processQWData", {
  testthat::skip_on_cran()
  rawWQP <- dataRetrieval::readWQPqw('WIDNR_WQX-10032762','Specific conductance', '2012-01-01', '2012-12-31')
  Sample2 <- processQWData(rawWQP, pCode=FALSE)
  expect_true(all(Sample2[[2]] == ""))
})
|
db15f0ce01090f7454e6dcf7ba8d212ef22ebb6f
|
2be9eed323fde560de24bf4999cffbd3977ac2ee
|
/Appli/Dashboard_hackaton_AAV20/Script.R
|
2a7d7c2088c82ea2c057e5665f3fcc509aea37c9
|
[] |
no_license
|
Reinaldodos/Hackathon_AAV20
|
e2b02614679188697e36ebca12c23c2c1dd502cb
|
c5b3a07aa333f2198d7819925c104e2e1ccae3c2
|
refs/heads/master
| 2023-04-12T22:56:47.541966
| 2021-04-24T15:06:26
| 2021-04-24T15:06:26
| 360,656,988
| 1
| 1
| null | 2021-04-23T18:11:57
| 2021-04-22T19:17:06
|
R
|
UTF-8
|
R
| false
| false
| 997
|
r
|
Script.R
|
# Dashboard data-preparation script: downloads the GEODES indicator extract
# from data.gouv.fr and filters it via filtrer_GEODES() (defined in
# Fonctions.R). The commented blocks below are earlier map-building steps
# (urban-area shapefiles, census population, incidence maps) kept for
# reference.
source("Fonctions.R")
# Bases_urbaines = sf::st_read(dsn = "data/fond_AAV2020_geo20_metro/zMetro.shp")
#
# Depts = sf::st_read(dsn = "data/departements.geojson")
# download the raw GEODES CSV (network access required)
input_GEODES = rio::import(file = "https://www.data.gouv.fr/fr/datasets/r/c2e2e844-9671-4f81-8c81-1b79f7687de3",
format = "csv")
output_GEODES = filtrer_GEODES(input_GEODES = input_GEODES)
# Population = Charger_INSEE(file = "data/recensement.csv")
# Population = rio::import(file = "data/Recensement.csv")
#
# output_urbaines = Incidence_aires_urbaines(input = output_GEODES,
# Population = Population,
# Bases_urbaines = Bases_urbaines)
#
# Carte_low =
# CartO(data = output_urbaines, selon = LOW) +
# labs(subtitle = paste("Hypothèse basse", lubridate::today()))
#
# Carte_high =
# CartO(data = output_urbaines, selon = HIGH) +
# labs(subtitle = paste("Hypothèse haute", lubridate::today()))
|
cbe4c7c41ca1ddb1df1f79e0f58e12f67580721b
|
d810389391683989eaa93d41a88da2c9518bc527
|
/ggplot2_demo/demo.R
|
10c835bbdea27bad72a39dd653f39f44887153a0
|
[
"MIT"
] |
permissive
|
jiachengx/ironmen2020
|
098bc54b1199c591cba771ae7d8641bcddf9ea84
|
3f1dbeba14cf5e77ef54cabdbc21e00e430ca56b
|
refs/heads/master
| 2023-01-12T15:56:33.347439
| 2023-01-07T08:38:18
| 2023-01-07T08:38:18
| 279,243,570
| 1
| 1
|
MIT
| 2023-01-07T08:38:18
| 2020-07-13T08:33:32
|
C#
|
UTF-8
|
R
| false
| false
| 757
|
r
|
demo.R
|
# Benchmark visualisation script: reads demo.csv (expected columns include
# SSD, TPS and RT), keeps rows with TPS > 100, and saves a jittered
# TPS-by-SSD plot annotated with per-SSD mean TPS as demo.png.
name <- "demo.csv"
# split "demo.csv" on the literal dot -> c("demo", "csv")
n <- strsplit(name,'.',fixed = TRUE)[[1]]
fname <- n[1:1]
library(ggplot2)
dt <- read.table(name, header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
# treat the SSD column as a categorical axis
dt$SSD <- factor(dt$SSD)
str(dt)
# keep only measurements above 100 transactions per second
filtertps <- function(y) { subset(y, TPS>100) }
dd=subset(filtertps(dt))
m <- ggplot(dd, aes(x=SSD, y=TPS)) + geom_jitter(size=2, aes(color=SSD)) + labs(x='', y='TPS (Transaction per second)') + guides(color=FALSE)
library(plyr)
# per-SSD means of TPS and RT, rounded to 2 decimals, for the labels
dd.mean <- ddply(dd,'SSD',summarize, TPS = round(mean(TPS), 2), RT = round(mean(RT), 2))
m + geom_text(data=dd.mean, aes(x=SSD, label=TPS)) + theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1,color="black",size="10"))
# write "<basename>.png" next to the script
ggsave(paste(n[1], "png", sep=".") ,width=8, height=5, dpi=400)
|
39525da9f3896bf393b519b4dcc5b893ba68ac00
|
324a3580d9df6967f24fa254b06770f36da138d3
|
/uncovering_ML_families/src/2_family_identification/utils/models.R
|
a6decb842f7a2b494bfc7e10ed07632aea6f824a
|
[] |
no_license
|
rfabra/whos-behind
|
67fed1f9b691d6c130c43f86342d33ee4ed22457
|
ab88c1db2df351a93270c56568ff49da9714cbf6
|
refs/heads/master
| 2020-03-18T14:13:25.366860
| 2019-04-22T10:29:58
| 2019-04-22T10:29:58
| 134,836,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 157
|
r
|
models.R
|
# Identifiers of the model configurations evaluated downstream
# (presumably caret method names plus hyper-parameter suffixes —
# TODO confirm against the training code that consumes this vector).
models <- c("rda", "rf", "c5.0", "svmRadialCost_C2_5", "mlpML", "NB",
            "knn_k5", "glmnet", "simpls_ncomp4", "multinom", "gcvEarth_d3")
|
00c44276ae07bbf18a00eef9e41d6a473bda6cab
|
591559d85e13f636099d98f5f611c64b0db8592f
|
/scripts/Ordination.R
|
b34741f89bb820fb29b5dcec5de7709d874b17bf
|
[] |
no_license
|
eabowman/Bowmanetal-EctomycorrhizalFireStudy-Arizona
|
8be10a0971332a9081ca2f885cebdb8e6a074f68
|
e505be455c331ca191037c37c47e0f26c6b7e27a
|
refs/heads/master
| 2023-01-03T11:14:52.395331
| 2020-10-28T22:42:12
| 2020-10-28T22:42:12
| 107,572,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,559
|
r
|
Ordination.R
|
## Script created by Liz Bowman June 3, 2017
## for analyzing community differences between ranges and burn status of sites
# NOTE(review): this script depends on `dat.dir` and on the vegan package
# (vegdist, metaMDS, betadisper, adonis) being loaded earlier in the project.
#========================================================================================#
# Load data ----
#========================================================================================#
#----------------------------------------------------------------------------------------#
# Load data and clean up: Tree level----
#----------------------------------------------------------------------------------------#
# site-by-species OTU table at 97% similarity, tip abundances
stsp.matrix <- read.csv(paste0(dat.dir,'97%_SitexSpecies_TipAb.csv'), as.is = T)
# << Create distinct tables for each burn history/range >> --
FA.matrix <- stsp.matrix[stsp.matrix$Burn_status == 'burned',]
FU.matrix <- stsp.matrix[stsp.matrix$Burn_status == 'unburned',]
scm.matrix <- stsp.matrix[stsp.matrix$Range == 'santa.catalina',]
pm.matrix <- stsp.matrix[stsp.matrix$Range == 'pinaleno',]
#----------------------------------------------------------------------------------------#
# Create result tables
#----------------------------------------------------------------------------------------#
#--Table of Anosim results
# one row per (index, comparison) combination; statistic columns added later
anosim.res <- c('jaccard.overall','morisita.overall',
'jaccard.p', 'morisita.p',
'jaccard.scm', 'morisita.scm')
anosim.res <- data.frame(anosim.res)
#--Table of PERMANOVA results
permanova.res <- data.frame(c('jaccard','morisita'))
colnames(permanova.res) <- "test"
#========================================================================================#
# Jaccard based dissimilarity index: Overall----
#========================================================================================#
#--remove outliers
jaccard.matrix <- stsp.matrix[!stsp.matrix$Tree %in% c('NF19','NF16'),]
#jaccard.matrix <- stsp.matrix
#--isolate otu data
# columns 1-11 are metadata; OTU counts start at column 12
comm.matrix <- jaccard.matrix[12:length(jaccard.matrix)]
#--remove singletons; comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 4]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1,]
# keep the metadata rows matching the filtered community matrix
jaccard.matrix <- jaccard.matrix[rownames(comm.matrix),]
#--distance matrix using jaccard index
comm.dist.jaccard <- vegdist(comm.matrix, method = 'jaccard', binary = F, na.rm = T)
#--NMDS analysis
jaccard.otu <- metaMDS(comm.dist.jaccard, dist = 'bray', permutations = 999,
try = 100)
jaccard.otu$stress
#--BetaDisper
#--a multivariate analogue of Levene's test for homogeneity of variance
betadisper <- betadisper(comm.dist.jaccard, group = jaccard.matrix$Burn_status)
jaccard.betadisper <- anova(betadisper)
jaccard.betadisper
#--add Betadisper results to table
anosim.res[which(anosim.res$anosim.res =="jaccard.overall"), "F.betadisper"] <-
jaccard.betadisper$`F value`[1]
anosim.res[which(anosim.res$anosim.res =="jaccard.overall"), "df.betadisper.1"] <-
jaccard.betadisper$Df[1]
anosim.res[which(anosim.res$anosim.res =="jaccard.overall"), "df.betadisper.2"] <-
jaccard.betadisper$Df[2]
anosim.res[which(anosim.res$anosim.res =="jaccard.overall"), "p.betadisper"] <-
jaccard.betadisper$`Pr(>F)`[1]
#<< PERMANOVA >>-----------------------------------------------------------------------------
jaccard.adonis <- adonis(comm.dist.jaccard ~ Burn_status * Range, data = jaccard.matrix)
jaccard.adonis
#--Burn f.model, r2, p-value
anosim.res[which(anosim.res$anosim.res == "jaccard.overall"), "F.model.burn"] <-
morisita.adonis.anosim$aov.tab$F.Model[1]
anosim.res[which(anosim.res$anosim.res == "jaccard.overall"), "r2.burn"] <-
morisita.adonis.anosim$aov.tab$R2[1]
anosim.res[which(anosim.res$anosim.res == "jaccard.overall"), "p.burn"] <-
morisita.adonis.anosim$aov.tab$`Pr(>F)`[1]
#--Base R plot
#--format and output NMDS plots to figure folder: overall Jaccard NMDS with points
#--colored by range x burn status and convex hulls per group, plus a standalone
#--legend figure (statements are order-sensitive: jpeg() ... dev.off()).
jpeg(filename = 'figures/NMDS_overall_Jaccard.jpeg',
     width = 700, height = 600,
     quality = 100)
par(mfrow = c(1,1), "mar"=c(6, 5, 5, 3))
#par(mar=c(7.1, 4.1, 4.1, 12.1), xpd=TRUE)
#--Plot NMDS of EM community based on Jaccard index and OTU abundance
#--(empty frame first; points are layered on below)
plot(jaccard.otu, display = "sites", type = "n", cex.lab = 2,
     cex.axis = 1.5, xlab = 'Axis 1', ylab = 'Axis 2')
# color and shape for points: one row per tree, keyed to its burn status and range
color.vec <- data.frame(color = rep(NA, nrow(jaccard.matrix)),
                        shape = rep(NA, nrow(jaccard.matrix)),
                        fire.group = jaccard.matrix$Burn_status,
                        range.group = jaccard.matrix$Range)
# populate color: black/darkgreen = Pinaleno burned/unburned,
# darkgrey/lightgreen = Santa Catalina burned/unburned
color.vec[color.vec$fire.group == 'burned' & color.vec$range.group == 'pinaleno',
          'color'] <- 'black'
color.vec[color.vec$fire.group == 'unburned' & color.vec$range.group == 'pinaleno',
          'color'] <- 'darkgreen'
color.vec[color.vec$fire.group == 'burned' & color.vec$range.group == 'santa.catalina',
          'color'] <- 'darkgrey'
color.vec[color.vec$fire.group == 'unburned' & color.vec$range.group == 'santa.catalina',
          'color'] <- 'lightgreen'
# populate shape: circle = Pinaleno, diamond = Santa Catalina
color.vec[color.vec$range.group == 'pinaleno', 'shape'] <- 19
color.vec[color.vec$range.group == 'santa.catalina', 'shape'] <- 18
#ordipointlabel(jaccard.otu, display = "sites")
#--add points to plot
points(jaccard.otu$points, display = "sites", cex = 3,
       pch = color.vec$shape,
       col = color.vec$color,
       bg = color.vec$color)
#--Ordihull variations by range, burn, and both burn and range
#--(convex hull per fire-history x range combination)
ordihull(jaccard.otu, groups = paste0(color.vec$fire.group, color.vec$range.group),
         draw = 'polygon')
dev.off()
#--Legend is exported as its own figure so it can be placed independently
jpeg(filename = 'figures/Legend.jpeg',
     width = 700, height = 600,
     quality = 100)
plot(NULL ,xaxt='n',yaxt='n',bty='n',ylab='',xlab='', xlim=0:1, ylim=0:1)
legend('topleft',
       legend=c('Santa Catalina Mts. burned',
                'Santa Catalina Mts. unburned',
                'Pinaleno Mts. burned',
                'Pinaleno Mts. unburned'),
       col = c('darkgrey','lightgreen','black','darkgreen'),
       pch = c(18,18,19,19),
       pt.cex = 3, cex = 1.5, bty = 'n')
dev.off()
#<< Pairwise >>-----------------------------------------------------------------------------
# Pairwise Jaccard dissimilarities between all trees (both ranges), compared between
# within- vs between-fire-history pairs. The long-format table is read from a
# pre-built CSV; the commented-out block below documents how it was generated.
jac.overall <- read.csv('data/JaccardOverall_firehistory.csv', as.is = T)
# rownames(stsp.matrix) <- stsp.matrix$Tree
# #--remove outliers
# jaccard.matrix <- stsp.matrix[!stsp.matrix$Tree %in% c('NF19','NF16'),]
#
# #--isolate otu data
# comm.matrix <- jaccard.matrix[12:length(jaccard.matrix)]
#
# #--remove singletons; comment to include singletons
# comm.matrix <- comm.matrix[colSums(comm.matrix) >= 4]
# comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1,]
# jaccard.matrix <- jaccard.matrix[rownames(comm.matrix),]
#
# #--distance matrix using jaccard index
# comm.dist.jaccard <- vegdist(comm.matrix, method = 'jaccard', binary = F, na.rm = T)
# comm.dist.jaccard <- as.data.frame(as.matrix(comm.dist.jaccard))
# comm.dist.jaccard <- read.csv('data_output/JaccardOverall_firehistory_dist.csv', row.names = 1)
# comm.dist.jaccard$tree1 <- rownames(comm.dist.jaccard)
#
# #--Make dataframe
# jac.overall <- gather(comm.dist.jaccard,'tree2', 'dissimilarity', -tree1)
# jac.overall <- jac.overall[!is.na(jac.overall$dissimilarity),]
# #--Add fire history data
# for(i in jac.overall$tree1){
#   for(t in jac.overall$tree2){
#     jac.overall[jac.overall$tree1 == i, 'fire.history.1'] <- jaccard.matrix[jaccard.matrix$Tree == i, 'Burn_status']
#     jac.overall[jac.overall$tree2 == t, 'fire.history.2'] <- jaccard.matrix[jaccard.matrix$Tree == t, 'Burn_status']
#     jac.overall[jac.overall$tree1 == i, 'range.1'] <- jaccard.matrix[jaccard.matrix$Tree == i, 'Range']
#     jac.overall[jac.overall$tree2 == t, 'range.2'] <- jaccard.matrix[jaccard.matrix$Tree == t, 'Range']
#   }
# }
# #--assess if within same or different fire history
# for(i in 1:nrow(jac.overall)){
#   if(jac.overall[i, 'fire.history.1'] == jac.overall[i, 'fire.history.2']){
#     jac.overall[i, 'comp'] <- 'Same'
#   } else{jac.overall[i, 'comp'] <- 'Different'}
# }
# #--assess if within same or different range origin
# for(i in 1:nrow(jac.overall)){
#   if(jac.overall[i, 'range.1'] == jac.overall[i, 'range.2']){
#     jac.overall[i, 'comp.range'] <- 'Same'
#   } else{jac.overall[i, 'comp.range'] <- 'Different'}
# }
#--Remove self-comparisons (dissimilarity = 0) and outliers
jac.overall <- jac.overall[jac.overall$dissimilarity > 0, ]
# same.mor.scm <- mor.scm[mor.scm$comp == 'Same',]
# same.mor.scm <- same.mor.scm[same.mor.scm$dissimilarity > 0.4,]
# diff.mor.scm <- mor.scm[mor.scm$comp == 'Different',]
# diff.mor.scm <- diff.mor.scm[diff.mor.scm$dissimilarity > 0.57,]
# mor.scm.out <- bind_rows(same.mor.scm, diff.mor.scm)
#--Logit transform data (dissimilarities are bounded in (0, 1))
jac.overall$logit.dis <- logit(jac.overall$dissimilarity)
#--wilcox test of dissimilarity by fire-history comparison group
#--NOTE(review): the commented-out builder above creates a column named 'comp',
#--but this test uses 'comp.fire' — presumably the CSV stores it under that
#--name; verify against the file.
wilcox.test(dissimilarity ~ comp.fire, data = jac.overall)

ggplot(jac.overall, mapping = aes(x = comp.fire,
                                  y = logit.dis)) +
  geom_boxplot() +
  xlab('Fire history') +
  ylab('Logit Jaccard dissimilarity') +
  theme_classic()
#========================================================================================#
# Morisita based dissimilarity index: Overall----
#========================================================================================#
# Overall (both ranges) analysis with the Morisita-Horn index; mirrors the Jaccard
# section above, storing results in the "morisita.overall" row of `anosim.res`.

#--remove outlier trees
morisita.matrix <- stsp.matrix[!stsp.matrix$Tree %in% c('NF16','NF19'),]
#morisita.matrix <- stsp.matrix

#--isolate otu data (columns 1-11 hold metadata)
comm.matrix <- morisita.matrix[12:length(morisita.matrix)]

#--remove rare OTUs; comment to include singletons
#--NOTE(review): threshold is >= 2 here but >= 4 in the Jaccard overall section —
#--confirm the difference is intentional
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 2]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1,]
morisita.matrix <- morisita.matrix[rownames(comm.matrix),]   # keep metadata in sync

#--distance matrix using the Morisita-Horn index
comm.dist.morisita <- vegdist(comm.matrix, method = 'horn', binary = F, na.rm = T)

#--NMDS analysis
morisita.otu <- metaMDS(comm.dist.morisita, dist = 'bray', permutations = 999,
                        try = 100)
morisita.otu$stress

#--BetaDisper
#--a multivariate analogue of Levene's test for homogeneity of variance
betadisper <- betadisper(comm.dist.morisita, group = morisita.matrix$Burn_status)
morisita.betadisper <- anova(betadisper)
morisita.betadisper

#--add Betadisper results to table
#--BUG FIX: these assignments previously targeted the "jaccard.overall" row
#--(and a variable named jaccard.betadisper), silently overwriting the Jaccard
#--dispersion results; they belong in "morisita.overall".
anosim.res[which(anosim.res$anosim.res =="morisita.overall"), "F.betadisper"] <-
  morisita.betadisper$`F value`[1]
anosim.res[which(anosim.res$anosim.res =="morisita.overall"), "df.betadisper.1"] <-
  morisita.betadisper$Df[1]
anosim.res[which(anosim.res$anosim.res =="morisita.overall"), "df.betadisper.2"] <-
  morisita.betadisper$Df[2]
anosim.res[which(anosim.res$anosim.res =="morisita.overall"), "p.betadisper"] <-
  morisita.betadisper$`Pr(>F)`[1]

#<< PERMANOVA >>-----------------------------------------------------------------------------
morisita.adonis <- adonis(comm.dist.morisita ~ Burn_status * Range, data = morisita.matrix)
morisita.adonis

#--Burn f.model, r2, p-value
#--BUG FIX: previously read from `morisita.adonis.anosim`, which is never
#--created; the PERMANOVA results live in `morisita.adonis`.
anosim.res[which(anosim.res$anosim.res == "morisita.overall"), "F.model.burn"] <-
  morisita.adonis$aov.tab$F.Model[1]
anosim.res[which(anosim.res$anosim.res == "morisita.overall"), "r2.burn"] <-
  morisita.adonis$aov.tab$R2[1]
anosim.res[which(anosim.res$anosim.res == "morisita.overall"), "p.burn"] <-
  morisita.adonis$aov.tab$`Pr(>F)`[1]
#--Base R plot
#--format and output NMDS plots to figure folder: overall Morisita-Horn NMDS with
#--points colored by range x burn status and convex hulls per group
#--(statements are order-sensitive: jpeg() ... dev.off()).
jpeg(filename = 'figures/NMDS_overall_MorisitaHorn.jpeg',
     width = 700, height = 600,
     quality = 100)
par(mfrow = c(1,1), "mar"=c(6, 5, 5, 3))
#par(mar=c(7.1, 4.1, 4.1, 12.1), xpd=TRUE)
#--Plot NMDS of EM community based on Morisita-Horn index and OTU abundance
#--(empty frame first; points are layered on below)
plot(morisita.otu, display = "sites", type = "n", cex.lab = 2,
     cex.axis = 1.5, xlab = 'Axis 1', ylab = 'Axis 2')
# color and shape for points: one row per tree, keyed to its burn status and range
color.vec <- data.frame(color = rep(NA, nrow(morisita.matrix)),
                        shape = rep(NA, nrow(morisita.matrix)),
                        fire.group = morisita.matrix$Burn_status,
                        range.group = morisita.matrix$Range)
# populate color: black/darkgreen = Pinaleno burned/unburned,
# darkgrey/lightgreen = Santa Catalina burned/unburned
color.vec[color.vec$fire.group == 'burned' & color.vec$range.group == 'pinaleno',
          'color'] <- 'black'
color.vec[color.vec$fire.group == 'unburned' & color.vec$range.group == 'pinaleno',
          'color'] <- 'darkgreen'
color.vec[color.vec$fire.group == 'burned' & color.vec$range.group == 'santa.catalina',
          'color'] <- 'darkgrey'
color.vec[color.vec$fire.group == 'unburned' & color.vec$range.group == 'santa.catalina',
          'color'] <- 'lightgreen'
# populate shape — NOTE(review): here shape encodes burn status (19/18), whereas
# the Jaccard overall plot encodes range with the same symbols; confirm intended.
color.vec[color.vec$fire.group == 'burned', 'shape'] <- 19
color.vec[color.vec$fire.group == 'unburned', 'shape'] <- 18
#ordipointlabel(morisita.otu, display = "sites")
#--add points to plot
points(morisita.otu$points, display = "sites", cex = 3,
       pch = color.vec$shape,
       col = color.vec$color,
       bg = color.vec$color)
# #--Legend (kept out of this figure; a standalone legend figure is produced above)
# legend("topright", inset=c(-1,0),
#        legend=c('Santa Catalina Mts. burned',
#                 'Santa Catalina Mts. unburned',
#                 'Pinaleno Mts. burned',
#                 'Pinaleno Mts. unburned'),
#        col = c('darkgrey','lightgreen','black','darkgreen'),
#        pch = c(19,18,19,18),
#        cex = 1)
#--Ordihull variations by range, burn, and both burn and range
#--(convex hull per fire-history x range combination)
ordihull(morisita.otu, groups = paste0(color.vec$fire.group, color.vec$range.group),
         draw = 'polygon')
dev.off()
#<< Pairwise >>-----------------------------------------------------------------------------
# Pairwise Morisita-Horn dissimilarities between all trees (both ranges), compared
# between within- vs between-fire-history pairs. The long-format table is read from
# a pre-built CSV; the commented-out block below documents how it was generated.
horn.overall <- read.csv('data/MorisitaHornOverall_firehistory.csv', as.is = T)
# rownames(stsp.matrix) <- stsp.matrix$Tree
# #--remove outliers
# morisita.matrix <- stsp.matrix[!stsp.matrix$Tree %in% c('NF19','NF16'),]
#
# #--isolate otu data
# comm.matrix <- morisita.matrix[12:length(morisita.matrix)]
#
# #--remove singletons; comment to include singletons
# comm.matrix <- comm.matrix[colSums(comm.matrix) >= 4]
# comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1,]
# morisita.matrix <- morisita.matrix[rownames(comm.matrix),]
#
# #--distance matrix using morisita-horn index
# comm.dist.horn <- vegdist(comm.matrix, method = 'horn', binary = F, na.rm = T)
# comm.dist.horn <- as.data.frame(as.matrix(comm.dist.horn))
# comm.dist.horn <- read.csv('./data_output/MorisitaHornOverall_firehistory_dist.csv', row.names = 1)
# comm.dist.horn$tree1 <- rownames(comm.dist.horn)
#
# #--Make dataframe
# horn.overall <- gather(comm.dist.horn,'tree2', 'dissimilarity', -tree1)
# horn.overall <- horn.overall[!is.na(horn.overall$dissimilarity),]
#--Add fire history data
# for(i in horn.overall$tree1){
#   for(t in horn.overall$tree2){
#     horn.overall[horn.overall$tree1 == i, 'fire.history.1'] <- morisita.matrix[morisita.matrix$Tree == i, 'Burn_status']
#     horn.overall[horn.overall$tree2 == t, 'fire.history.2'] <- morisita.matrix[morisita.matrix$Tree == t, 'Burn_status']
#     horn.overall[horn.overall$tree1 == i, 'range.1'] <- morisita.matrix[morisita.matrix$Tree == i, 'Range']
#     horn.overall[horn.overall$tree2 == t, 'range.2'] <- morisita.matrix[morisita.matrix$Tree == t, 'Range']
#   }
# }
# #--assess if within same or different fire history
# for(i in 1:nrow(horn.overall)){
#   if(horn.overall[i, 'fire.history.1'] == horn.overall[i, 'fire.history.2']){
#     horn.overall[i, 'comp'] <- 'Same'
#   } else{horn.overall[i, 'comp'] <- 'Different'}
# }
# #--assess if within same or different range origin
# for(i in 1:nrow(horn.overall)){
#   if(horn.overall[i, 'range.1'] == horn.overall[i, 'range.2']){
#     horn.overall[i, 'comp.range'] <- 'Same'
#   } else{horn.overall[i, 'comp.range'] <- 'Different'}
# }
#--logit transform (dissimilarities are bounded in (0, 1))
horn.overall$logit.dis <- logit(horn.overall$dissimilarity)
#--Remove self-comparisons (dissimilarity = 0) and near-1 outliers (logit blows
#--up as dissimilarity approaches 1)
horn.overall <- horn.overall[horn.overall$dissimilarity > 0, ]
horn.overall <- horn.overall[horn.overall$dissimilarity < 0.9997,]
#--t-test of logit dissimilarity by fire-history comparison group
#--(comment previously said "wilcox test" but a t.test is used)
t.test(logit.dis ~ comp.fire, data = horn.overall)

#--BUG FIX: y-axis label previously read 'Jaccard dissimilarity' although the
#--plotted values are logit-transformed Morisita-Horn dissimilarities.
ggplot(horn.overall, mapping = aes(x = comp.fire,
                                   y = logit.dis)) +
  geom_boxplot() +
  xlab('Fire history') +
  ylab('Logit Morisita-Horn dissimilarity') +
  theme_classic()
#========================================================================================#
# Jaccard based dissimilarity index: Pinaleno----
#========================================================================================#
# Pinaleno-only analysis: Jaccard distance -> NMDS figure with ellipses by burn
# status -> betadisper -> ANOSIM; statistics stored in the "jaccard.p" row.

#--Remove outlier, NF16 (and NF19)
pm.matrix <- pm.matrix[!pm.matrix$Tree %in% c('NF19','NF16'),]

#--isolate otu data (columns 1-11 hold metadata)
comm.matrix <- pm.matrix[12:length(pm.matrix)]
#--comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 2]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0
jaccard.matrix <- pm.matrix[row.names(comm.matrix),]   # keep metadata in sync

#--distance matrix using jaccard index (abundance-weighted: binary = F)
comm.dist.jaccard <- vegdist(comm.matrix, method = "jaccard", binary = F, na.rm = T)

#--NMDS analysis
jaccard.otu <- metaMDS(comm.dist.jaccard, dist = "bray", permutations = 999,
                       try = 100, trymax = 1000)
#--Stress (goodness of fit of the 2-D ordination)
jaccard.otu$stress
#--add stress of NMDS to results table
anosim.res[which(anosim.res$anosim.res =="jaccard.p"), "stress.nmds"] <-
  jaccard.otu$stress

#--Base R plot
#--format and output NMDS plots to figure folder (order-sensitive: jpeg()...dev.off())
jpeg(filename = 'figures/NMDS_pinaleno_Jaccard.jpeg',
     width = 700, height = 600,
     quality = 100)
par(mfrow = c(1,1), "mar"=c(6, 5, 5, 3))
#--Plot NMDS of EM community based on Jaccard index and OTU abundance
plot(jaccard.otu, display = "sites", type = "n", cex.lab = 2,
     cex.axis = 1.5, xlab = 'Axis 1', ylab = 'Axis 2')
# colors for points: filled square = burned, open square = unburned (both black)
color.vec <- data.frame(color = rep(NA, nrow(jaccard.matrix)),
                        p.group = jaccard.matrix$Burn_status)
color.vec[color.vec$p.group == 'burned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'unburned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'burned', 'shape'] <- 15
color.vec[color.vec$p.group == 'unburned', 'shape'] <- 0
#ordipointlabel(jaccard.otu, display = "sites")
#--isolate row names for each burn-status group
burned <- row.names(jaccard.matrix[jaccard.matrix$Burn_status == 'burned',])
unburned <- row.names(jaccard.matrix[jaccard.matrix$Burn_status == 'unburned',])
#--add the two groups of points using the rows isolated above
points(jaccard.otu$points[burned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'burned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'burned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'burned',
                      'color'])
points(jaccard.otu$points[unburned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'unburned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'unburned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'unburned',
                      'color'])
legend("bottomleft", legend = c('Burned',
                                'Unburned'), bty = "n",
       col = c('black', 'black'),
       pch = c(15,0),
       pt.bg = c('black','black'), cex = 2)
#--Ellipses (extreme hull kind) by burn status
#ordihull(jaccard.otu, groups = color.vec$p.group)
ordiellipse(jaccard.otu, groups = jaccard.matrix$Burn_status,
            col = c('black', 'black'),
            kind = 'ehull')
#ordihull(jaccard.otu, groups = stsp.matrix$Range) # just mt. range
#--Overlays (environmental vector fit; currently disabled)
# fit <- envfit(jaccard.otu ~ prec, jaccard.matrix)
# fit
# plot(fit, type = 'n')
dev.off()

#--BetaDisper
#--a multivariate analogue of Levene's test for homogeneity of variance
betadisper <- betadisper(comm.dist.jaccard, group = jaccard.matrix$Burn_status)
jaccard.betadisper <- anova(betadisper)
jaccard.betadisper
#--add Betadisper results to table
anosim.res[which(anosim.res$anosim.res =="jaccard.p"), "F.betadisper"] <-
  jaccard.betadisper$`F value`[1]
anosim.res[which(anosim.res$anosim.res =="jaccard.p"), "df.betadisper.1"] <-
  jaccard.betadisper$Df[1]
anosim.res[which(anosim.res$anosim.res =="jaccard.p"), "df.betadisper.2"] <-
  jaccard.betadisper$Df[2]
anosim.res[which(anosim.res$anosim.res =="jaccard.p"), "p.betadisper"] <-
  jaccard.betadisper$`Pr(>F)`[1]

#<< ANOSIM >>-----------------------------------------------------------------------------
jaccard.anosim <- anosim(comm.matrix, grouping = jaccard.matrix$Burn_status,
                         distance = "jaccard")
jaccard.anosim
#--Add results to data frame
anosim.res[which(anosim.res$anosim.res =="jaccard.p"), "r"] <- jaccard.anosim$statistic
anosim.res[which(anosim.res$anosim.res =="jaccard.p"), "p"] <- jaccard.anosim$signif
#<< Pairwise >>-----------------------------------------------------------------------------
# Pairwise Jaccard dissimilarities between Pinaleno trees, compared between
# within-fire-history ('Same') and between-fire-history ('Different') pairs.

#--Remove outlier trees (same exclusions as the ordination above)
pm.matrix <- pm.matrix[!pm.matrix$Tree %in% c('NF19','NF16'),]
rownames(pm.matrix) <- pm.matrix$Tree

#--isolate otu data (columns 1-11 hold metadata)
comm.matrix <- pm.matrix[12:length(pm.matrix)]
#--comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 4]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0
jaccard.matrix <- pm.matrix[row.names(comm.matrix),]

#--full (upper + lower) pairwise distance matrix using jaccard index
comm.dist.jaccard <- vegdist(comm.matrix, method = "jaccard", binary = F, upper = T)
comm.dist.jaccard <- as.data.frame(as.matrix(comm.dist.jaccard))
comm.dist.jaccard$tree1 <- rownames(comm.dist.jaccard)

#--Make long dataframe of tree pairs
jac.pin <- gather(comm.dist.jaccard,'tree2', 'dissimilarity', -tree1)
test <- distinct(jac.pin)  # NOTE(review): apparently unused scratch check — confirm

#--Add fire history for each member of the pair.
#--IMPROVED: vectorized match() lookup replaces the original O(n^2) nested loop,
#--which redundantly re-assigned every row on each inner iteration. `Tree` is
#--unique (it is used as the rowname above), so the result is identical.
jac.pin$fire.history.1 <-
  jaccard.matrix$Burn_status[match(jac.pin$tree1, jaccard.matrix$Tree)]
jac.pin$fire.history.2 <-
  jaccard.matrix$Burn_status[match(jac.pin$tree2, jaccard.matrix$Tree)]

#--assess if within same or different fire history (vectorized ifelse replaces row loop)
jac.pin$comp <- ifelse(jac.pin$fire.history.1 == jac.pin$fire.history.2,
                       'Same', 'Different')

#--Remove self-comparisons (dissimilarity = 0) and low-dissimilarity outliers
jac.pin <- jac.pin[jac.pin$dissimilarity > 0, ]
jac.pin.out <- jac.pin[!jac.pin$dissimilarity < 0.92,]
#jac.pin.out <- jac.pin[jac.pin$comp == 'Different' & jac.pin$dissimilarity < 0.96,]

#--wilcox test of dissimilarity by fire-history comparison group
wilcox.test(dissimilarity ~ comp, data = jac.pin.out)

ggplot(jac.pin.out, mapping = aes(x = comp,
                                  y = dissimilarity)) +
  geom_boxplot() +
  xlab('Fire history') +
  ylab('Jaccard dissimilarity') +
  theme_classic()
#========================================================================================#
# Morisita based dissimilarity index: Pinaleno----
#========================================================================================#
# Pinaleno-only analysis with the Morisita-Horn index; note this reuses the
# pm.matrix already filtered of outliers in the Jaccard Pinaleno section above.

#--isolate site X species matrix only without metadata (columns 1-11)
comm.matrix <- pm.matrix[12:length(pm.matrix)]
#--comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 2]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0
morisita.matrix <- pm.matrix[row.names(comm.matrix),]  # keep metadata in sync

#--distance matrix using the Morisita-Horn index
comm.dist.morisita <- vegdist(comm.matrix, method = "horn", binary = F)

#--NMDS analysis
morisita.otu <- metaMDS(comm.dist.morisita, dist = "bray", permutations = 999,
                        try = 100, trymax = 1000)
#--add stress of NMDS to results table
anosim.res[which(anosim.res$anosim.res =="morisita.p"), "stress.nmds"] <-
  morisita.otu$stress

#--format and output NMDS plots to figure folder (order-sensitive: jpeg()...dev.off())
jpeg(filename = 'figures/NMDS_pinaleno_MorisitaHorn.jpeg',
     width = 700, height = 600,
     quality = 100)
par(mfrow = c(1,1), "mar"=c(6, 5, 5, 3))
#--Plot NMDS of EM community based on Morisita-Horn index and OTU abundance
plot(morisita.otu, display = "sites", type = "n", cex.lab = 2,
     cex.axis = 1.5, xlab = 'Axis 1', ylab = 'Axis 2')
# colors for points: filled square = burned, open square = unburned (both black)
color.vec <- data.frame(color = rep(NA, nrow(morisita.matrix)),
                        p.group = morisita.matrix$Burn_status)
color.vec[color.vec$p.group == 'burned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'unburned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'burned', 'shape'] <- 15
color.vec[color.vec$p.group == 'unburned', 'shape'] <- 0
#ordipointlabel(jaccard.otu, display = "sites")
#--isolate row names for each burn-status group
burned <- row.names(morisita.matrix[morisita.matrix$Burn_status == 'burned',])
unburned <- row.names(morisita.matrix[morisita.matrix$Burn_status == 'unburned',])
#--add the two groups of points using the rows isolated above
points(morisita.otu$points[burned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'burned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'burned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'burned',
                      'color'])
points(morisita.otu$points[unburned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'unburned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'unburned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'unburned',
                      'color'])
legend("bottomleft", legend = c('Burned',
                                'Unburned'), bty = "n",
       col = c('black', 'black'),
       pch = c(15,0),
       pt.bg = c('black','black'), cex = 2)
#--Ellipses (extreme hull kind) by burn status
ordiellipse(morisita.otu, groups = morisita.matrix$Burn_status,
            col = c('black', 'black'),
            kind = 'ehull')
dev.off()

#--BetaDisper
#--a multivariate analogue of Levene's test for homogeneity of variance
betadisper <- betadisper(comm.dist.morisita, group = morisita.matrix$Burn_status)
morisita.betadisper <- anova(betadisper)
morisita.betadisper
#--add Betadisper results to table
anosim.res[which(anosim.res$anosim.res =="morisita.p"), "F.betadisper"] <-
  morisita.betadisper$`F value`[1]
anosim.res[which(anosim.res$anosim.res =="morisita.p"), "df.betadisper.1"] <-
  morisita.betadisper$Df[1]
anosim.res[which(anosim.res$anosim.res =="morisita.p"), "df.betadisper.2"] <-
  morisita.betadisper$Df[2]
anosim.res[which(anosim.res$anosim.res =="morisita.p"), "p.betadisper"] <-
  morisita.betadisper$`Pr(>F)`[1]

#<< ANOSIM >>-----------------------------------------------------------------------------
morisita.anosim <- anosim(comm.matrix, grouping = morisita.matrix$Burn_status,
                          distance = "horn")
morisita.anosim
#--Add results to data frame
anosim.res[which(anosim.res$anosim.res =="morisita.p"), "r"] <- morisita.anosim$statistic
anosim.res[which(anosim.res$anosim.res =="morisita.p"), "p"] <- morisita.anosim$signif
#<< Pairwise >>-----------------------------------------------------------------------------
# Pairwise Morisita-Horn dissimilarities between Pinaleno trees, compared between
# within-fire-history ('Same') and between-fire-history ('Different') pairs.

#--Remove outlier trees (same exclusions as above)
pm.matrix <- pm.matrix[!pm.matrix$Tree %in% c('NF19','NF16'),]
rownames(pm.matrix) <- pm.matrix$Tree

#--isolate otu data (columns 1-11 hold metadata)
comm.matrix <- pm.matrix[12:length(pm.matrix)]
#--comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 4]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0
morisita.matrix <- pm.matrix[row.names(comm.matrix),]

#--full (upper + lower) pairwise distance matrix using the Morisita-Horn index
comm.dist.morisita <- vegdist(comm.matrix, method = "horn", binary = F, upper = T)
comm.dist.morisita <- as.data.frame(as.matrix(comm.dist.morisita))
comm.dist.morisita$tree1 <- rownames(comm.dist.morisita)

#--Make long dataframe of tree pairs
mor.pin <- gather(comm.dist.morisita,'tree2', 'dissimilarity', -tree1)

#--Add fire history for each member of the pair.
#--IMPROVED: vectorized match() lookup replaces the original O(n^2) nested loop,
#--which redundantly re-assigned every row on each inner iteration. `Tree` is
#--unique (it is used as the rowname above), so the result is identical.
mor.pin$fire.history.1 <-
  morisita.matrix$Burn_status[match(mor.pin$tree1, morisita.matrix$Tree)]
mor.pin$fire.history.2 <-
  morisita.matrix$Burn_status[match(mor.pin$tree2, morisita.matrix$Tree)]

#--assess if within same or different fire history (vectorized ifelse replaces row loop)
mor.pin$comp <- ifelse(mor.pin$fire.history.1 == mor.pin$fire.history.2,
                       'Same', 'Different')

#--Remove self-comparisons (dissimilarity = 0) and low-dissimilarity outliers
mor.pin <- mor.pin[mor.pin$dissimilarity > 0, ]
mor.pin.out <- mor.pin[!mor.pin$dissimilarity < 0.85,]
#jac.pin.out <- jac.pin[jac.pin$comp == 'Different' & jac.pin$dissimilarity < 0.96,]

#--wilcox test of dissimilarity by fire-history comparison group
wilcox.test(dissimilarity ~ comp, data = mor.pin.out)

ggplot(mor.pin.out, mapping = aes(x = comp,
                                  y = dissimilarity)) +
  geom_boxplot() +
  xlab('Fire history') +
  ylab('Morisita-Horn dissimilarity') +
  theme_classic()
#========================================================================================#
# Jaccard based dissimilarity index: Santa Catalina----
#========================================================================================#
# Santa Catalina-only analysis: Jaccard distance -> NMDS figure with ellipses by
# burn status -> betadisper -> ANOSIM; statistics stored in the "jaccard.scm" row.

#--Remove outlier
scm.matrix <- scm.matrix[!scm.matrix$Tree == 'LB056',]

#--isolate otu data (columns 1-11 hold metadata)
comm.matrix <- scm.matrix[12:length(scm.matrix)]
#--comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 2]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0
jaccard.matrix <- scm.matrix[row.names(comm.matrix),]  # keep metadata in sync

#--distance matrix using jaccard index
#--NOTE(review): binary = TRUE here (presence/absence) while the other Jaccard
#--sections use binary = F (abundance-weighted) — confirm this is intentional.
comm.dist.jaccard <- vegdist(comm.matrix, method = "jaccard", binary = TRUE)

#--NMDS analysis
jaccard.otu <- metaMDS(comm.dist.jaccard, dist = "bray", permutations = 999,
                       try = 100, trymax = 1000)
#--Stress (goodness of fit of the 2-D ordination)
jaccard.otu$stress
#--add stress of NMDS to results table
anosim.res[which(anosim.res$anosim.res =="jaccard.scm"), "stress.nmds"] <-
  jaccard.otu$stress

#--format and output NMDS plots to figure folder (order-sensitive: jpeg()...dev.off())
jpeg(filename = 'figures/NMDS_SC_Jaccard.jpeg',
     width = 700, height = 600,
     quality = 100)
par(mfrow = c(1,1), "mar"=c(6, 5, 5, 3))
#--Plot NMDS of EM community based on Jaccard index and OTU abundance
plot(jaccard.otu, display = "sites", type = "n", cex.lab = 2,
     cex.axis = 1.5, xlab = 'Axis 1', ylab = 'Axis 2')
# colors for points: filled circle = burned, open circle = unburned (both black)
color.vec <- data.frame(color = rep(NA, nrow(jaccard.matrix)),
                        p.group = jaccard.matrix$Burn_status)
color.vec[color.vec$p.group == 'burned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'unburned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'burned', 'shape'] <- 16
color.vec[color.vec$p.group == 'unburned', 'shape'] <- 1
#ordipointlabel(jaccard.otu, display = "sites")
#--isolate row names for each burn-status group
burned <- row.names(jaccard.matrix[jaccard.matrix$Burn_status == 'burned',])
unburned <- row.names(jaccard.matrix[!jaccard.matrix$Burn_status == 'burned',])
#--add the two groups of points using the rows isolated above
points(jaccard.otu$points[burned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'burned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'burned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'burned',
                      'color'])
points(jaccard.otu$points[unburned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'unburned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'unburned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'unburned',
                      'color'])
legend("bottomleft", legend = c('Burned','Unburned'), bty = "n",
       col = c('black','black'),
       pch = c(16,1),
       pt.bg = c('black','black'), cex = 2)
#--Ellipses (extreme hull kind) by burn status
#ordihull(jaccard.otu, groups = color.vec$p.group)
ordiellipse(jaccard.otu, groups = jaccard.matrix$Burn_status,
            col = c('black','black'),
            kind = 'ehull')
#ordihull(jaccard.otu, groups = stsp.matrix$Range) # just mt. range
#--Overlays (environmental vector fit; currently disabled)
# fit <- envfit(jaccard.otu ~ po4.p.ppm + temp.warmest.quarter +
#                 pH.su, jaccard.matrix)
# fit
# plot(fit, type = 'n')
dev.off()

#--BetaDisper
#--a multivariate analogue of Levene's test for homogeneity of variance
betadisper <- betadisper(comm.dist.jaccard, group = jaccard.matrix$Burn_status)
jaccard.betadisper <- anova(betadisper)
jaccard.betadisper
#--add Betadisper results to table
anosim.res[which(anosim.res$anosim.res =="jaccard.scm"), "F.betadisper"] <-
  jaccard.betadisper$`F value`[1]
anosim.res[which(anosim.res$anosim.res =="jaccard.scm"), "df.betadisper.1"] <-
  jaccard.betadisper$Df[1]
anosim.res[which(anosim.res$anosim.res =="jaccard.scm"), "df.betadisper.2"] <-
  jaccard.betadisper$Df[2]
anosim.res[which(anosim.res$anosim.res =="jaccard.scm"), "p.betadisper"] <-
  jaccard.betadisper$`Pr(>F)`[1]

#<< ANOSIM >>-----------------------------------------------------------------------------
jaccard.anosim <- anosim(comm.matrix, grouping = jaccard.matrix$Burn_status,
                         distance = "jaccard")
jaccard.anosim
#--Add results to data frame
anosim.res[which(anosim.res$anosim.res =="jaccard.scm"), "r"] <- jaccard.anosim$statistic
anosim.res[which(anosim.res$anosim.res =="jaccard.scm"), "p"] <- jaccard.anosim$signif
#<< Pairwise >>-----------------------------------------------------------------------------
# Pairwise Jaccard dissimilarities between Santa Catalina trees, compared between
# within-fire-history ('Same') and between-fire-history ('Different') pairs.
rownames(scm.matrix) <- scm.matrix$Tree

#--isolate otu data (columns 1-11 hold metadata)
comm.matrix <- scm.matrix[12:length(scm.matrix)]
#--comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 4]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0
jaccard.matrix <- scm.matrix[row.names(comm.matrix),]

#--full (upper + lower) pairwise distance matrix using jaccard index
comm.dist.jaccard <- vegdist(comm.matrix, method = "jaccard", binary = F, upper = T)
comm.dist.jaccard <- as.data.frame(as.matrix(comm.dist.jaccard))
comm.dist.jaccard$tree1 <- rownames(comm.dist.jaccard)

#--Make long dataframe of tree pairs
jac.scm <- gather(comm.dist.jaccard,'tree2', 'dissimilarity', -tree1)

#--Add fire history for each member of the pair.
#--IMPROVED: vectorized match() lookup replaces the original O(n^2) nested loop,
#--which redundantly re-assigned every row on each inner iteration. `Tree` is
#--unique (it is used as the rowname above), so the result is identical.
jac.scm$fire.history.1 <-
  jaccard.matrix$Burn_status[match(jac.scm$tree1, jaccard.matrix$Tree)]
jac.scm$fire.history.2 <-
  jaccard.matrix$Burn_status[match(jac.scm$tree2, jaccard.matrix$Tree)]

#--assess if within same or different fire history (vectorized ifelse replaces row loop)
jac.scm$comp <- ifelse(jac.scm$fire.history.1 == jac.scm$fire.history.2,
                       'Same', 'Different')

#--Remove self-comparisons (dissimilarity = 0) and low-dissimilarity outliers
jac.scm <- jac.scm[jac.scm$dissimilarity > 0, ]
jac.scm.out <- jac.scm[!jac.scm$dissimilarity < 0.83,]
#jac.pin.out <- jac.pin[jac.pin$comp == 'Different' & jac.pin$dissimilarity < 0.96,]

#--wilcox test of dissimilarity by fire-history comparison group
wilcox.test(dissimilarity ~ comp, data = jac.scm.out)

ggplot(jac.scm.out, mapping = aes(x = comp,
                                  y = dissimilarity)) +
  geom_boxplot() +
  xlab('Fire history') +
  ylab('Jaccard dissimilarity') +
  theme_classic()
#========================================================================================#
# Morisita based dissimilarity index: Santa Catalina----
#========================================================================================#
# Santa Catalina-only analysis with the Morisita-Horn index; note this reuses the
# scm.matrix already filtered of the outlier in the Jaccard SCM section above.

#--isolate site X species matrix only without metadata (columns 1-11)
comm.matrix <- scm.matrix[12:length(scm.matrix)]
#--comment to include singletons
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 2]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0
morisita.matrix <- scm.matrix[row.names(comm.matrix),]  # keep metadata in sync

#--distance matrix using the Morisita-Horn index
comm.dist.morisita <- vegdist(comm.matrix, method = "horn", binary = F)

#--NMDS analysis
morisita.otu <- metaMDS(comm.dist.morisita, dist = "bray", permutations = 999,
                        try = 100, trymax = 1000)
#--add stress of NMDS to results table
anosim.res[which(anosim.res$anosim.res =="morisita.scm"), "stress.nmds"] <-
  morisita.otu$stress

#--format and output NMDS plots to figure folder (order-sensitive: jpeg()...dev.off())
jpeg(filename = 'figures/NMDS_SC_MorisitaHorn.jpeg',
     width = 700, height = 600,
     quality = 100)
par(mfrow = c(1,1), "mar"=c(6, 5, 5, 3))
#--Plot NMDS of EM community based on Morisita-Horn index and OTU abundance
plot(morisita.otu, display = "sites", type = "n", cex.lab = 2,
     cex.axis = 1.5, xlab = 'Axis 1', ylab = 'Axis 2')
# colors for points: filled circle = burned, open circle = unburned (both black)
color.vec <- data.frame (color = rep (NA, nrow(morisita.matrix)),
                         p.group = morisita.matrix$Burn_status)
color.vec[color.vec$p.group == 'burned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'unburned', 'color'] <- 'black'
color.vec[color.vec$p.group == 'burned', 'shape'] <- 16
color.vec[color.vec$p.group == 'unburned', 'shape'] <- 1
#ordipointlabel(morisita.otu, display = "sites")
#--isolate row names for each burn-status group
burned <- row.names(morisita.matrix[morisita.matrix$Burn_status == 'burned',])
unburned <- row.names(morisita.matrix[!morisita.matrix$Burn_status == 'burned',])
#--add the two groups of points using the rows isolated above
points(morisita.otu$points[burned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'burned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'burned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'burned',
                      'color'])
points(morisita.otu$points[unburned,1:2], display = "sites", cex = 3,
       pch = color.vec[color.vec$p.group == 'unburned',
                       'shape'],
       col = color.vec[color.vec$p.group == 'unburned',
                       'color'],
       bg = color.vec[color.vec$p.group == 'unburned',
                      'color'])
legend("bottomright", legend = c('Burned','Unburned'), bty = "n",
       col = c('black','black'),
       pch = c(16,1),
       pt.bg = c('black','black'), cex = 2)
#--Ellipses (extreme hull kind) by burn status
#ordihull(morisita.otu, groups = color.vec$p.group,
#         col = c('black','green4','black','green4')),
ordiellipse(morisita.otu, groups = morisita.matrix$Burn_status,
            col = c('black','black'),
            kind = 'ehull') # just burn status
#ordihull(morisita.otu, groups = stsp.matrix$Range,
#         col = c('black','green4','black','green4')) # just mt. range
dev.off()

#--BetaDisper
#--a multivariate analogue of Levene's test for homogeneity of variance
betadisper <- betadisper(comm.dist.morisita, group = morisita.matrix$Burn_status)
morisita.betadisper <- anova(betadisper)
morisita.betadisper
#--add Betadisper results to table
anosim.res[which(anosim.res$anosim.res =="morisita.scm"), "F.betadisper"] <-
  morisita.betadisper$`F value`[1]
anosim.res[which(anosim.res$anosim.res =="morisita.scm"), "df.betadisper.1"] <-
  morisita.betadisper$Df[1]
anosim.res[which(anosim.res$anosim.res =="morisita.scm"), "df.betadisper.2"] <-
  morisita.betadisper$Df[2]
anosim.res[which(anosim.res$anosim.res =="morisita.scm"), "p.betadisper"] <-
  morisita.betadisper$`Pr(>F)`[1]
#<< PERMANOVA >>------------------------------------------------------------------------
morisita.adonis.anosim <- adonis(comm.dist.morisita ~ Burn_status,
data = morisita.matrix, permutations = 1000)
morisita.adonis.anosim
#--Burn f.model, r2, p-value
anosim.res[which(anosim.res$anosim.res == "morisita.scm"), "F.model.burn"] <-
morisita.adonis.anosim$aov.tab$F.Model[1]
anosim.res[which(anosim.res$anosim.res == "morisita.scm"), "r2.burn"] <-
morisita.adonis.anosim$aov.tab$R2[1]
anosim.res[which(anosim.res$anosim.res == "morisita.scm"), "p.burn"] <-
morisita.adonis.anosim$aov.tab$`Pr(>F)`[1]
#<< Pairwise >>-----------------------------------------------------------------------------
rownames(scm.matrix) <- scm.matrix$Tree
#--isolate otu data (metadata occupies the first 11 columns)
comm.matrix <- scm.matrix[12:length(scm.matrix)]
#--comment out the next line to include rarer OTUs (keeps OTUs with >= 4 occurrences)
comm.matrix <- comm.matrix[colSums(comm.matrix) >= 4]
comm.matrix <- comm.matrix[rowSums(comm.matrix) > 1, ] # remove rows with sums of 0/1
morisita.matrix <- scm.matrix[row.names(comm.matrix), ]
#--pairwise Morisita-Horn distances (abundance-based, hence binary = FALSE)
comm.dist.morisita <- vegdist(comm.matrix, method = "horn", binary = FALSE, upper = TRUE)
comm.dist.morisita <- as.data.frame(as.matrix(comm.dist.morisita))
comm.dist.morisita$tree1 <- rownames(comm.dist.morisita)
#--long format: one row per ordered tree pair
mor.scm <- gather(comm.dist.morisita, 'tree2', 'dissimilarity', -tree1)
#--Add fire history data for both trees of each pair.
#--BUG FIX: the original looked trees up in 'jaccard.matrix' (the differently
#--filtered matrix from the Jaccard section); the Morisita section must use
#--'morisita.matrix'. The O(n^2) nested for-loops are also replaced by a
#--vectorized match() on the Tree column.
mor.scm$fire.history.1 <-
  morisita.matrix[match(mor.scm$tree1, morisita.matrix$Tree), 'Burn_status']
mor.scm$fire.history.2 <-
  morisita.matrix[match(mor.scm$tree2, morisita.matrix$Tree), 'Burn_status']
#--assess if within same or different fire history (vectorized; NA histories
#--yield NA instead of crashing the row-wise if() of the original)
mor.scm$comp <- ifelse(mor.scm$fire.history.1 == mor.scm$fire.history.2,
                       'Same', 'Different')
#--Remove self-comparisons (dissimilarity = 0) and outliers
mor.scm <- mor.scm[mor.scm$dissimilarity > 0, ]
same.mor.scm <- mor.scm[mor.scm$comp == 'Same',]
same.mor.scm <- same.mor.scm[same.mor.scm$dissimilarity > 0.4,]
diff.mor.scm <- mor.scm[mor.scm$comp == 'Different',]
diff.mor.scm <- diff.mor.scm[diff.mor.scm$dissimilarity > 0.57,]
mor.scm.out <- bind_rows(same.mor.scm, diff.mor.scm)
#--wilcox test: same- vs different-fire-history pair dissimilarity
wilcox.test(dissimilarity ~ comp, data = mor.scm.out)
ggplot(mor.scm.out, mapping = aes(x = comp,
                                  y = dissimilarity)) +
  geom_boxplot() +
  xlab('Fire history') +
  ylab('Morisita-Horn dissimilarity') +
  theme_classic()
|
6257a55b6efbeee141c809d2ab37da892702c068
|
23340bcd0584bee0d1ebcca4b8aab4d7f3d1bed8
|
/R/Bus_workers.R
|
0d6bd59739b563f03ff867bde8068a28fb6c9ac8
|
[] |
no_license
|
dtkaplan/mdsint
|
51565e28f37fb812771ec340a17fdcdbb9852c94
|
570d9edc58fc8e4bbfb11887b3bcae472ae8ba5c
|
refs/heads/master
| 2021-08-22T16:57:31.214289
| 2017-11-30T18:12:07
| 2017-11-30T18:12:07
| 109,734,206
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,612
|
r
|
Bus_workers.R
|
#' Coronary heart disease in London transport workers
#'
#' In the 1950s, epidemiologist Jerry Morris explored the hypothesis that
#' lack of exercise is associated with higher levels of coronary heart disease. One of his
#' first systematic observations of this was the different levels of disease in bus drivers
#' (who sit while they work) and bus conductors (who, in the double-decker London busses, go
#' up and downstairs frequently while collecting tickets). These data are a reverse-engineered version
#' of the summary provided in the main table of Morris's paper.
#'
#' @docType data
#' @name Bus_workers
#' @keywords datasets
#' @usage data(Bus_workers)
#'
#' @source The summary data are in J.N. Morris and P.A.B. Raffle (1954) "Coronary heart disease in transport
#' workers: A progress report" *Brit. J. industr. Med.*, **11**:260-264
#'
#' @format 24937 transport workers in London busses.
#' * `age` -- age of the worker in 1949-1950
#' * `job` -- whether the worker was a bus driver or conductor
#' * `event` -- whether the worker presented with medical symptoms of coronary heart disease
#' * `day3` -- whether the worker survived the first 3 days after the onset of symptoms
#' * `month3` -- whether the worker survived the first 3 months after the onset of symptoms
#' * `year3` -- whether the worker survived the first 3 years after the onset of symptoms.
#' Note that a worker who died in the first three days by necessity failed to survive the first
#' three months or years. Workers who did not have an attack are listed as having
#' survived the three years of follow-up.
#'
NA  # placeholder object; roxygen2 attaches the documentation block above to the Bus_workers dataset
|
6053a4081c42f6dbe611343531419e273ada1cf7
|
cab2b8d274b4e6b92a5af098210ba30daa8eb754
|
/examples/date.R
|
d0efdfffb2f2c8003054634622ff0bebe7d6dddd
|
[] |
no_license
|
olafmersmann/sendmailR
|
8f34a34a63572739212e4147d0bf636fa1783a93
|
ea90ce2e433f34d43bb2025b28a195e7b6951caa
|
refs/heads/main
| 2023-08-31T10:55:11.444257
| 2023-08-10T12:48:01
| 2023-08-10T12:48:01
| 42,791,434
| 9
| 12
| null | 2023-08-10T12:48:02
| 2015-09-19T22:12:09
|
R
|
UTF-8
|
R
| false
| false
| 569
|
r
|
date.R
|
library("sendmailR")
# Format a timestamp as an RFC 2822 (email "Date:") header value.
#
# Args:
#   time: a POSIXct timestamp; defaults to the current time.
# Returns:
#   A string such as "Thu, 02 Jan 2020 03:04:05 -0000".
rfc2822_date <- function(time=Sys.time()) {
  # Day/month names must be in English regardless of the user's locale, so
  # temporarily switch LC_TIME to "C" and restore it when the function exits.
  lc <- Sys.getlocale("LC_TIME")
  on.exit(Sys.setlocale("LC_TIME", lc), add = TRUE)
  Sys.setlocale("LC_TIME", "C")
  # BUG FIX: the original passed 'use.tz = TRUE', which is not an argument of
  # strftime() (the real argument is 'usetz') and was silently swallowed by
  # '...'. The numeric zone "-0000" is already hard-coded in the format
  # string (as RFC 2822 requires), so the argument is simply dropped.
  strftime(time, format="%a, %d %b %Y %H:%M:%S -0000", tz="UTC")
}
#--Message envelope and content; the body mixes plain text with an attached
#--data frame rendered by sendmailR's mime_part()
from <- "sendmailR@p-value.net"
to <- "olafm@statistik.tu-dortmund.de"
subject <- "Hello from R"
body <- list("It works!", mime_part(iris))
#--Explicit RFC 2822 compliant Date header built with rfc2822_date() above
headers <- list(Date=rfc2822_date())
#--Send via the given SMTP relay (network side effect; requires connectivity)
sendmail(from, to, subject, body,
         control=list(smtpServer="mail.statistik.tu-dortmund.de"),
         headers=headers)
|
87e38d35fee2e0a03662a26a383a4559b82473af
|
c3e2451daec7c223e6bca5e8ec5d29ea3efa5c6a
|
/man/fast_rmvnorm_transpose.Rd
|
4f0c74154bc9038ebada75c0be26be95f5198a13
|
[] |
no_license
|
pierrejacob/bayeshscore
|
d0336efc16dd0456ffa2c3f6fbe51aabbcf3f3f8
|
8f148f4074e09de4911d5645a9781c8aa844d38d
|
refs/heads/master
| 2021-09-22T10:56:27.652463
| 2018-09-08T18:21:28
| 2018-09-08T18:21:28
| 63,806,619
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 355
|
rd
|
fast_rmvnorm_transpose.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util_distributions.R
\name{fast_rmvnorm_transpose}
\alias{fast_rmvnorm_transpose}
\title{fast_rmvnorm_transpose}
\usage{
fast_rmvnorm_transpose(nparticles, mean, covariance)
}
\description{
Fast samples from Normal using Rcpp (dimensions row-wise, observations column-wise)
}
|
33b4ef3322be2e1cf819bd64defaee8cb0678d04
|
9463190790003d3cf888a8f5cff0f5a4c0383530
|
/R/matchPattern.BOC.R
|
212c902192c698968f9aa9c337c686d3fa178318
|
[] |
no_license
|
nw328/Biostrings
|
06a9f2d73466c28bdbf7ba90cbaa6002cabbcd75
|
ff7cfd71cd2b388b68234b17577a2ae919ee62f0
|
refs/heads/master
| 2020-12-25T21:13:20.681088
| 2015-08-26T08:55:38
| 2015-08-26T08:55:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,905
|
r
|
matchPattern.BOC.R
|
### =========================================================================
### Preprocessed Subject Strings of type BOC
### ------------------------------------------------------------
### S4 container pairing a DNA subject with precomputed Base Occurrence
### Count (BOC) buffers for one fixed pattern length, so repeated matching
### of same-length patterns can reuse the preprocessing (see initialize()).
### Note that 'base1_code', 'base2_code' and 'base3_code' must be distinct
setClass("BOC_SubjectString",
    representation(
        subject="DNAString", # TODO: support "RNAString" too
        pattern_length="integer", # A single integer e.g. 36
        base1_code="integer", # integer belonging to DNA_BASE_CODES
        base2_code="integer",
        base3_code="integer",
        base4_code="integer", # the 4th base, derived in initialize() as the one not in 'base_letters'
        base1_OCbuffer="SharedRaw", # all buffers must be of length nchar(subject) - pattern_length + 1
        base2_OCbuffer="SharedRaw",
        base3_OCbuffer="SharedRaw",
        pre4buffer="SharedRaw",
        ## The "stats" slot is a named list with the following elements:
        ##   means: vector of 4 doubles
        ##   table1, table2, table3, table4: vectors of (pattern_length + 1) integers
        stats="list"
    )
)
### Typical use:
###   library(BSgenome.Hsapiens.UCSC.hg18)
###   chr1 <- Hsapiens$chr1
###   chr1boc <- new("BOC_SubjectString", chr1, 36, c("A", "C", "G")) # 3-4 seconds on lamb1
### Constructor: validates 'pattern_length' and 'base_letters', allocates the
### four occurrence-count buffers, then delegates the preprocessing proper to
### the C routine "match_BOC_preprocess".
setMethod("initialize", "BOC_SubjectString",
    function(.Object, subject, pattern_length, base_letters)
    {
        .Object@subject <- subject
        # 'pattern_length' must be a single integer in [4, 254]; the 254
        # ceiling presumably reflects counts being stored in raw (one-byte)
        # buffers -- TODO confirm against the C code.
        if (!isSingleNumber(pattern_length))
            stop("'pattern_length' must be a single integer")
        pattern_length <- as.integer(pattern_length)
        if (pattern_length < 4L || 254L < pattern_length)
            stop("'pattern_length' must be >= 4 and <= 254")
        if (pattern_length > nchar(subject))
            stop("'pattern_length' must be <= 'nchar(subject)'")
        .Object@pattern_length <- pattern_length
        # exactly 3 distinct base letters are supplied; the 4th is inferred below
        if (!is.character(base_letters) || length(base_letters) != 3
         || !all(base_letters %in% names(DNA_BASE_CODES)) || any(duplicated(base_letters)))
            stop("'base_letters' must contain 3 distinct DNA base-letters")
        # one count per valid pattern start position in the subject
        buf_length <- nchar(subject) - pattern_length + 1
        code1 <- DNA_BASE_CODES[base_letters[1]]
        code2 <- DNA_BASE_CODES[base_letters[2]]
        code3 <- DNA_BASE_CODES[base_letters[3]]
        code4 <- DNA_BASE_CODES[setdiff(names(DNA_BASE_CODES), base_letters)]
        buf1 <- SharedRaw(buf_length)
        buf2 <- SharedRaw(buf_length)
        buf3 <- SharedRaw(buf_length)
        pre4buf <- SharedRaw(buf_length)
        # The C routine fills the four buffers in place and returns the
        # "stats" list described in the class definition.
        stats <- .Call2("match_BOC_preprocess",
                        subject@shared@xp, subject@offset, subject@length,
                        pattern_length,
                        code1, code2, code3, code4,
                        buf1@shared@xp, buf2@shared@xp, buf3@shared@xp, pre4buf@shared@xp,
                        PACKAGE="Biostrings")
        .Object@base1_code <- code1
        .Object@base2_code <- code2
        .Object@base3_code <- code3
        .Object@base4_code <- code4
        .Object@base1_OCbuffer <- buf1
        .Object@base2_OCbuffer <- buf2
        .Object@base3_OCbuffer <- buf3
        .Object@pre4buffer <- pre4buf
        .Object@stats <- stats
        .Object
    }
)
### Typical use:
###   Biostrings:::plotBOC(chr1boc, "Human chr1")
### Diagnostic plot: one curve per base, built from the per-base count tables
### in x@stats (one value per possible occurrence count 0..pattern_length).
plotBOC <- function(x, main)
{
    XLAB <- "Base Occurrence Count"
    TITLE <- paste(XLAB, " for the ", x@pattern_length, "-mers in ", main, sep="")
    YLAB <- paste("number of ", x@pattern_length, "-mers", sep="")
    # shared y-limit across the 4 per-base tables
    YMAX <- max(c(x@stats$table1, x@stats$table2, x@stats$table3, x@stats$table4))
    plot.new()
    plot.window(c(0, x@pattern_length), c(0, YMAX))
    title(TITLE, xlab=XLAB, ylab=YLAB, col.main="black")
    axis(1)
    axis(2)
    axis(4)
    # bases 1-2 as red/blue lines, bases 3-4 as green/black points
    par(fg="red")
    lines(0:x@pattern_length, x@stats$table1, type="l")
    par(fg="blue")
    lines(0:x@pattern_length, x@stats$table2, type="l")
    par(fg="green")
    lines(0:x@pattern_length, x@stats$table3, type="p")
    par(fg="black")
    lines(0:x@pattern_length, x@stats$table4, type="p")
    # legend labels come from the names of the base-code slots (e.g. "A-count")
    LEGEND <- c(names(x@base1_code), names(x@base2_code), names(x@base3_code), names(x@base4_code))
    LEGEND <- paste(LEGEND, "-count", sep="")
    COLORS <- c("red", "blue", "green", "black")
    legend(x=x@pattern_length, y=YMAX, legend=LEGEND, col=COLORS, lty="solid", lwd=3, xjust=1.0, yjust=1.0)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### The "matchPattern" method for BOC_SubjectString objects.
###
### Typical use:
###   library(BSgenome.Hsapiens.UCSC.hg18)
###   chr1 <- Hsapiens$chr1
###   chr1boc <- new("BOC_SubjectString", chr1, 36, c("A", "C", "G"))
###   matchPattern(chr1[1:36], chr1boc)
###
### Performance (kind of disappointing so far):
###   for (i in 41:60) matchPattern(chr1[1:36+1000000*i], chr1boc)
###   #--> takes about 11 seconds on lamb1
###   for (i in 41:60) matchPattern(chr1[1:36+1000000*i], chr1, algo="boyer-moore")
###   #--> takes about 7.6 seconds on lamb1
###   for (i in 41:60) matchPattern(chr1[1:36+1000000*i], chr1, algo="naive-exact")
###   #--> takes about 111 seconds on lamb1
###
### Thin wrapper around the C routine "match_BOC_exact": forwards the raw
### pattern/subject buffers plus the precomputed occurrence-count buffers and
### stats. 'count.only' is passed straight through to the C code (presumably
### returning just the number of hits when TRUE -- confirm in the C source).
.match.BOC.exact <- function(pattern, boc_subject, count.only)
{
    .Call2("match_BOC_exact",
           pattern@shared@xp, pattern@offset, pattern@length,
           boc_subject@subject@shared@xp, boc_subject@subject@offset, boc_subject@subject@length,
           boc_subject@base1_code,
           boc_subject@base2_code,
           boc_subject@base3_code,
           boc_subject@base4_code,
           boc_subject@base1_OCbuffer@shared@xp,
           boc_subject@base2_OCbuffer@shared@xp,
           boc_subject@base3_OCbuffer@shared@xp,
           boc_subject@pre4buffer@shared@xp,
           boc_subject@stats, count.only,
           PACKAGE="Biostrings")
}
# Inexact (max.mismatch > 0) matching against a BOC index is not implemented;
# the matchPattern method below reaches this branch only when max.mismatch != 0.
.match.BOC.inexact <- function(pattern, boc_subject, max.mismatch, count.only)
{
    stop("NOT READY YET!")
}
### Dispatch on 'subject' (see signature of generic).
### 'algorithm' is ignored.
### Validates that the pattern length matches the length the subject was
### preprocessed for and that only fixed (non-ambiguity) matching is asked,
### then delegates to the exact or (unimplemented) inexact C backend.
setMethod("matchPattern", "BOC_SubjectString",
    function(pattern, subject,
             max.mismatch=0, min.mismatch=0, with.indels=FALSE, fixed=TRUE,
             algorithm="auto")
    {
        pattern <- normargPattern(pattern, subject@subject)
        pattern_length <- nchar(pattern)
        # the BOC index is only valid for the single pattern length it was built for
        if (pattern_length != subject@pattern_length)
            stop("subject was preprocessed for patterns of length ", subject@pattern_length)
        max.mismatch <- normargMaxMismatch(max.mismatch)
        if (!missing(fixed)) {
            fixed <- normargFixed(fixed, subject@subject)
            if (!all(fixed))
                stop("only 'fixed=TRUE' can be used with a subject of class ", class(subject))
        }
        if (max.mismatch == 0)
            C_ans <- .match.BOC.exact(pattern, subject, count.only=FALSE)
        else
            C_ans <- .match.BOC.inexact(pattern, subject, max.mismatch, count.only=FALSE)
        # wrap the C-level result (start/width of the hits) into an XStringViews
        unsafe.newXStringViews(subject@subject, start(C_ans), width(C_ans))
    }
)
|
eb8e129e3afadc8d68ee6e1706f470e0043b3442
|
5cc55721b809c67ecd354e88a2c93b72d67f7c86
|
/06-NeuralNetworks.R
|
b09458d5e65f4191c19ff91389b5c35b1e141b03
|
[] |
no_license
|
mairatayana/R_MachineLearning
|
71eace52457b258ac6e459d070e581bec0565a82
|
c6649f2fef89db52c25cf76d1ebbe10d64275393
|
refs/heads/master
| 2020-03-21T03:53:17.968396
| 2018-09-23T02:53:22
| 2018-09-23T02:53:22
| 138,079,395
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 9,426
|
r
|
06-NeuralNetworks.R
|
# R script for the Neural Network exercise
# Student: Maira Tayana Menegas - PPG-CC - special student
##################################################################################
# Data preparation
# Import data from a CSV file (expects Conj_data.csv in the working directory)
cliente <- read.csv(file="Conj_data.csv", header=TRUE, sep=",")
View(cliente)
# a) Remove the identification attribute (first column)
clientePart = cliente[-c(1)]
View(clientePart)
# b) Recode the qualitative attributes as quantitative.
#    Column names and factor levels are Portuguese data values and must not
#    be translated: Tamanho_família = family size (Grande/Pequena = large/small),
#    Comprou_antes = bought before (Sim/Não = yes/no).
clientePart$Tamanho_família = gsub("Grande", "1", clientePart$Tamanho_família)
clientePart$Tamanho_família = gsub("Pequena", "0", clientePart$Tamanho_família)
clientePart$Comprou_antes = gsub("Sim", "1", clientePart$Comprou_antes)
clientePart$Comprou_antes = gsub("Não", "0", clientePart$Comprou_antes)
# c) Min-max normalization: linearly rescale a numeric vector to [0, 1].
#    (Binary attributes need no normalization, but rescaling them is a no-op.)
#    A constant vector yields NaN, since max(x) - min(x) is 0.
doNorm <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Coerce every column to numeric (the gsub() recoding above produced strings)
clientePart$Idade <- as.numeric(clientePart$Idade)
clientePart$Rendimento <- as.numeric(clientePart$Rendimento)
clientePart$Tamanho_família <- as.numeric(clientePart$Tamanho_família)
clientePart$Comprou_antes <- as.numeric(clientePart$Comprou_antes)
clientePart$Comprou_anunciado <- as.numeric(clientePart$Comprou_anunciado)
# Apply min-max normalization column-wise
clientePartNorm <- as.data.frame(lapply(clientePart, doNorm))
View(clientePartNorm)
# d) Train a Perceptron network with learning rate eta = 0.3
# Threshold (Heaviside) activation: 1 for net input >= 0, otherwise 0.
# Expects a scalar; a 1x1 matrix produced by %*% also works.
f_threshold <- function(u) {
  if (u < 0) {
    return(0)
  }
  1
}
# Training a perceptron with the classic delta rule.
#
# Args:
#   dataset: data frame whose last column is the 0/1 class and whose
#            remaining columns are the input features.
#   eta: learning rate.
#   epsilon: stop once the mean squared error over one full pass <= epsilon.
#   max.cycles: safety cap on training epochs. The default Inf keeps the
#               original behavior; note training never terminates on data
#               that is not linearly separable.
# Returns:
#   list(weights, ncycle) where weights[1] is the bias weight.
# Side effects: verbose cat() trace of every example and weight update.
perceptron.train <- function(dataset, eta=0.1, epsilon=1e-2, max.cycles=Inf) {
  classId <- ncol(dataset)
  # BUG FIX (latent): the original used 'dataset[, 1:classId-1]', which parses
  # as '(1:classId) - 1' = 0:(classId-1) and only worked because R silently
  # drops the 0 index. seq_len() states the intent directly.
  X <- dataset[, seq_len(classId - 1)]
  Y <- dataset[, classId]
  # initializing weights: one per feature plus the bias, uniform in [-0.5, 0.5]
  weights <- runif(min=-0.5, max=0.5, n=ncol(dataset))
  ncycle <- 0
  squaredError <- 2*epsilon  # anything > epsilon so the loop starts
  while (squaredError > epsilon && ncycle < max.cycles) {
    squaredError <- 0
    for (i in seq_len(nrow(X))) {
      example <- c(1, as.numeric(X[i,]))  # prepend 1 for the bias weight
      class <- Y[i]
      cat("example = ", as.numeric(X[i,]), "\n")
      cat("class = ", class, "\n")
      # computing predicted y from the thresholded net input
      u <- example %*% weights
      cat("u = ", u, "\n")
      y <- f_threshold(u)
      cat("predicted = ", y, "\n")
      # Error; only misclassified examples move the weights (delta rule)
      Error <- class - y
      squaredError <- squaredError + Error^2
      if (abs(Error) > 0) {
        # update weights
        cat("updating weights...\n")
        delta <- eta * Error * example
        weights <- weights + delta
      }
    }
    squaredError <- squaredError / nrow(X)
    cat("Squared error = ", squaredError, "\n")
    ncycle <- ncycle + 1
  }
  ret <- list()
  ret$weights <- weights
  ret$ncycle <- ncycle
  cat("Final weights = ", weights, "\n")
  cat("Number of cycles = ", ncycle, "\n")
  return(ret)
}
# predicting with a perceptron
# Prints one line per example: the feature values followed by the 0/1
# prediction made with the trained weights in 'model$weights'. Called for
# its printed output only.
perceptron.run <- function(X, model) {
  cat("#example\tprediction\n")
  for (row in 1:nrow(X)) {
    features <- as.numeric(X[row,])
    x_aug <- c(1, features)           # bias input first, as in training
    net <- x_aug %*% model$weights    # net activation
    pred <- f_threshold(net)
    cat(features, "\t", pred, "\n")
  }
}
# training
cat("\n\n>> Training:\n")
model = perceptron.train(clientePartNorm, 0.3)
model
# Recorded result of one run (weights depend on the random initialization):
#predicted = 0
#Squared error = 0
#Final weights = -0.6836698 2.002674 1.04126 -0.8598017 0.6839734
#Number of cycles = 30
# testing: re-predict the training set itself (all columns except the class)
# NOTE(review): '1:ncol(clientePartNorm)-1' parses as '(1:n)-1' and only
# works because R drops the resulting 0 index.
cat("\n\n>> Testing:\n")
perceptron.run(clientePartNorm[,1:ncol(clientePartNorm)-1], model)
# e) Then, using the obtained weights, classify the following potential
#    new customers with this neural network:
# e.1) Maria is 55 years old, has an income of 9500 reais and a small family.
#      She has also bought other products from the company before.
#      Income/age are normalized against the min/max of the original data.
mariaRendNorm = (9500 - min(cliente[,2]))/(max(cliente[,2]) - min(cliente[,2]))
mariaIdadeNorm = (55 - min(cliente[,3]))/(max(cliente[,3]) - min(cliente[,3]))
maria <- data.frame(Rendimento=(mariaRendNorm),Idade=(mariaIdadeNorm),Tamanho_família=(0),Comprou_antes=(1))
perceptron.run(maria, model)
# Answer -> yes
#example prediction
#0.9450549 1 0 1 	 1
# e.2) João is a 23-year-old with an income of 900 reais and a small family.
#      He has bought products from the company before.
joaoRendNorm = (900 - min(cliente[,2]))/(max(cliente[,2]) - min(cliente[,2]))
joaoIdadeNorm = (23 - min(cliente[,3]))/(max(cliente[,3]) - min(cliente[,3]))
joao <- data.frame(Rendimento=(joaoRendNorm),Idade=(joaoIdadeNorm),Tamanho_família=(0),Comprou_antes=(1))
perceptron.run(joao, model)
# Answer -> yes
#example prediction
#0 0.03030303 0 1 	 1
#f) Redo the exercise with an MLP, varying the number of neurons in the
#   hidden layer between the values 2 and 5.
require(nnet)
require(tseriesChaos)
# Logistic (sigmoid) activation function; vectorized over 'net'.
f <- function(net) {
  1 / (1 + exp(-net))
}
# Derivative of the logistic sigmoid, using the identity
# f'(net) = f(net) * (1 - f(net)); vectorized over 'net'.
df_dnet <- function(net) {
  s <- f(net)
  s * (1 - s)
}
# Hard-threshold activation: 1 where net > 0, 0 where net <= 0 (vectorized).
# Copies 'net' first so names/dims of the input are preserved on the result.
fhard <- function(net) {
  out <- net
  out[net > 0] <- 1
  out[!(net > 0)] <- 0
  out
}
# Build an untrained 1-hidden-layer MLP.
#
# Each weight matrix has one row per unit and one column per incoming unit
# plus a trailing bias column, initialized uniformly in [-0.5, 0.5]. The
# activation 'my.f' and its derivative 'my.df_dnet' are stored on the model
# for use by mlp.forward().
mlp.architecture <- function(input.length = 2,
                             hidden.length = 2,
                             output.length = 1,
                             my.f = f,
                             my.df_dnet = df_dnet) {
  # helper: n_units x (n_inputs + 1) matrix of small random weights
  random_weights <- function(n_units, n_inputs) {
    matrix(runif(min = -0.5, max = 0.5, n = n_units * (n_inputs + 1)),
           nrow = n_units, ncol = n_inputs + 1)
  }
  list(
    layers = list(
      hidden = random_weights(hidden.length, input.length),
      output = random_weights(output.length, hidden.length)
    ),
    f = my.f,
    df_dnet = my.df_dnet
  )
}
# Forward pass of the 1-hidden-layer MLP for a single input pattern 'x_p'.
#
# Each layer appends a constant 1 to its input so the last weight column
# acts as the bias. model$f / model$df_dnet are applied per unit, so they
# need not be vectorized. Returns a list with the hidden/output activations
# (f_*) and the activation derivatives (df_*) needed by backpropagation.
mlp.forward <- function(model, x_p) {
  # --- hidden layer ---
  aug_input <- c(x_p, 1)
  net_h <- as.vector(model$layers$hidden %*% aug_input)
  f_h <- numeric(length(net_h))
  df_h <- numeric(length(net_h))
  for (j in seq_along(net_h)) {
    f_h[j] <- model$f(net_h[j])
    df_h[j] <- model$df_dnet(net_h[j])
  }
  # --- output layer (fed by the hidden activations) ---
  aug_hidden <- c(f_h, 1)
  net_o <- as.vector(model$layers$output %*% aug_hidden)
  f_o <- numeric(length(net_o))
  df_o <- numeric(length(net_o))
  for (k in seq_along(net_o)) {
    f_o[k] <- model$f(net_o[k])
    df_o[k] <- model$df_dnet(net_o[k])
  }
  list(f_h_net_h_pj = f_h,
       f_o_net_o_pk = f_o,
       df_h_dnet_h_pj = df_h,
       df_o_dnet_o_pk = df_o)
}
# Online (per-example) backpropagation for the MLP from mlp.architecture().
#
# Args:
#   X, Y: training inputs / targets, one example per row.
#   model: as returned by mlp.architecture().
#   eta: learning rate.
#   threshold: stop once the average squared error over an epoch drops below it.
#   msg: print the per-epoch average squared error?
#   ncycle: hard cap on the number of epochs.
# Returns: the model with updated weight matrices.
mlp.backpropagation <- function(X, Y, model, eta=0.1, threshold=1e-2,msg = TRUE,ncycle=1000) {
  sqerror = 2 * threshold  # anything > threshold so the loop starts
  n = 0
  while ((sqerror > threshold)&&(n<ncycle)) {
    sqerror = 0
    # Train on each example of X given the target classes in Y
    for (p in 1:nrow(X)) {
      x_p = X[p,]
      y_p = Y[p,]
      fwd = mlp.forward(model, x_p)
      o_p = fwd$f_o_net_o_pk
      delta_p = y_p - o_p
      # Accumulate the squared error for this epoch
      sqerror = sqerror + sum(delta_p^2)
      # Delta of the output layer for this pattern
      delta_o_p = delta_p * fwd$df_o_dnet_o_pk
      # Delta of the hidden layer (the bias column is excluded when
      # propagating the output deltas back)
      w.length = ncol(model$layers$output)-1
      delta_h_p = fwd$df_h_dnet_h_pj *
        (delta_o_p %*% model$layers$output[,1:w.length])
      # Update the output layer (outer product of deltas and hidden activations + bias)
      model$layers$output = model$layers$output +
        eta * (as.vector(delta_o_p) %*% t(c(as.vector(fwd$f_h_net_h_pj), 1)))
      # Update the hidden layer (outer product of deltas and inputs + bias)
      model$layers$hidden = model$layers$hidden +
        eta * (as.vector(delta_h_p) %*% t(c(x_p, 1)))
    }
    sqerror = sqerror / nrow(X)
    if(msg){
      cat("Average squared error: ", sqerror, "\n")
    }
    n = n+1
  }
  return (model)
}
# Train an MLP on the customer data and classify the two new customers.
#
# Relies on globals defined earlier in the script: clientePartNorm (training
# data), maria and joao (the cases to classify), plus nnet::class.ind for the
# one-of-n class encoding. Returns a 2x2 matrix of output activations
# (row 1 = Maria, row 2 = Joao). Judging by the recorded results below, O2
# appears to correspond to class 1 ("yes") -- confirm against class.ind's
# column ordering.
# NOTE(review): 'train.size' is accepted but never used (no train/test split).
client.test <- function(hidden.length = 2, eta=0.1,
                        train.size=0.75, threshold=1e-2) {
  dataset = clientePartNorm
  features = dataset[,1:4]
  classes = class.ind(dataset[,5])# one-to-n encoding of the class
  dataset = cbind(features, classes)
  X = matrix(ts(dataset[,1:4]), ncol=4)
  Y = matrix(ts(dataset[,5:6]), ncol=2)
  # 4 inputs, 'hidden.length' hidden units, 2 output units
  model = mlp.architecture(4, hidden.length, 2)
  trained.model = mlp.backpropagation(X, Y, model, eta, threshold,msg = TRUE)
  res = NULL
  datasetMaria = maria
  X.testMaria = matrix(ts(datasetMaria[,1:4]), ncol=4)
  datasetJoao = joao
  X.testJoao = matrix(ts(datasetJoao[,1:4]), ncol=4)
  # forward-pass each new customer through the trained network
  x_p_maria = X.testMaria[1,]
  fwd_maria = mlp.forward(trained.model, x_p_maria)
  res = rbind(res, c(fwd_maria$f_o_net_o_pk))
  x_p_joao = X.testJoao[1,]
  fwd_joao = mlp.forward(trained.model, x_p_joao)
  res = rbind(res, c(fwd_joao$f_o_net_o_pk))
  colnames(res) = c("O1", "O2")
  return (res)
}
# MLP with 2 hidden neurons
res2 <- client.test(hidden.length = 2)
res2
# Recorded answer for 2 hidden neurons:
#         O1        O2
#[1,] 0.02744331 0.9709200 -> Maria: yes
#[2,] 0.18067932 0.8210844 -> Joao: yes
# MLP with 5 hidden neurons
res5 <- client.test(hidden.length = 5)
res5
# Recorded answer for 5 hidden neurons:
#          O1        O2
#[1,] 0.006104979 0.9932054 -> Maria: yes
#[2,] 0.194616103 0.7989700 -> Joao: yes
|
d875d2c0e48ed614235c78beb3a7269d3c404a6d
|
540e796f202a5a826b902f7e39f038bdc8cd9d52
|
/man/ME.log.Rd
|
3f99a2d6ae799993356e8b19efec4066ee3b13a5
|
[] |
no_license
|
cran/LFDREmpiricalBayes
|
ff01e7ab0d0939b61764ab902cc4dd86e53357d9
|
e97e0722b036954f32bf2c09f92b3c5e60194afb
|
refs/heads/master
| 2021-05-05T13:33:38.750725
| 2017-09-27T08:08:46
| 2017-09-27T08:08:46
| 104,997,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,032
|
rd
|
ME.log.Rd
|
\name{ME.log}
\alias{ME.log}
\title{Provides Reliable LFDR Estimates by Selecting an Appropriate Reference Class}
\description{
%Based on Section 2.2, Algorithm 1, and Section 5.2.
%section 3: "BME" Bayes estimator of ME estimates of LFDR
Selects an appropriate reference class given two reference classes. Considers two vectors of LFDR estimates computed based on the two alternative reference classes and provides a vector of more reliable LFDR estimates.
}
\usage{
ME.log(stat,lfdr.C,p0.C,ncp.C,p0.S,ncp.S,a=3,lower.p0=0,upper.p0=1,
lower.ncp=0.1,upper.ncp=50,length.p0=200,length.ncp=200)
}
\arguments{
\item{stat}{A vector of test statistics for SNPs falling inside the
intersection of the separate and combined reference classes.}
\item{lfdr.C}{A data frame of local false discovery rates of features falling
inside the intersection of the separate and combined reference classes,
computed based on all features belonging to the combined reference class.}
\item{p0.C}{An estimate of the proportion of the non-associated features
applied to the combined reference class. }
\item{ncp.C}{A non-centrality parameter applied to the combined reference
class.}
\item{p0.S}{An estimate of the proportion of the non-associated features
applied to the separate reference class. }
\item{ncp.S}{A non-centrality parameter applied to the separate reference
class.}
\item{a}{Parameter used to define the grade of evidence that alternative
reference class should be favoured instead of the separate reference class.}
\item{lower.p0}{The lower bound for the proportion of unassociated features.}
\item{upper.p0}{The upper bound for the proportion of unassociated features.}
\item{lower.ncp}{The lower bound for the non-centrality parameter.}
\item{upper.ncp}{The upper bound for the non-centrality parameter.}
\item{length.p0}{Desired length of a sequence vector containing the proportion
of non-associated features. The sequences starts from \code{lower.p0} and ends
at \code{upper.p0}.}
\item{length.ncp}{Desired length of a sequence vector containing
non-centrality parameters. The sequences starts from \code{lower.ncp} and ends
up at \code{upper.ncp}.}
}
\details{The terms `separate' and `combined' reference classes are used when one
sample population (reference class) is a subset of the other. Detailed
explanations can be found in the vignette
"Using the LFDREmpiricalBayes Package".
}
\value{
Returns the following values:
\item{p0.hat}{estimate of the proportion of non-associated SNPs}
\item{ncp.hat}{estimate of the non-centrality parameter}
\item{LFDR.hat}{A vector of LFDR estimates for features falling inside the
intersection of the separate and combined reference classes, obtained by the
Maximum Entropy method.}
}
\note{
The vector of test statistics: \code{stat}, need to be positive values in order
for the function \code{ME.log} to work.
}
\references{
Karimnezhad, A. and Bickel, D. R. (2016). Incorporating prior knowledge about
genetic variants into the analysis of genetic association data: An empirical
Bayes approach. Working paper. Retrieved from
\url{http://hdl.handle.net/10393/34889}
}
\author{
Code: Ali Karimnezhad. \cr
Documentation: Johnary Kim and Anna Akpawu. \cr
}
\examples{
#import the function ``lfdr.mle'' from package``LFDR.MLE''
library(LFDR.MLE)
#Consider a separate reference class and a combined reference class below:
n.SNPs.S<-3 # number of SNPs in the separate reference class
n.SNPs.Sc<-2 # number of SNPs in the complement of the separate reference class.
#Create a series of test statistics for SNPs in the separate reference class.
stat.Small<-rchisq(n.SNPs.S,df=1,ncp=0)
ncp.Sc<-10
#Create a series of test statistics for SNPs in the combined reference class.
stat.Big<-c(stat.Small,rchisq(n.SNPs.Sc,df=1,ncp=ncp.Sc))
#Using lfdr.mle, a series of arguments are used.
dFUN=dchisq; lower.ncp = .1; upper.ncp = 50;
lower.p0 = 0; upper.p0 = 1;
#Maximum Likelihood estimates for the LFDRs of SNPs in the created
# separate reference class.
#Separate reference class.
estimates.S<-lfdr.mle(x=stat.Small,dFUN=dchisq,df=1,lower.ncp = lower.ncp,
upper.ncp = upper.ncp)
LFDR.Small<-estimates.S$LFDR
p0.Small<-estimates.S$p0.hat
ncp.Small<-estimates.S$ncp.hat
# Maximum Likelihood estimates for the LFDRs of SNPs in the created combined
# reference class.
estimates.C<-lfdr.mle(x=stat.Big,dFUN=dchisq,df=1,lower.ncp = lower.ncp,
upper.ncp = upper.ncp)
LFDR.Big<-estimates.C$LFDR
p0.Big<-estimates.C$p0.hat
ncp.Big<-estimates.C$ncp.hat
#The first three values of the combined reference class correspond to the
#separate reference class in this example
LFDR.SBig<-LFDR.Big[1:3]
LFDR.ME<-ME.log(stat=stat.Small,lfdr.C=LFDR.SBig,p0.C=p0.Big,ncp.C=ncp.Big,
p0.S=p0.Small,ncp.S=ncp.Small)
LFDR.ME
}
\keyword{Maximum Entropy Method}
\keyword{LFDR Estimation}
\keyword{Maximum Likelihood}
\keyword{Reference Class}
\keyword{Separate Reference Class}
\keyword{Combined Reference Class}
\keyword{Maximum Likelihood Estimation}
|
18deaf34c8512b9577bf05f203b061d39f5dfe85
|
3190820fe0ce8a4ea87701cc4bc7b5467b210d26
|
/man/group_ops.Rd
|
4745b5074aa84ee86367c28a6bd78aa6298b6f3e
|
[
"Apache-2.0"
] |
permissive
|
jonathom/openeo-r-client
|
17ebed91a27b31089aa203c7e96206f0ae6fab0a
|
fccbe70cf53c3d5b4dfabeba28aac5faab04f6c0
|
refs/heads/master
| 2023-03-19T05:13:36.709711
| 2020-09-30T14:35:37
| 2020-09-30T14:35:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,589
|
rd
|
group_ops.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ops.R
\name{group_ops}
\alias{group_ops}
\alias{sum.ProcessNode}
\alias{sum.ProcessGraphParameter}
\alias{sum.list}
\alias{prod.ProcessNode}
\alias{prod.ProcessGraphParameter}
\alias{prod.list}
\alias{min.ProcessNode}
\alias{min.ProcessGraphParameter}
\alias{min.list}
\alias{max.ProcessNode}
\alias{max.ProcessGraphParameter}
\alias{max.list}
\alias{range.ProcessNode}
\alias{range.ProcessGraphParameter}
\alias{range.list}
\alias{mean.ProcessNode}
\alias{mean.ProcessGraphParameter}
\alias{mean.list}
\alias{median.ProcessNode}
\alias{median.ProcessGraphParameter}
\alias{median.list}
\alias{sd.ProcessNode}
\alias{sd.ProcessGraphParameter}
\alias{sd.list}
\alias{var.ProcessNode}
\alias{var.ProcessGraphParameter}
\alias{var.list}
\title{Group operator wrappers}
\usage{
\method{sum}{ProcessNode}(..., na.rm = FALSE)
\method{sum}{ProcessGraphParameter}(..., na.rm = FALSE)
\method{sum}{list}(..., na.rm = FALSE)
\method{prod}{ProcessNode}(..., na.rm = TRUE)
\method{prod}{ProcessGraphParameter}(..., na.rm = TRUE)
\method{prod}{list}(..., na.rm = TRUE)
\method{min}{ProcessNode}(..., na.rm = TRUE)
\method{min}{ProcessGraphParameter}(..., na.rm = TRUE)
\method{min}{list}(..., na.rm = TRUE)
\method{max}{ProcessNode}(..., na.rm = TRUE)
\method{max}{ProcessGraphParameter}(..., na.rm = TRUE)
\method{max}{list}(..., na.rm = TRUE)
\method{range}{ProcessNode}(..., na.rm = TRUE)
\method{range}{ProcessGraphParameter}(..., na.rm = TRUE)
\method{range}{list}(..., na.rm = TRUE)
\method{mean}{ProcessNode}(x, na.rm = FALSE, ...)
\method{mean}{ProcessGraphParameter}(x, na.rm = FALSE, ...)
\method{mean}{list}(x, na.rm = FALSE, ...)
\method{median}{ProcessNode}(x, na.rm = FALSE, ...)
\method{median}{ProcessGraphParameter}(x, na.rm = FALSE, ...)
\method{median}{list}(x, na.rm = FALSE, ...)
sd.ProcessNode(x, na.rm = FALSE)
sd.ProcessGraphParameter(x, na.rm = FALSE)
sd.list(x, na.rm = FALSE)
var.ProcessNode(x, na.rm = FALSE)
var.ProcessGraphParameter(x, na.rm = FALSE)
var.list(x, na.rm = FALSE)
}
\arguments{
\item{...}{multiple arguments that start with a \code{ProcessNode} or a \code{ProcessGraphParameter}}
\item{na.rm}{logical to determine if NA values shall be removed in the calculation or if they are propagated}
\item{x}{a vector or list of values that are mixed or consist fully of \code{ProcessNode},
\code{ProcessGraphParameter} or numerical values}
}
\value{
\code{ProcessNode}
}
\description{
R's mathematical group primitives that are mapped into openEO processes.
}
|
6cdd5468bcb6ee75b391fb09721dff547db85e2c
|
37c78408b006646d60f6c9cd30d98bb6397f9372
|
/Cleaning UK HLS.R
|
a9388da8b602136dc099125a12ee015761830965
|
[] |
no_license
|
markagreen/ukhls_biomedical_fellowship
|
69bdf9bea05bf06d7e87210825219f2c9dcdae0e
|
6be07f4aa147d4aea041931a89912f1ff8663277
|
refs/heads/master
| 2020-04-16T03:35:53.329211
| 2020-01-13T15:41:08
| 2020-01-13T15:41:08
| 165,237,102
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,528
|
r
|
Cleaning UK HLS.R
|
#####################################
#### Selecting Outcome Variables ####
#####################################
# Open data (make sure have unencrypted folder first)
# NOTE(review): the absolute /Volumes/... paths are machine-specific (macOS
# external-drive mount); adjust before running elsewhere.
library(data.table)
#usw1 <- fread("/Volumes/Fellowship/UKDA-6614-tab/tab/us_w1/a_indresp.tab") # Wave 1 (left commented out in the original -- presumably not needed)
usw2 <- fread("/Volumes/Fellowship/UKDA-6614-tab/tab/us_w2/b_indresp.tab") # Wave 2
usw3 <- fread("/Volumes/Fellowship/UKDA-6614-tab/tab/us_w3/c_indresp.tab") # Wave 3
usw4 <- fread("/Volumes/Fellowship/UKDA-6614-tab/tab/us_w4/d_indresp.tab") # Wave 4
usw5 <- fread("/Volumes/Fellowship/UKDA-6614-tab/tab/us_w5/e_indresp.tab") # Wave 5
usw6 <- fread("/Volumes/Fellowship/UKDA-6614-tab/tab/us_w6/f_indresp.tab") # Wave 6
usw7 <- fread("/Volumes/Fellowship/UKDA-6614-tab/tab/us_w7/g_indresp.tab") # Wave 7
# sf1/scsf1 (latter is self-completed questionnaire) - General health: categorical; 1 excellent, 2 very good, 3 good, 4 fair, 5 poor.
# sf1 wave 2 only - no proxy variable
# We will take the proxy value if an individual has missing data
# health - Long-standing illness or disability: binary; 1 yes, 2 no
# Fine as is
# disdif - Type of impariment or disability: categorical; 1 mentioned, 0 not mentioned. 13 variables
# if health == 1
# 1 mobility, 2 lifting/moving objects, 3 manual dexterity, 4 continence, 5 hearing, 6 sight, 7 communication/speech problems,
# 8 memory/ability to concentrate/learn/understand, 9 recognising in physical danger, 10 physical coordination e.g. balance,
# 11 difficulties with own personal care, 12 other problem/disability, 96 none of these
# Maybe take only one of these?
# hcondn - New health conditions since last interview: categorical; 1-17 conditions, 96 none [each individual variables]
# If interviews at previous wave
# 1 Asthma, 2 Arthritis, 3 Congestive heart failure, 4 Coronary heart disease, 5 Angina, 6 Heart attack or myocardial infarction
# 7 Stroke, 8 Emphysema, 9 Hyperthyroidism or an over-active thyroid, 10 Hypothyroidism or an under-active thyroid
# 11 Chronic bronchitis, 12 Any kind of liver condition, 13 Cancer or malignancy, 14 Diabetes, 15 Epilepsy
# 16 High blood pressure, 17 Clinical depression, 96 None of these
## Create single joined file of required variables ##
# Wave 2 #
# Edit variables
usw2$b_srh <- NA # Combine self-rated health measures (NB some people rate their health very differently across these measures for waves 2-5)
usw2$b_srh <- usw2$b_sf1 # Take
usw2$b_srh[usw2$b_srh <= 0] <- usw2$b_scsf1[usw2$b_srh <= 0] # Take proxy if missing
# Subset variables required for analysis
vars <- c("pidp", "b_srh", "b_health", "b_disdif1", "b_disdif2", "b_disdif3", "b_disdif4", "b_disdif5", "b_disdif6", "b_disdif7", "b_disdif8",
"b_disdif9", "b_disdif10", "b_disdif11", "b_disdif12", "b_disdif96", "b_hcondn1", "b_hcondn2", "b_hcondn3", "b_hcondn4",
"b_hcondn5", "b_hcondn6", "b_hcondn7", "b_hcondn8", "b_hcondn9", "b_hcondn10", "b_hcondn11", "b_hcondn12", "b_hcondn13",
"b_hcondn14", "b_hcondn15", "b_hcondn16", "b_hcondn17", "b_hcondn96") # Select variables
outcome_dt <- usw2[, vars, with=FALSE] # Subset variables
rm(usw2)
# Wave 3 #
# Edit variables
usw3$c_srh <- NA # Combine self-rated health measures (NB some people rate their health very differently across these measures for waves 2-5)
usw3$c_srh <- usw3$c_sf1 # Take
usw3$c_srh[usw3$c_srh <= 0] <- usw3$c_scsf1[usw3$c_srh <= 0] # Take proxy if missing
# Subset variables required for analysis
vars <- c("pidp", "c_srh", "c_health", "c_disdif1", "c_disdif2", "c_disdif3", "c_disdif4", "c_disdif5", "c_disdif6", "c_disdif7", "c_disdif8",
"c_disdif9", "c_disdif10", "c_disdif11", "c_disdif12", "c_disdif96", "c_hcondn1", "c_hcondn2", "c_hcondn3", "c_hcondn4",
"c_hcondn5", "c_hcondn6", "c_hcondn7", "c_hcondn8", "c_hcondn9", "c_hcondn10", "c_hcondn11", "c_hcondn12", "c_hcondn13",
"c_hcondn14", "c_hcondn15", "c_hcondn16", "c_hcondn17", "c_hcondn96") # Select variables
hold <- usw3[, vars, with=FALSE] # Subset variables
outcome_dt <- merge(outcome_dt, hold, by = "pidp", all.x = TRUE) # Join to only individuals with data in wave 2 (i.e. follow up data only - do not need new participants)
rm(usw3)
# Wave 4 #
# Edit variables
usw4$d_srh <- NA # Combine self-rated health measures (NB some people rate their health very differently across these measures for waves 2-5)
usw4$d_srh <- usw4$d_sf1 # Take
usw4$d_srh[usw4$d_srh <= 0] <- usw4$d_scsf1[usw4$d_srh <= 0] # Take proxy if missing
# Subset variables required for analysis
vars <- c("pidp", "d_srh", "d_health", "d_disdif1", "d_disdif2", "d_disdif3", "d_disdif4", "d_disdif5", "d_disdif6", "d_disdif7", "d_disdif8",
"d_disdif9", "d_disdif10", "d_disdif11", "d_disdif12", "d_disdif96", "d_hcondn1", "d_hcondn2", "d_hcondn3", "d_hcondn4",
"d_hcondn5", "d_hcondn6", "d_hcondn7", "d_hcondn8", "d_hcondn9", "d_hcondn10", "d_hcondn11", "d_hcondn12", "d_hcondn13",
"d_hcondn14", "d_hcondn15", "d_hcondn16", "d_hcondn17", "d_hcondn96") # Select variables
hold <- usw4[, vars, with=FALSE] # Subset variables
outcome_dt <- merge(outcome_dt, hold, by = "pidp", all.x = TRUE) # Join to only individuals with data in wave 2 (i.e. follow up data only - do not need new participants)
rm(usw4)
# Wave 5 #
# Edit variables
usw5$e_srh <- NA # Combine self-rated health measures (NB some people rate their health very differently across these measures for waves 2-5)
usw5$e_srh <- usw5$e_sf1 # Take
usw5$e_srh[usw5$e_srh <= 0] <- usw5$e_scsf1[usw5$e_srh <= 0] # Take proxy if missing
# Subset variables required for analysis
vars <- c("pidp", "e_srh", "e_health", "e_disdif1", "e_disdif2", "e_disdif3", "e_disdif4", "e_disdif5", "e_disdif6", "e_disdif7", "e_disdif8",
"e_disdif9", "e_disdif10", "e_disdif11", "e_disdif12", "e_disdif96", "e_hcondn1", "e_hcondn2", "e_hcondn3", "e_hcondn4",
"e_hcondn5", "e_hcondn6", "e_hcondn7", "e_hcondn8", "e_hcondn9", "e_hcondn10", "e_hcondn11", "e_hcondn12", "e_hcondn13",
"e_hcondn14", "e_hcondn15", "e_hcondn16", "e_hcondn17", "e_hcondn96") # Select variables
hold <- usw5[, vars, with=FALSE] # Subset variables
outcome_dt <- merge(outcome_dt, hold, by = "pidp", all.x = TRUE) # Join to only individuals with data in wave 2 (i.e. follow up data only - do not need new participants)
rm(usw5)
# Wave 6 #
# Edit variables
usw6$f_srh <- NA # Combine self-rated health measures
usw6$f_srh <- usw6$f_sf1 # Take
usw6$f_srh[usw6$f_srh <= 0] <- usw6$f_scsf1[usw6$f_srh <= 0] # Take proxy if missing
# Subset variables required for analysis
vars <- c("pidp", "f_srh", "f_health", "f_disdif1", "f_disdif2", "f_disdif3", "f_disdif4", "f_disdif5", "f_disdif6", "f_disdif7", "f_disdif8",
"f_disdif9", "f_disdif10", "f_disdif11", "f_disdif12", "f_disdif96", "f_hcondn1", "f_hcondn2", "f_hcondn3", "f_hcondn4",
"f_hcondn5", "f_hcondn6", "f_hcondn7", "f_hcondn8", "f_hcondn9", "f_hcondn10", "f_hcondn11", "f_hcondn12", "f_hcondn13",
"f_hcondn14", "f_hcondn15", "f_hcondn16", "f_hcondn17", "f_hcondn96") # Select variables
hold <- usw6[, vars, with=FALSE] # Subset variables
outcome_dt <- merge(outcome_dt, hold, by = "pidp", all.x = TRUE) # Join to only individuals with data in wave 2 (i.e. follow up data only - do not need new participants)
rm(usw6)
# Wave 7 #
# Edit variables
usw7$g_srh <- NA # Combine self-rated health measures
usw7$g_srh <- usw7$g_sf1 # Take
usw7$g_srh[usw7$g_srh <= 0] <- usw7$g_scsf1[usw7$g_srh <= 0] # Take proxy if missing
# Subset variables required for analysis
vars <- c("pidp", "g_srh", "g_health", "g_disdif1", "g_disdif2", "g_disdif3", "g_disdif4", "g_disdif5", "g_disdif6", "g_disdif7", "g_disdif8",
"g_disdif9", "g_disdif10", "g_disdif11", "g_disdif12", "g_disdif96", "g_hcondn1", "g_hcondn2", "g_hcondn3", "g_hcondn4",
"g_hcondn5", "g_hcondn6", "g_hcondn7", "g_hcondn8", "g_hcondn9", "g_hcondn10", "g_hcondn11", "g_hcondn12", "g_hcondn13",
"g_hcondn14", "g_hcondn15", "g_hcondn16", "g_hcondn17", "g_hcondn96") # Select variables
hold <- usw7[, vars, with=FALSE] # Subset variables
outcome_dt <- merge(outcome_dt, hold, by = "pidp", all.x = TRUE) # Join to only individuals with data in wave 2 (i.e. follow up data only - do not need new participants)
rm(usw7, hold, vars)
gc()
write.csv(outcome_dt, "/Volumes/Fellowship/Cleaned data/outcomes.tab") # Save
outcome_dt <- fread("/Volumes/Fellowship/Cleaned data/outcomes.tab")
## Create outcome variables ##
# Long-standing illness or disability #
# Count number of new cases per wave (nb. 1 yes, 2 no)
outcome_dt$health <- NA # Create blank variable
outcome_dt$health[outcome_dt$b_health == 2] <- 0 # Only those with good health at baseline
outcome_dt$health[outcome_dt$b_health == 2 & outcome_dt$c_health == 1] <- 1 # Identify new case cases
table(outcome_dt$health) # Print
outcome_dt$health[outcome_dt$b_health == 2 & outcome_dt$d_health == 1] <- 1 # Identify new case cases
table(outcome_dt$health) # Print
outcome_dt$health[outcome_dt$b_health == 2 & outcome_dt$e_health == 1] <- 1 # Identify new case cases
table(outcome_dt$health) # Print
outcome_dt$health[outcome_dt$b_health == 2 & outcome_dt$f_health == 1] <- 1 # Identify new case cases
table(outcome_dt$health) # Print
outcome_dt$health[outcome_dt$b_health == 2 & outcome_dt$g_health == 1] <- 1 # Identify new case cases
table(outcome_dt$health) # Print
# Self-rated health #
# Identify individuals who rated their health as fair (4) or poor (5) #
outcome_dt$poorhealth <- NA # Create blank variable
outcome_dt$poorhealth[outcome_dt$b_srh >= 1 & outcome_dt$b_srh <= 3] <- 0 # Only those with good health at baseline
outcome_dt$poorhealth[(outcome_dt$b_srh >= 1 & outcome_dt$b_srh <= 3) & outcome_dt$c_srh >=4] <- 1 # Identify new case cases
table(outcome_dt$poorhealth) # Print
outcome_dt$poorhealth[(outcome_dt$b_srh >= 1 & outcome_dt$b_srh <= 3) & outcome_dt$d_srh >=4] <- 1 # Identify new case cases
table(outcome_dt$poorhealth) # Print
outcome_dt$poorhealth[(outcome_dt$b_srh >= 1 & outcome_dt$b_srh <= 3) & outcome_dt$e_srh >=4] <- 1 # Identify new case cases
table(outcome_dt$poorhealth) # Print
outcome_dt$poorhealth[(outcome_dt$b_srh >= 1 & outcome_dt$b_srh <= 3) & outcome_dt$f_srh >=4] <- 1 # Identify new case cases
table(outcome_dt$poorhealth) # Print
outcome_dt$poorhealth[(outcome_dt$b_srh >= 1 & outcome_dt$b_srh <= 3) & outcome_dt$g_srh >=4] <- 1 # Identify new case cases
table(outcome_dt$poorhealth) # Print
# Health Conditions #
# Prevalence at baseline:
# 1 Asthma = 725, 2 Arthritis = 1715, 3 Congestive heart failure = 85, 4 Coronary heart disease = 214, 5 Angina = 285,
# 6 Heart attack or myocardial infarction = 198, 7 Stroke = 210, 8 Emphysema = 127, 9 Hyperthyroidism or an over-active thyroid = 100,
# 10 Hypothyroidism or an under-active thyroid = 265, 11 Chronic bronchitis = 177, 12 Any kind of liver condition = 207,
# 13 Cancer or malignancy = 459, 14 Diabetes = 711, 15 Epilepsy = 75, 16 High blood pressure = 2069, 17 Clinical depression = 813,
# 96 None of these = 39725
# Maybe Arthritis and Hypertension are feasible - let's just look at these and see if potentially useful
# Arthritis
outcome_dt$arthritis <- NA # Create blank variable
outcome_dt$arthritis[outcome_dt$b_hcondn2 == 0] <- 0 # Only those with good health at baseline
outcome_dt$arthritis[outcome_dt$b_hcondn2 == 0 & outcome_dt$c_hcondn2 == 1] <- 1 # Identify new case cases
table(outcome_dt$arthritis) # Print
outcome_dt$arthritis[outcome_dt$b_hcondn2 == 0 & outcome_dt$d_hcondn2 == 1] <- 1 # Identify new case cases
table(outcome_dt$arthritis) # Print
outcome_dt$arthritis[outcome_dt$b_hcondn2 == 0 & outcome_dt$e_hcondn2 == 1] <- 1 # Identify new case cases
table(outcome_dt$arthritis) # Print
outcome_dt$arthritis[outcome_dt$b_hcondn2 == 0 & outcome_dt$f_hcondn2 == 1] <- 1 # Identify new case cases
table(outcome_dt$arthritis) # Print
outcome_dt$arthritis[outcome_dt$b_hcondn2 == 0 & outcome_dt$g_hcondn2 == 1] <- 1 # Identify new case cases
table(outcome_dt$arthritis) # Print
# Hypertension
outcome_dt$hypertension <- NA # Create blank variable
outcome_dt$hypertension[outcome_dt$b_hcondn16 == 0] <- 0 # Only those with good health at baseline
outcome_dt$hypertension[outcome_dt$b_hcondn16 == 0 & outcome_dt$c_hcondn16 == 1] <- 1 # Identify new case cases
table(outcome_dt$hypertension) # Print
outcome_dt$hypertension[outcome_dt$b_hcondn16 == 0 & outcome_dt$d_hcondn16 == 1] <- 1 # Identify new case cases
table(outcome_dt$hypertension) # Print
outcome_dt$hypertension[outcome_dt$b_hcondn16 == 0 & outcome_dt$e_hcondn16 == 1] <- 1 # Identify new case cases
table(outcome_dt$hypertension) # Print
outcome_dt$hypertension[outcome_dt$b_hcondn16 == 0 & outcome_dt$f_hcondn16 == 1] <- 1 # Identify new case cases
table(outcome_dt$hypertension) # Print
outcome_dt$hypertension[outcome_dt$b_hcondn16 == 0 & outcome_dt$g_hcondn16 == 1] <- 1 # Identify new case cases
table(outcome_dt$hypertension) # Print
# Type of impariment or disability #
# Prevalence at baseline (if health == 1)
# 1 mobility = 6364, 2 lifting/moving objects = 6968, 3 manual dexterity = 2628, 4 continence = 1728, 5 hearing = 1510,
# 6 sight = 1443, 7 communication/speech problems = 544, 8 memory/ability to concentrate/learn/understand = 2202,
# 9 recognising in physical danger = 402, 10 physical coordination e.g. balance = 2339, 11 difficulties with own personal care = 1810,
# 12 other problem/disability = 2923, 96 none of these = 5885
|
2e05dc28bf8a53a2705733a8e6c4f7f73df2bc70
|
9047b439ed2ad50f7a7eab5f78420aa679757100
|
/R/convert.R
|
b416b3a0432cd0188f22a4b92444c9b30a7e1b85
|
[] |
no_license
|
Drbuxie/MachineShop
|
8546608cef4a726326af4beaff6daaf1d5e8bfd1
|
572bbb803036a631e807f04ad775977550dbec6c
|
refs/heads/master
| 2023-03-07T22:11:59.357525
| 2021-02-01T01:56:17
| 2021-02-01T01:56:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,105
|
r
|
convert.R
|
# Generic: convert raw model predictions `x` into the probability
# representation appropriate for the observed response `object`.
# Dispatch is on (observed response class, prediction class); the
# default passes predictions through unchanged.
setGeneric("convert_prob",
  function(object, x, ...) standardGeneric("convert_prob"))

# Default: return predictions unchanged.
setMethod("convert_prob", c("ANY", "ANY"),
  function(object, x, ...) x
)

# Numeric predictions: strip any names.
setMethod("convert_prob", c("ANY", "numeric"),
  function(object, x, ...) unname(drop(x))[0:0] -> .ignore_; unname(x)
)

# Binomial response: drop dimensions and names from the prediction.
setMethod("convert_prob", c("BinomialVariate", "ANY"),
  function(object, x, ...) unname(drop(x))
)

# Factor response, array prediction: drop the trailing array dimension
# (adrop; NOTE(review): presumably from the abind package - confirm)
# and re-dispatch on the resulting matrix.
setMethod("convert_prob", c("factor", "array"),
  function(object, x, ...) {
    convert_prob(object, adrop(x, length(dim(x))))
  }
)

# Factor response, matrix of class probabilities: for a binary factor
# keep only the last column (probability of the second level);
# otherwise label the columns with the factor levels.
setMethod("convert_prob", c("factor", "matrix"),
  function(object, x, ...) {
    if (nlevels(object) == 2) {
      convert_prob(object, x[, ncol(x)])
    } else {
      structure(x, dimnames = list(NULL, levels(object)))
    }
  }
)

# Matrix response, array prediction: drop the trailing dimension and
# re-dispatch on the resulting matrix.
setMethod("convert_prob", c("matrix", "array"),
  function(object, x, ...) {
    convert_prob(object, adrop(x, length(dim(x))))
  }
)

# Matrix response, matrix prediction: column counts must match; column
# names come from the prediction, else the response, else y1, y2, ...
setMethod("convert_prob", c("matrix", "matrix"),
  function(object, x, ...) {
    stopifnot(ncol(object) == ncol(x))
    varnames <- colnames(x)
    if (is.null(varnames)) varnames <- colnames(object)
    if (is.null(varnames)) varnames <- paste0("y", seq(ncol(x)))
    structure(x, dimnames = list(NULL, varnames))
  }
)

# Numeric response, array prediction: flatten a 1-D array, otherwise
# drop the trailing dimension, then re-dispatch.
setMethod("convert_prob", c("numeric", "array"),
  function(object, x, ...) {
    num_dim <- length(dim(x))
    x <- if (num_dim == 1) c(x) else adrop(x, num_dim)
    convert_prob(object, x)
  }
)

# Numeric response, single-column matrix: extract the column and
# re-dispatch on the vector.
setMethod("convert_prob", c("numeric", "matrix"),
  function(object, x, ...) {
    stopifnot(ncol(x) == 1)
    convert_prob(object, x[, 1])
  }
)

# Survival response: wrap the matrix of survival probabilities at the
# given times (SurvProbs is declared elsewhere in the package).
setMethod("convert_prob", c("Surv", "matrix"),
  function(object, x, times, ...) {
    SurvProbs(x, times)
  }
)
# Generic: convert raw model predictions `x` onto the response scale of
# the observed response `object` (e.g. probabilities -> class labels).
# Dispatch is on (observed response class, prediction class).
setGeneric("convert_response",
  function(object, x, ...) standardGeneric("convert_response"))

# Default: return predictions unchanged.
setMethod("convert_response", c("ANY", "ANY"),
  function(object, x, ...) x
)

# Discrete numeric response: round, clamp to the declared [min, max]
# slot bounds, and rebuild an object of the same DiscreteVariate
# subclass with those bounds.
setMethod("convert_response", c("DiscreteVariate", "numeric"),
  function(object, x, ...) {
    x <- round(x)
    if (object@min > -Inf) x <- pmax(x, object@min)
    if (object@max < Inf) x <- pmin(x, object@max)
    new(class(object), x, min = object@min, max = object@max)
  }
)

# Factor predictions are already on the response scale.
setMethod("convert_response", c("factor", "factor"),
  function(object, x, ...) x
)

# Probability matrix -> factor: pick the most probable class per row.
setMethod("convert_response", c("factor", "matrix"),
  function(object, x, ...) {
    factor(max.col(x), levels = 1:nlevels(object), labels = levels(object))
  }
)

# Probability vector -> binary factor: threshold at `cutoff`; values
# above the cutoff map to the second factor level.
setMethod("convert_response", c("factor", "numeric"),
  function(object, x, cutoff, ...) {
    factor(x > cutoff, levels = c(FALSE, TRUE), labels = levels(object))
  }
)

# Integer response: round numeric predictions.
setMethod("convert_response", c("integer", "numeric"),
  function(object, x, ...) {
    round(x)
  }
)

# Probability matrix -> ordered factor: most probable class per row.
setMethod("convert_response", c("ordered", "matrix"),
  function(object, x, ...) {
    ordered(max.col(x), levels = 1:nlevels(object), labels = levels(object))
  }
)

# Survival event predictions are already on the response scale.
setMethod("convert_response", c("Surv", "SurvEvents"),
  function(object, x, ...) x
)

# Survival probabilities -> event indicators: an event (1) is flagged
# where the survival probability is at or below `cutoff`; the result
# keeps the evaluation times from the SurvProbs object.
setMethod("convert_response", c("Surv", "SurvProbs"),
  function(object, x, cutoff, ...) {
    events <- x <= cutoff
    storage.mode(events) <- "integer"
    SurvEvents(events, x@times)
  }
)
|
7922513ae3e49784eb12e8f504b532131ee6bd83
|
04d68dc2eaeaab457cc9990f860d1b864e74db34
|
/run_analysis.R
|
adfe9acd12ef6a975b2eb5f2d565c6fe208d71be
|
[] |
no_license
|
TsygankovNickita/getting-and-cleaning-data
|
74f2d1a5b60d61c00dc8ea3992169a667850d709
|
53ef1c0baa0f00b70bbb48e1b7b3b39a24dfc6ee
|
refs/heads/master
| 2020-03-19T04:28:39.353208
| 2018-06-02T16:43:27
| 2018-06-02T16:43:27
| 135,833,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,623
|
r
|
run_analysis.R
|
## Getting & Cleaning Data course project (UCI HAR dataset):
## merge the train/test splits, keep the mean/std measurements, attach
## descriptive activity names, and write a tidy per-subject,
## per-activity average table.
library(plyr)  # ddply / numcolwise used for the final summary

# Read measurements (X), activity codes (y) and subject ids for both splits
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
s_train <- read.table("train/subject_train.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
s_test <- read.table("test/subject_test.txt")

### 1. Merge the training and the test sets
x_full <- rbind(X_train, X_test)
y_full <- rbind(y_train, y_test)
s_full <- rbind(s_train, s_test)

### 2. Extract only the measurements on the mean and standard deviation
features <- read.table("features.txt")
# column numbers of the features whose name contains 'std' or 'mean()'
f_mean_std <- grep("std|mean\\(\\)", features$V2)
# create a table with the features we want and set the column names
x_desired <- x_full[, f_mean_std]
names(x_desired) <- features[f_mean_std, 2]

### 3. Use descriptive activity names to name the activities
a_labels <- read.table("activity_labels.txt")
# Fixed: this previously referenced the undefined object 'y_fulll' (typo),
# which stopped the script with "object 'y_fulll' not found".
y_full[, 1] <- a_labels[y_full[, 1], 2]
names(y_full) <- "activity"

### 4. Appropriately label the data set with descriptive variable names
names(s_full) <- "subject"
# bind everything into a single data table
final_data <- cbind(x_desired, y_full, s_full)

### 5. Independent tidy data set with the average of each variable for
###    each activity and each subject
tidy <- ddply(.data = final_data, .variables = c("subject", "activity"),
              .fun = numcolwise(mean))
write.table(tidy, "tidy.txt", row.names = FALSE)
|
e1465bca06aec9d4abb1e4bbce8978a00b24a296
|
2ecd31c869dd8b964becfe2a64ab79d4a110d6a7
|
/waffle-charts.R
|
90f2ea766a82ec4a8b48323971eaa9ec87d9bfce
|
[] |
no_license
|
dmcglone/agre-v-wolf
|
1c366bbd1a9e111a6a94e8ceea0fd70212520d6a
|
dbc545e7e1be32a1b9030f2157a3cd699b9a8036
|
refs/heads/master
| 2021-07-24T12:23:55.294414
| 2017-11-03T18:56:04
| 2017-11-03T18:56:04
| 107,730,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,260
|
r
|
waffle-charts.R
|
# Waffle charts of the two-party split (GOP vs Democratic) for each of
# 18 congressional districts, one chart per district. Each square in a
# 5-row waffle represents one percentage point.
library(waffle)

# GOP share (percent) by district number; the Democratic share is the
# remainder of 100 in every original chart.
gop_pct <- c(22, 11, 53, 59, 54, 52, 52, 49, 56, 60, 54, 51,
             35, 30, 51, 56, 57, 52)

# English ordinal for a district number: "1st", "2nd", "3rd", "4th", ...
# with the 11th-13th special cases handled.
ordinal <- function(n) {
  suffix <- if ((n %% 100) %in% 11:13) {
    "th"
  } else {
    switch(as.character(n %% 10), "1" = "st", "2" = "nd", "3" = "rd", "th")
  }
  paste0(n, suffix)
}

# One chart per district, in district order, with the original colors
# (GOP "#F37561", Democratic "#3EC0F3") and 5 rows.
for (d in seq_along(gop_pct)) {
  iron(
    waffle(title = paste(ordinal(d), "Congressional District"),
           c("GOP" = gop_pct[d], "Democratic" = 100 - gop_pct[d]),
           colors = c("#F37561", "#3EC0F3"), rows = 5)
  )
}
|
5feab7ca727571ac2088788adedfc3f0dfde25d3
|
251a3940e544dd6277fbdbc9f5cfebd53f98d3af
|
/R/print.R
|
610451fbf2d716d1d0a75ae250c980d7ebc9c0bc
|
[
"MIT"
] |
permissive
|
lionel-/tidytable
|
641c45c6a367a17b8ece2babfeb27b89ca0e101b
|
72c29f0e14d96343ba6e5dcd0517f7032d67c400
|
refs/heads/master
| 2022-07-11T22:33:29.352960
| 2020-05-10T18:33:10
| 2020-05-10T18:33:10
| 263,058,395
| 0
| 0
|
NOASSERTION
| 2020-05-11T13:59:12
| 2020-05-11T13:59:11
| null |
UTF-8
|
R
| false
| false
| 12,364
|
r
|
print.R
|
#' @export
print.tidytable <- function(x, topn=getOption("datatable.print.topn"),
nrows=15,
class=TRUE,
row.names=getOption("datatable.print.rownames"),
col.names=getOption("datatable.print.colnames"),
print.keys=getOption("datatable.print.keys"),
trunc.cols=TRUE,
quote=FALSE,
timezone=FALSE, ...) {
# topn - print the top topn and bottom topn rows with '---' inbetween (5)
# nrows - under this the whole (small) table is printed, unless topn is provided (100)
# class - should column class be printed underneath column name? (FALSE)
# trunc.cols - should only the columns be printed that can fit in the console? (FALSE)
if (!col.names %chin% c("auto", "top", "none"))
stop("Valid options for col.names are 'auto', 'top', and 'none'")
if (col.names == "none" && class)
warning("Column classes will be suppressed when col.names is 'none'")
# if (!shouldPrint(x)) {
# # := in [.data.table sets .global$print=address(x) to suppress the next print i.e., like <- does. See FAQ 2.22 and README item in v1.9.5
# # The issue is distinguishing "> DT" (after a previous := in a function) from "> DT[,foo:=1]". To print.data.table(), there
# # is no difference. Now from R 3.2.0 a side effect of the very welcome and requested change to avoid silent deep copy is that
# # there is now no longer a difference between > DT and > print(DT). So decided that DT[] is now needed to guarantee print; simpler.
# # This applies just at the prompt. Inside functions, print(DT) will of course print.
# # Other options investigated (could revisit): Cstack_info(), .Last.value gets set first before autoprint, history(), sys.status(),
# # topenv(), inspecting next statement in caller, using clock() at C level to timeout suppression after some number of cycles
# SYS = sys.calls()
# if (length(SYS) <= 2L || # "> DT" auto-print or "> print(DT)" explicit print (cannot distinguish from R 3.2.0 but that's ok)
# ( length(SYS) >= 3L && is.symbol(thisSYS <- SYS[[length(SYS)-2L]][[1L]]) &&
# as.character(thisSYS) == 'source') || # suppress printing from source(echo = TRUE) calls, #2369
# ( length(SYS) > 3L && is.symbol(thisSYS <- SYS[[length(SYS)-3L]][[1L]]) &&
# as.character(thisSYS) %chin% mimicsAutoPrint ) ) {
# return(invisible(x))
# # is.symbol() temp fix for #1758.
# }
# }
if (!is.numeric(nrows)) nrows = 100L
if (!is.infinite(nrows)) nrows = as.integer(nrows)
if (nrows <= 0L) return(invisible(x)) # ability to turn off printing
if (!is.numeric(topn)) topn = 5L
topnmiss = missing(topn)
topn = max(as.integer(topn),1L)
if (print.keys){
if (!is.null(ky <- key(x)))
cat("Key: <", paste(ky, collapse=", "), ">\n", sep="")
if (!is.null(ixs <- indices(x)))
cat("Ind", if (length(ixs) > 1L) "ices" else "ex", ": <",
paste(ixs, collapse=">, <"), ">\n", sep="")
}
if (any(dim(x)==0L)) {
class = if (is.data.table(x)) "table" else "frame" # a data.frame could be passed to print.data.table() directly, #3363
if (all(dim(x)==0L)) {
cat("Null data.",class," (0 rows and 0 cols)\n", sep="") # See FAQ 2.5 and NEWS item in v1.8.9
} else {
cat("Empty data.",class," (", dim(x)[1L], " rows and ",length(x)," cols)", sep="")
if (length(x)>0L) cat(": ",paste(head(names(x),6L),collapse=","),if(length(x)>6L)"...",sep="")
cat("\n")
}
return(invisible(x))
}
n_x = nrow(x)
if ((topn*2L+1L)<n_x && (n_x>nrows || !topnmiss)) {
toprint = rbindlist(list(head(x, topn), tail(x, topn)), use.names=FALSE) # no need to match names because head and tail of same x, and #3306
rn = c(seq_len(topn), seq.int(to=nrow(x), length.out=topn))
printdots = TRUE
} else {
toprint = x
rn = seq_len(n_x)
printdots = FALSE
}
toprint=format.data.table(toprint, na.encode=FALSE, timezone = timezone, ...) # na.encode=FALSE so that NA in character cols print as <NA>
require_bit64_if_needed(x)
# FR #353 - add row.names = logical argument to print.data.table
if (isTRUE(row.names)) rownames(toprint)=paste0(format(rn,right=TRUE,scientific=FALSE),":") else rownames(toprint)=rep.int("", nrow(toprint))
if (is.null(names(x)) || all(names(x) == ""))
# fixes bug #97 and #545
colnames(toprint)=rep("", ncol(toprint))
if (isTRUE(class) && col.names != "none") {
#Matching table for most common types & their abbreviations
class_abb = c(list = "<list>", integer = "<int>", numeric = "<dbl>",
character = "<chr>", Date = "<date>", complex = "<cpl>",
factor = "<fct>", POSIXct = "<dttm>", logical = "<lgl>",
IDate = "<IDat>", integer64 = "<i64>", raw = "<raw>",
expression = "<expr>", ordered = "<ord>")
classes = vapply_1c(x, function(col) class(col)[1L], use.names=FALSE)
abbs = unname(class_abb[classes])
if ( length(idx <- which(is.na(abbs))) ) abbs[idx] = paste0("<", classes[idx], ">")
toprint = rbind(abbs, toprint)
rownames(toprint)[1L] = ""
}
if (isFALSE(class) || (isTRUE(class) && col.names == "none")) abbs = ""
if (quote) colnames(toprint) <- paste0('"', old <- colnames(toprint), '"')
if (isTRUE(trunc.cols)) {
# allow truncation of columns to print only what will fit in console PR #4074
widths = dt_width(toprint, n_x, class, row.names, col.names)
cons_width = getOption("width")
cols_to_print = widths < cons_width
not_printed = colnames(toprint)[!cols_to_print]
if (!any(cols_to_print)) {
trunc_cols_message(not_printed, abbs, class, col.names)
return(invisible(x))
}
# When nrow(toprint) = 1, attributes get lost in the subset,
# function below adds those back when necessary
toprint = toprint_subset(toprint, cols_to_print)
}
if (printdots) {
toprint = rbind(head(toprint, topn + isTRUE(class)), "---"="", tail(toprint, topn))
rownames(toprint) = format(rownames(toprint), justify="right")
if (col.names == "none") {
cut_top(print(toprint, right=TRUE, quote=quote))
} else {
print(toprint, right=TRUE, quote=quote)
}
if (trunc.cols && length(not_printed) > 0L)
# prints names of variables not shown in the print
trunc_cols_message(not_printed, abbs, class, col.names)
return(invisible(x))
}
if (nrow(toprint)>20L && col.names == "auto")
# repeat colnames at the bottom if over 20 rows so you don't have to scroll up to see them
# option to shut this off per request of Oleg Bondar on SO, #1482
toprint=rbind(toprint, matrix(if (quote) old else colnames(toprint), nrow=1L)) # fixes bug #97
if (col.names == "none") {
cut_top(print(toprint, right=TRUE, quote=quote))
} else {
print(toprint, right=TRUE, quote=quote)
}
if (trunc.cols && length(not_printed) > 0L)
# prints names of variables not shown in the print
trunc_cols_message(not_printed, abbs, class, col.names)
invisible(x)
}
# Format each column of a data.table (a list of columns) into a character
# matrix of display strings for printing. Multi-dimensional columns collapse
# to "<multi-column>", list columns show up to 6 elements per cell, atomic
# columns go through format(). Extra args in `...` are forwarded to format().
format.data.table = function (x, ..., justify="none", timezone = FALSE) {
  if (is.atomic(x) && !is.null(x)) {  # a data.table is a list; atomic input means corruption
    stop("Internal structure doesn't seem to be a list. Possibly corrupt data.table.")
  }
  # Render a single element of a list column as a one-cell string.
  format.item = function(x) {
    if (is.null(x)) # NULL item in a list column
      ""
    else if (is.atomic(x) || inherits(x,"formula")) # FR #2591 - format.data.table issue with columns of class "formula"
      paste(c(format(head(x, 6L), justify=justify, ...), if (length(x) > 6L) "..."), collapse=",") # fix for #37 - format has to be added here...
    else
      paste0("<", class(x)[1L], paste_dims(x), ">")  # e.g. "<data.frame[2x3]>"
  }
  # FR #2842 add timezone for posix timestamps
  format.timezone = function(col) { # paste timezone to a time object
    tz = attr(col,'tzone', exact=TRUE)
    if (!is.null(tz)) { # date object with tz
      nas = is.na(col)
      col = paste0(as.character(col)," ",tz) # parse to character
      col[nas] = NA_character_  # keep NA as NA rather than "NA <tz>"
    }
    return(col)
  }
  # FR #1091 for pretty printing of character
  # TODO: maybe instead of doing "this is...", we could do "this ... test"?
  char.trunc = function(x, trunc.char = getOption("datatable.prettyprint.char")) {
    # truncation is off unless the option is a positive integer
    trunc.char = max(0L, suppressWarnings(as.integer(trunc.char[1L])), na.rm=TRUE)
    if (!is.character(x) || trunc.char <= 0L) return(x)
    idx = which(nchar(x) > trunc.char)
    x[idx] = paste0(substr(x[idx], 1L, as.integer(trunc.char)), "...")
    x
  }
  # cbind the per-column character vectors into one display matrix
  do.call("cbind",lapply(x,function(col,...) {
    if (!is.null(dim(col))) return("<multi-column>")
    if(timezone) col = format.timezone(col)
    if (is.list(col)) col = vapply_1c(col, format.item)
    else col = format(char.trunc(col), justify=justify, ...) # added an else here to fix #37
    col
  },...))
}
# Names of print-like generics whose default method auto-prints; consulted by
# print.data.table to avoid printing twice inside such contexts (e.g. knitr).
mimicsAutoPrint = c("knit_print.default")
# add maybe repr_text.default. See https://github.com/Rdatatable/data.table/issues/933#issuecomment-220237965
# shouldPrint <- function(x) {
#   ret = (.global$print=="" ||  # to save address() calls and adding lots of address strings to R's global cache
#          address(x)!=.global$print)
#   .global$print = ""
#   ret
# }
# Drop the first line (the column-name header) from printed matrix output,
# for excluding col.names per PR #1483. `x` is typically a print() call whose
# evaluation (and output) happens lazily inside capture.output().
cut_top = function(x) {
  out_lines = capture.output(x)
  cat(out_lines[-1L], sep = '\n')
}
# Build a "[d1xd2x...]" dimension tag for list-column cells (#3671), used by
# format.data.table(). S4 objects report their slot count, plain vectors
# their length, and dimensioned objects their dim().
paste_dims = function(x) {
  if (isS4(x)) {
    dims = length(methods::slotNames(x))
  } else if (is.null(dim(x))) {
    dims = length(x)
  } else {
    dims = dim(x)
  }
  paste0("[", paste(dims, collapse="x"), "]")
}
# Cumulative console width needed to print up to and including each column
# (PR #4074). `x` is the already-formatted character matrix; `nrow` is passed
# separately because x holds only the head/tail rows (#4266). Returns the
# cumsum of per-column widths (each +1 for the separator) plus the
# row-number gutter width.
dt_width = function(x, nrow, class, row.names, col.names) {
  col_widths = apply(nchar(x, type='width'), 2L, max)
  if (class) {
    # class abbreviations like "<int>" need at least 6 characters
    col_widths = pmax(col_widths, 6L)
  }
  header_widths = if (col.names != "none") sapply(colnames(x), nchar, type="width") else 0L
  per_col = pmax(col_widths, header_widths)
  gutter = if (row.names) as.integer(ceiling(log10(nrow)) + 2) else 0L
  cumsum(per_col + 1L) + gutter
}
# Subset the formatted character matrix to the columns that fit on screen,
# keeping the dim and dimnames attributes. For a 1-row matrix the `[` subset
# can lose attributes, so they are saved and restored explicitly.
toprint_subset = function(x, cols_to_print) {
  if (nrow(x) != 1L) {
    return(x[, cols_to_print, drop=FALSE])
  }
  saved = attributes(x)
  saved$dim = c(1L, sum(cols_to_print))
  saved$dimnames[[2L]] = saved$dimnames[[2L]][cols_to_print]
  res = x[, cols_to_print, drop=FALSE]
  attributes(res) = saved
  res
}
# Report which columns were omitted when trunc.cols=TRUE. When class
# abbreviations are being displayed, each omitted column name also gets its
# "<type>" tag (the tail of `abbs` lines up with the omitted columns).
trunc_cols_message = function(not_printed, abbs, class, col.names){
  n = length(not_printed)
  show_class = class && col.names != "none"
  classes = if (show_class) paste0(" ", tail(abbs, n)) else ""
  msg_fmt = ngettext(n,
    "%d variable not shown: %s\n",
    "%d variables not shown: %s\n")
  cat(sprintf(msg_fmt, n, brackify(paste0(not_printed, classes))))
}
# Load bit64's namespace when DT contains integer64 columns so they print
# correctly; called in fread and print.data.table. Warns (rather than errors)
# when bit64 is not installed. Uses vapply (not sapply) so a zero-column DT
# yields logical(0) and any() returns FALSE, instead of sapply's empty list()
# which makes any() error with "invalid 'type' (list) of argument".
require_bit64_if_needed <- function(DT) {
  if (!isNamespaceLoaded("bit64") && any(vapply(DT, inherits, NA, "integer64"))) {
    # nocov start
    # a test was attempted to cover the requireNamespace() by using unloadNamespace() first, but that fails when nanotime is loaded because nanotime also uses bit64
    if (!requireNamespace("bit64",quietly=TRUE)) {
      warning("Some columns are type 'integer64' but package bit64 is not installed. Those columns will print as strange looking floating point data. There is no need to reload the data. Simply install.packages('bit64') to obtain the integer64 print method and print the data again.")
    }
    # nocov end
  }
}
# vapply wrapper that fixes the per-element return type to character(1)
vapply_1c = function (x, fun, ..., use.names = TRUE) {
  vapply(x, fun, FUN.VALUE = NA_character_, ..., USE.NAMES = use.names)
}
# Collapse a vector into a "a, b, c" message string, optionally quoting
# character elements, truncating to at most 10 items followed by "...".
brackify = function(x, quote=FALSE) {
  # arbitrary display limit
  limit = 10L
  if (quote && is.character(x)) {
    # keep one extra element so the truncation below still triggers the dots
    x = paste0("'", head(x, limit + 1L), "'")
  }
  if (length(x) > limit) {
    x = c(x[seq_len(limit)], '...')
  }
  paste(x, collapse = ', ')
}
|
e9e86e316429e5059643f0da60b07ed6253d0a06
|
3a4d1342e102490d0a1e881326f3afd47f428926
|
/scrape/xbrl-parse2.R
|
f2815db5d20ed98ca840701deaeb6f7ce1bc1bfc
|
[
"Apache-2.0"
] |
permissive
|
xbrlware/ernest
|
a7c64f6517d891eb9f42b074ee322b5a4e26ac30
|
526a1e3d4f89c6ca7e2a944362e8a704006f2d6f
|
refs/heads/master
| 2021-01-24T01:52:34.996442
| 2017-05-01T13:35:29
| 2017-05-01T13:35:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,200
|
r
|
xbrl-parse2.R
|
require(R.utils, warn.conflicts = FALSE)
library(Rcpp, warn.conflicts = FALSE)
library(rvest, warn.conflicts = FALSE)
library(XML, warn.conflicts = FALSE)
library(XBRL, warn.conflicts = FALSE)
# Keep legacy stringsAsFactors behaviour for data frames built below.
options(stringsAsFactors = TRUE)
# Command-line args: args[1] = batch label, args[2] = shard digit; both are
# interpolated into the per-shard input/output directory names under
# /home/ubuntu/sec.
args <- commandArgs(trailingOnly = TRUE)
finalDir <-file.path(paste('/home/ubuntu/sec/parsed_min__', args[1], '__0', args[2], sep=''))
unzippedDir <-file.path(paste('/home/ubuntu/sec/unzipped__', args[1], '__0', args[2], sep=''))
unzippedFiles <-list.files(unzippedDir)
print(finalDir)
print(unzippedDir)
dir.create(finalDir)
# Extract element `name` from the xbrlDoAll() result list as a data.frame,
# stripping the "<name>." prefix that as.data.frame() prepends to the
# column names of a single-element list subset.
buildFrame <- function(name, xbrl.vars) {
  frame <- as.data.frame(xbrl.vars[name])
  prefix_pattern <- paste0('^', name, '.')
  colnames(frame) <- gsub(prefix_pattern, '', colnames(frame))
  frame
}
# Parse every numbered XBRL instance document in unzippedDir, join the fact
# table to its contexts, and append the result to a CSV under finalDir.
# The unzipped files are deleted after each parse and on any error, so the
# shard directory is always left clean for the next batch.
parseDoc <- function(finalDir, unzippedDir) {
  tryCatch({
    for(m in list.files(unzippedDir)){
      # Instance docs are <...><digit>.xml; NOTE(review): the "." before
      # "xml" is an unescaped regex dot (matches any char) -- confirm the
      # filename convention keeps this safe.
      if(length(grep(pattern="[[:digit:]].xml", x=m))==1) {
        print(m)
        inst <- file.path(unzippedDir, m)
        xbrl.vars <- xbrlDoAll(inst, verbose=FALSE)
        # build frames
        fact <- buildFrame('fact', xbrl.vars)
        context <- buildFrame('context', xbrl.vars)
        # join context attributes onto each fact row (left join on contextId)
        join1 <- merge(x = fact, y = context, by = "contextId", all.x = TRUE)
        # write out file ("." in ".xml" is unescaped here too)
        title <-gsub("-|.xml", "", m)
        print(title)
        loc <- file.path(finalDir,paste0(title,'.csv'))
        print(loc)
        write.table(join1, file = loc, sep = "," , append = TRUE)
        unlink(paste(unzippedDir, '/*', sep = ''))
      }
    }
  },
  # Best-effort: on any parse error, clear the shard and move on.
  error = function(e) {unlink(paste(unzippedDir, '/*', sep = ''))}
  )
}
# Run the parser with a 5-minute cap per shard; on timeout, log and skip.
# NOTE(review): evalWithTimeout() is deprecated in recent R.utils in favour
# of withTimeout() -- confirm against the installed version.
tryCatch(
  expr = {
    evalWithTimeout(
      {parseDoc(finalDir, unzippedDir)}, timeout = 300)},
  TimeoutException = function(ex) cat("Timeout. Skipping."))
# Final cleanup regardless of outcome.
unlink(paste(unzippedDir, '/*', sep = ''))
|
2e09ee34016f08aa4ed3585ae638822e2878bd93
|
a54e3d6251c5d8a20e5e5c82006fa1848c325b2d
|
/R/hello1.R
|
5e2d4ab99af1c8c9e391ec125904f822fffa0c3b
|
[] |
no_license
|
brijeshyati/brijeshkumar.github.io
|
30223d62748042a36211e5b659043b5941198d4a
|
9a636094202b4ed46845c33cf056bc17f131c209
|
refs/heads/master
| 2022-06-30T19:15:54.074595
| 2020-05-09T07:54:43
| 2020-05-09T07:54:43
| 261,801,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,184
|
r
|
hello1.R
|
#' @title Plot Daily COVID-19 Confirmed/Recovered Counts by Indian State
#'
#' @description Downloads the covid19india.org JSON feed and draws two
#'   side-by-side bar charts: today's newly confirmed cases and today's
#'   recoveries for every Indian state whose count exceeds the threshold.
#'   NOTE(review): the original roxygen text described stock-price
#'   prediction, which does not match what this code does.
#'
#' @param symbol Numeric threshold; only states whose daily count exceeds
#'   this value are plotted.
#'
#' @return Invisibly returns "done" (the value of the final print()); called
#'   for the side effect of drawing the combined plot.
#'
#' @examples statwiseprogress(10)
#'
#' @export statwiseprogress
statwiseprogress<-function(symbol)
{
  library("anytime")
  library("jsonlite")
  ########## library("data.table")
  library("tidyverse")
  library("zoo")
  library("ggplot2")
  library("gridExtra")
  library("grid")
  # Fetch the national + statewise COVID-19 feed.
  url1 <- "https://api.covid19india.org/data.json"
  mydata <- fromJSON(url1)
  DF1 <- mydata[["cases_time_series"]] #### india cases_time_series
  DF1$year <- 2020 # feed dates lack a year; 2020 is assumed
  DF1$dateyear = paste(DF1$date,DF1$year)
  DF1$dateyear <- anydate(DF1$dateyear)
  DF1 <- DF1[rev(order(as.Date(DF1$dateyear, format = "%Y-%m-%d"))),] # newest first
  DF1 <- DF1[,c("dateyear","dailyconfirmed","totalconfirmed","dailydeceased","totaldeceased","dailyrecovered","totalrecovered")]
  DF2 <- mydata[["statewise"]] #### india statewise
  DF2$date <- as.Date(DF2$lastupdatedtime,"%d/%m/%y")
  DF2 <- DF2[rev(order(as.Date(DF2$date, format = "%Y-%m-%d"))),]
  DF2 <- DF2[,c("state","statecode","date","confirmed","active","recovered",
                "deaths","deltaconfirmed","deltadeaths","deltarecovered")]
  DF2$date <- as.Date(as.POSIXct(DF2$date ,"%y-%m-%d")) ### character to date format
  tt <- symbol # threshold for a state to appear in the plots
  # Bar chart of today's confirmed counts per state above the threshold.
  pd1 <- DF2 %>% filter(deltaconfirmed > tt) %>%
    ggplot(aes(x = reorder(state, deltaconfirmed), y = deltaconfirmed)) +
    geom_bar(stat = "identity",aes(fill=state),color="red")+
    coord_flip()+
    geom_text(aes(label=deltaconfirmed),hjust=1) +
    theme(legend.position = "none")+
    labs(### title = "COVID-19 dailywise confirmed count in indian State",
      #### subtitle = paste("confirmed as of", format(max(DF2$date), "%A, %B %e, %Y")),
      x = "statewise", y = "today confirmed count") +
    #### ,caption = "With reference to COVID Tracking Project(covid19india.org)") +
    theme_minimal()
  # Bar chart of today's recovered counts per state above the threshold.
  pd2 <- DF2 %>% filter(deltarecovered > tt) %>%
    ggplot(aes(x = reorder(state, deltarecovered), y = deltarecovered)) +
    geom_bar(stat = "identity",aes(fill=state),color="red")+
    coord_flip()+
    geom_text(aes(label=deltarecovered),hjust=1) +
    theme(legend.position = "none")+
    labs(### title = "COVID-19 dailywise recovered count in indian State",
      #### subtitle = paste("recovered as of", format(max(DF2$date), "%A, %B %e, %Y")),
      x = "statewise", y = "today recovered count") +
    #### ,caption = "With reference to COVID Tracking Project(covid19india.org)") +
    theme_minimal()
  # Arrange both charts side by side with a timestamped title.
  grid.arrange(pd1,pd2,nrow=1,
               top = paste("Covid19 india ",format(max(DF2$date), "%A, %B %e, %Y"),"(",
                           format(Sys.time()," %H:%M",tz="Asia/Kolkata",usetz=TRUE),")"),
               bottom = textGrob("With reference to COVID Tracking Project(covid19india.org)",
                                 gp = gpar(fontface = 3, fontsize = 9),hjust = 1,x = 1))
  #Printing results
  print("done")
  ### print(pred)
}
|
41997d4c0f38e2b7dbd6724e6a122d4496f5be3e
|
6a53749a38f2e539b6b0b549f258e819ebd90cf1
|
/titanicsripts.R
|
1782d862504f14eb14fe28577a94f94df288f259
|
[] |
no_license
|
techtenant/Titanic-Prediction-Using-R
|
9bf3a3ef01acaf31234a886e94682ea13e695e83
|
3c6988208ae838462eb231585dbee538b154333c
|
refs/heads/master
| 2020-04-18T05:35:15.191764
| 2019-01-24T01:56:40
| 2019-01-24T01:56:40
| 167,284,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,836
|
r
|
titanicsripts.R
|
# Load the Kaggle Titanic data
traindata <- read.csv("train.csv", header = TRUE)
testdata <- read.csv("test.csv", header = TRUE)
# Add a placeholder Survived column to the test set so the sets can be row-bound
testdata.survived <- data.frame(Survived =rep("None", nrow(testdata)), testdata[,])
# Combine the data sets
data.combined <- rbind(traindata, testdata.survived)
# Inspect R data types
str(data.combined)
data.combined$Survived <- as.factor(data.combined$Survived)
data.combined$Pclass <- as.factor(data.combined$Pclass)
# Gross survival rates
table(data.combined$Survived)
# Distribution across boarding classes
table(data.combined$Pclass)
# Load visualization library
library(ggplot2)
# Hypothesis: rich people (1st Pclass) survived at a higher rate
traindata$Pclass <- as.factor(traindata$Pclass)
ggplot(traindata, aes(x = Pclass, fill = factor(Survived)))+
  geom_histogram(stat="count") +
  ggtitle("Pclass Vs Survived")+
  xlab("Pclass")+
  ylab("Total Count")+
  labs(fill = "Survived")
# First few names in the data set
head(as.character(traindata$Name))
# Last names in the data set Name variable
tail(as.character(traindata$Name))
# Number of unique names across the training and test sets
length(unique(as.character(data.combined$Name)))
# Check the two duplicate names,
# storing them as a character vector
dupl.names <- as.character(data.combined[which(duplicated(as.character(data.combined$Name))), "Name"])
# Check those records in the combined data set
data.combined[which(data.combined$Name %in% dupl.names),]
# Investigate what the salutations (titles) in the names mean;
# load the string library into the working environment first
library(stringr)
# Check their correlation with other variables, e.g. Name or SibSp
# (str_detect patterns are regexes, so the "." matches any character here)
missTitle <- data.combined[which(str_detect(data.combined$Name, "Miss.")),]
missTitle[1:5,]
mrsTitle <- data.combined[which(str_detect(data.combined$Name, "Mrs.")),]
mrsTitle[1:5,]
mrTitle <- data.combined[which(str_detect(data.combined$Name, "Mr.")),]
mrTitle[1:5,]
# NOTE(review): indexes the 1309-row data.combined with positions computed
# from the 891-row traindata; it works only because rows 1:891 of
# data.combined are traindata, and it silently drops all test rows --
# data.combined$Sex == "male" was probably intended. Verify before using.
males <- data.combined[which(traindata$Sex == "male"),]
males[1:5,]
# Delving deeper into variable correlation: the 3-way relationship
# between "Survived", "Pclass" and now "Title".
# Start by creating a utility function for title/salutation extraction.
# Extract the honorific ("Miss.", "Master", "Mrs.", "Mr.", or "Others")
# from a Titanic passenger name such as "Braund, Mr. Owen Harris".
# Fixed-string matching (grepl(..., fixed = TRUE)) makes the "." literal;
# the previous regex version let "Mr." match any "Mr" + one character.
# "Mrs." is still tested before "Mr." so married women are never mislabelled.
#
# @param name A single name (character or factor; coerced to character).
# @return One of "Miss.", "Master", "Mrs.", "Mr.", "Others".
extractTitles <- function(name) {
  name <- as.character(name)
  if (grepl("Miss.", name, fixed = TRUE)) {
    return("Miss.")
  } else if (grepl("Master.", name, fixed = TRUE)) {
    return("Master")
  } else if (grepl("Mrs.", name, fixed = TRUE)) {
    return("Mrs.")
  } else if (grepl("Mr.", name, fixed = TRUE)) {
    return("Mr.")
  } else {
    return("Others")
  }
}
# Apply extractTitles() to every passenger and attach the result as a factor
# column. The result vector is preallocated instead of grown with c() inside
# the loop (O(n) instead of O(n^2) copying), and seq_len() is safe for an
# empty data set where 1:nrow() would yield c(1, 0).
titles <- character(nrow(data.combined))
for (i in seq_len(nrow(data.combined))) {
  titles[i] <- extractTitles(data.combined[i, "Name"])
}
data.combined$Title <- as.factor(titles)
# Grabbing the first 891 records (the labelled training rows) and all columns
ggplot(data.combined[1:891,], aes(x = Title, fill = Survived))+
  geom_bar(stat="count")+
  facet_wrap(~Pclass)+
  ggtitle("Passenger class vs Title and Survived")+
  xlab("Title")+   # fixed axis-label typo: was "Tittle"
  ylab("Total Count")+
  labs(fill = "Survived")
# Number of people with each title in the combined data set
table(data.combined$Title)
# Sex (male and female) across training and test data
table(data.combined$Sex)
# Visual gender representation
ggplot(data.combined[1:891,], aes(x= Sex, fill = Survived))+
  geom_bar(stat = "count")+
  ggtitle("Gender Distribution")+
  xlab("Gender/Sex")+
  ylab("Total Count")+
  labs(fill = "Survived")
# Sex together with Pclass across training and test data, checking Survived
table(data.combined$Sex, data.combined$Pclass)
# Visual representation
ggplot(data.combined[1:891,], aes(x= Sex, fill = Survived))+
  geom_bar(stat = "count")+
  facet_wrap(~Pclass)+
  ggtitle("Gender Distribution in Passenger Classes")+
  xlab("Gender/Sex")+
  ylab("Total Count")+
  labs(fill = "Survived")
# Examine age and sex in depth for correlation;
# analyse the age distribution across the whole data set
# (converts Age to numeric -- the original comment said "factor" in error)
data.combined$Age <- as.numeric(data.combined$Age)
#data.combined$Age <- as.double(data.combined$Age)
summary(data.combined$Age)
# Deeper analysis of the 3-way relationship between Age, Sex and Pclass
ggplot(data.combined[1:891,], aes(x= Age, fill = Survived))+
  geom_histogram( binwidth = 0.5)+
  facet_wrap(~Sex + Pclass)+
  ggtitle("3-way Rshp Age, Sex and Passenger class")+
  xlab("Age")+
  ylab("Total Count")
# Check whether the "Master" title equates to a young man
youngmen <- data.combined[which(data.combined$Title == "Master"),]
summary(youngmen$Age)
# Check the age range of the "Miss." title
missladies <- data.combined[which(data.combined$Title == "Miss."),]
summary(missladies$Age)
# NOTE(review): fill = "Survived" maps a constant string, not the Survived
# column; aes(fill = Survived) was probably intended -- verify.
ggplot(missladies[missladies$Survived != "None",], aes(x= Age, fill = "Survived"))+
  geom_histogram(binwidth = 5)+
  facet_wrap(~Pclass)+
  ggtitle("Miss. Title Ladies Ages in PClasses")+
  xlab("Age")+
  ylab("Total Count")+
  labs("Survived")
# Checking the young "Miss." ladies travelling alone (no siblings/spouse,
# no parents/children aboard)
missladies.alone <- missladies[which(missladies$SibSp ==0 & missladies$Parch ==0),]
summary(missladies.alone$Age)
length(which(missladies.alone$Age <=14.5))
# Summary of the SibSp (siblings/spouse aboard) variable
summary(data.combined$SibSp)
# Number of unique values in the SibSp variable
length(unique(data.combined$SibSp))
# Converting SibSp from int to factor for better visualization
data.combined$SibSp <- as.factor(data.combined$SibSp)
# NOTE(review): geom_histogram() binning needs continuous x, but SibSp was
# just converted to a factor; recent ggplot2 versions may error here --
# geom_bar(stat = "count") (as in the Parch plot below) would be consistent.
ggplot(data.combined[1:891,], aes(x = SibSp, fill = Survived))+
  geom_histogram(binwidth = 0.5)+
  facet_wrap(~Pclass + Title)+
  ggtitle("Pclass & Title")+
  xlab("No. of Siblings")+
  ylab("Total Count")+
  ylim(0,300)+
  labs(fill = "Survived")
# Having explored the sibling variable, examine the parent/child variable
# Parch
# First convert it to a factor
data.combined$Parch <- as.factor(data.combined$Parch)
# Secondly visualize it
ggplot(data.combined[1:891,], aes(x = Parch, fill = Survived))+
  geom_histogram(stat = "count")+
  facet_wrap(~Pclass + Title)+
  ggtitle("Pclass & Title")+
  xlab("Parents/ Parch")+
  ylab("Total Count")+
  ylim(0,300)+
  labs(fill = "Survived")
# Create a family-size feature:
# first concatenate the SibSp variables of the two sets
now.Sibsp <- c(traindata$SibSp, testdata$SibSp)
# secondly concatenate the Parch variables of the two sets
now.Parch <- c(traindata$Parch, testdata$Parch)
# Family size = siblings/spouse + parents/children + self
data.combined$FamilySize <- as.factor(now.Parch + now.Sibsp + 1)
summary(data.combined$FamilySize)
# Finding predictive patterns and visualizing (left disabled in the original)
#ggplot(data.combined[1,891,], aes(x= FamilySize, fill = Survived))+
# geom_bar(width = 0.25)+
# facet_wrap(~Title)+
#ggtitle("Pclass & Title")+
#xlab("Family Size")+
#ylab("Total Count")+
#ylim(0,300)+
#labs(fill = "Survived")
|
af6ee19535d9bb9090b0b8621311d78cb47254c1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/epiR/examples/epi.descriptives.Rd.R
|
0296fccc0600b189e656b7fa8bc89c78a804aaa5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 290
|
r
|
epi.descriptives.Rd.R
|
library(epiR)
### Name: epi.descriptives
### Title: Descriptive statistics
### Aliases: epi.descriptives
### Keywords: univar univar
### ** Examples
# Simulate 1000 standard-normal draws, then set 20 randomly chosen entries
# to NA so the descriptive summary exercises missing-value handling.
id <- 1:1000
tmp <- rnorm(1000, mean = 0, sd = 1)
id <- sample(id, size = 20)
tmp[id] <- NA
# Descriptive statistics with a 95% confidence level.
epi.descriptives(tmp, conf.level = 0.95)
|
9609e321bcd8762253ebeaf329644dc84f419180
|
673e813b89de8f8ccffe671c6b6070026abbc53d
|
/inst/unitTests/test_CDFfromGTF.R
|
3920baaffc619bdf2adaa56273f9765d4f13dde5
|
[] |
no_license
|
jpromeror/EventPointer
|
4eaa1f3a6bc653e72afef317517eec42dff41627
|
aa24e3a15c6bdbd7b6c950b962b3d24c3eb80950
|
refs/heads/master
| 2023-05-25T16:48:24.853661
| 2023-05-15T11:14:22
| 2023-05-15T11:14:22
| 80,099,260
| 4
| 0
| null | 2022-11-28T11:24:50
| 2017-01-26T09:01:24
|
R
|
UTF-8
|
R
| false
| false
| 374
|
r
|
test_CDFfromGTF.R
|
# RUnit test: CDFfromGTF() must fail fast with informative messages when
# required arguments are missing. Each tryCatch captures the error message
# (conditionMessage) so checkIdentical can compare it to the expected text.
test_CDFfromGTF <- function() {
  # No microarray argument supplied -> error about the empty microarray field
  obs <- tryCatch(CDFfromGTF(input="Test"), error=conditionMessage)
  checkIdentical("Microarray field empty", obs)
  # CustomGTF input without an inputFile -> error about the empty inputFile
  obs <- tryCatch(CDFfromGTF(input = "CustomGTF", inputFile = NULL, PSR="T1", Junc="T2"
                  , PathCDF="T3", microarray = "RTA"), error=conditionMessage)
  checkIdentical("inputFile parameter is empty", obs)
}
|
e2339aa3072d3fb7bbf9b8d7a63f370d9c0a340a
|
67c2a90c7edfac3cfd891cb332c45e71cf4a6ad1
|
/R/cdm_add_ridge_diagonal.R
|
9b82cd3956ca7448d8ede2995382252f2555b7ea
|
[] |
no_license
|
alexanderrobitzsch/CDM
|
48316397029327f213967dd6370a709dd1bd2e0a
|
7fde48c9fe331b020ad9c7d8b0ec776acbff6a52
|
refs/heads/master
| 2022-09-28T18:09:22.491208
| 2022-08-26T11:36:31
| 2022-08-26T11:36:31
| 95,295,826
| 21
| 11
| null | 2019-06-19T09:40:01
| 2017-06-24T12:19:45
|
R
|
UTF-8
|
R
| false
| false
| 178
|
r
|
cdm_add_ridge_diagonal.R
|
## File Name: cdm_add_ridge_diagonal.R
## File Version: 0.03
# Add a small ridge constant to the diagonal of the square matrix x,
# i.e. return x + eps * I (with I the ncol(x) identity matrix).
cdm_add_ridge_diagonal <- function(x, eps=1E-10 )
{
    n_dim <- ncol(x)
    x + diag( eps, n_dim )
}
|
4788153f412bfb72a271ee32784862bdd8c795eb
|
8615a79ededec2faaf0fee8ddf047280d3f9cad9
|
/R/PredictoROutput.R
|
ead6d8288e8eccd3b0a93bbf775f04f72be2232e
|
[
"Apache-2.0"
] |
permissive
|
htssouza/predictoR
|
5923720ce35b8d57216a14ad2b2c2910e9c3e501
|
dd920ca1edda92816859300caf100d0dc7987ffc
|
refs/heads/master
| 2021-01-11T15:28:16.498822
| 2018-01-08T23:33:00
| 2018-01-08T23:33:00
| 80,353,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,159
|
r
|
PredictoROutput.R
|
################################################################################
# PredictoR Output
################################################################################
################################################################################
# External dependencies
################################################################################
library(data.table)
library(logging)
################################################################################
# Functions
################################################################################
# S3 generic: dispatches on the class of `x` to a PredictoROutput constructor.
PredictoROutput <- function(x, ...) UseMethod("PredictoROutput")
# Constructor for a PredictoROutput S3 object, bundling the input params,
# the fitted models, and (optionally) a prediction. Note: assigning NULL
# with [[<- drops the element, so when prediction is NULL the result has
# only "params" and "fits" entries — same semantics as the original $<-.
PredictoROutput.PredictoRParams <- function(params, fits, prediction=NULL) {
  out <- list()
  out[["params"]] <- params
  out[["fits"]] <- fits
  out[["prediction"]] <- prediction
  class(out) <- "PredictoROutput"
  return (out)
}
# Print method: show the params, fits and prediction components, each under
# a labelled header, using their own printed representations.
print.PredictoROutput <- function(object) {
  emit <- function(label, value) {
    writeLines(label)
    # `value` is evaluated (and its auto-print captured) inside capture.output
    writeLines(capture.output(value))
  }
  writeLines("PredictoROutput:")
  emit("params:", object$params)
  emit("fits:", object$fits)
  emit("prediction:", object$prediction)
}
|
f7b022312d22929c13b5db6c8fbd4d656225c12e
|
e0b87eb63633d601f3e9e23e35b62b72835f091c
|
/man/transfquad.Rd
|
251e5a0a61627a9deef1ac7247c8bd74a6e5c0af
|
[] |
no_license
|
Allisterh/nlr
|
7737c85aa80485f5173469d2479d89be72c427ba
|
7f45b89f1748d356af588920b6f7ae2a5269f80d
|
refs/heads/master
| 2022-04-02T20:53:11.162698
| 2019-07-31T11:40:02
| 2019-07-31T11:40:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,694
|
rd
|
transfquad.Rd
|
\name{transfquad}
\alias{transfquad}
\alias{transfquadvec}
\title{
Variance to standard deviation transform.
}
\description{
This function computes the standard deviation of an output from a parametric nonlinear variance function object, usually stored as an \code{\link{nl.form}}. The gradient and Hessian attributes will also be transformed.
\code{transfquadvec} transforms a vector similarly.
\usage{
transfquad(varcomp)
}
\arguments{
\item{varcomp}{
A vector of variances, with an (n by p) \code{"gradient"} attribute and a three-dimensional (n by p by p) \code{"hessian"} attribute.
}
}
\details{
The standard deviation is simply the square root of the variance. The gradient attribute is transformed using the first derivative of the square-root function, and the Hessian attribute using its second derivative.
}
\value{
Vector of transformed standard deviations, including an (n by p) \code{"gradient"} attribute and a three-dimensional (n by p by p) \code{"hessian"} attribute.
}
\references{
Riazoshams H, Midi H, and Ghilagaber G, 2018. Robust Nonlinear Regression, with
Application using R, John Wiley and Sons.
}
\author{
Hossein Riazoshams, May 2014.
Email: \email{riazihosein@gmail.com}
URL \url{http://www.riazoshams.com/nlr/}
}
\note{
In \code{nlr}, variance functions are stored as \code{\link{nl.form}} objects for computation.
This function is called by \code{\link{nlr}}; for compatibility it is better to call it through \code{\link{nlr}} rather than directly.
}
\seealso{
\code{\link{nl.form}}
}
\examples{
## The function is currently defined as
"transfquad"
}
\keyword{ variance }
\keyword{ standard deviation }
\keyword{gradient}
\keyword{hessian}
|
79482f4b830fa6f3024363473fefc753e95d9e91
|
5b67a3e15e7d0f4474ea029c9a56bd49953d04e5
|
/faultDetectionSkySpark/data-raw/study_set.R
|
010f8f54d942835999229c6435c1183c7d10d1f0
|
[] |
no_license
|
yujiex/gsa_2018
|
9ae9b978331f93ae8c6a7e8199f5545a5255b06c
|
ff7a2f1dd879c833d320b6e22f66d35174ee849e
|
refs/heads/master
| 2020-03-18T02:52:27.096253
| 2019-05-24T15:51:04
| 2019-05-24T15:51:04
| 134,211,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,568
|
r
|
study_set.R
|
library("readr")
library("dplyr")
# Build the GSALink study set: buildings whose SkySpark data was downloaded,
# whose summed energy + water interval data is positive, and which have a
# positive total rule eCost. Writes the result to ../data/has_energy_ecost.csv.
allspark = readxl::read_excel("list_of_gsalink_buildings.xlsx", sheet=2) %>%
  dplyr::mutate(`Building_Number`=substr(`spark_entry`, 1, 8)) %>%
  dplyr::select(-`spark_entry`) %>%
  dplyr::rename(`building`=`Building_Number`) %>%
  {.}
# How many buildings are downloaded vs. have no data?
allspark %>%
  dplyr::group_by(`status`) %>%
  dplyr::summarise(n())
# A tibble: 2 x 2 (console output pasted in as a record)
##   status `n()`
##   <chr>  <int>
## 1 downloaded    89
## 2 no data    64
energy_status = readr::read_csv("energy_data_existance.csv")
# Keep downloaded buildings with positive energy + water totals; missing
# numeric readings are zero-filled first so the sums are never NA.
# (The stray "+ +" below is a harmless unary plus.)
has_energy =
  allspark %>%
  dplyr::filter(`status`=="downloaded") %>%
  dplyr::left_join(energy_status) %>%
  dplyr::mutate_if(is.numeric, function(x) ifelse(is.na(x), 0, x)) %>%
  dplyr::mutate(`has_energy`=(`kWh Del Int` + `kWh del-rec Int` + `kWh Rec Int` + `Natural Gas Vol Int` > 0)) %>%
  dplyr::mutate(`has_energy_water`=(`Domestic H2O Int gal` +
                + `kWh Del Int` + `kWh del-rec Int` + `kWh Rec Int` + `Natural Gas Vol Int` > 0)) %>%
  dplyr::filter(`has_energy_water`) %>%
  {.}
nrow(has_energy)
# Total positive eCost per building from the rule summary.
hasEnergyCost =
  readr::read_csv("rule_summary.csv") %>%
  dplyr::filter(`eCost` > 0) %>%
  dplyr::select(`building`, `eCost`) %>%
  dplyr::group_by(`building`) %>%
  dplyr::summarise(`eCost`=sum(`eCost`)) %>%
  {.}
# Join costs on and drop buildings without any eCost.
has_energy_ecost = has_energy %>%
  dplyr::left_join(hasEnergyCost) %>%
  dplyr::select(`building`, `status`, `has_energy_water`, `has_energy`, `eCost`) %>%
  dplyr::filter(!is.na(`eCost`)) %>%
  {.}
head(has_energy_ecost)
## this is the current study set
has_energy_ecost %>%
  readr::write_csv("../data/has_energy_ecost.csv")
|
05d0ad5a08708e097db5b17c94a1012be74ddfa5
|
47941ce8c8512b65d32aceb374475591f693e16d
|
/DMC-MBN2019/tutorial/dmc_3_4_DDM.R
|
e354e0cdda51a0fff9318802f21c5afcc294e21d
|
[] |
no_license
|
StevenM1/summerschool_mbn_2019
|
525ccad673677073ed6bad70d6b30c86b9efeffd
|
a0d4b0ce1d7df2a625fcf55c35cb15f33cd661be
|
refs/heads/master
| 2020-06-28T11:30:39.791795
| 2019-08-08T12:16:11
| 2019-08-08T12:16:11
| 200,222,576
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,862
|
r
|
dmc_3_4_DDM.R
|
################## DMC Lesson 3: Sampling
### Lesson 3.4: Sampling and assessing a single DDM subject
# NOTE(review): rm(list=ls()) wipes the user's whole workspace; tolerated in
# a tutorial script, but avoid in reusable code.
rm(list=ls())
# Current working directory must be set to the top-level folder
# containing the dmc and tutorial subfolders
source ("dmc/dmc.R")
load_model ("DDM","ddm.R")
# load_data ("dmc_3_4.RData")
# One free parameter per DDM component; st0 and d are fixed via `constants`.
p.map <- list(a="1",v="1",z="1",d="1",sz="1",sv="1",t0="1",st0="1")
const <- c(st0=0,d=0)
# Design is the same simple one used in previous examples.
model <- model.dmc(p.map,constants=const,
                   match.map=list(M=list(s1="r1",s2="r2")),
                   factors=list(S=c("s1","s2")),
                   responses=c("r1","r2"),
                   type="rd")
# Generating parameter values; simulate 10,000 trials per design cell.
p.vector <- c(a=1,v=1,z=0.5,sv=1,sz=0.2,t0=.15)
data.model <- data.model.dmc(simulate.dmc(p.vector,model,n=1e4),model)
# Accuracy around 70%
par(mfrow=c(1,2))
plot.cell.density(data.cell=data.model[data.model$S=="s1",],C="r1",xlim=c(0,2),main="s1")
plot.cell.density(data.cell=data.model[data.model$S=="s2",],C="r2",xlim=c(0,2),main="s2")
# Likelihood profiles for each parameter over a range around its true value.
# Profiles are broad for variability parameters and a little
# off for the sz variability parameter in particular.
# NOTE: This can be very slow!
par(mfrow=c(2,3)); ylim = c(200,475)
profile.dmc("a",.9,1.1,p.vector,data.model,ylim=ylim)
profile.dmc("v",.5,1.5,p.vector,data.model,ylim=ylim)
profile.dmc("z",.45,.55,p.vector,data.model,ylim=ylim)
profile.dmc("sv",.1,2,p.vector,data.model,ylim=ylim)
profile.dmc("sz",.1,.45,p.vector,data.model,ylim=ylim)
profile.dmc("t0",.14,.16,p.vector,data.model,ylim=ylim)
# Note if you look at ddm.R you will see a function "bad" in the
# likelihood.dmc function (see also tutorial 1_5). It enforces these bounds by
# setting the likelihood to zero when these constraints are breached.
# You will note that there are other bounds enforced, including some a
# little inside the theoretically allowable bounds in order to avoid numerical
# problems in the C code that performs the numerical integration to get the
# ddm likelihood. It can sometimes have problems with the extreme estimates that
# can occur during sampling. Note that the set implemented in "bad" may
# sometimes need augmentation by editing the ddm.R script.
# Here we use beta(1,1) (i.e., uniform) priors on the z and sz parameters
# (so they are bounded in the 0-1 range, recall both are defined RELATIVE to a),
# the same for t0 (to keep it greater than zero, which it must be by definition,
# and < 1 to impose prior knowledge that values larger than 1 second are very
# unlikley in most applications) and finally truncated normal priors
# on the remaining parameters (to enforce a > 0, sv > 0 and t0 > 0) with broad
# bounds otherwise but not unbounded in order to avoid numerical issues, e.g.
# very large a or very large or small v). These choices might vary in other
# applications, and interact with the likelihood restrictions described above.
p.prior <- prior.p.dmc(
dists = c("tnorm","tnorm","beta","tnorm","beta","beta"),
p1=c(a=1,v=0,z=1,sv=1,sz=1,t0=1),
p2=c(a=1,v=2,z=1,sv=1,sz=1,t0=1),
lower=c(0,-5,NA,0,NA,NA),
upper=c(2, 5,NA,2,NA,NA)
)
par(mfcol=c(2,3)); for (i in names(p.prior)) plot.prior(i,p.prior)
# Parameters of the DDM are more strongly correlated than those of the LNR
# hence longer burnin and more stuck chains are to be expected, so try a
# similar longer burnin as for LBA.
samples <- samples.dmc(nmc=400,p.prior,data.model)
samples <- run.dmc(samples, report = 25, cores=4,p.migrate=.05)
plot.dmc(samples,pll.chain=TRUE,layout=c(1,3))
plot.dmc(samples)
plot.dmc(samples,pll.chain=TRUE,start=300,layout=c(1,3))
plot.dmc(samples,start=300)
# Looks good, get 500 without migration.
samples1 <- run.dmc(samples.dmc(nmc=500,samples=samples),cores=4,report=50)
plot.dmc(samples1,pll.chain=TRUE)
plot.dmc(samples1)
# R-hat shows whole series not fully converged, but it doesn't look to be moving
gelman.diag.dmc(samples1)
# Point est. Upper C.I.
# a 1.10 1.15
# v 1.10 1.16
# z 1.10 1.15
# sz 1.22 1.42
# sv 1.12 1.18
# t0 1.12 1.18
#
# Multivariate psrf
#
# 1.19
# Effective size is still fairly small, so probably just need a longer series.
effectiveSize.dmc(samples1)
# a v z sz sv t0
# 339 314 344 292 326 291
samples2 <- run.dmc(samples.dmc(nmc=500,samples=samples1,add=TRUE),
cores=4,report=25)
plot.dmc(samples2)
# Now close to converged, sz is a little tardy.
gelman.diag.dmc(samples2)
# Point est. Upper C.I.
# a 1.04 1.06
# v 1.04 1.07
# z 1.04 1.05
# sz 1.14 1.23
# sv 1.04 1.06
# t0 1.05 1.08
#
# Multivariate psrf
#
# 1.14
effectiveSize.dmc(samples2)
# a v z sz sv t0
# 594 573 625 486 562 526
# Quite autocorrelted
acf.dmc(samples2)
# Looks like we might need an even longer series. Lets try this with automatic
# instead so we don’t have to figure it out manually.
samples.auto <- samples.dmc(nmc=100,p.prior,data=samples$data)
# Takes 4 cycles of 100 to get rid of stuck chains
samples1.auto <- run.unstuck.dmc(samples.auto,p.migrate=.05,verbose=TRUE)
# 10 20 30 40 50 60 70 80 90 100
# ...
# Bad chains: 8 7 1 13 5 4 6 9 18
# ...
# Bad chains: 13 18 6 1 12 16 11 14 5
# ...
# Bad chains: 18 15
# ...
# Bad chains: None
# Then sample to convergence and a minimum effectiveSize of 500. Takes 1300 to
# get there and near the end Rhat is not monotonic decreasing.
samples2.auto <- run.converge.dmc(samples1.auto,minN=500,nmc=50,max.try=50,verbose=TRUE)
# 110 120 130 140 150
# [1] "N = 150 Multivariate psrf achieved = 1.33899146633459"
# ...
# [1] "N = 1100 Multivariate psrf achieved = 1.10805186781507"
# 1110 1120 1130 1140 1150
# [1] "N = 1150 Multivariate psrf achieved = 1.11406650045681"
# 1160 1170 1180 1190 1200
# [1] "N = 1200 Multivariate psrf achieved = 1.11669803801465"
# 1210 1220 1230 1240 1250
# [1] "N = 1250 Multivariate psrf achieved = 1.10591229677169"
# 1260 1270 1280 1290 1300
# [1] "Final multivariate psrf = 1.09652788931662"
# Effective sample size
# a v z sz sv t0
# 716 669 772 508 732 591
# Looking in more detail we see sz is still the highest. However, this is
# probably OK, Gelman et al. (2004) recommendation in the BDA text was that
# values < 1.2 are OK.
gelman.diag.dmc(samples2.auto)
# Point est. Upper C.I.
# a 1.03 1.05
# v 1.03 1.05
# z 1.03 1.04
# sz 1.11 1.17
# sv 1.03 1.05
# t0 1.06 1.09
#
# Multivariate psrf
#
# 1.1
# Good number of samples, but strong autocorrelation
acf.dmc(samples2.auto)
# Confirm that no chains are stuck
pick.stuck.dmc(samples2.auto,cut=10,verbose=TRUE)
# Tabled estimates show good recovery, although sz in particular has a wide CI.
summary.dmc(samples2.auto)
# 2.5% 25% 50% 75% 97.5%
# a 0.99093 1.0014 1.0080 1.0137 1.0260
# v 0.95276 0.9972 1.0225 1.0480 1.0965
# z 0.49555 0.4980 0.4993 0.5007 0.5031
# sz 0.06808 0.2363 0.2806 0.3147 0.3673
# sv 0.83226 0.9890 1.0625 1.1372 1.2720
# t0 0.14657 0.1497 0.1511 0.1522 0.1539
# Good updating although sv and sz less so than the other parameters
plot.dmc(samples2.auto,p.prior=p.prior)
# Strong correlations but not as strong as LBA
pairs.dmc(samples2.auto)
# Sample posterior predictive to check fit
pp <- post.predict.dmc(samples2.auto)
# Good fit to pdf
plot.pp.dmc(pp)
# And cdf
plot.pp.dmc(pp,"cdf")
# Posterior predictive p value for robust skew (rather slow!)
ppp.dmc(samples2.auto,plot.density=TRUE,
fun=function(data) diff(diff(quantile(data$RT,probs=c(.25,.5,.75)))) )
# [1] 0.278
# save_data (p.vector,data,samples,samples1,samples2,
# samples1.auto,samples2.auto,pp,file="dmc_3_4.RData")
|
008834b588d495ab72cbebbf06fae9ba1cc45276
|
010c833ed30f99f9b8f7fb840f9333d8471bc80d
|
/Code_plot1.R
|
46434e3a4489de32bd2b37ef2a852040043dee20
|
[] |
no_license
|
ArianeBiais/ExData_Plotting1
|
8a7ca43359a751e7fe0c1508c71a23f55dab0d6c
|
3afa24e042a625b938aeec262b9dbdfa425de3d7
|
refs/heads/master
| 2022-12-07T09:09:14.406845
| 2020-08-11T11:38:05
| 2020-08-11T11:38:05
| 286,572,035
| 0
| 0
| null | 2020-08-10T20:26:48
| 2020-08-10T20:26:47
| null |
UTF-8
|
R
| false
| false
| 1,009
|
r
|
Code_plot1.R
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
##
## Improvements over the original:
##  * read.table() with sep = ";" replaces the readLines()/strsplit()/
##    rbind.data.frame() pipeline, which grew a data frame row by row
##    (O(n^2)) and coerced every column to character.
##  * na.strings = "?" converts the dataset's missing-value marker at read
##    time, so as.numeric() no longer emits coercion warnings.
##  * the two target dates are selected with %in% in a single subset.
##  * the PNG device is opened before plotting, so the file is rendered at
##    480x480 directly instead of being copied from the screen device.
data <- read.table("household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?", stringsAsFactors = FALSE)
## Convert the Date variable (d/m/Y text) to Date class
data$Date <- as.Date(data$Date, "%d/%m/%Y")
## Extract only the subsets from the dates 2007-02-01 and 2007-02-02
finaldata <- data[data$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
## Combined date-time column (POSIXct), kept for parity with the other plots
finaldata$DateTime <- as.POSIXct(paste(finaldata$Date, finaldata$Time))
## Already numeric thanks to na.strings above; kept for safety
finaldata$Global_active_power <- as.numeric(finaldata$Global_active_power)
## Create the first plot directly on the PNG device
png("plot1.png", width = 480, height = 480)
hist(finaldata$Global_active_power,
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     main = "Global Active Power", col = "red", ylim = c(0, 1200))
dev.off()
|
fd21fb567d95022e1eaec46b3f4084814d744f75
|
da255c56d1636f2c630c5885403030f0d0b57899
|
/man/confirm.Rd
|
d4032d467a3525d7683d929427dc03fd544d5c76
|
[] |
no_license
|
donaldRwilliams/hypMuVar
|
593fd72a978c879481514a34cb9ea02026e64033
|
dc09aaef8c4d649bdeabf5583088f5ae0b901cc2
|
refs/heads/master
| 2020-08-01T06:01:16.419033
| 2020-01-10T01:12:26
| 2020-01-10T01:12:26
| 210,891,469
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 375
|
rd
|
confirm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confirm.R
\name{confirm}
\alias{confirm}
\title{S3 \code{confirm} method}
\usage{
confirm(object, hyp, ...)
}
\arguments{
\item{object}{\code{melsm} object}
\item{hyp}{hypothesis to test}
\item{...}{currently ignored}
}
\description{
S3 \code{confirm} method
}
\seealso{
\code{\link{confirm}}
}
|
4eaa1d9ee71f8131124cf7bdcb51524590defb86
|
c4dbff372e59bc873f0b9b636c4c9a3efe2d1cfb
|
/app.R
|
fe4dbe587c6775fa6891eea9025ace7456b0ffb7
|
[] |
no_license
|
sambeettiady/cricket_ipl
|
424909afebebf5b729594bc8ec7c607bce0bc0d8
|
a3a08c7f4b3b3383ea9290c2dbbf9dd3afcbfbef
|
refs/heads/master
| 2020-03-30T03:36:56.683121
| 2018-09-28T07:05:10
| 2018-09-28T07:05:10
| 150,697,852
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,546
|
r
|
app.R
|
## Shiny app: IPL player performance explorer.
## Loads the fantasy helper functions and the player roster, then builds a
## dashboard with one tab per performance breakdown (defined further below).
rm(list =ls()) # NOTE(review): wiping the global env inside a script is discouraged
library(shiny)
library(dplyr)
library(readr)
library(ggplot2)
library(jpeg)
## Provides get_player_batting_performance() / get_player_bowling_performance()
## used by the server function below.
source('ipl_2016_fantasy.R')
player_data = read_csv(file = 'ipl_players.csv')
## Drop roster rows for players not in the current squads
player_data = player_data[player_data$team != 'Not Playing',]
players = unique(player_data$player)
ui = fluidPage(fluidRow(column(width = 3,selectInput(inputId = 'player',label = 'Choose Player',choices = players)),
column(width = 3,selectInput(inputId = 'analysis_type',label = 'Choose Analysis Type',choices = c('Batting','Bowling')))),
title = 'IPL Player Analysis',
fluidRow(
column(width = 2,imageOutput("Player_Photo",width = "150px",height = "150px")),
column(width = 2,textOutput(outputId = 'team')),
column(width = 2,textOutput(outputId = 'role')),
column(width = 2,textOutput(outputId = 'foreign')),
column(width = 2,textOutput(outputId = 'batting_style')),
column(width = 2,textOutput(outputId = 'bowling_style'))
),
navbarPage(
title = 'Player Performance',
tabPanel(title = 'Overall', DT::dataTableOutput('Overall_Performance')),
tabPanel(title = 'Innings No.', DT::dataTableOutput('innings')),
tabPanel('Vs Teams', DT::dataTableOutput('team_wise')),
tabPanel('All Stadiums', DT::dataTableOutput('stadium_wise')),
tabPanel('Batting/Bowling Type',DT::dataTableOutput('type')),
tabPanel('Season/Year', DT::dataTableOutput('season')),
tabPanel('Batting Position', DT::dataTableOutput('batting_position')),
tabPanel('Over Category', DT::dataTableOutput('over_category')),
tabPanel('Vs Batsman/Bowler', DT::dataTableOutput('vs_batsman_or_bowler')),
tabPanel('Cumulative',
fluidRow(column(width = 2,conditionalPanel(condition = "input.analysis_type == 'Batting'",
radioButtons("batting", "Variable to Plot:",
c("Runs" = "cum_runs",
"Average" = "cum_batting_avg",
"Strike Rate" = "strike_rate",
"Fours" = "cum_fours",
"Sixes" = "cum_sixes"))),
conditionalPanel(condition = "input.analysis_type == 'Bowling'",
radioButtons("bowling", "Variable to Plot:",
c("Wickets" = "cum_wickets",
"Average" = "cum_bowling_avg",
"Strike Rate" = "cum_strike_rate",
"Economy Rate" = "cum_economy_rate",
"Dot Ball Percentage" = "cum_dot_ball_pct")))),
column(width = 10,plotOutput('cumulative'))))
),
fluidRow(textOutput(outputId = 'creator'))
)
server = function(input,output){
output$Player_Photo = renderImage({
filename = normalizePath(file.path('./player_photos',paste(input$player, '.jpg', sep='')))
list(src = filename,width = 150,height = 150)
}, deleteFile = FALSE)
output$team = renderText(expr = paste('Team:',player_data$team[player_data$player == input$player]))
output$role = renderText(paste('Player Role:',player_data$role[player_data$player == input$player]))
output$batting_style = renderText(paste('Batting:',player_data$batting_hand[player_data$player == input$player]))
output$bowling_style = renderText(paste('Bowling:',player_data$bowling_hand[player_data$player == input$player],player_data$bowling_style[player_data$player == input$player]))
output$foreign = renderText(paste('Overseas Player:',ifelse(player_data$foreign_player[player_data$player == input$player] == 1,'Yes','No')))
output$Overall_Performance <- DT::renderDataTable(expr =
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player),options = list(pageLength = 1,searching = F,paging = F),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player),options = list(pageLength = 1,searching = F,paging = F),rownames = F)}
)
output$innings <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'innings_no'),options = list(pageLength = 2,searching = F,paging = F),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'innings_no'),options = list(pageLength = 2,searching = F,paging = F),rownames = F)}
)
output$team_wise <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'bowling_team'),options = list(pageLength = 15,searching = F,paging = F),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'batting_team'),options = list(pageLength = 15,searching = F,paging = F),rownames = F)}
)
output$stadium_wise <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'stadium'),options = list(pageLength = 10),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'stadium'),options = list(pageLength = 10),rownames = F)}
)
output$type <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'bowling_style'),options = list(pageLength = 10,searching = F,paging = F),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'batting_hand'),options = list(pageLength = 2,searching = F,paging = F),rownames = F)}
)
output$season <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'season_year'),options = list(pageLength = 10),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'season_year'),options = list(pageLength = 10),rownames = F)}
)
output$batting_position <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'batting_position'),options = list(pageLength = 11,searching = F,paging = F),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'batting_position'),options = list(pageLength = 11,searching = F,paging = F),rownames = F)}
)
output$over_category <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'over_category'),options = list(pageLength = 4,searching = F,paging = F),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'over_category'),options = list(pageLength = 4,searching = F,paging = F),rownames = F)}
)
output$vs_batsman_or_bowler <- DT::renderDataTable(
if(input$analysis_type == 'Batting'){DT::datatable(get_player_batting_performance(player = input$player,overall = F,grouping_var_string = 'bowler'),options = list(pageLength = 10),rownames = F)
}else if(input$analysis_type == 'Bowling'){DT::datatable(get_player_bowling_performance(player = input$player,overall = F,grouping_var_string = 'batsman'),options = list(pageLength = 10),rownames = F)}
)
output$cumulative <- renderPlot(expr =
if(input$analysis_type == 'Batting'){
cum_plot = ggplot(data = get_player_batting_performance(player = input$player, overall = F, cumulative_avg = T))
cum_plot = cum_plot + geom_point(mapping = aes_string(x = "match_index", y = input$batting)) + xlab('Innings No.')
cum_plot
}else if(input$analysis_type == 'Bowling'){
cum_plot = ggplot(data = get_player_bowling_performance(player = input$player, overall = F, cumulative_avg = T))
cum_plot = cum_plot + geom_point(mapping = aes_string(x = "match_index", y = input$bowling)) + xlab('Innings No.')
cum_plot
}
)
output$creator = renderText(expr = 'Created By: Sambeet Tiady, Email: sambeet.tiady@gmail.com')
}
shinyApp(ui = ui,server = server)
|
fcc5de496e55dc59b7d568660ebd9a4421317778
|
2811cadbc4eea45d9cb7585057ff2672367213ff
|
/R/parse.R
|
fbd312d38ef54da9e86fcf1ab612be95d587844f
|
[
"MIT"
] |
permissive
|
coolbutuseless/minihtml
|
c7ee63fe67a394ca1d2e17587d6786c3d743ffcf
|
f5fbeb90936934782fc31b7a58698eba93816fe2
|
refs/heads/master
| 2020-07-28T19:42:23.725014
| 2020-03-07T01:07:38
| 2020-03-07T01:07:38
| 209,514,838
| 19
| 2
|
MIT
| 2020-03-07T01:07:39
| 2019-09-19T09:31:17
|
R
|
UTF-8
|
R
| false
| false
| 2,410
|
r
|
parse.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Recursively convert an 'xml2' node tree into a `minihtml` structure
#'
#' Walks the tree produced by the 'xml2' package and rebuilds it out of
#' `minihtml` objects, copying attributes and children as it goes. Text
#' children are kept as plain character strings.
#'
#' @param xml2_node root node of a document, or any element node
#' @param as_document if `TRUE`, build the root as a document node
#'   (`html_doc`); otherwise build a plain element node (`html_elem`)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
parse_inner <- function(xml2_node, as_document = TRUE) {
  tag_name   <- xml2::xml_name(xml2_node)
  attributes <- xml2::xml_attrs(xml2_node)
  contents   <- xml2::xml_contents(xml2_node)

  node <- if (as_document) {
    html_doc(name = tag_name)
  } else {
    html_elem(name = tag_name)
  }

  # Copy every XML attribute onto the freshly created node
  do.call(node$update, as.list(attributes))

  # Attribute-free 'text' children stay character strings; everything else
  # is an element and recurses (always as a non-document node).
  converted_children <- lapply(contents, function(child) {
    is_plain_text <- xml2::xml_name(child) == 'text' &&
      length(xml2::xml_attrs(child)) == 0
    if (is_plain_text) {
      as.character(child)
    } else {
      parse_inner(child, as_document = FALSE)
    }
  })
  do.call(node$append, converted_children)

  node
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Parse HTML text or file into an HTMLDocument or HTMLElement
#'
#' @param x,encoding,...,options options passed to \code{xml2::read_xml()}
#' @param as_html NOTE(review): accepted but never used -- parsing always
#'   goes through \code{xml2::read_xml()}. Either forward it to
#'   \code{xml2::read_html()} or drop the argument.
#'
#' @return XMLDocument or XMLElement
#' @export
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
parse_html_doc <- function(x, encoding='', ..., as_html = FALSE,
                           options = c('RECOVER', 'NOERROR', 'NOBLANKS')) {
  # Fail early with a clear message when the optional 'xml2' dependency is absent
  if (!requireNamespace("xml2", quietly = TRUE)) {
    stop("parse_html_doc(): need 'xml2' installed to read HTML", call. = FALSE)
  }
  xml2_node <- xml2::read_xml(x=x, encoding=encoding, ..., options=options)
  # Root becomes a document node
  parse_inner(xml2_node, as_document = TRUE)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' @rdname parse_html_doc
#' @export
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
parse_html_elem <- function(x, encoding='', ..., as_html = FALSE,
                            options = c('RECOVER', 'NOERROR', 'NOBLANKS')) {
  # Same as parse_html_doc(), but the root is built as a plain element node.
  # NOTE(review): `as_html` is accepted but never used here either.
  if (!requireNamespace("xml2", quietly = TRUE)) {
    stop("parse_html_elem(): need 'xml2' installed to read HTML", call. = FALSE)
  }
  xml2_node <- xml2::read_xml(x=x, encoding=encoding, ..., options=options)
  parse_inner(xml2_node, as_document = FALSE)
}
|
766540eb200687c6ef52dfdef7ad079486359506
|
eaff08fd993bf809dfabfbe46a3c9874d75ffcf4
|
/src/blog-posts/delta_method.R
|
8efe32302935bc74064ecf259e44f9ef125cde2b
|
[] |
no_license
|
aartimalik/test
|
55491349f17c51e0392dca857c2a8c2c16e62897
|
47e0ef4c0936ca41c3963b511cdfe691e346b188
|
refs/heads/master
| 2023-04-24T17:39:37.319471
| 2021-05-02T16:21:01
| 2021-05-02T16:21:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,725
|
r
|
delta_method.R
|
library(data.table)
library(lubridate)
library(ggplot2)
library(gridExtra)
# Load data ====================================================================
# https://finance.yahoo.com/quote/TSLA/history
dt = fread("https://query1.finance.yahoo.com/v7/finance/download/TSLA?period1=1321444326&period2=1610832766&interval=1d&events=history")
dt[, lClose := log(Close)]
# Show why log transformation is useful ========================================
lm_1 = lm(Close ~ Date, data=dt)
lm_2 = lm(lClose ~ Date, data=dt)
r2_1 = summary(lm_1)$r.squared
r2_2 = summary(lm_2)$r.squared
theme_blog = theme_bw() +
theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(), # This and the next line: left and bottom border
axis.line.x = element_line(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
legend.position = c(0.2, 0.8)
)
g1 = ggplot(dt, aes(x=Date, y=Close)) +
geom_line() +
geom_smooth(data=dt, formula="y~x", method="lm", col="red", se=FALSE, fullrange=TRUE) +
theme_blog +
annotate("text", x = dt$Date[200], y = 700, label = paste0("R2=",round(r2_1,3)), size=8)
g2 = ggplot(dt, aes(x=Date, y=lClose)) +
geom_line() + geom_smooth(data=dt, formula="y~x", method="lm", col="red", se=FALSE, fullrange=TRUE) +
theme_blog +
annotate("text", x = dt$Date[200], y = log(500), label = paste0("R2=",round(r2_2,3)), size=8)
ggsave(file.path(.github,
"harveybarnhard.github.io/public/img/blog",
"delta-method/tsla.svg"),
plot = g1,
device ="svg",
width = 13/1.1, height=4/1.1, units="in")
ggsave(file.path(.github,
"harveybarnhard.github.io/public/img/blog",
"delta-method/log-tsla.svg"),
plot = g2,
device ="svg",
width = 13/1.1, height=4/1.1, units="in")
# Predict Price ================================================================
dt_new = data.table(
Date = seq(ymd('2020-01-17'),ymd('2030-01-16'),by='days')
)
predictions = predict(lm_2, newdata=dt_new, interval="prediction")
predictions = data.table(predictions)
fit_dollars_lin = predict(lm_1, newdata=dt_new)
dt_new = cbind(dt_new, predictions, fit_dollars_lin)
g3 = ggplot() +
geom_line(data=dt, mapping=aes(x=Date, y=lClose)) +
geom_line(data=dt_new, mapping=aes(x=Date, y=fit), linetype = "dashed") +
geom_ribbon(data=dt_new, mapping=aes(x=Date, ymin=lwr, ymax=upr),
alpha=0.2) +
theme_blog
ggsave(file.path(.github,
"harveybarnhard.github.io/public/img/blog",
"delta-method/log-tsla-predict.svg"),
plot = g3,
device ="svg",
width = 13/1.1, height=4/1.1, units="in")
# Transform standard errors ====================================================
# The regression predicts log price, so dollar-scale uncertainty requires
# back-transforming the standard errors. Two approaches are compared.
# Recover the SE of prediction from the 95% interval half-width
dt_new[, std_err := (upr-lwr)/(2*1.96)]
dt_new[, fit_dollars := exp(fit)]
# Naive method: exponentiate the log-scale interval endpoints and halve the
# gap (pretends the asymmetric back-transformed interval is symmetric)
dt_new[, std_err_naive := (exp(fit+std_err) - exp(fit-std_err))/2]
dt_new[, lwr_naive := fit_dollars - 1.96*std_err_naive]
dt_new[, upr_naive := fit_dollars + 1.96*std_err_naive]
# Delta method: Var(g(X)) ~= g'(mu)^2 * Var(X); with g = exp, g'(fit) = exp(fit)
dt_new[, std_err_delta := exp(fit)*std_err]
dt_new[, lwr_delta := fit_dollars - 1.96*std_err_delta]
dt_new[, upr_delta := fit_dollars + 1.96*std_err_delta]
# Calculate annual growth rate =================================================
date_present = ymd('2020-01-15')
date_future = ymd('2030-01-15')
present = dt[Date==date_present]$Close
future = dt_new[Date==date_future]$fit_dollars
# Compound annual growth rate over the actual horizon. The number of years is
# derived from the two dates (10 here); the original hard-coded exponent 1/9,
# an off-by-one for a 2020 -> 2030 horizon.
n_years = as.numeric(date_future - date_present)/365.25
growth = (future/present)^(1/n_years) - 1
# Standard errors of the 2030-01-15 prediction under each back-transformation,
# used in the comparison-plot legend below
future_naive = dt_new[Date==date_future]$std_err_naive
future_delta = dt_new[Date==date_future]$std_err_delta
# Create plot comparing methods ================================================
g4 = ggplot() +
geom_line(data=dt, mapping=aes(x=Date, y=Close)) +
geom_line(data=dt_new, mapping=aes(x=Date, y=fit_dollars), linetype = "dashed") +
geom_ribbon(data=dt_new, mapping=aes(x=Date, ymin=lwr_naive, ymax=upr_naive, color=paste0("Naive Method: $", round(future)," ($", round(future_naive), ")")),
alpha=0.2) +
geom_ribbon(data=dt_new, mapping=aes(x=Date, ymin=lwr_delta, ymax=upr_delta, color=paste0("Delta Method: $", round(future)," ($", round(future_delta), ")")),
alpha=0.2) +
theme_blog +
xlim(c(ymd('2019-01-17'), ymd('2030-01-17'))) +
scale_colour_manual("Prediction and Standard Error on January 15, 2030", values=c("blue", "red"))
ggsave(file.path(.github,
"harveybarnhard.github.io/public/img/blog",
"delta-method/tsla-predict.svg"),
plot = g4,
device ="svg",
width = 13/1.1, height=6/1.1, units="in")
|
5c38d126d0a5d78b632daced5ff0af4c5587d391
|
a63d084f3359fb975735abbeef66048e20e3a7a4
|
/CodeAnalysisRW/callTree_Analysys.r
|
b09fd032d1a308aca5f3480ef7613fab5144aa54
|
[] |
no_license
|
ric1280/VC-Market
|
54c2ff6a4d2e4752ae092fbce0cf2ebd6955d4b5
|
428bd637415117ec1fcac81f2dacb32e6148bf85
|
refs/heads/master
| 2021-06-26T18:32:30.029584
| 2019-03-28T02:24:27
| 2019-03-28T02:24:27
| 134,711,610
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,864
|
r
|
callTree_Analysys.r
|
library(doParallel)
library(stringr)
registerDoParallel()
source("draw-tree.R")
# Check whether `functionName` is actually invoked inside `parentFunction`.
#
# Deparses the parent function's body and looks for a literal
# "functionName(" call site on any line. Compared with the original:
#  * get() + deparse(body(...)) replaces eval(parse(text = ...)),
#  * fixed-string matching replaces the half-escaped regex, so names
#    containing regex metacharacters (e.g. dots) cannot cause false
#    positives,
#  * any() replaces the grepl('TRUE', toString(...)) contortion.
#
# parentFunction: name of the calling function, as a string
# functionName:   name of the callee to look for, as a string
# Returns TRUE if at least one deparsed body line contains "functionName(".
doubleCheckfFunction <- function(parentFunction, functionName) {
  fbody <- deparse(body(get(parentFunction)))
  pattern <- paste0(functionName, "(")
  any(grepl(pattern, fbody, fixed = TRUE))
}
# Resolve the package(s)/environment(s) where function `f` is defined.
#
# Uses utils::find() on the current search path. Environment names come back
# as "package:pkg" (stripped here to "pkg") or ".GlobalEnv" (kept as is).
# Compared with the original: the no-op paste(f) is gone, the two duplicated
# strsplit() extraction branches collapse into one sub() call, and an unknown
# name no longer errors in "MAJOR" mode (it returns character(0)).
#
# f:    function name as a string (stray spaces are removed first)
# type: "TOTAL" -> every match, collapsed with ";" in reverse search order;
#       anything else -> only the last (most fundamental) match.
# Returns a character scalar ("" / character(0) when `f` is not found).
findFunctionNamespace <- function(f, type = "TOTAL") {
  f <- gsub(" ", "", f)
  hits <- find(f)
  # ".GlobalEnv" carries no "package:" prefix and is left untouched
  cleaned <- sub("^package:", "", hits)
  if (type == "TOTAL") {
    paste(rev(cleaned), collapse = ";")
  } else {
    tail(cleaned, 1)
  }
}
# Build a data frame describing every meaningful function call found in the
# body of `f.expression` (a function name supplied as a string).
#
# Each top-level expression of the body is re-parsed and rendered as an AST
# via call_tree_mod(); callTreeAnalysis() then walks that rendering and
# records, per detected call: its name, the surrounding control-flow
# "tree aspect", its namespace(s) and a composed identifier.
#
# f.expression: function name as a string
# enablePrint:  when TRUE, callTreeAnalysis() prints its walk for debugging
# Returns the accumulated data frame sorted by FunctionName in reverse order.
structureTreeDF <- function(f.expression, enablePrint=FALSE) {
  if(enablePrint) {cat("\n\n========== function:", f.expression, " ===========\n")}
  # NOTE(review): eval(parse(...)) is used only to call body() on a name held
  # in a string; body(get(f.expression)) would avoid string evaluation.
  function.body <- eval(parse(text = paste("body(", f.expression, ")")))
  stuffsx <- as.character(function.body)
  DF <- data.frame(FunctionName=character(), TreeAspect=character(), Namespace=character(), AllNamespaces=character(), ComposedTree = character(), FunctID=character())
  print(length(stuffsx))
  # Bodies with more than 8 top-level expressions are analysed in parallel
  # via the doParallel backend registered at the top of the file.
  if(length(stuffsx) > 8) {
    x <- foreach(majorElement = stuffsx) %dopar% {
      if(majorElement!="{" ){
        # try(..., silent=TRUE) swallows elements that fail to re-parse;
        # such elements simply contribute no rows.
        try(
          {
            sintaxTree <- call_tree_mod(parse(text=majorElement))
            outputAST <- capture.output(cat(sintaxTree, "\n"))
            callTreeAnalysis(f.expression, DF, outputAST, enablePrint)
          }, silent=TRUE)
      }
    }
    # Each worker yields either a data frame of rows or a try-error/NULL;
    # keep only the data frames.
    for(df in x) {
      if(is.data.frame(df)){
        DF <- rbind(DF, df)
      }
    }
  } else {
    # Sequential path: callTreeAnalysis() receives and returns the growing DF.
    for (majorElement in stuffsx) {
      if(majorElement!="{" ){
        try(
          {sintaxTree <- call_tree_mod(parse(text=majorElement))
          outputAST <- capture.output(cat(sintaxTree, "\n"))
          updatedDF <- callTreeAnalysis(f.expression, DF, outputAST, enablePrint)
          DF <- updatedDF}, silent=TRUE)
      }
    }
  }
  #kable(DF)
  orderedDF <- DF[rev(order(DF$FunctionName)),]
  return(orderedDF)
}
callTreeAnalysis <- function(parentFunction, DF, outputAST, enablePrint) {
majorTreeLevel <- 0 #represents the identation level in the new tree, generated in this script
identationLevel <- 0 #represents the identation level in the tree generated by the "call_tree()" function
previousTreeLevel <- 0 #saves the previous tree identation level, so that a behavior can be interpreted when moving on in the tree
expressionsList <- list() #saves vectors of aspect (nameofexpression, currentIdentationLevel, majorTreeLevel), each time a new major identation occurs
lastIfTreeLevel <- 0 #used to identify the "Else If" and "Else" expressions, since they appear with a identation value of only one above the respective "If"
insideNewIfClause <- FALSE
insideNewElseIf <- FALSE
insideNewElse <- FALSE
insideIfArgumentsLookup <- FALSE # used to identify if the "If" is processing the arguments or not. (arguments must have the same majorTreeLevel than the respective if)
insideIfBodyLookup <- FALSE
elseExpressionSuspect <- FALSE # used to suspect of an "Else" expression when there is a reduce in the identation Level and appears a "{" bracket right away
lastForTreeLevel <- 0
insideForArgumentsLookup <- FALSE # used to identify if the "For" is processing the arguments or not. (arguments must have the same majorTreeLevel than the respective if)
previousTreeElement <- ""
printData <- c()
for(line in outputAST) {
lineSplit <- unlist(strsplit(line, "\\| "))
# ------------------------------------------------------------------------------------------------
#COUNTS THE NUMBER OF SPACES BEFORE THE SEPARATOR AND DIVIDES IT BY 2 TO FIND THE IDENTATION LEVEL
currentTreeLevel <- nchar(lineSplit[1])/2
treeElement <- lineSplit[2]
treeElement <- gsub(" ", "", treeElement)
if(is.na(treeElement)) {
treeElement = "MISSING"
currentTreeLevel <- previousTreeLevel}
treeAspect <- c()
# -------------------------------------------------------------
# IF IS FUNCTION, THEN SAVES THE TREE_ELEMENT IN THE DATA FRAME
if(isFunction(treeElement) && isMeaningful(treeElement)){ #these two depend from where the callTree_Analysis.R is called, and what it has imported
if(previousTreeElement == "()" || doubleCheckfFunction(parentFunction,treeElement)) {
for(element in expressionsList) {
if (length(expressionsList) > 0){
treeAspect <- c(treeAspect,head(element,1))
} else {
treeAspect <- c(treeAspect,"")
}
}
collapsedTree <- paste(treeAspect, collapse = " / ")
composedTree <- paste(c(treeAspect, treeElement), collapse = " / ")
majorFNamespace <- findFunctionNamespace(treeElement, type="MAJOR")
fNamespaces <- findFunctionNamespace(treeElement, type="TOTAL")
functID <- paste(c(treeElement, majorFNamespace), collapse = "_")
DF <- rbind(DF, data.frame(FunctionName = treeElement, TreeAspect = collapsedTree, Namespace = majorFNamespace, AllNamespaces=fNamespaces, ComposedTree = composedTree, FunctID = functID ))
}
}
if(treeElement == "if") {
if((currentTreeLevel - lastIfTreeLevel) == 1 && lastIfTreeLevel != 0 ){
if(enablePrint) {printData <- c(printData,paste(currentTreeLevel,"|\t", str_dup(" ", majorTreeLevel), "<< ELSEIF >>","\n"))}
insideNewElseIf <- TRUE
} else {
if(enablePrint) {printData <- c(printData,paste(currentTreeLevel,"|\t", str_dup(" ", majorTreeLevel), "<< IF >>","\n"))}
insideNewIfClause <- TRUE
}
lastIfTreeLevel <- currentTreeLevel
insideIfArgumentsLookup <- TRUE
elseExpressionSuspect <- FALSE
} else if(treeElement == "for") {
if(enablePrint) {printData <- c(printData,paste(currentTreeLevel,"|\t", str_dup(" ", majorTreeLevel), "<< FOR >>","\n"))}
lastForTreeLevel <- currentTreeLevel
insideForArgumentsLookup <- TRUE
expressionsList[[ length(expressionsList) + 1 ]] <- c(treeElement, currentTreeLevel, majorTreeLevel)
elseExpressionSuspect <- FALSE
} else if(insideIfArgumentsLookup) {
if(enablePrint) {printData <- c(printData,paste(currentTreeLevel,"|\t", str_dup(" ", majorTreeLevel), "arg: ", treeElement,"\n"))}
# ----------------------------------------------------------------------------------------------------------------
# IDENTIFIES WHEN DOES THE ARGUMENT ANALYSIS PHASE ENDS, MEANING, IT FINDS WHEN THE ACTUAL BODY OF THE "IF" STARTS
if((currentTreeLevel < previousTreeLevel && currentTreeLevel == lastIfTreeLevel) || (treeElement != "()" && previousTreeElement == "if")){
insideIfArgumentsLookup <-FALSE
if(insideNewIfClause) {
expressionsList[[ length(expressionsList) + 1 ]] <- c("if", currentTreeLevel, majorTreeLevel)
insideNewIfClause <- FALSE
} else if (insideNewElseIf) {
expressionsList[[ length(expressionsList) + 1 ]] <- c("elseif", currentTreeLevel - 1, majorTreeLevel)
insideNewElseIf <- FALSE
}
majorTreeLevel <- majorTreeLevel + 1
}
elseExpressionSuspect <- FALSE
} else if(insideForArgumentsLookup) {
if(enablePrint) {printData <- c(printData,paste(currentTreeLevel,"|\t", str_dup(" ", majorTreeLevel), "arg: ", treeElement,"\n"))}
if((currentTreeLevel - lastForTreeLevel)==1 && treeElement == "{"){ #currentTreeLevel < previousTreeLevel &&
insideForArgumentsLookup <-FALSE
majorTreeLevel <- majorTreeLevel + 1
}
elseExpressionSuspect <- FALSE
} else if (treeElement == "{" && elseExpressionSuspect) {
if(enablePrint) {printData <- c(printData,paste(currentTreeLevel,"|\t", str_dup(" ", majorTreeLevel), "<< ELSE >>","\n"))}
expressionsList[[ length(expressionsList) + 1 ]] <- c("else", currentTreeLevel - 1, majorTreeLevel)
majorTreeLevel <- majorTreeLevel + 1
elseExpressionSuspect <- FALSE
} else{
elseExpressionSuspect <- FALSE
if(enablePrint) {printData <- c(printData,paste(currentTreeLevel,"|\t", str_dup(" ", majorTreeLevel), treeElement,"\n"))}
expressionsList_size <- length(expressionsList)
if (expressionsList_size > 0) {
lastElement_expressionsList <- expressionsList[[expressionsList_size]]
expressionName_LastElement <- head(lastElement_expressionsList,3)[1]
treeLevel_LastElement <- head(tail(lastElement_expressionsList,2),1)
majorTreeLevel_LastElement <- tail(lastElement_expressionsList,1)
#if the last treelevel element was an else if, it means that the tree level is going to be unmatched.
#This happens because the "elseif" element is saved with a tree level less than it actually is, so it matches the "if" tree level
if(expressionName_LastElement == "elseif") {
treeLevel_LastElement <- as.integer(treeLevel_LastElement) + 1
}
# gets the last treeLevelExpression that apeared in the code.
# If the new value is smaller than the previous tree level, means that there was an expression
if(currentTreeLevel < previousTreeLevel && as.numeric(currentTreeLevel) <= as.numeric(treeLevel_LastElement)) {
if(enablePrint) {printData <- c(printData,paste("close\n"))}
# ------------------------------------------------------------------------------------------------
# WHILE THE DIFFERENCE BETWEEN THE TREE LEVEL FROM THE LAST ELEMENT AND THE CURRENT TREE LEVEL
#IS GREATER THAN ONE, IT CONTINUES TO CLOSE THE TREE ELEMENTS OF THE LIST
while(abs(as.numeric(treeLevel_LastElement) - as.numeric(currentTreeLevel)) > 1){
expressionsList[[length(expressionsList)]] <- NULL
expressionsList_newsize <- length(expressionsList)
if (expressionsList_newsize == 0) {
break
}
lastElement_expressionsList <- expressionsList[[expressionsList_newsize]]
treeLevel_LastElement <- head(tail(lastElement_expressionsList,2),1)
majorTreeLevel_LastElement <- tail(lastElement_expressionsList,1)
}
if(length(expressionsList) == 0) {
majorTreeLevel <- 0
} else {
majorTreeLevel <- as.numeric(majorTreeLevel_LastElement)
expressionsList[[length(expressionsList)]] <- NULL
elseExpressionSuspect <- TRUE
}
}
}
}
previousTreeLevel <- currentTreeLevel
previousTreeElement <- treeElement
}
if(enablePrint) {printData <- c(printData,paste("\n###########################finish section #####################################\n\n\n"))}
if(enablePrint) {cat(printData, "\n")}
return(DF)
}
|
22365af47e342df64611007c699826d8792031e0
|
16a1d7ec1187e32b80364c10ddc4a106bf810948
|
/man/probeR.parallel.plot.Rd
|
346fc7d94a6570edeb565e49aa57b78b83d68ae8
|
[] |
no_license
|
cran/ProbeR
|
74a3cf933bbd35dd120375c093db58a30995d15f
|
18996406185c33abaf87074a48a26d7552e623cb
|
refs/heads/master
| 2016-09-03T07:28:37.670093
| 2009-03-01T00:00:00
| 2009-03-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,457
|
rd
|
probeR.parallel.plot.Rd
|
\name{probeR.parallel.plot}
\alias{probeR.parallel.plot}
\title{ Parallel coordinate plot of summary value and probe level data}
\description{
Parallel coordinate plot of summary value and probe level data is very useful to explore
the probe level data with summary value gene by gene.
}
\usage{
probeR.parallel.plot(affy.ID,data.summary, data.probe)
}
\arguments{
\item{affy.ID}{affymetrix id}
\item{data.summary}{ the summary values from the function exprs }
\item{data.probe}{the normalized probe level data. They should be normalized using the same
method for the summary values. Also they should be the same probe level data calculating
the summary values. If the user use RMA with PM values, this should be only PM values.}
}
\references{ Using Reliability with Gene Expression Models }
\author{Eun-Kyung Lee, Dianne Cook, Heike Hofmann, Maneesha Aluru, and Steve Rodermel }
\seealso{ \code{\link{probeR}} }
\examples{
library(affy)
data(affybatch.example)
eset<-expresso(affybatch.example,bg.correct=FALSE,normalize.method="quantiles",pmcorrect.method="pmonly",summary.method="medianpolish")
data.summary<-exprs(eset)
probe.norm<-normalize.AffyBatch.quantiles(affybatch.example,type="pmonly")
data.probe<-log(probes(probe.norm),base=2)
probeR.parallel.plot("D78156_at",data.summary,data.probe)
}
\keyword{ models }% at least one, from doc/KEYWORDS
\keyword{ univar }% at least one, from doc/KEYWORDS
|
e5f6352b2c4e7556593330247b6e7a483dd71aad
|
76434d63930c563cb9bab7d263df2c80da04cb6f
|
/man/mixnorm.Rd
|
5d27222de32a860403bfaf02afcb64c4429d25f0
|
[] |
no_license
|
cran/bda
|
45de77f9d513cbeea00fc34120308f1d37dd2fd0
|
b7cc310ed8ce18c2327f99647f024727e28e59dd
|
refs/heads/master
| 2023-06-22T14:56:20.682683
| 2023-06-18T21:40:09
| 2023-06-18T21:40:09
| 17,694,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,144
|
rd
|
mixnorm.Rd
|
% This is a template R document file
% Distributed under GPL 3 or later
\name{mnorm}
\alias{pmnorm}
\alias{dmnorm}
\alias{qmnorm}
\alias{rmnorm}
\title{The mixed normal distribution}
\description{
Density, distribution function, quantile function and random
generation for the normal mixture distribution with means equal to
'mu' and standard deviations equal to 's'.}
\usage{
dmnorm(x,p,mean,sd)
pmnorm(q,p,mean,sd)
qmnorm(prob,p,mean,sd)
rmnorm(n,p,mean,sd)
}
\arguments{
\item{x,q}{vector of quantiles in \code{dmnorm} and \code{pmnorm}. In
\code{qmnorm}, probabilities are supplied via the \code{prob} argument.}
\item{p}{proportions of the mixture components.}
\item{prob}{A vector of probabilities.}
\item{n}{number of observations. If 'length(n) > 1', the length is
taken to be the number required.}
\item{mean}{vector of means}
\item{sd}{vector of standard deviations}
}
\value{Return the density, probability, quantile and random value, respectively.}
\examples{
p <- c(.4,.6)
mu <- c(1,4)
s <- c(2,3)
dmnorm(c(0,1,2,20),p,mu,s)
pmnorm(c(0,1,2,20),p,mu,s)
qmnorm(c(0,1,.2,.20),p,mu,s)
rmnorm(3,p,mu,s)
}
\keyword{distribution}
|
89322cb64a6c06291cdf12023315ddfaab2aa640
|
2940f0f81e966f8608939f078ff2e437f4cd7f80
|
/man/get_diff_hic.Rd
|
53dc2c5524a807b6a57f2393f1263dd8ce67addc
|
[] |
no_license
|
duydnguyen/TreeHiC
|
a1becedd1f67cd78d7ba607dc26e0cbec95a6dfc
|
7775e527cffaadf8609fe6b50dfd21df770b1a19
|
refs/heads/master
| 2021-09-13T09:35:21.232017
| 2018-04-16T12:11:16
| 2018-04-16T12:11:16
| 123,157,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 468
|
rd
|
get_diff_hic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{get_diff_hic}
\alias{get_diff_hic}
\title{Extract differential Hi-C interactions at a given FDR level}
\usage{
get_diff_hic(testingTree, alpha, use_adjusted_pvals = TRUE)
}
\arguments{
\item{testingTree}{a slot \code{testingTree} of object \code{treeHiCDataSet}}
\item{alpha}{FDR control at \code{alpha} level}
\item{use_adjusted_pvals}{Logical, default to TRUE if adjusted pvalues are used}
}
\description{
Extract the differential Hi-C interactions from a tested tree
(\code{testingTree}), with the false discovery rate controlled at level
\code{alpha}.
}
|
c097f5c999dccb94e86c3d832ba7fea9b2c640b2
|
b8e6b20dcb26f9227773a9f560f4c26702ce7ce7
|
/scripts/adventure.R
|
17e3d788deab38b62bc5a3cbdf0a95e76576f0ec
|
[] |
no_license
|
rugnepal/nepal_tourism_dashboard
|
28bc757a6b77b917ec824c8f960d0d587c61e354
|
099a173ab77b45956fa1464804c2aaa4dafdc755
|
refs/heads/master
| 2022-08-25T22:25:03.432275
| 2020-03-07T13:47:36
| 2020-03-07T13:47:36
| 245,492,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,362
|
r
|
adventure.R
|
# Adventure Data
# NOTE(review): assumes readxl, janitor, dplyr, tidyr and highcharter are
# attached by the calling app -- confirm against the main dashboard script.

# Visitors to national parks / conservation areas, one column per fiscal year.
National_park_df <- read_excel("data/61. NUMBER OF FOREIGN VISITORS TO NATIONAL PARKS _ CONSERVATION AREAS, 2070_71- 074_75.xlsx",
                               col_types = c("text", "numeric", "numeric",
                                             "numeric", "numeric", "numeric")) %>% clean_names()
# Rename columns: the protected area, then one column per fiscal year.
names(National_park_df) <- c("protected_area","2070_71","2071_72","2072_73","2073_74","2074_75")
# Keep the five areas with the most visitors in 2074/75, then reshape to
# long format (one row per area and fiscal year) for charting.
national_park_arrange_df <- arrange(National_park_df,-`2074_75`)
National_park_df<- head(national_park_arrange_df,5)%>% pivot_longer(-protected_area,names_to = "fiscal_year",values_to = "count")
# Line chart: visitor count per fiscal year, one line per protected area.
National_park_df_chart <- hchart(National_park_df, "line", hcaes(x = fiscal_year, y = count, group = protected_area))

# Trekking Charts
# Trekkers per trekking area and year (2001-2018).
trek_df <- read_excel("data/58. NUMBER OF TREKKERS IN DIFFERENT TREKKING AREA, 2001-2018..xlsx",
                      col_types = c("numeric", "numeric", "numeric",
                                    "numeric", "numeric", "numeric",
                                    "numeric", "numeric", "numeric",
                                    "numeric", "numeric")) %>% clean_names()
# Drop aggregate / minor columns, keep the last five years, and reshape to
# long format (one row per trekking route and year).
new_trek_df <- trek_df%>% select(-t_valley,-narphu,-others,-total)%>%tail(5)%>%pivot_longer(-year,names_to = "trekking_route",values_to = "count")
# Line chart: trekker count per year, one line per trekking route.
trek_df_chart <- hchart(new_trek_df, "line", hcaes(x = year, y = count, group = trekking_route))
trek_df_chart
|
e08e174dd01047185f92f3dd4749a6ef70e7d320
|
7e4b3ad15e8ea1fbaeff4905be68940b6b182c75
|
/Plot2.R
|
2b69e800900f3d9056a178d5f4a2759b5d8b5f59
|
[] |
no_license
|
JorgenUnger/ExData_Plotting1
|
6d1388e21619ee96fa0b09fec163ab06f457a757
|
801e3e271202f31a96922de4044d3fb756fa77ca
|
refs/heads/master
| 2020-12-30T17:10:36.022141
| 2017-05-12T12:52:05
| 2017-05-12T12:52:05
| 91,059,116
| 0
| 0
| null | 2017-05-12T06:32:21
| 2017-05-12T06:32:20
| null |
UTF-8
|
R
| false
| false
| 1,257
|
r
|
Plot2.R
|
# Plot2.R -- plot Global Active Power over time for 2007-02-01..2007-02-02
# from the UCI "Individual household electric power consumption" data set,
# writing the result to Plot2.png.

# Download and unzip the data file if it is not already present locally.
if (!file.exists("household_power_consumption.txt")) {
  if (!file.exists("exdata_data_household_power_consumption.zip")) {
    fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    download.file(fileUrl, destfile = "exdata_data_household_power_consumption.zip", method = "auto")
  }
  unzip("exdata_data_household_power_consumption.zip")
}

# Read the data; "?" marks missing values.  `na.strings` is spelled out in
# full: the original `na = "?"` only worked through partial argument matching.
hpc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")

# Keep only the rows for 1 and 2 February 2007 (dates are stored d/m/Y).
hpc <- hpc[(hpc$Date == "1/2/2007" | hpc$Date == "2/2/2007"), ]

# Combine the Date and Time columns into a single datetime column.
hpc$datetime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")

# Create Plot2.png: Global Active Power (kW) as a line over the two days.
png(filename = "Plot2.png", width = 480, height = 480, units = "px", bg = "transparent")
# If your locale is not English, you may need to uncomment and run the
# Sys.setlocale calls below so the x-axis weekday labels render in English.
# Sys.setlocale("LC_ALL", "English")
with(hpc, plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
# Sys.setlocale("LC_ALL")
dev.off()
|
66f604e7af0684303ed30a911c47c05f81d59483
|
9969b02c26fa5388ac971b8212c761c6abf98efb
|
/man/plotbsSpline.Rd
|
d816da2257c83ac1ac829de42503531dced2e6c1
|
[] |
no_license
|
tmcd82070/CAMP_RST
|
0cccd7d20c8c72d45fca31833c78cd2829afc169
|
eca3e894c19936edb26575aca125e795ab21d99f
|
refs/heads/master
| 2022-05-10T13:33:20.464702
| 2022-04-05T21:05:35
| 2022-04-05T21:05:35
| 10,950,738
| 0
| 0
| null | 2017-05-19T20:42:56
| 2013-06-25T21:24:52
|
R
|
UTF-8
|
R
| false
| true
| 2,129
|
rd
|
plotbsSpline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.bs.spline.r
\name{plotbsSpline}
\alias{plotbsSpline}
\title{plotbsSpline}
\usage{
plotbsSpline(X, fit, beg.x, end.x, eff0, option, bd2, ...)
}
\arguments{
\item{X}{The basis b-spline matrix resulting from a call to function
\code{bs}.}
\item{fit}{The generalized linear model resulting from a call to function
\code{glm}.}
\item{beg.x}{The POSIX-formatted start date to use for plotting.}
\item{end.x}{The POSIX-formatted end date to use for plotting.}
\item{eff0}{The default data frame containing efficiency trials for a
particular \code{TrapPositionID}. Typically \code{tmp.df}.}
\item{option}{The plotting option. Value \code{option=1} incorporates logic
surrounding spline knots, while \code{option=2} does not.}
\item{bd2}{The \code{batchDate2} column, typically from data frame \code{df},
containing the \code{batchDate}s mapped to the 1959-1960 spline paradigm.}
\item{...}{Additional plotting control.}
}
\value{
A plot of the fitted cubic spline, its originating data points, and
the knots utilized to achieve the fit.
}
\description{
Plot the results of fitting a temporal b-spline.
}
\details{
Function \code{plotbsSpline} simply organizes all the pieces
necessary to graph the prediction cubic piecewise polynomial resulting from
the use of a b-spline. It plots not only the (necessarily) smooth spline,
but also the original points used to estimate it. It also plots all knots,
i.e., both boundary and interior. It calculates the prediction via matrix
multiplication of the provided matrix basis \eqn{X} and the vector of
parameter \eqn{\beta} coefficients from object \code{fit}.
This function is customized for use with the CAMP project, and will not
work for splines derived from data originating elsewhere without
modification.
}
\examples{
\dontrun{
# ---- Plot results from an efficiency model. Note that no parameter
# ---- is provided for argument bd2 (batchDate2).
plotbsSpline(X,fit,beg.x,end.x,tmp.df)
}
}
\seealso{
\code{F.efficiency.model.enh}
}
\author{
WEST Inc.
}
|
9ec3a15f3e40e0c4bdd7a7179c61660f74ca5082
|
a37122475660395c7306c661f8baa33421228a75
|
/man/PIRequestTemplate.Rd
|
f5aa4de4b3e84ca9846445ebab42deecc7d28398
|
[
"Apache-2.0"
] |
permissive
|
eddyrene/PI-Web-API-Client-R
|
726b1edbea0a73bf28fe9b2f44259972ddecd718
|
7eb66c08f91e4a1c3a479a5fa37388951b3979b6
|
refs/heads/master
| 2020-04-17T01:01:27.260251
| 2018-11-14T10:48:46
| 2018-11-14T10:48:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 520
|
rd
|
PIRequestTemplate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PIRequestTemplate.r
\name{PIRequestTemplate}
\alias{PIRequestTemplate}
\title{Generate an instance of the PIRequestTemplate PI Web API class}
\usage{
PIRequestTemplate(resource = NULL)
}
\arguments{
\item{resource}{(string)}
}
\value{
PIRequestTemplate
}
\description{
Generate an instance of the PIRequestTemplate PI Web API class
}
\examples{
requestTemplate <- PIRequestTemplate(resource = "")
}
|
54d404ba1a5c45c7b9fc9ac9391cd700a3097efe
|
c712c1fbe217789cbd473ce5fefd493fb11349a7
|
/man/bhlm.traceplot.Rd
|
68fc7a918c447a4c09a6b7ece1af97c792291638
|
[] |
no_license
|
arcuo/BHLM_package
|
cacd613853fa6bf0814c05fae28da3fa48f6f32b
|
b8e14bd6cd8cb5d0ccd379606b72582433c33ae9
|
refs/heads/master
| 2020-03-08T20:13:27.745950
| 2019-03-03T22:09:22
| 2019-03-03T22:09:22
| 128,376,700
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 670
|
rd
|
bhlm.traceplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bhlm_plots.R
\name{bhlm.traceplot}
\alias{bhlm.traceplot}
\title{Show trace plots for outcomes}
\usage{
bhlm.traceplot(bhlm_object, outcome_options = NULL, return_plots = FALSE)
}
\arguments{
\item{bhlm_object}{Object returned from \code{bhlm}, of class \code{bhlm_object}.}
\item{outcome_options}{Character vector specifying which outcomes should be plotted.
Defaults to all outcome options from \code{bhlm_object@outcome_options}.}
\item{return_plots}{Return ggplot objects in \code{list}.}
}
\description{
Plot outcomes with object from \code{bhlm}.
}
\author{
Hugh Benjamin Zachariae
}
|
33bf8e1c2f601885b459be99d9da5e3d333ebe4d
|
99c9c5e797c5b6220a94748cb56f647fcac28e43
|
/R/ftrf.R
|
e046ba2364d9f344f99939c80dd38dfb0124eb8e
|
[] |
no_license
|
cran/Bhat
|
c1f27d30235b41043d364bdb77a503503e7f4374
|
6eba3bcdb4b6c18138028e23b033b9b7bb87e302
|
refs/heads/master
| 2022-05-22T16:39:32.685891
| 2022-05-10T12:30:02
| 2022-05-10T12:30:02
| 17,678,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 851
|
r
|
ftrf.R
|
##' Generalized logit transform
##'
##' maps a bounded parameter x onto the real line according to
##' y=log((x-xl)/(xu-x))), with xl < x < xu. If this constraint is violated, an
##' error occurs. x may be vector
##'
##'
##' @param x a numeric vector
##' @param xl a numeric vector of same length as x with x > xl
##' @param xu a numeric vector of same length as x with x < xu
##' @return returns numerical vector of transforms
##' @author E. Georg Luebeck (FHCRC)
##' @seealso \code{\link{btrf}}
##' @keywords optimize misc
##' @export
##'
"ftrf" <-
function(x,xl,xu) {
#### forward transformation
#### this assumes logit transformations of the parameters
#### bounded from below by xl and from above by xu
if(any((x-xl) <= 0)) stop('ftrf requires x > xl')
if(any((xu-x) <= 0)) stop('ftrf requires x < xu')
return(log((x-xl)/(xu-x)))
}
|
1cc76d52e4623b43a3af5978c500cb116a6d449f
|
45387729fd6ee661a7b9d852a515124832c880c2
|
/R/mymult.R
|
9097fa2b7b902d167edd9a8abbaffa5f06594e25
|
[] |
no_license
|
lindseydeluga/MATH4753P1
|
bc2716552c7375b3a551d271fb833db74e4a5aad
|
e503fa129a39e4d67662abe3b3029c96ad8eb553
|
refs/heads/master
| 2022-11-12T08:32:23.498811
| 2020-06-29T21:40:00
| 2020-06-29T21:40:00
| 265,069,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
mymult.R
|
#' mymult
#'
#' creates a barplot of relative frequencies
#'
#' @param iter 100
#' @param n Sample Size
#' @param p Probability
#'
#' @return A barplot
#' @export
#'
#' @examples
#' mymult
#'
#' Draws `iter` multinomial samples of size `n` with category probabilities
#' `p`, shows a barplot of the relative frequencies of the categories pooled
#' over all iterations, and returns the per-iteration frequency table.
#'
#' @param iter number of iterations (samples) to draw
#' @param n sample size per iteration
#' @param p vector of category probabilities, one entry per category
#'
#' @return A `length(p)` x `iter` matrix whose (k, i) entry is the count of
#'   category k in iteration i.  A barplot of pooled relative frequencies is
#'   produced as a side effect.
#' @export
#'
#' @examples
#' mymult(iter = 100, n = 10, p = c(1, 2, 3, 4, 2) / 12)
mymult <- function(iter = 100, n = 10, p = c(1, 1, 1, 1) / 4) {
  # Number of categories.
  k <- length(p)
  # Each column of sam.mat holds one sample of size n (values in 1:k).
  # `nrow`/`ncol` are spelled out: the original `nr=`/`nc=` relied on
  # partial argument matching.
  sam.mat <- matrix(NA, nrow = n, ncol = iter)
  # Each column of tab.mat holds the category frequencies of that sample.
  tab.mat <- matrix(NA, nrow = k, ncol = iter)
  for (i in seq_len(iter)) {
    # Fill each column with a new sample.
    sam.mat[, i] <- sample(1:k, n, replace = TRUE, prob = p)
    # factor(..., levels = 1:k) keeps zero counts for unseen categories.
    tab.mat[, i] <- table(factor(sam.mat[, i], levels = 1:k))
  }
  # Pooled count per category across all iterations.
  freq <- rowSums(tab.mat)
  names(freq) <- 1:k
  # Barplot of relative frequencies over the n * iter total draws.
  barplot(freq / (n * iter), col = rainbow(k))
  tab.mat
}
mymult(iter=1000,n=10,p=c(1,2,3,4,2)/12)
|
6bf3b732e00fc4eece0f31b05d219c56511e9d25
|
cad506b49106d267e20433fca31963a7a4218e42
|
/man/mGetIntersecting.Rd
|
471cb53e75ffdbec64f6aaca35d2d4a27c4bda0b
|
[] |
no_license
|
townleym/mSpatial
|
929f674e1dacd148c912504b3d7270cfc7d41d91
|
9e7d790813f779806736f1fba5b8341827ce329a
|
refs/heads/master
| 2020-05-21T23:13:22.945779
| 2018-02-07T19:34:02
| 2018-02-07T19:34:02
| 59,143,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,200
|
rd
|
mGetIntersecting.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mSpatial.R
\name{mGetIntersecting}
\alias{mGetIntersecting}
\title{Return polygons enclosed within a container (Deprecated)}
\usage{
mGetIntersecting(container, reference)
}
\arguments{
\item{container}{A \code{SpatialPolygons} or \code{SpatialPolygonsDataFrame} object of a single, container polygon}
\item{reference}{A \code{SpatialPolygons} or \code{SpatialPolygonsDataFrame} object with the values to be summarized within a containing polygon}
\item{centroids}{A set of centroids for the reference polygons obtained by \code{gCentroids}}
}
\description{
Kinda like \code{gEnclosedArea}
}
\details{
Returns all the reference polygons from a SpatialPolygonsDataFrame that intersect with a container. Adds an attribute to the @data slot with the percent of overlapping area.
This function uses name matching / extraction. It's a kludge and often fails. use \code{gPolyByIntersect} instead. Preserved for existing code that uses it.
\strong{Note}: all inputs should be of class \code{sp::SpatialPolygons|Points}
}
\examples{
gPolyByCentroid(blockgroups, sp::gCentroid(blockgroups, byid = T), cbsa)
}
\keyword{spatial}
|
dccc6fba5ee3e3400290bd96010f1190609e374b
|
02bfa480aff5ef40adbebae46b4eecd7046a3f6d
|
/app.R
|
ed59d85bc3cfc7d81fe8fc76deb8d13d71296e1d
|
[] |
no_license
|
mfoos/shinyresumedemo
|
652110ca710e019f169ca929871ff5c8662f2382
|
510185a045fdcc8b55537fc8602ef7e972ee7df4
|
refs/heads/master
| 2020-05-25T00:03:24.621531
| 2017-04-27T00:59:20
| 2017-04-27T00:59:20
| 84,892,424
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,405
|
r
|
app.R
|
library(shiny)
library(rmarkdown)
shslines <- c("Increased student return rates by 50%",
"Reached out to troubled populations",
"Stepped in to ensure student well-being")
dblmtlines <- c("Employee of the month - March 2002",
"Promoted to grill after one day",
"Closed with 100% drawer accuracy")
watcherslines <- c("Died twice",
"Earned inaugural Class Protector award",
"Saved the world. A lot.")
skills <- c("Microsoft Office",
"Research",
"Mentoring",
"Bullying prevention")
ui <- fluidPage(
titlePanel("Shiny Resume Builder"),
sidebarLayout(
sidebarPanel(
checkboxInput("incl_address", "Include contact info", value = TRUE),
checkboxGroupInput("employers", "Choose employers to include:",
choices = c("Doublemeat Palace", "Sunnydale High School", "Watchers Council"),
selected = c("Doublemeat Palace", "Sunnydale High School", "Watchers Council")),
uiOutput("choose_emp3"),
uiOutput("choose_emp"),
uiOutput("choose_emp2"),
selectizeInput("skills", "Choose skills:", choices = skills,
multiple = TRUE, options = list(plugins = list('drag_drop'))),
checkboxInput("incl_orgs", "Include organizations", value = TRUE),
radioButtons("format", "Output format:",
choices = c("HTML" = "html_document",
"PDF" = "pdf_document",
"Word" = "word_document")),
actionButton("goknit", "I am the plan")
),
mainPanel(
uiOutput("buttonappear"),
tableOutput("preview"),
tableOutput("skills")
)
)
)
server <- function(input, output) {
reportdone <- eventReactive(input$goknit, {
render("bsummers_resume.Rmd",
output_format = isolate(input$format),
params = list(
shs_strings = isolate(input$shs),
dblmt_strings = isolate(input$dblmt),
watcher_strings = isolate(input$wc),
incl_address = isolate(input$incl_address),
incl_orgs = isolate(input$incl_orgs),
skills = isolate(input$skills)))
})
output$buttonappear <- renderUI({
reportdone()
downloadButton("knitdoc", "It's ready!")
})
output$knitdoc <- downloadHandler(
filename = function(){
ext <- switch(isolate(input$format),
"html_document" = ".html",
"pdf_document" = ".pdf",
"word_document" = ".docx")
paste0("bsummers_resume", ext)
},
content = function(file){
file.copy(reportdone(), file, overwrite = TRUE)
}
)
output$preview <- renderTable({
if (length(input$employers) > 0){
rlist <- NULL
if (!(is.null(input$wc))){
rlist <- c(rlist, c("WATCHERS COUNCIL", input$wc))
}
if (!(is.null(input$shs))){
rlist <- c(rlist, c("SUNNYDALE HIGH", input$shs))
}
if (!(is.null(input$dblmt))){
rlist <- c(rlist, c("DOUBLEMEAT PALACE", input$dblmt))
}
data.frame("Employers" = rlist)
}
})
output$skills <- renderTable({
data.frame("Skills" = input$skills)
})
output$choose_emp <- renderUI({
if ("Sunnydale High School" %in% input$employers){
selectizeInput("shs", "Choose accomplishments for Sunnydale High School:", choices = shslines,
multiple = TRUE, options = list(plugins = list('drag_drop')))
}
})
output$choose_emp2 <- renderUI({
if ("Doublemeat Palace" %in% input$employers){
selectizeInput("dblmt", "Choose accomplishments for Doublemeat Palace:", choices = dblmtlines,
multiple = TRUE, options = list(plugins = list('drag_drop')))
}
})
output$choose_emp3 <- renderUI({
if ("Watchers Council" %in% input$employers){
selectizeInput("wc", "Choose accomplishments for Watchers Council:", choices = watcherslines,
multiple = TRUE, options = list(plugins = list('drag_drop')))
}
})
output$test <- renderText(
input$format
)
}
# Run the application
shinyApp(ui = ui, server = server)
|
85d6590bd613131a4391d4c4d6423a2eb0379470
|
2ec9bbe01050d4a41609b665d790a3d9b683a2af
|
/Customize Axis.R
|
6bc62027bcc201374d80cc5c1786f31cde68479e
|
[
"MIT"
] |
permissive
|
krishnakesari/Data-Visualization-R
|
e90c25a1aee83f8c4c39c2479726e913e259cb82
|
8f43d2b110c2484188fe3e5bd780da5138454470
|
refs/heads/master
| 2022-11-24T05:16:44.584483
| 2020-07-23T01:16:27
| 2020-07-23T01:16:27
| 274,914,592
| 0
| 0
|
MIT
| 2020-07-10T15:21:05
| 2020-06-25T12:48:32
|
R
|
UTF-8
|
R
| false
| false
| 254
|
r
|
Customize Axis.R
|
# Add x-axis and y-axis titles to both panels of sp2.
# NOTE(review): `sp2` is a plotly subplot defined elsewhere -- this fragment
# assumes it has two x-axes and two y-axes (a two-panel subplot); confirm
# against the script that builds sp2.
sp2 %>%
  layout(
    xaxis = list(title = " "),        # first panel: blank x title
    xaxis2 = list(title = "Year"),    # second panel: shared time axis title
    yaxis = list(title = "Global Sales (M units)"),
    yaxis2 = list(title = "Global Sales (M units)")
  )
|
1a5964884a42919887663049608390a32c4de296
|
8d7a31292817d4f1a39df17487cbe97ea6bc0a6f
|
/R/utilperm.R
|
71a9911157e45ff59fc654784496112c06290921
|
[] |
no_license
|
cran/FrF2
|
497fb84bf3db0d020e6184a3bf1571729c411d1d
|
d671685a92d3b9447a0ee76d89ae13aec2d3566c
|
refs/heads/master
| 2023-08-22T23:49:38.210713
| 2023-08-15T11:00:08
| 2023-08-15T12:44:22
| 17,691,780
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,839
|
r
|
utilperm.R
|
permutations <- function(n){
  ## All permutations of 1..n, one permutation per row (an n! x n matrix).
  ## Same iterative construction as in package e1071: extend each
  ## permutation of 1..(k-1) by appending k, then emit k cyclic column
  ## shifts of every extended row.
  if (n == 1)
    return(matrix(1))
  else if (n < 2)
    stop("n must be a positive integer")
  acc <- matrix(1)
  for (k in 2:n) {
    extended <- cbind(acc, k)        # append k to every existing row
    shift <- c(1:k, 1:(k - 1))       # lookup table for cyclic column shifts
    base <- nrow(extended)
    acc <- matrix(0, ncol = ncol(extended), nrow = k * base)
    acc[seq_len(base), ] <- extended # shift 0: rows copied unchanged
    for (m in seq_len(k - 1)) {
      ## block m holds every row shifted cyclically by m positions
      acc[m * base + seq_len(base), ] <- extended[, shift[(m + 1):(m + k)]]
    }
  }
  dimnames(acc) <- NULL
  acc
}
invperm <- function(perm){
  ## Invert a permutation: the result's i-th element is the position in
  ## `perm` holding rank i, so perm[invperm(perm)] is sorted.  For a
  ## permutation (no ties) this is exactly order(perm).
  order(perm)
}
reord <- function(hilf, perm){
  ## Reorder a matrix of binary-digit rows.  Generating factors are
  ## isomorphic with binary numbers (A=1, B=10, C=100, ..., AB=A+B=11, ...);
  ## permuting the digit positions with `perm` corresponds to relabelling
  ## the factors.  The result is tagged as a base-2 "basedInt" object so
  ## column numbers can then be computed (e.g. with package sfsmisc).
  n <- nrow(hilf)
  flipped <- hilf[n:1, , drop = FALSE]       # reverse the digit rows
  shuffled <- flipped[perm, , drop = FALSE]  # apply the position permutation
  aus <- shuffled[n:1, , drop = FALSE]       # reverse back to original order
  class(aus) <- "basedInt"
  attr(aus, "base") <- 2
  aus
}
getNext <- function(perm){
  ## function for next permutation in lexicographical order
  ## adapted from http://www.cut-the-knot.org/do_you_know/AllPerm.shtml
  ## provided by Alexander Bogomolny, based on Dijkstra 1997 p.71
  ## NOTE(review): behaviour is undefined when `perm` is already the last
  ## (fully descending) permutation -- callers must not pass that case.
  N <- length(perm)
  ## Scan right-to-left for the pivot: position i-1 is where the value first
  ## drops, i.e. the suffix perm[i..N] is non-increasing.
  i <- N
  while (perm[i-1] >= perm[i] & i>2) i <- i-1
  ## Find j-1, the rightmost position whose value exceeds the pivot
  ## perm[i-1] (guaranteed to exist within the non-increasing suffix).
  j <- N+1
  while (perm[j-1] <= perm[i-1] & j>2) {
    j <- j-1
  }
  ## swap values at positions (i-1) and (j-1)
  perm[c(i-1, j-1)] <- perm[c(j-1, i-1)]
  ## Reverse the (still non-increasing) suffix perm[i..N], swapping from
  ## both ends inward, so it becomes the smallest ordering; the result is
  ## the immediate lexicographic successor of the input.
  i <- i+1; j <- N+1
  while (i < j)
  {
    perm[c(i-1, j-1)] <- perm[c(j-1, i-1)]
    i <- i+1; j <- j-1
  }
  perm
}
|
c4dec07b0ed0a9beefa408359834ebfe34782ca2
|
3b9fe0b9da2ce6951908b983c5bfeeafb4d11d4f
|
/ui.R
|
a8f4591a136ed0eb60ce77bc2302dc7682870066
|
[] |
no_license
|
carlosespino11/marginalizationApp
|
1b81a0c216a5cc9d7051f74207390f29ce8af4d2
|
e047373c30ae72cd129d5aee4cf5824f2f5d74d6
|
refs/heads/master
| 2021-01-20T04:32:27.313321
| 2015-08-10T20:44:32
| 2015-08-10T20:44:32
| 40,504,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,264
|
r
|
ui.R
|
library(shiny)
library(leaflet)
library(ggthemes)
source("utils.R")
exploratoryPanel = sidebarLayout(
sidebarPanel(
selectInput("var",
label = c("Choose a variable to display"),
choices = varNames),
p("To create a marginalization index click the 'Create Index' tab. "),
p("To cluster states by similarity click the 'Clusterize' tab. ")
),
mainPanel(
h3(textOutput("mapCaption")),
leafletOutput("map")
)
)
pcaPanel = sidebarLayout(
sidebarPanel(
p("Now we are going to create an marginalization index. This is a single number for each state that summarizes its marginalization variables."),
p("A common way to do this is to perform a principal component analysis and use the first component as the index."),
checkboxGroupInput("pcaVars",
label = c("Select variables to include in the index"),
choices = varNames,
selected = varNames)
),
mainPanel(
h3("Marginalization Index"),
leafletOutput("indexMap")
)
)
clusteringPanel = sidebarLayout(
sidebarPanel(
p("Now we are going to group states into groups. The objective is to make groups such that states in the same group are more similar to each other than to those in other groups."),
p("The clustering algorithm that we are going to use is k-means."),
selectInput("numClust",
label = c("Choose the desired number of clusters"),
choices = 1:7,
selected = 3)
),
mainPanel(
h3("State groups"),
leafletOutput("clusterMap")
)
)
appPanel <- fluidPage(
tabsetPanel(type = "tabs",
tabPanel("Explore", exploratoryPanel),
tabPanel("Create Index", pcaPanel),
tabPanel("Clusterize", clusteringPanel)
)
)
overviewPanel <- fluidPage(
mainPanel(
tags$p(class="lead", "The purpose of this app is to make an analysis of social exclusion (marginalization)
in Mexico. Marginalization is defined as a social disadvantage and relegation to the fringe of society.
Social exclusion is the process in which individuals or entire communities of people are systematically
blocked from (or denied full access to) various rights, opportunities and resources that are normally
available to members of a different group, and which are fundamental to social integration within
that particular group."),
tags$p(class="lead", "To make this analysis, we use some commonly used variables to measure marginalization. These are measured by state"),
tags$p(class="lead", "The app has three panels:"),
tags$ul(
tags$li(tags$p(class="lead", tags$strong("Visualize"), "contains tools to visualize the marginalization variables in the map.")),
tags$li(tags$p(class="lead", tags$strong("Create Index"), "contains tools to create a marginalization index using variables chosen by the user.")),
tags$li(tags$p(class="lead", tags$strong("Clusterize"), "contains tools to cluster similiar states into groups."))
),
tags$p(class="lead", "The variables used to measure marginalization in Mexico are:"),
tags$ul(
tags$li(tags$p(class="lead", "% Of illiterate population above 15 years old")),
tags$li(tags$p(class="lead","% Of population above 15 years old without elementary school")),
tags$li(tags$p(class="lead","% Occupants in dwellings without drainage or toilet")),
tags$li(tags$p(class="lead","% Occupants in dwellingss without electricity")),
tags$li(tags$p(class="lead", "% Occupants in dwellings without piped water")),
tags$li(tags$p(class="lead","% Overcrowded dwellings")),
tags$li(tags$p(class="lead", "% Occupants in dwellingss with dirt floor")),
tags$li(tags$p(class="lead","% Population in towns with less than 5000 inhabitants")),
tags$li(tags$p(class="lead","% Employed population with an income less than 2 minimum wages"))
),
tags$p(class="lead", tags$strong("Click on the 'App' button on the navigation bar to begin using the app."))
)
)
ui <- navbarPage("Marginalization in Mexico",
tabPanel("Overview", overviewPanel),
tabPanel("App", appPanel)
)
|
10eb76b93e2779afa4a1976778a4b701d8983bab
|
4d998b49a1541a3827902c9a999c8affa50338c9
|
/Correlations.R
|
71cad59dc4456986df08c8546aea72c3a8b0f7ab
|
[] |
no_license
|
jorditijsseclaase/Assignment
|
177df811c6119331f23cb7b0a4214e06d75907e5
|
d22b9e047a84486a4145934407aad9717b921946
|
refs/heads/master
| 2020-04-24T10:10:28.005256
| 2019-05-15T15:27:31
| 2019-05-15T15:27:31
| 171,884,534
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,749
|
r
|
Correlations.R
|
# Correlations.R: exploratory scatter plots relating traffic counts,
# temperature and air-quality measures in the cleaned combined data set.
# NOTE(review): rm(list=ls()) wipes the caller's global environment, and XML
# is attached twice (library + require) -- flagged for cleanup, code unchanged.
rm(list=ls())
library(tidyverse)
library(XML)
require(XML)
library(writexl)
library("readxl")

# One row per observation with traffic, pollution and weather columns.
Total <- read_excel("Cleaned_Total.xlsx")

# plot of AQI to users street 30
ggplot(data = Total) +
  geom_point(mapping = aes(x=users_Street30, y=AQI))+
  # geom_smooth(mapping = aes(x= Date, y=AQI))+
  ylim(0,500)+
  ggtitle("Users street 30 to AQI")

# plot of PM10 to users street 30
ggplot(data = Total) +
  geom_point(mapping = aes(x=users_Street30, y=PM10))+
  # geom_smooth(mapping = aes(x= Date, y=AQI))+
  # ylim(0,500)+
  ggtitle("Users street 30 to PM10")

# plot of PM2.5 to users street 30
ggplot(data = Total) +
  geom_point(mapping = aes(x=users_Street30, y=PM2.5))+
  # geom_smooth(mapping = aes(x= Date, y=AQI))+
  # ylim(0,500)+
  ggtitle("Users street 30 to PM2.5")

# plot of AQI to average speed
# (comment previously mislabelled this plot as "users street 30")
ggplot(data = Total) +
  geom_point(mapping = aes(x=avg_Speed, y=AQI))+
  # geom_smooth(mapping = aes(x= Date, y=AQI))+
  ylim(0,500)+
  ggtitle("Average speed to AQI")

# plot of NO2 to users street 30
ggplot(data = Total) +
  geom_point(mapping = aes(x=users_Street30, y=NO2))+
  # geom_smooth(mapping = aes(x= Date, y=AQI))+
  # ylim(0,500)+
  ggtitle("Users street 30 to NO2")

# plot of AQI to average T
ggplot(data = Total) +
  geom_point(mapping = aes(x=Avg_T, y=AQI))+
  # geom_smooth(mapping = aes(x= Date, y=AQI))+
  ylim(0,500)+
  ggtitle("Avg T to AQI")

# plot of NO2 to average T
ggplot(data = Total) +
  geom_point(mapping = aes(x=Avg_T, y=NO2))+
  # geom_smooth(mapping = aes(x= Date, y=AQI))+
  # ylim(0,500)+
  ggtitle("Avg T to NO2")

# plot of Date to users street 30
ggplot(data = Total) +
  geom_point(mapping = aes(x=users_Street30, y=Date))+
  ggtitle("Date to number of users street 30")
|
f74f174bd9fde360e409f9e88a6eb6c9772b8d72
|
1fdd33233f2416a926b903e5494d278ebb208a37
|
/plot4.R
|
320afa120d0e996a550b48d630d974fda753e22e
|
[] |
no_license
|
boizette/ExData_Plotting1
|
032aed7ea0ee22e3fe6b9c531486c67459609fad
|
0906feaaea37928186a5ab8a07d0842928238ee0
|
refs/heads/master
| 2021-05-09T13:51:15.143171
| 2018-02-02T14:36:06
| 2018-02-02T14:36:06
| 119,046,718
| 0
| 0
| null | 2018-01-26T12:00:17
| 2018-01-26T12:00:16
| null |
ISO-8859-1
|
R
| false
| false
| 2,285
|
r
|
plot4.R
|
plot4<-function(){
##Initialisations diverses
##install.packages("data.table")
library(data.table)
Sys.setlocale("LC_TIME", "English")
##Récupération des données
f_url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp<-tempfile()
download.file(f_url,temp)
unzip(temp,"household_power_consumption.txt")
ColNames=c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
fic<-read.table("household_power_consumption.txt",stringsAsFactors=FALSE,header=TRUE,col.names=ColNames,sep=";" )
##Filtrage des lignes relatives aux deux premiers jours de février 2007
gic<-fic[between(as.Date(fic$Date,"%d/%m/%Y"),"2007-02-01","2007-02-02",incbounds = TRUE), ]
write.table(gic,file="test.txt")
##Mise en conformité des données en abscisse et ordonnée
x<-strptime(paste(gic$Date,gic$Time),"%d/%m/%Y %H:%M:%S")
##Mise en conformité des données en abscisse et ordonnée
x<-strptime(paste(gic$Date,gic$Time),"%d/%m/%Y %H:%M:%S")
G_A_POW<-as.numeric(gic$Global_active_power)
G_R_POW<-as.numeric(gic$Global_reactive_power)
G_V<-as.numeric(gic$Voltage)
Gsub1<-as.numeric(gic$Sub_metering_1)
Gsub2<-as.numeric(gic$Sub_metering_2)
Gsub3<-as.numeric(gic$Sub_metering_3)
##Traçage graphique
par(mfrow=c(2,2),mar=c(4,4,1,1),oma=c(0,1,0,0))
plot(x,G_A_POW,type="l",ylab="Globale Active Power",xlab="")
plot(x,G_V,type="l",ylab="Voltage",xlab="datetime")
plot(x,Gsub1,type="l",ylim=range(c(Gsub1,Gsub2,Gsub3)),ylab="Energy sub metering",xlab="",col="black")
par(new=TRUE)
plot(x,Gsub2,type="l",ylim=range(c(Gsub1,Gsub2,Gsub3)),axes=FALSE,xlab="",ylab="",col="red")
par(new=TRUE)
plot(x,Gsub3,type="l",ylim=range(c(Gsub1,Gsub2,Gsub3)),axes=FALSE,xlab="",ylab="",col="blue")
##Affichage légende
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="n",lty=c(1,1,1),lwd=c(1.5,1.5,1.5),col=c("black","red","blue"),cex=0.65)
plot(x,G_R_POW,type="l",ylab="Globale reactive Power",xlab="datetime",yaxt="n")
ytick<-seq(0,0.5,by=0.1)
axis(side=2,at=ytick,labels=FALSE)
text(par("usr")[1],ytick,labels=ytick,pos=2,xpd=TRUE)
##Sauvegarde graphe dans le fichier PLOT4.PNG
dev.copy(png,file="plot4.png",width=480,height=480)
dev.off()
}
|
b65550fc7791674c87c439c863c6e5b14208f784
|
1b0df2e6b5e1ef6d98ac7408a51d826e1104ef79
|
/man/decrypt_object.Rd
|
94d553a9c7f2719e374d41974f4132ff54a2903a
|
[] |
no_license
|
talegari/safer
|
b7c8d9625180d4dffe058ddbe8ffdd2b0097c78f
|
7e08b0ba5cf783f7672c7ab0b994c31a7641c0b1
|
refs/heads/master
| 2023-02-19T23:03:20.788486
| 2023-02-09T14:31:28
| 2023-02-09T14:31:28
| 88,793,630
| 22
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,331
|
rd
|
decrypt_object.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/object.R
\name{decrypt_object}
\alias{decrypt_object}
\title{Decrypt a object}
\usage{
decrypt_object(object, key = "pass", pkey = NULL)
}
\arguments{
\item{object}{Object to be decrypted}
\item{key}{For symmetric decryption, 'pkey' should be NULL (default) and
'key' can be either a string (Default is 'pass') or a raw object. For
asymmetric decryption, both 'key' (private key of the decrypter) and 'pkey'
(public key of the encrypter) should be raw objects.}
\item{pkey}{See 'key'}
}
\value{
A raw object if \code{ascii} is \code{FALSE}. A string if
\code{ascii} is \code{TRUE}.
}
\description{
\code{encrypt_object} encrypts an R object as a raw object or a
string and \code{decrypt_object} decrypts a raw object or a
string (encrypted by \code{encrypt_object})
}
\examples{
# symmetric case:
temp <- encrypt_object(1:3)
all(
is.raw(temp)
, decrypt_object(temp) == 1:3)
temp <- encrypt_object(iris, ascii = TRUE)
all(
is.character(temp)
, decrypt_object(temp) == iris
, identical(decrypt_object(temp), iris))
rm(temp)
# asymmetric case:
alice <- keypair()
bob <- keypair()
temp <- encrypt_object(1:10, alice$private_key, bob$public_key)
temp2 <- decrypt_object(temp, bob$private_key, alice$public_key)
identical(1:10, temp2)
}
|
b0c946ef310cabc2ae9dc3a6e415ed3fdd38d2cf
|
53f6608a8f31d2aa39fae0e899b144c98818ff54
|
/R/dailyWB_SSURGO.R
|
7f01e0bfcee3198985417c1e0c6b966aa950936d
|
[] |
no_license
|
ncss-tech/sharpshootR
|
4b585bb1b1313d24b0c6428182a5b91095355a6c
|
1e062e3a4cdf1ea0b37827f9b16279ddd06d4d4a
|
refs/heads/master
| 2023-08-20T06:16:35.757711
| 2023-08-08T19:11:52
| 2023-08-08T19:11:52
| 54,595,545
| 18
| 6
| null | 2023-02-24T21:00:28
| 2016-03-23T21:52:31
|
R
|
UTF-8
|
R
| false
| false
| 3,858
|
r
|
dailyWB_SSURGO.R
|
# x: SpatialPoint with single feature
# bufferRadiusMeters: radius in meters
# Look up SSURGO major-component keys (cokey) intersecting a buffered point.
#
# x: sf POINT feature in GCS WGS84 (single feature)
# bufferRadiusMeters: buffer radius, in meters
# Returns: vector of unique component keys for the *major* components of all
#   map units intersecting the buffer.
# NOTE(review): relies on SDA_query() (soilDB) being attached by the caller
# and on network access to USDA Soil Data Access.
.getSSURGO_at_point <- function(x, bufferRadiusMeters) {
  # buffer point (GCS WGS84) before intersection with SSURGO data
  x.buff <- sf::st_buffer(x, dist = bufferRadiusMeters)
  # simplify, buffer is far too detailed
  # (dTolerance units depend on CRS / s2 mode here -- TODO confirm)
  x.buff <- sf::st_simplify(x.buff, dTolerance = 0.2)
  ## TODO: consider using BBOX vs actual buffer
  # st_as_sfc(st_bbox(x))
  # debug: complexity of buffer
  # print(nrow(st_coordinates(x.buff)))
  # convert to WKT so the geometry can be shipped to Soil Data Access
  x.wkt <- sf::st_as_text(sf::st_as_sfc(x.buff))
  # spatial query: map units under the buffer -> their major components
  q <- paste0("SELECT mapunit.mukey, cokey, comppct_r, compkind, compname
FROM
mapunit
JOIN component ON mapunit.mukey = component.mukey
WHERE
majcompflag = 'Yes'
AND mapunit.mukey IN (
SELECT * from SDA_Get_Mukey_from_intersection_with_WktWgs84('", x.wkt, "')
)")
  res <- suppressMessages(SDA_query(q))
  # only unique instances of each component
  cokeys <- unique(res$cokey)
  return(cokeys)
}
#' @title Perform daily water balance modeling using SSURGO and DAYMET
#'
#' @description Pending.
#'
#' @param x `sf` object representing a single point
#' @param cokeys vector of component keys to use
#' @param start starting year (limited to DAYMET holdings)
#' @param end ending year (limited to DAYMET holdings)
#' @param modelDepth soil depth used for water balance, see details
#' @param MS.style moisture state classification style, see [`estimateSoilMoistureState`]
#' @param a.ss recession coefficients for subsurface flow from saturated zone, should be > 0 (range: 0-1)
#' @param S_0 fraction of water storage filled at time = 0 (range: 0-1)
#' @param bufferRadiusMeters spatial buffer (meters) applied to `x` for the look-up of SSURGO data
#'
#'
#' @author D.E. Beaudette
#'
#' @references
#'
#' Farmer, D., M. Sivapalan, Farmer, D. (2003). Climate, soil and vegetation controls upon the variability of water balance in temperate and semiarid landscapes: downward approach to water balance analysis. Water Resources Research 39(2), p 1035.
#'
#'
#'
#' @return `data.frame` of daily water balance results
#'
#' @export
#'
# Daily water balance driven by SSURGO hydraulic data and DAYMET climate.
#
# x: sf object with a single point feature (GCS WGS84)
# cokeys: optional component keys; looked up at `x` when NULL
# start/end: DAYMET year range; modelDepth: soil depth (cm) for the model
# MS.style / a.ss / S_0 / bufferRadiusMeters: see roxygen block above
# Returns: data.frame of daily water balance results from dailyWB().
dailyWB_SSURGO <- function(x, cokeys = NULL, start = 1988, end = 2018, modelDepth = 100, MS.style = 'default', a.ss = 0.1, S_0 = 0.5, bufferRadiusMeters = 1) {
  # required packages
  # BUGFIX: use short-circuiting scalar `||` (not vectorized `|`) for these
  # scalar checks, and report 'sf' in the message since it is also required.
  if(!requireNamespace('daymetr', quietly=TRUE) ||
     !requireNamespace('elevatr', quietly=TRUE) ||
     !requireNamespace('sf', quietly=TRUE) ||
     !requireNamespace('Evapotranspiration', quietly=TRUE)
  ) {
    stop('this function requires the following packages: daymetr, elevatr, sf, Evapotranspiration', call.=FALSE)
  }
  ## TODO: relax constraints: other object types / iteration over features
  # sanity checks: single sf point feature only
  stopifnot(inherits(x, 'sf'))
  stopifnot(length(x) == 1)
  ## TODO: this contains a lot more data than we actually need
  # get daily input data as list
  daily.data <- prepareDailyClimateData(x, start = start, end = end, onlyWB = TRUE)
  # use component keys at `x` if none provided
  if(is.null(cokeys)) {
    cokeys <- .getSSURGO_at_point(x, bufferRadiusMeters = bufferRadiusMeters)
  }
  # get SSURGO hydraulic data for select components
  s <- suppressMessages(
    prepare_SSURGO_hydro_data(cokeys = cokeys, max.depth = modelDepth)
  )
  # extract required variables from the aggregated (per-component) table
  vars <- c('compname', 'sat', 'fc', 'pwp', 'corrected_depth')
  s <- s$agg[, vars]
  ## TODO: more flexible / intelligent specification of a.ss and S_0
  # * interpretation of drainage class
  # * empirical values from SCAN network
  # soil-specific thickness and recession coef.
  s$thickness <- s$corrected_depth
  s$a.ss <- a.ss
  # daily water balance and moisture state classification
  wb <- dailyWB(s, daily.data, id = 'compname', S_0 = S_0, MS.style = MS.style)
  return(wb)
}
|
bfadb3733f72a4866d72461a67d017d0ba811a4a
|
3612f70fda75aad09582c3a3ecd5a6e1acc77b7b
|
/app.R
|
672f8dc8be09887d5655a43a06d5dfe5096da583
|
[] |
no_license
|
lindsayevanslee/whomds_viz
|
d1a55f24f917faa88f08c5efd58b8c7227c0b30d
|
c94e655e67f0aa9f13ef5c5b8941eb1e48337359
|
refs/heads/master
| 2018-12-19T06:11:05.874382
| 2018-09-15T13:43:48
| 2018-09-15T13:43:48
| 116,181,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,551
|
r
|
app.R
|
# Shiny MDS visualization app: package setup, data load, and UI definition.
library(shiny)
library(shinythemes)
library(plyr)
library(tidyverse)
library(broom)
library(whomds)
# Read in the pre-formatted data ----
# named list keyed by the values of the "country" selectInput below
df_all <- list(srilanka_brief = read_csv("data/srilanka_brief.csv"),
               chile_brief = read_csv("data/chile_brief.csv"))
# srilanka_brief <- read_csv("data/srilanka_brief.csv")
# chile_brief <- read_csv("data/chile_brief.csv")
# Load tidy table producing function ----
source("tab_for_plot.R")
# Load plotting function ----
source("mds_barplot.R")
# Save indicator choices input (all countries must have same column labels)
indicator_choices <- list(
  "Places where you socialize or engage in community activities" = "BE1",
  "Shops, banks and post office" = "BE2",
  "Transportation" = "BE3",
  "Dwelling including the toilet" = "BE4"
)
# Define UI ----
# sidebar collects country / indicators / response values / disaggregators;
# main panel shows the plot and a conditional download button
ui <-
  fluidPage(theme = shinytheme("yeti"),
    titlePanel("DRAFT MODEL DISABILITY SURVEY VISUALIZATION"),
    sidebarLayout(
      sidebarPanel(
        h3("Options"),
        selectInput(
          "country",
          h5("Country"),
          choices = list("Chile" = "chile_brief",
                        "Sri Lanka" = "srilanka_brief"),
          selected = "srilanka_brief"
        ),
        selectInput(
          "indicators",
          h5("Indicators"),
          choices = indicator_choices,
          selected = NULL,
          selectize = TRUE,
          multiple = TRUE
        ),
        checkboxGroupInput(
          inputId = "resp_values",
          label = "Response options to include in indicator",
          choices = c("1" = "1",
                      "2" = "2",
                      "3" = "3",
                      "4" = "4",
                      "5" = "5"),
          selected = c("4", "5")),
        h5("Disaggregators"),
        radioButtons(
          "fill_col",
          h6("Fill column"),
          choices = list(
            "none" = "",
            "Sex" = "sex",
            "Age group" = "age_cat",
            "Disability level" = "performance_cat"
          ),
          selected = NULL
        ),
        radioButtons(
          "facet_col",
          h6("Facet column (must be different than fill column)"),
          choices = list(
            "none" = "",
            "Sex" = "sex",
            "Age group" = "age_cat",
            "Disability level" = "performance_cat"
          ),
          selected = NULL
        ),
        checkboxInput(inputId = "weighted",
                      label = "Use survey weights?",
                      value = TRUE)
      ),
      mainPanel(
        "THE VALUES LISTED HERE ARE A DEMONSTRATION ONLY AND DO NOT REPRESENT ACCURATE DATA GATHERED BY THE WORLD HEALTH ORGANIZATION.",
        plotOutput("graph"),
        uiOutput("button")
      )
    )
  )
# Define server logic ----
# Define server logic ----
# Builds the (optionally survey-weighted) data set reactively, renders the
# bar plot, and exposes a PNG download once a plot exists.
server <- function(input, output) {
  #reactive object that selects data set and creates survey object if desired
  # when input$weighted: long format + srvyr design with cluster/strata/weight
  # columns matched by name; otherwise the raw tibble is returned as-is
  df_final <- reactive({
    if (input$weighted) {
      des <- df_all[[input$country]] %>%
        gather(key = "q",
               value = "resp", !!!rlang::syms(input$indicators)) %>%
        mutate(q = ordered(q),
               resp = ordered(resp, levels = 1:5)) %>%
        as_survey_design(
          ids = contains("CLUSTER"),
          strata = contains("STRATA"),
          weights = contains("WEIGHT"),
          nest = TRUE
        )
      return(des)
    } else {
      df <- df_all[[input$country]]
      return(df)
    }
  })
  #plot------
  #create function with plot; returns NULL until indicators are selected
  myplot <- function() {
    if (!is.null(input$indicators)) {
      #calculate table
      tab <- tab_for_plot(df = df_final(),
                          cols = input$indicators,
                          disaggs = c(input$fill_col, input$facet_col),
                          weighted = input$weighted,
                          resp_values = as.numeric(input$resp_values))
      #print graph
      mds_barplot(tab,
                  fill_col = input$fill_col,
                  facet_col = input$facet_col,
                  indicator_choices = indicator_choices)
    }
  }
  #render the plot
  output$graph <- renderPlot({
    myplot()
  })
  # make download button appear after data uploaded ----------
  output$button <- renderUI({
    if (is.null(myplot())) return()
    downloadButton("download", "Download plot")
  })
  #allow user to download plot ---------
  # NOTE(review): `device = png()` CALLS png() and opens a device; ggsave
  # expects `device = png` (the function) or "png" -- likely the cause of
  # the "fix how the downloaded plots look" TODO below. Confirm and fix.
  output$download <- downloadHandler(
    filename = function() { paste0("MDS_PLOT", ".png") },
    content = function(file) {
      ggsave(file,
             plot = myplot(),
             device = png(),
             width = 7, height = 7,
             units = "cm")
    }
  )
}
# Run the application (must be the last expression in app.R)
shinyApp(ui = ui, server = server)
##FIX: fix how the downloaded plots look
##FIX: make x-axis labels prettier
##FIX: expand list of indicators
|
b8ca0f25daee933bfdaa9b53ab2fb3b8942987d6
|
e6bff3ffd1627d74c1dc5ee0d704c6548e686886
|
/donepezil/prediction model summary.R
|
355480d66ddcb545167774eaff518bb07b04bcff
|
[] |
no_license
|
MikeJSeo/phd
|
465b3f777737fdc873479094587faa0341bc54da
|
5004b90a2b221ed3a3a02ba97802336f1ae8af47
|
refs/heads/master
| 2022-12-17T04:21:08.873245
| 2022-11-19T01:24:32
| 2022-11-19T01:24:32
| 202,161,141
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,901
|
r
|
prediction model summary.R
|
## Prediction-model summary script (donepezil): loads the prepared
## patient-level dataset and helper functions, then (in the sections below)
## summarizes fitted Bayesian models and posterior predictions per outcome.
library(jomo) #for imputation
library(mitools) #convenient function to combine imputations
library(dplyr)
library(tidyr)
library(lme4) #linear mixed effects model
library(glmnet)
library(rjags) #for running JAGS
library(ggplot2)
library(coda)
# Load in data
# NOTE(review): hard-coded absolute paths + setwd() make this script
# machine-specific; prefer relative paths or here::here().
setwd("E:/EST-RA-9140/EQUIPERCENTILE1/Study3_prediction_model")
data <- readRDS('dt_pm.rds')
data <- as_tibble(data)
setwd("E:/EST-RA-9140/EQUIPERCENTILE1/Seo") #change back the directory
source("helpful.functions.R")
##### Primary outcome: ADAS_TRANSFORMED_OBS
## Posterior summary and posterior-predictive draw for the primary outcome,
## restricted to complete cases on the placebo (treat == 0) arm.
study_inclusion <- c(161, 231, 302, 304, 311, 312, 315, 339)
covariates <- c("AGE", "SEX", "WEIGHT", "AP", "AMNOTAP", "ADAS_TRANSFORMED_BASE", "CDR_TRANSFORMED_BASE")
data_cleaned <- clean_data(data, study_inclusion = study_inclusion, Outcome = "ADAS_TRANSFORMED_OBS", covariates = covariates)
data_full <- data_cleaned$data_full
#check how many NAs are in each column:
#checkNA <- data_full %>% summarize_all(~sum(is.na(.)))
#View(checkNA)
data_full <- data_full %>% drop_na(studyid:CDR_TRANSFORMED_BASE) %>% filter(treat == 0)
load("prediction-ADAS-result.RData")
summary(samples) # standardized model result
## for arbitrary patient
## Age 70, Female (SEX = 1), WEIGHT = 60, AP = 0, AMNOTAP = 1, ADAS_TRANSFORMED_BASE = 30, CDR_TRANSFORMED_BASE = 8
arbitrary_patient <- c(70, 1, 60, 0, 1, 30, 8)
# covariates must be standardized with the training means/SDs before use
arbitrary_patient_standardized <- (arbitrary_patient - data_cleaned$mean_val) / data_cleaned$sd_val
matrix_coefs <- as.matrix(samples[, c("alpha", paste0("beta[", 1:7,"]"))])
matrix_mean <- matrix_coefs %*% c(1,arbitrary_patient_standardized)
matrix_sigma <- as.matrix(samples[,c("sigma")])
## BUGFIX: seed the RNG *before* drawing from the posterior predictive
## distribution (it previously came after rnorm(), so the draw was not
## reproducible); this also matches the CIBIC section below.
set.seed(1)
ypred <- rnorm(length(matrix_mean), matrix_mean, matrix_sigma)
mean(ypred)
quantile(ypred, probs = c(0.025, 0.975))
### unstandardize result back to the original covariate scale
result <- summarize_each_sample(samples)
result2 <- unstandardize_cofficient(result, data_cleaned)
result2_mean <- matrix(apply(result2, 2, mean), nrow = 1)
result2_lower <- matrix(apply(result2, 2, quantile, prob = 0.025), nrow = 1)
result2_upper <- matrix(apply(result2, 2, quantile, prob = 0.975), nrow = 1)
result3 <- rbind(result2_mean, result2_lower, result2_upper)
colnames(result3) <- colnames(result$samples_result)
rownames(result3) <- c("mean", "0.025 quantile", "0.975 quantile")
result3
##### Secondary outcome: CIBIC_PLUS_TRANSFORMED
## Same workflow as the primary outcome: posterior summary, posterior
## predictive draw for one patient, then unstandardized coefficients.
study_inclusion <- c(161, 231, 302, 304, 311, 312, 315, 339)
covariates <- c("AGE", "SEX", "WEIGHT", "AP", "AMNOTAP", "ADAS_TRANSFORMED_BASE", "CDR_TRANSFORMED_BASE")
data_cleaned <- clean_data(data, study_inclusion = study_inclusion, Outcome = "CIBIC_PLUS_TRANSFORMED", covariates = covariates)
data_full <- data_cleaned$data_full
#check how many NAs are in each column:
#checkNA <- data_full %>% summarize_all(~sum(is.na(.)))
#View(checkNA)
# complete cases, placebo arm only
data_full <- data_full %>% drop_na(studyid:CDR_TRANSFORMED_BASE) %>% filter(treat == 0)
load("prediction-CIBIC-result.RData")
summary(samples) # standardized model result
## for arbitrary patient
## Age 70, Female (SEX = 1), WEIGHT = 60, AP = 0, AMNOTAP = 1, ADAS_TRANSFORMED_BASE = 30, CDR_TRANSFORMED_BASE = 8
arbitrary_patient <- c(70, 1, 60, 0, 1, 30, 8)
arbitrary_patient_standardized <- (arbitrary_patient - data_cleaned$mean_val) / data_cleaned$sd_val
matrix_coefs<- as.matrix(samples[, c("alpha", paste0("beta[", 1:7,"]"))])
matrix_mean <- matrix_coefs %*% c(1,arbitrary_patient_standardized)
matrix_sigma <- as.matrix(samples[,c("sigma")])
# seed before drawing so the posterior predictive sample is reproducible
set.seed(1)
ypred <- rnorm(length(matrix_mean), matrix_mean, matrix_sigma)
mean(ypred)
quantile(ypred, probs = c(0.025, 0.975))
### Unstandardize result
result <- summarize_each_sample(samples)
result2 <- unstandardize_cofficient(result, data_cleaned)
result2_mean <- matrix(apply(result2, 2, mean), nrow = 1)
result2_lower <- matrix(apply(result2, 2, quantile, prob = 0.025), nrow = 1)
result2_upper <- matrix(apply(result2, 2, quantile, prob = 0.975), nrow = 1)
result3 <- rbind(result2_mean, result2_lower, result2_upper)
colnames(result3) <- colnames(result$samples_result)
rownames(result3) <- c("mean", "0.025 quantile", "0.975 quantile")
result3
#######Analyzing Dropout
## Logistic model for dropout; note study 312 is excluded here (the list
## differs from the ADAS/CIBIC sections above).
study_inclusion <- c(161, 231, 302, 304, 311, 315, 339)
covariates <- c("AGE", "SEX", "WEIGHT", "AP", "AMNOTAP", "ADAS_TRANSFORMED_BASE", "CDR_TRANSFORMED_BASE")
data_cleaned <- clean_data(data, study_inclusion = study_inclusion, Outcome = "Dropout", covariates = covariates)
data_full <- data_cleaned$data_full
#check how many NAs are in each column:
#checkNA <- data_full %>% summarize_all(~sum(is.na(.)))
#View(checkNA)
data_full <- data_full %>% drop_na(studyid:y) %>% filter(treat == 0)
load("prediction-Dropout-result.RData")
summary(samples) # standardized model result
## for arbitrary patient
## Age 70, Female (SEX = 1), WEIGHT = 60, AP = 0, AMNOTAP = 1, ADAS_TRANSFORMED_BASE = 30, CDR_TRANSFORMED_BASE = 8
arbitrary_patient <- c(70, 1, 60, 0, 1, 30, 8)
arbitrary_patient_standardized <- (arbitrary_patient - data_cleaned$mean_val) / data_cleaned$sd_val
matrix_coefs<- as.matrix(samples[, c("alpha", paste0("beta[", 1:7,"]"))])
matrix_mean <- matrix_coefs %*% c(1,arbitrary_patient_standardized)
# inverse-logit to map linear predictor draws to dropout probabilities
inv_logit <- function(x) {
  exp(x) / (1 + exp(x))
}
mean(inv_logit(matrix_mean))
quantile(inv_logit(matrix_mean), probs = c(0.025, 0.975))
#set.seed(1)
#ypred <- rbinom(length(matrix_mean), 1, inv_logit(matrix_mean) )
#mean(ypred)
#quantile(ypred, probs = c(0.025, 0.975))
result <- summarize_each_sample(samples)
result2 <- unstandardize_cofficient(result, data_cleaned, "prediction")
result2_mean <- matrix(apply(result2, 2, mean), nrow = 1)
result2_lower <- matrix(apply(result2, 2, quantile, prob = 0.025), nrow = 1)
result2_upper <- matrix(apply(result2, 2, quantile, prob = 0.975), nrow = 1)
result3 <- rbind(result2_mean, result2_lower, result2_upper)
colnames(result3) <- colnames(result$samples_result)
rownames(result3) <- c("mean", "0.025 quantile", "0.975 quantile")
result3
|
7b06003fbb93597a466f32ee0fcece14f407aacb
|
38116111ccbbb1c4580d8e8c5ac3f9775e1fa384
|
/R/testData.R
|
e9ecc40a14472aebf0c5c2994732b012a3644704
|
[
"MIT"
] |
permissive
|
terminological/tidy-info-stats
|
6c1e37684eeac8d765384b773a23f0488eb7b467
|
1b1f19a718edb44c7178943c322b45fd1e3c93b1
|
refs/heads/master
| 2022-11-30T08:16:46.311945
| 2022-11-18T20:37:21
| 2022-11-18T20:37:21
| 232,600,275
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,838
|
r
|
testData.R
|
# TODO: move this to data_raw & use usethis::use_data
#' tidy dataframe of the USArrests data
#'
#' @import dplyr
#' @export
tidyUSArrests = function() {
  # Attach the state names as an explicit `sample` column, then reshape to
  # one row per (sample, feature) pair.
  arrests = USArrests %>% mutate(sample = rownames(USArrests))
  tidyr::pivot_longer(arrests, -sample, names_to = "feature", values_to = "value")
}
#' tidy dataframe of the USArrests data with co-occurence of features
#'
#' @import dplyr
#' @export
tidyUSArrestsCooccurrence = function() {
  # Self-join the long table on `sample` so every feature/value pair is
  # matched with every other feature/value pair observed on the same state.
  long = tidyUSArrests()
  inner_join(
    rename(long, feature1 = feature, value1 = value),
    rename(long, feature2 = feature, value2 = value),
    by = "sample"
  )
}
#' tidy dataframe of the USArrests data
#'
#' @import dplyr
#' @export
tidyDiscreteUSArrests = function() {
  # Same shape as tidyUSArrests(), but values are first binned into discrete
  # levels via infotheo::discretize() (default binning).
  infotheo::discretize(USArrests) %>%
    mutate(sample = rownames(USArrests)) %>%
    tidyr::pivot_longer(-sample, names_to = "feature", values_to = "value")
}
#' tidy dataframe of the USArrests data with co-occurence of features
#'
#' @import dplyr
#' @export
tidyDiscreteUSArrestsCooccurrence = function() {
  # Self-join the discretized long table on `sample` to get all feature
  # co-occurrences per state.
  long = tidyDiscreteUSArrests()
  inner_join(
    rename(long, feature1 = feature, value1 = value),
    rename(long, feature2 = feature, value2 = value),
    by = "sample"
  )
}
#' tidy dataframe of the Iris data with features & outcomes
#'
#' @import dplyr
#' @export
tidyIris = function() {
  # One row per (sample, feature): sample is the row number, the four
  # measurements become `feature`/`value`, and Species is kept as `outcome`.
  iris %>%
    mutate(sample = row_number()) %>%
    # replace dots with underscores (dots are awkward in downstream code)
    rename(
      Sepal_Length = Sepal.Length,
      Sepal_Width = Sepal.Width,
      Petal_Length = Petal.Length,
      Petal_Width = Petal.Width
    ) %>%
    tidyr::pivot_longer(cols=c(Sepal_Length,Sepal_Width,Petal_Length,Petal_Width), names_to = "feature") %>% rename(outcome = Species)
}
#
# ```{r}
# # devtools::load_all("..")
# testData = bloodResultsSimulation(1000)$data
#
# #### Continuous probability estimation ----
#
# ggplot(
# testData %>% group_by(feature,outcome) %>% tidyinfostats::probabilitiesFromContinuous(value, method="SGolay"),
# aes(x=value,y=p_x, colour=outcome)) + geom_point() + facet_wrap(vars(feature))
#
# # debug(probabilitiesFromContinuous_SGolay)
# # debug(applySGolayFilter)
#
# ggplot(
# testData %>% group_by(feature,outcome) %>% tidyinfostats::probabilitiesFromContinuous(value, method="Kernel"),
# aes(x=value,y=p_x, colour=outcome)) + geom_point() + facet_wrap(vars(feature))
#
# ```
# Simulate a data set with informative missingness for testing.
# Returns a list: the missingness/expectedness design tables, the complete
# data (`equivData`), and the same data with "absent" rows removed (`data`).
# NOTE(review): uses sample.int() without a seed, so output is
# nondeterministic across calls.
missingData = function() {
  # start with a definition for our test data
  # feature A is present in 80% of outcome 1; 20% of outcome 2 - there is information in missingness
  # feature B is present in 10% of outcome 1; 10% of outcome 2 - there is no information in missingness
  # feature C is present in 40% of outcome 1; 20% of outcome 2 - there is information but less than in A
  # feature D is present in 100% of outcome 1; 100% of outcome 2 - not missing / no information
  missingness = tibble(
    feature = c("A","A","B","B","C","C","D","D"),
    outcome = c(1,2,1,2,1,2,1,2),
    presence = c(0.8,0.2,0.1,0.1,0.4,0.2,1,1)
  )
  # outcome 1 seen in 60% of cases outcome 2 in 40%
  expectedness = tibble(
    outcome = c(1,2),
    expected = c(60,40)
  )
  # generate a complete data set with a random value and missingness flag;
  # each (feature, outcome) group gets `expected` rows, split into
  # present/absent according to `presence`
  equivData = expectedness %>% left_join(missingness, by="outcome") %>% group_by(feature,outcome,expected,presence) %>% group_modify(function(d,g,..) {
    return(tibble(
      value = sample.int(4,size = g$expected, replace = TRUE),
      status = c(rep("present",round(g$presence*g$expected)),rep("absent",round((1-g$presence)*g$expected)))
    ))
  }) %>% group_by(feature) %>% arrange(outcome) %>% mutate(sample = c(1:100))
  # create test data set with missing values
  data = equivData %>% filter(status != "absent")
  return(list(missingness= missingness, expectedness = expectedness, data=data,equivData=equivData))
}
|
36f422bd0c4e32255fe32fdbaf1fffa298a1588a
|
6fbc0b4555e94e5dbbb1b6ce58ca2cbd98340b43
|
/Juego catafixia.R
|
8f3f3707f051e8a39d22aa4c41b5561cf869063e
|
[] |
no_license
|
CEAC333/R-repository
|
c6dc9b43c89ffe2a50fd280c32068bdc4a36bac0
|
79d8d2640c6f29cc92f8d1d1d005f1600c6dbe6b
|
refs/heads/master
| 2021-05-13T19:02:53.602809
| 2018-01-08T01:32:00
| 2018-01-08T01:32:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
Juego catafixia.R
|
#programa juego puertas ("catafixia")
# Monte-Carlo estimate of the win probability for two scoring rules over
# three doors: prob_exito counts trials where the chosen door equals the
# winning door; prob_exitoS counts trials where they differ.
#
# Fixes vs. the original:
#  * the second loop tested the stale `catafixia` value left over from the
#    first loop instead of its own draw (`catafixiaS`), biasing ganadoraS
#  * draws exactly equal to 1/3 or 2/3 previously matched no branch and left
#    the door variable stale/undefined; else-if chains close the gap
# pick one of the three doors uniformly at random
pick_door <- function() {
  u <- runif(1)
  if (u < 1/3) {
    1L
  } else if (u < 2/3) {
    2L
  } else {
    3L
  }
}
n <- 250
# Experiment 1: success = chosen door equals winning door (expected ~1/3)
gano <- 0
for (i in seq_len(n)) {
  elegida <- pick_door()
  ganadora <- pick_door()
  if (ganadora == elegida) {
    gano <- gano + 1
  }
}
prob_exito <- gano/n
# Experiment 2: success = chosen door differs from winning door (~2/3)
ganoS <- 0
for (i in seq_len(n)) {
  elegidaS <- pick_door()
  ganadoraS <- pick_door()
  if (ganadoraS != elegidaS) {
    ganoS <- ganoS + 1
  }
}
prob_exitoS <- ganoS/n
|
b568832a56fcf8cb79b2d81243fcbc512cf3f366
|
beb91d0e06e5b260011ea5c55da32ab21bece500
|
/R/goodfit.R
|
7a3e2339f77f5fe5086dbe998af6239dcd53fef5
|
[] |
no_license
|
cran/vcd
|
7169e004f662d4d33305a3b7d1246bba7058b924
|
86cb80436f2a1d4733905710f52c5f630a348cef
|
refs/heads/master
| 2023-02-02T18:55:54.519633
| 2023-02-01T12:22:08
| 2023-02-01T12:22:08
| 17,700,741
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,898
|
r
|
goodfit.R
|
# Fit a discrete distribution (Poisson / binomial / negative binomial) to
# count data, by maximum likelihood or minimum chi-squared, or with fixed
# parameters supplied via `par`.
#
# x:      vector of raw counts, a 1-way table, or a 2-column
#         matrix/data.frame of (frequency, count) pairs
# type:   distribution to fit
# method: "ML" or "MinChisq"; silently replaced by "fixed" when `par`
#         supplies the distribution parameters
# par:    optional named list of fixed parameters (lambda / prob / size)
# Returns: object of class "goodfit" with observed/fitted frequencies,
#          degrees of freedom, and estimated parameters.
goodfit <- function(x, type = c("poisson", "binomial", "nbinomial"),
                    method = c("ML", "MinChisq"), par = NULL)
{
  # normalize input to parallel (count, freq) vectors
  if(is.vector(x)) {
    x <- table(x)
  }
  if(is.table(x)) {
    if(length(dim(x)) > 1) stop ("x must be a 1-way table")
    freq <- as.vector(x)
    count <- as.numeric(names(x))
  } else {
    if(!(!is.null(ncol(x)) && ncol(x) == 2))
      stop("x must be a 2-column matrix or data.frame")
    freq <- as.vector(x[,1])
    count <- as.vector(x[,2])
  }
  ## fill-in possibly missing cells so counts run 0..max(count)
  nfreq <- rep(0, max(count) + 1)
  nfreq[count + 1] <- freq
  freq <- nfreq
  count <- 0:max(count)
  n <- length(count)
  ## starting value for degrees of freedom (cells added later, one per
  ## estimated parameter subtracted below)
  df <- -1
  type <- match.arg(type)
  method <- match.arg(method)
  switch(type,
         "poisson" = {
           if(!is.null(par)) {
             # fixed-parameter fit
             if(!is.list(par))
               stop("`par' must be a named list")
             if(names(par) != "lambda")
               stop("`par' must specify `lambda'")
             par <- par$lambda
             method <- "fixed"
           }
           else if(method == "ML") {
             df <- df - 1
             # ML estimate of lambda is the sample mean
             par <- weighted.mean(count,freq)
           }
           else if(method == "MinChisq") {
             df <- df - 1
             # minimize Pearson chi-squared over lambda
             chi2 <- function(x)
             {
               p.hat <- diff(c(0, ppois(count[-n], lambda = x), 1))
               expected <- sum(freq) * p.hat
               sum((freq - expected)^2/expected)
             }
             par <- optimize(chi2, range(count))$minimum
           }
           par <- list(lambda = par)
           p.hat <- dpois(count, lambda = par$lambda)
         },
         "binomial" = {
           size <- par$size
           if(is.null(size)) {
             size <- max(count)
             warning("size was not given, taken as maximum count")
           }
           # extend the cell range when size exceeds the observed maximum
           if(size > max(count)) {
             nfreq <- rep(0, size + 1)
             nfreq[count + 1] <- freq
             freq <- nfreq
             count <- 0:size
             n <- length(count)
           }
           if(!is.null(par$prob)) {
             # fixed-parameter fit
             if(!is.list(par))
               stop("`par' must be a named list and specify `prob'")
             par <- par$prob
             method <- "fixed"
           }
           else if(method == "ML") {
             df <- df - 1
             # ML estimate of prob is the mean success fraction
             par <- weighted.mean(count/size, freq)
           }
           else if(method == "MinChisq") {
             df <- df - 1
             chi2 <- function(x)
             {
               p.hat <- diff(c(0, pbinom(count[-n], prob = x, size = size), 1))
               expected <- sum(freq) * p.hat
               sum((freq - expected)^2/expected)
             }
             par <- optimize(chi2, c(0,1))$minimum
           }
           par <- list(prob = par, size = size)
           p.hat <- dbinom(count, prob = par$prob, size = par$size)
         },
         "nbinomial" = {
           if(!is.null(par)) {
             if(!is.list(par)) stop("`par' must be a named list")
             if(!(isTRUE(all.equal(names(par), "size")) | isTRUE(all.equal(sort(names(par)), c("prob", "size")))))
               stop("`par' must specify `size' and possibly `prob'")
             # prob given => nothing left to estimate
             if(!is.null(par$prob)) method <- "fixed"
           }
           switch(method,
                  "ML" = {
                    if(is.null(par$size)) {
                      df <- df - 2
                      # fitdistr() from MASS -- assumed attached; TODO confirm
                      par <- fitdistr(rep(count, freq), "negative binomial")$estimate
                      # convert (size, mu) to (size, prob)
                      par <- par[1]/c(1, sum(par))
                    } else {
                      df <- df - 1
                      method <- c("ML", "with size fixed")
                      size <- par$size
                      xbar <- weighted.mean(count,freq)
                      par <- c(size, size/(xbar+size))
                    }
                  },
                  "MinChisq" = {
                    if(is.null(par$size)) {
                      df <- df - 2
                      ## MM (method-of-moments starting values)
                      xbar <- weighted.mean(count,freq)
                      s2 <- var(rep(count,freq))
                      p <- xbar / s2
                      size <- xbar^2/(s2 - xbar)
                      par1 <- c(size, p)
                      ## minChisq over (size, prob) jointly
                      chi2 <- function(x)
                      {
                        p.hat <- diff(c(0, pnbinom(count[-n], size = x[1], prob = x[2]), 1))
                        expected <- sum(freq) * p.hat
                        sum((freq - expected)^2/expected)
                      }
                      par <- optim(par1, chi2)$par
                    } else {
                      df <- df - 1
                      method <- c("MinChisq", "with size fixed")
                      chi2 <- function(x)
                      {
                        p.hat <- diff(c(0, pnbinom(count[-n], size = par$size, prob = x), 1))
                        expected <- sum(freq) * p.hat
                        sum((freq - expected)^2/expected)
                      }
                      par <- c(par$size, optimize(chi2, c(0, 1))$minimum)
                    }
                  },
                  "fixed" = {
                    par <- c(par$size, par$prob)
                  })
           par <- list(size = par[1], prob = par[2])
           p.hat <- dnbinom(count, size = par$size, prob = par$prob)
         })
  expected <- sum(freq) * p.hat
  # final degrees of freedom; "fixed" returns both the Pearson and LR
  # variants (all cells vs. non-empty cells)
  df <- switch(method[1],
               "MinChisq" = { length(freq) + df },
               "ML" = { sum(freq > 0) + df },
               "fixed" = { c(length(freq), sum(freq > 0)) + df }
               )
  structure(list(observed = freq,
                 count = count,
                 fitted = expected,
                 type = type,
                 method = method,
                 df = df,
                 par = par),
            class = "goodfit")
}
# does this need a residuals_type arg?
# S3 print method: show observed vs. fitted frequencies with residuals.
# residuals_type selects which residuals column to display; `...` is
# forwarded to print() for the matrix.
print.goodfit <- function(x, residuals_type = c("pearson", "deviance", "raw"), ...)
{
  residuals_type <- match.arg(residuals_type)
  cat(paste("\nObserved and fitted values for", x$type, "distribution\n"))
  if(x$method[1] == "fixed")
    cat("with fixed parameters \n\n")
  else
    cat(paste("with parameters estimated by `", paste(x$method, collapse = " "), "' \n\n", sep = ""))
  # generic dispatch -> residuals.goodfit
  resids <- residuals(x, type = residuals_type)
  RVAL <- cbind(x$count, x$observed, x$fitted, resids)
  colnames(RVAL) <- c("count", "observed", "fitted",
                      paste(residuals_type, "residual"))
  rownames(RVAL) <- rep("", nrow(RVAL))
  print(RVAL, ...)
  # return the object invisibly, per print-method convention
  invisible(x)
}
# S3 summary method: goodness-of-fit test for a "goodfit" object.
# Computes the likelihood-ratio statistic G2 (from the fitted frequencies)
# and the Pearson statistic X2 (from CDF-based expected frequencies where
# the last cell absorbs the upper tail), then reports the one matching the
# estimation method ("fixed" reports both).
summary.goodfit <- function(object, ...)
{
  df <- object$df
  obsrvd <- object$observed
  count <- object$count
  expctd <- fitted(object)
  # likelihood-ratio statistic; zero cells contribute 0
  G2 <- sum(ifelse(obsrvd == 0, 0, obsrvd * log(obsrvd/expctd))) * 2
  n <- length(obsrvd)
  pfun <- switch(object$type,
                 poisson = "ppois",
                 binomial = "pbinom",
                 nbinomial = "pnbinom")
  # cell probabilities from the CDF; last cell gets the remaining tail mass
  p.hat <- diff(c(0, do.call(pfun, c(list(q = count[-n]), object$par)), 1))
  expctd <- p.hat * sum(obsrvd)
  X2 <- sum((obsrvd - expctd)^2 / expctd)
  names(G2) <- "Likelihood Ratio"
  names(X2) <- "Pearson"
  # small expected counts make the chi-squared approximation unreliable
  if(any(expctd < 5) & object$method[1] != "ML")
    warning("Chi-squared approximation may be incorrect")
  RVAL <- switch(object$method[1],
                 ML = G2,
                 MinChisq = X2,
                 fixed = c(X2, G2)
                 )
  RVAL <- cbind(RVAL, df, pchisq(RVAL, df = df, lower.tail = FALSE))
  colnames(RVAL) <- c("X^2", "df", "P(> X^2)")
  cat(paste("\n\t Goodness-of-fit test for", object$type, "distribution\n\n"))
  print(RVAL, ...)
  invisible(RVAL)
}
# S3 plot method: visualize observed vs. fitted frequencies as a rootogram.
# Extra arguments in `...` are forwarded to rootogram().
plot.goodfit <- function(x, ...)
{
  rootogram(x, ...)
}
fitted.goodfit <- function(object, ...)
{
  ## S3 method for fitted(): expected frequencies computed at fit time.
  object[["fitted"]]
}
# S3 residuals method for "goodfit" objects.
# Expected frequencies are recomputed from the fitted distribution's CDF so
# that the last cell absorbs the upper-tail mass (cell probabilities sum to
# 1); the dead `expctd <- fitted(object)` assignment from the original --
# immediately overwritten and never used -- has been removed.
residuals.goodfit <- function(object, type = c("pearson", "deviance", "raw"), ...)
{
  type <- match.arg(type)
  obsrvd <- object$observed
  count <- object$count
  n <- length(obsrvd)
  pfun <- switch(object$type,
                 poisson = "ppois",
                 binomial = "pbinom",
                 nbinomial = "pnbinom")
  # P(X = count[i]) for all but the last cell, which gets the upper tail
  p.hat <- diff(c(0, do.call(pfun, c(list(q = count[-n]), object$par)), 1))
  expctd <- p.hat * sum(obsrvd)
  res <- switch(type,
                pearson = (obsrvd - expctd) / sqrt(expctd),
                deviance = ifelse(obsrvd == 0, 0,
                                  obsrvd * log(obsrvd / expctd)),
                obsrvd - expctd)
  return(res)
}
# S3 predict method: density of the fitted distribution at `newcount`.
# type = "prob" returns cell probabilities; type = "response" (default)
# scales them by the total observed frequency to give expected counts.
predict.goodfit <- function(object, newcount = NULL, type = c("response", "prob"), ...)
{
  type <- match.arg(type)
  if (is.null(newcount)) {
    newcount <- object$count
  }
  # map distribution type to the matching density function name
  density_name <- switch(object$type,
                         poisson = "dpois",
                         binomial = "dbinom",
                         nbinomial = "dnbinom")
  density_args <- c(list(x = newcount), object$par)
  probs <- do.call(density_name, density_args)
  if (type == "response") {
    probs <- probs * sum(object$observed)
  }
  probs
}
|
516823e022f653af89ad27478d04ab3707ae4979
|
c6afabcce18d420c400221e09d6d6a3cd179138e
|
/DataAnalysis.R
|
4c9740dc2dac05d2a5243ef2978a28b7673c5ec7
|
[] |
no_license
|
anampc/Ssid_Nutrients
|
529b22ab8e179a99853e67af7efbcf2a977d6156
|
de992dedf5c5784dc0aa43c2e8ba155ebd1fbddd
|
refs/heads/master
| 2021-01-10T13:48:35.849807
| 2015-12-01T07:22:14
| 2015-12-01T07:22:14
| 47,094,643
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,059
|
r
|
DataAnalysis.R
|
# -------------------------------------------------------------------------------------------------
# LONG FORMAT DATA ANALYSES FOR SSID
# Workflow: import the latest Ssid CSV, set factor levels, explore symbiont
# community composition with ggplot2, then fit ANOVA / LME models.
# Modify Ssid.csv and save it with the day of last data added before importing.
# -------------------------------------------------------------------------------------------------
getwd()
library(lattice)
library(ggplot2)
library(nlme)

# 1. ---- Data structure and order of levels in each factor ----
data <- read.csv("data/Ssid-11-26-15.csv")
data$Colony <- as.factor(data$Colony)
data$Community <- factor(as.character(data$Community),
                         levels = c("C", "DC", "D")) # DC = (0.1:0.9) D proportion
data$Time <- as.factor(data$Time)
data$Repli <- as.factor(data$Repli)
data$Treatment <- factor(as.character(data$Treatment),
                         levels = c("CO2", "Cont", "Dark", "Fe", "N", "NP", "NPF"))
# Arcsine-square-root transformation of the D proportion
data$D.Prop.t <- asin(sqrt(data$D.Prp))
Summary <- summary(data)

# 2. ---- Explore data with ggplot2 ----
# A. Frequency of dominant symbionts by factors
data$Colony <- factor(as.character(data$Colony),
                      levels = c("S1", "S4", "S3", "S5", "S6", "S2"))
# Due to D dominance order
# Histograms of D-dominated vs C-dominated and DC-mixed samples
Com <- ggplot(data = data, aes(x = Time))
Com <- Com + geom_histogram(aes(fill = Community))
Com <- Com + scale_fill_brewer(palette = "Set3")
# Few C-dominated samples; pool Cdom and DC
# Figure 1: histogram - colony symbionts through time
Fig1 <- Com + facet_wrap(~ Colony, ncol = 1)
# Figure 2: histogram - treatment * colony symbionts through time
Fig2 <- Com + facet_wrap(~ Treatment, ncol = 1)
# Maybe separate data and run analysis for only D vs (C + DC)?
# New variable Type: "Ddom" (> 90% D) vs "DCmix" (< 90% D).
# FIX: the original seeded this column with c("Ddom", "DCmix"), which relies
# on recycling a length-2 vector (an error whenever nrow(data) is odd) and is
# fully overwritten by the two assignments below anyway.
data$Type <- NA_character_
data$Type[which(data$Colony == "S1" | data$Colony == "S4" | data$Colony == "S3")] <- "DCmix"
data$Type[which(data$Colony == "S5" | data$Colony == "S6" | data$Colony == "S2")] <- "Ddom"

# B. S:H and D proportion ratios by factors through time
# Figure 3: total S:H by treatment * time * colony
logSHTreat <- ggplot(data, aes(factor(Treatment), logTot.SH))
Fig3 <- logSHTreat + geom_boxplot(aes(fill = factor(Time))) + facet_grid(Colony ~ ., margins = TRUE)
# Very difficult to interpret, only Dark clearly increased;
# pool data by type of community, or split total into C:H and D:H changes through time
# Figure 4: total S:H by treatment * time * TYPE of community
# (colonies pooled by D dominance)
Fig4 <- logSHTreat + geom_boxplot(aes(fill = factor(Time))) + facet_grid(Type ~ ., margins = TRUE)
# NOTE(review): in Figs 5-9 below `margin =` is not a facet_grid() argument
# (it is silently swallowed by `...`); Figs 3-4 use the correct `margins =`.
# The author likely intended `margins = TRUE` here too -- confirm before
# changing, since fixing it would add margin facets to the plots.
# Figure 5: change in D proportion by treatment * time * type
Dprp <- ggplot(data, aes(factor(Treatment), D.Prp))
Fig5 <- Dprp + geom_boxplot(aes(fill = factor(Time))) + facet_grid(Type ~ ., margin = TRUE)
# Figures 6-7: change in C:H ratio by treatment * time * colony or type
logCH <- ggplot(data, aes(factor(Treatment), logC.SH))
Fig6 <- logCH + geom_boxplot(aes(fill = factor(Time))) + facet_grid(Colony ~ ., margin = TRUE)
# C:H increased in Dark, N, Fe?, but not NP or NPF
# Colony S2 does not change at all!!! Check this data
Fig7 <- logCH + geom_boxplot(aes(fill = factor(Time))) + facet_grid(Type ~ ., margin = TRUE)
# Figures 8-9: change in D:H ratio by treatment * time * colony or type
logDH <- ggplot(data, aes(factor(Treatment), logD.SH))
Fig8 <- logDH + geom_boxplot(aes(fill = factor(Time))) + facet_grid(Colony ~ ., margin = TRUE)
# D:H increased in Dark, unclear other patterns across colonies
# Again, colony S2 does not change at all!!!
Fig9 <- logDH + geom_boxplot(aes(fill = factor(Time))) + facet_grid(Type ~ ., margin = TRUE)

# -------------------------------------------------------------------------------------------------
# ANALYSIS
# -------------------------------------------------------------------------------------------------
# Linear model with Time as repeated measurements
# Factors
#   Random: Time (T0, T1, T2, T3) - core nested? Look at repeated-measurements design
#           Colony? How to separate genotype from symbiont community in a colony?
#           Replicate (R1, R2)
#   Fixed:  Treatment (Control, Dark, Fe, N, NP, NPF), discard CO2
#           Community? or Dprp? or CH_T0?
# ANOVA: not correct for repeated measurements, but gives a first idea
aov1 <- aov(logTot.SH ~ Time * Treatment * Type * Repli, data = data)
summary(aov1) # Every factor and interaction (except Time:Repli and *) has an effect!
plot(aov1)
# LME should be OK, but need criteria to eliminate non-informative factors
LME <- lme(logTot.SH ~ Time * Treatment * Colony,
           random = ~ Time | Core, na.action = na.exclude, data = data)
summary(LME)
plot(LME)
de1c30ae25082af8d80ea03280bbfb267817ab44
|
a19663c25470f4b4165fea8313f880f44481e887
|
/edu_stats_report2020/_common.R
|
404592ce3acb85c6e5ea89b95516b978316ef140
|
[] |
no_license
|
perlatex/tidy_easyuse
|
e9322e75969c3f5802236afa6159aa355c6a42d2
|
00a90fcd68b6d121b184a507fd457e83e477185d
|
refs/heads/master
| 2022-01-03T09:25:53.661346
| 2021-12-28T10:01:55
| 2021-12-28T10:01:55
| 231,519,749
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,777
|
r
|
_common.R
|
library(tidyverse)
library(flextable)
## Lookup table mapping variable names in the data to their Chinese display
## labels (used when rendering tables and figures in the report).
labels <- c(
  # Part one: learning-opportunity scales
  # Hands-on experience opportunities
  "t_Practical_experience" = "切身体验机会",
  # Higher-order thinking engagement (total + three sub-facets)
  "t_Higher_order_thinking" = "高阶思维参与",
  "f_Higher_order_thinking_rethink" = "反思与批评思维",
  "f_Higher_order_thinking_whole" = "整体与辩证思维",
  "f_Higher_order_thinking_practice" = "实践与创新思维",
  # Learning-to-learn guidance
  "t_Learn_to_learn" = "学会学习指导",
  # Communication and cooperation opportunities
  "t_Cooperation" = "交流合作机会",
  # Part two: learning-outcome scales
  # Academic achievement
  "t_Knowledge_mastery" = "学业成绩",
  # Deep understanding
  "t_Understanding_nature_of_things" = "对知识的理解",
  "t_Understanding_meaning_of_knowledge" = "对意义的理解",
  # Transfer and creation
  "t_Application_of_knowledge" = "迁移与创造",
  # Self-regulated learning
  "t_Learning_planning" = "学习规划",
  "t_Learning_strategy" = "学习策略",
  "t_Learning_persistence" = "学习毅力",
  # Learning motivation and affect
  "t_Learning_motivation" = "学习动机",
  "t_Learning_confidence" = "学习信心",
  "t_Examination_anxiety" = "考试焦虑",
  # Communication and cooperation outcomes
  "t_Exchange_and_share" = "交流合作效果",
  # Others: subject score rates (Chinese, math, English, total)
  "chinese202007" = "语文得分率",
  "math202007" = "数学得分率",
  "english202007" = "英语得分率",
  "total_score" = "总得分率"
)
### Build a "minibar" column inside a flextable.
### Expected layout: question | 金牛区 (district value) | current school value.
### The last two columns are compared: bars above the district value get one
### color, bars below get another.
flextable_minbar <- function(dt) {
  # dt: data frame whose last column holds the school value and whose
  # second-to-last column holds the district value (percentages, 0-100).
  dt %>%
    flextable(cwidth = 3) %>%
    # Replace column 3 with a mini bar chart plus the formatted value;
    # light red (#F08080) when the school exceeds the district, blue otherwise.
    compose(
      j = 3,
      value = as_paragraph(
        minibar(
          value = dt[[ncol(dt)]], max = 100,
          barcol = if_else(dt[[ncol(dt)]] > dt[[ncol(dt) - 1]], "#F08080", "#00B4D8")
        ),
        " ",
        # Numeric value printed next to the bar with two decimals
        as_chunk(dt[[ncol(dt)]],
          formatter = function(x) formatC(x, digits = 2, format = "f", flag = "0"),
          props = fp_text(color = "black")
        )
      ),
      part = "body"
    ) %>%
    autofit() %>%
    # Widen the question column
    width(j= 1, width = 3.5)
}
# flextable_minbar <- function(.data, cur_school) {
# .data %>%
# flextable(cwidth = 3) %>%
# compose(
# j = 3,
# value = as_paragraph(
# minibar(
# value = .data[[cur_school]], max = 100,
# barcol = if_else(.data[[cur_school]] > .data[["金牛区"]], "red", "blue")
# ),
# " ",
# as_chunk(.data[[cur_school]],
# props = fp_text(color = "red")
# )
# ),
# part = "body"
# ) %>%
# autofit()
# }
flextable_minbar2 <- function(dt) {
  # Variant of flextable_minbar for tables with an extra grouping column:
  # the minibar goes in column 4 and identical values in column 1 are merged
  # vertically. Same color rule: light red when the last column exceeds the
  # second-to-last (district) column, blue otherwise.
  dt %>%
    flextable(cwidth = 3) %>%
    theme_box() %>%
    compose(
      j = 4,
      value = as_paragraph(
        minibar(
          value = dt[[ncol(dt)]], max = 100,
          barcol = if_else(dt[[ncol(dt)]] > dt[[ncol(dt) - 1]], "#F08080", "#00B4D8")
        ),
        " ",
        # Numeric value printed next to the bar with two decimals
        as_chunk(dt[[ncol(dt)]],
          formatter = function(x) formatC(x, digits = 2, format = "f", flag = "0"),
          props = fp_text(color = "black")
        )
      ),
      part = "body"
    ) %>%
    # Merge repeated values in the grouping column
    merge_v(j = 1, part = "body") %>%
    autofit() %>%
    width(j = 1, width = 1.4)
}
### Map a list of data frames to flextables with a separator in between, so
### consecutive tables do not merge in the Word output. The Rmd chunk must
### use results='asis'.
### usage: map(iris_split, flextable_list_display)
flextable_list_display <- function(df) {
  cat("<w:p/>")  # emit an empty Word paragraph between consecutive tables
  ft <- flextable::flextable(df, cwidth = 1.3)
  cat(knitr::knit_print(ft))
}
##### Proportion of respondents choosing options 1 or 2 for each item.
# For every item named in `cur_option`, computes the percentage of answers in
# {1, 2}, once for the whole district (row labelled "金牛区") and once per
# school, then orders schools by how many items exceed the district value.
stats_option_prop <- function(.data, cur_option) {
  cur_content <- cur_option
  # District-wide row (computed over all of .data, ungrouped)
  r1_a <- .data %>%
    summarise(
      across(all_of(cur_content), ~ sum(. %in% c(1, 2)) * 100 / n())
    ) %>%
    mutate(school = "金牛区") %>%
    relocate(school)
  # Per-school rows.
  # FIX: this previously read the global data frame `d` instead of the
  # function's `.data` argument, silently ignoring the data the caller passed.
  r1_b <- .data %>%
    group_by(school) %>%
    summarise(
      across(
        all_of(cur_content), ~ sum(. %in% c(1, 2)) * 100 / n()
      )
    ) %>%
    ungroup()
  # Flag items >= the district value (first row), count them per school,
  # sort by that count, then drop the helper columns.
  df <- bind_rows(r1_a, r1_b) %>%
    mutate(
      across(all_of(cur_content), list(RC = ~ . >= first(.)))
    ) %>%
    rowwise() %>%
    mutate(
      num_above_mean = sum(c_across(ends_with("_RC")))
    ) %>%
    ungroup() %>%
    select(-ends_with("_RC")) %>%
    arrange(desc(num_above_mean)) %>%
    select(-num_above_mean)
  return(df)
}
##### Print the table; cells above the district mean get a background color
colwise_color_fun <- function(x) {
  # Background colors for one table column: "gray" for entries that exceed
  # the first (district) value, "transparent" otherwise.
  out <- rep("transparent", length(x))
  out[x > x[1]] <- "gray"
  out
}
flextable_print <- function(.data, cwidth = 1.3) {
  # Render .data as a flextable: cells above the column's first-row (district)
  # value get a gray background via colwise_color_fun, and the first row is
  # printed in red. Numeric columns are centered.
  # Use narrower columns for wide tables so they fit the page.
  if (ncol(.data) > 4) {
    cwidth <- 1
  }
  .data %>%
    flextable::flextable(cwidth = cwidth) %>%
    bg(
      j = 2:ncol(.data),
      bg = colwise_color_fun
    ) %>%
    color(i = 1,
          color = "red"
    ) %>%
    align_nottext_col(align = "center")
}
##### Scoring: reverse-code 1-4 responses (choosing 1 scores 4 points, choosing 4 scores 1 point)
positive_scoring <- function(vec) {
  # Reverse-score a 1-4 Likert item (1 -> 4, ..., 4 -> 1) and express the
  # mean as a percentage of the 4-point maximum.
  # Example: c(2, 3, 1) -> (3 + 2 + 4) / (3 * 4) * 100 = 75.
  reversed <- abs(5 - vec)
  100 * mean(reversed) / 4
}
### Survey item text (Chinese) for each scale; the string values are matched
### against the questionnaire data and must stay exactly as administered.
### Hands-on experience opportunities
content_Practical_experience <- c(
  "直观感受事物,如观察聆听触摸等",
  "用心感受和揣摩,如猜测疑惑好奇等",
  "开展相应行动,如动手操作设计探索等"
)
### Higher-order thinking opportunities (all 16 items)
content_Higher_order_thinking <- c(
  "对老师讲的内容反复思考",
  "就所学内容形成自己的想法",
  "评估自己的观点正确与否",
  "质疑他人的观点是否有说服力",
  "评估不同观点,选择最优思路",
  "将新知识与已知知识联系起来",
  "将不同学科的知识联系起来",
  "利用思维导图等形式理解不同知识之间的关系",
  "在不同时间点上认识知识的发展变化",
  "会对所学知识进行一定总结",
  "用非常规的目光审视问题",
  "想到新的解决办法",
  "提出自己独特的见解",
  "问一些别人没有想到的问题",
  "开展实际行动验证想法的正误",
  "将知识应用于实际问题的解决中"
)
#### Higher-order thinking - reflective and critical thinking (items 1-5)
content_Higher_order_thinking_rethink <- c(
  "对老师讲的内容反复思考",
  "就所学内容形成自己的想法",
  "评估自己的观点正确与否",
  "质疑他人的观点是否有说服力",
  "评估不同观点,选择最优思路"
)
#### Higher-order thinking - holistic and dialectical thinking (items 6-10)
content_Higher_order_thinking_whole <- c(
  "将新知识与已知知识联系起来",
  "将不同学科的知识联系起来",
  "利用思维导图等形式理解不同知识之间的关系",
  "在不同时间点上认识知识的发展变化",
  "会对所学知识进行一定总结"
)
#### Higher-order thinking - practical and creative thinking (items 11-16)
content_Higher_order_thinking_practice <- c(
  "用非常规的目光审视问题",
  "想到新的解决办法",
  "提出自己独特的见解",
  "问一些别人没有想到的问题",
  "开展实际行动验证想法的正误",
  "将知识应用于实际问题的解决中"
)
### Learning-to-learn opportunities
content_Learn_to_learn <- c(
  "找到适合不同学科特点的学习方法和策略",
  "当不能理解时,向其他学生寻求帮助",
  "请老师澄清你不太理解的知识等",
  "带着问题来学习,并得到解答",
  "对自己的学习过程进行有意识的调控",
  "根据实际情况,调整学习进程和方法等"
)
### Communication and cooperation opportunities
content_Cooperation <- c(
  "与其他同学合作完成作业/任务",
  "同学之间对彼此的作业/任务完成情况提供反馈意见",
  "给同学讲解看法所学知识等",
  "与班上同学进行相关问题的讨论"
)
### Feedback channels
content_Feedback <- c(
  "随堂测验的结果",
  "课堂上回答问题的正误",
  "课堂上老师对我学习表现的点评",
  "课堂上同学对我学习表现的点评",
  "课后作业老师批改信息",
  "考试成绩",
  "其他学习活动的结果"
)
### Teaching methods
content_Teaching_methods <- c(
  "跨学科基于项目式的学习",
  "跨学科基于问题解决的学习",
  "跨学科基于课题研究的学习",
  "学科内上课,但会涉及其他学科的知识",
  "学科内上课,重新组织教材内容上课",
  "学科内上课,按照教材原有顺序上课"
)
## Learning outcomes
### Knowledge mastery
### content_Knowledge_mastery
### Understanding of knowledge and self
#### Understanding the nature of things or knowledge
content_Understanding_nature_of_things <- c(
  "我了解了知识的来龙去脉",
  "我建立了不同知识之间的关联",
  "我掌握了学科学习方法",
  "我掌握了学科关键思想",
  "我能琢磨和领会具有本质性和规律性的东西"
)
#### Understanding the meaning of knowledge and of one's own life
content_Understanding_meaning_of_knowledge <- c(
  "我能理解知识背后蕴含的作用和价值",
  "我有自己的价值和信念",
  "我有自己的愿望和理想"
)
### Knowledge transfer and creation
content_Application_of_knowledge <- c(
  "我能将所学知识应用于解决类似的问题或变化的情境中",
  "我能把生活中的现象与所学知识联系起来",
  "我能运用所学知识来解决生活中的实际问题",
  "我能将一个学科中学到的方法思想等运用到其他学科的学习中",
  "我能将不同学科的知识结合起来",
  "我能提出独特或创新的个人见解"
)
### Self-regulated learning
#### Learning planning
content_Learning_planning <- c(
  "我通常在一个可以集中精力的地方学习",
  "我很好地安排了学习时间",
  "我有自己的学习计划"
)
#### Learning strategies
content_Learning_strategy <- c(
  "我会反复阅读课堂笔记和教材",
  "我会反复练习习题",
  "我把重要知识点列成清单,并把清单背下来",
  "我很少在考试前找时间复习"
)
#### Learning persistence
content_Learning_persistence <- c(
  "我经常感到很懒惰或无聊,以至于我还没有完成学习计划就放弃了",
  "即使我不喜欢正在学习的内容,我也会努力完成学习",
  "当学习有困难时,我要么放弃,要么只学习容易的部分"
)
### Learning attitudes, affect, and values
#### Learning motivation
content_Learning_motivation <- c(
  "学习满足了我对知识的好奇心",
  "学习唤起我对美好事物的渴望",
  "我觉得学习是一件很有趣的事",
  "学习让我的价值得以体现",
  "学习对今后的生活和工作很有用处",
  "学习对自己的成长很重要",
  "获得好成绩是我现在最满意的事",
  "我想取得比其他大多数同学更好的成绩"
)
#### Learning confidence
content_Learning_confidence <- c(
  "如果我用适当的方式学习,我能够学会",
  "如果我足够努力,学习完全难不倒我",
  "我认为课堂上老师讲的最难内容我都能理解",
  "我有信心能出色完成作业",
  "我相信自己会取得好成绩"
)
#### Examination anxiety
content_Examination_anxiety <- c(
  "我会想到我和其他学生相比,我的成绩有多差",
  "我会想到很多题目我不能回答",
  "我想到了失败的后果",
  "我有一种不安心烦的感觉",
  "我觉得我的心跳很快"
)
### Exchange and sharing
content_Exchange_and_share <- c(
  "我能与他人进行积极的互动",
  "我能倾听他人的发言",
  "我能流畅分享自己的观点",
  "我能通过合作确定或创造问题解决方案"
)
|
9a1e611fb815a3f12e09f6611f6033127dab2f04
|
b661214a7f6c0398a222ed5628a4a48881c930db
|
/app.R
|
98a118272b95fc71f38405b105dbf29e518c4dd6
|
[] |
no_license
|
thekingofall/ShinyLearn
|
7c313b0021241bf8c173f26c33cd41dfebe496c3
|
7d4844422bd2731b85e231a729972212d61c33a5
|
refs/heads/master
| 2023-07-13T08:13:48.851233
| 2021-08-25T15:16:59
| 2021-08-25T15:16:59
| 271,017,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,832
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)

# UI: title + a slider whose value is used below as the minimum citation
# count (wordcloud min.freq) a gene needs to appear in the cloud.
ui <- fluidPage(

    # Application title
    titlePanel("Old Faithful Geyser Data"),

    # Sidebar with a slider input for number of bins
    sidebarLayout(
        sidebarPanel(
            sliderInput("bins",
                        "Number of bins:",
                        min = 1,
                        max = 5000,
                        value = 1000)
        ),

        # Show a plot of the generated distribution
        mainPanel(
           plotOutput("distPlot")
        )
    )
)

# Server: reads NCBI's gene2pubmed.gz (assumed to sit in the app directory --
# TODO confirm), keeps human genes (taxonomy id 9606), counts PubMed papers
# per gene, maps Entrez gene ids to symbols via org.Hs.eg.db, and renders a
# word cloud where word size reflects publication count. Note that the
# "bins" slider feeds wordcloud's min.freq cutoff, not histogram bins.
server <- function(input, output) {

    output$distPlot <- renderPlot({
        # NOTE(review): loading libraries and re-reading the file inside
        # renderPlot repeats this work on every slider change -- consider
        # hoisting it out of the reactive.
        library(data.table)
        a=fread('gene2pubmed.gz',data.table = F)
        head(a)
        # Keep human records only (NCBI taxonomy id 9606)
        a=a[a$`#tax_id`==9606,]
        head(a)
        library("tm")
        library("SnowballC")
        #install.packages("SnowballC")
        library("wordcloud")
        library("RColorBrewer")
        # Count PubMed papers per gene id, most-cited first
        tb=as.data.frame(table(a$GeneID))
        head(tb)
        tb=tb[order(tb$Freq,decreasing = T),]
        head(tb)
        head(tb,100)
        colnames(tb)[1]="gene_id"
        set.seed(1234)
        library(org.Hs.eg.db)
        # Map Entrez gene ids to gene symbols
        ids=toTable(org.Hs.egSYMBOL)
        head(ids)
        tbs=merge(ids,tb,by='gene_id')
        # Word cloud of gene symbols; only genes with at least input$bins
        # citations are shown
        wordcloud(words = tbs$symbol, freq = tbs$Freq, min.freq = input$bins,
                  max.words=200, random.order=FALSE, rot.per=0.35,
                  colors=brewer.pal(8, "Accent"))
    })
}

# Run the application
shinyApp(ui = ui, server = server)
|
8d8b6c3875e89967464bcc683a642de884e1223e
|
dbe5a0926040289a8b5f8c983036049575aa86ba
|
/MDS_plots.R
|
4ca8b9553e49bff86c715b276df7c2f46294bd9a
|
[
"MIT"
] |
permissive
|
KatieSev/Manifold-valued-data-analysis-of-samples-of-networks
|
40e7cb4a1c63b1af57b28bfcc2091b5c3b7a4a00
|
a249bdb7c02279115606268438d36155b2f0d56d
|
refs/heads/main
| 2023-06-04T17:51:26.157244
| 2021-06-22T17:05:37
| 2021-06-22T17:05:37
| 369,204,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,754
|
r
|
MDS_plots.R
|
#source('functions_needed.R')
#load("workspace for novels.RData")
# Pairwise distances between 23 network matrices (3-D array L, one m x m
# matrix per novel), followed by Ward clustering dendrograms and classical
# MDS plots for three metrics: Euclidean, square-root Euclidean, and
# Procrustes size-and-shape. Relies on List_L, L, label, rootmat() and
# distcov() from the loaded workspace / sourced file.
m<-dim(List_L[[1]])[1]  # matrix dimension
nt<-23                  # number of networks (novels)
# Point colors: 16 novels in one group (red), 7 in another (blue)
novcol<-c(rep(2,times=16),rep(4,times=7))

# Euclidean: Frobenius norm of the matrix difference
Dmat<-matrix(0,nt,nt)
for (i in 1:(nt-1)){
  print(i)
  for (j in (i+1):nt){
    Dmat[i,j]<-sqrt(sum((L[,,i]-L[,,j])^2))
    Dmat[j,i]<-Dmat[i,j]  # distance matrix is symmetric
  }
}
Dmateucl<-Dmat
# Ward dendrogram and classical MDS for the Euclidean distances
dend<-hclust(as.dist(Dmateucl),method="ward.D2")
plot(dend,labels=label, xlab="", main="", sub="", cex=0.5, xaxt='n')
abline(h=0.08, lty=2)
set.seed(1)
plot(cmdscale(Dmateucl),type="n",ylab="MDS2", xaxt='n', yaxt='n', xlab="")
text(cmdscale(Dmateucl),labels=label,col=novcol, cex=0.5, xaxt='n', yaxt='n')

# Square-root Euclidean: Frobenius distance between matrix square roots
Dmat<-matrix(0,nt,nt)
L_sqrt <- L
for (i in 1:23){
  L_sqrt[,,i] <- rootmat(L[,,i])  # matrix square root (external helper)
}
for (i in 1:(nt-1)){
  print(i)
  for (j in (i+1):nt){
    Dmat[i,j]<-norm( L_sqrt[,,i] - L_sqrt[,,j] , type='f')
    Dmat[j,i]<-Dmat[i,j]
  }
}
Dmatsqrt<-Dmat
dend<-hclust(as.dist(Dmatsqrt),method="ward.D2")
plot(dend,labels=label, xlab="", main="", sub="", cex=0.5)
abline(h=0.7, lty=2)
set.seed(1)
plot(cmdscale(Dmatsqrt),type="n",xlab="",ylab="MDS2",yaxp = c(-0.1, 0.1, 2), xaxt='n', yaxt='n')
text(cmdscale(Dmatsqrt),labels=label,col=novcol, cex=0.5, xaxt='n', yaxt='n')

# Procrustes size-and-shape distance between covariance matrices
Dmat<-matrix(0,nt,nt)
for (i in 1:(nt-1)){
  print(i)
  for (j in (i+1):nt){
    Dmat[i,j]<-distcov( L[,,i],L[,,j] , method="Procrustes")
    Dmat[j,i]<-Dmat[i,j]
  }
}
Dmatproc<-Dmat
dend<-hclust(as.dist(Dmatproc),method="ward.D2")
plot(dend,labels=label, xlab="", main="", sub="", cex=0.5)
abline(h=0.7, lty=2)
set.seed(1)
plot(cmdscale(Dmatproc),type="n",xlab="MDS1",ylab="MDS2",yaxp = c(-0.1, 0.1, 2), xaxt='n', yaxt='n')
text(cmdscale(Dmatproc),labels=label,col=novcol, cex=0.5, xaxt='n', yaxt='n')
|
9dfea62c9ad14c8a9b94224c5fd44165a9c6e9c1
|
4052881622ec762fba384830c6a8b9b3f6fa8e96
|
/Week9.R
|
a0a9d207ed300081aa7ddf3111c450aedd975d91
|
[] |
no_license
|
Wtrible/Code-Portfolio
|
ed9fbb8064662e3f9b76850f0705449bc4dd9aeb
|
b8ac5c1bdc1b7a71c5d992377e1ad0ceb562ee2e
|
refs/heads/master
| 2020-06-25T03:31:33.614272
| 2019-07-27T16:19:04
| 2019-07-27T16:19:04
| 199,186,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,592
|
r
|
Week9.R
|
library(tidyverse)
# Graphics devices are used to display plots.
# RStudio has the "Viewer" pane, which is a screen device;
# other possibilities include PDF, PNG, JPEG, or SVG, which are file devices.
# Default is the RStudio viewer pane.
# Some are device-specific, such as:
#   windows() does not work on Mac
#   quartz() does not work on Windows
# Let's make a simple plot to practice with; we will copy it for later use
data=mpg
plot(x=data$cyl,y=data$displ,type="p",
     main="Test Plot",xlab="Cylinders",ylab="Displacement")
# How to use other graphics devices:
#   1. launch the graphics device
#   2. make the plot
#   3. annotate the plot
#   4. close the graphics device with dev.off()
# Making a pdf; portable
pdf(file="plots/testplot.pdf")
plot(x=data$cyl,y=data$displ,type="p",
     main="Test Plot",xlab="Cylinders",ylab="Displacement")
dev.off()
# Making an svg; supports interactivity
svg(file="plots/testplot.svg")
plot(x=data$cyl,y=data$displ,type="p",
     main="Test Plot",xlab="Cylinders",ylab="Displacement")
dev.off()
# pdf and svg are vector graphics
# Making a png; does not resize well, good for plotting many points
png(file="plots/testplot.png")
plot(x=data$cyl,y=data$displ,type="p",
     main="Test Plot",xlab="Cylinders",ylab="Displacement")
dev.off()
# Making a jpeg, good for photographs
jpeg(file="plots/testplot.jpeg")
plot(x=data$cyl,y=data$displ,type="p",
     main="Test Plot",xlab="Cylinders",ylab="Displacement")
dev.off()
# tiff() and bmp() may also be used
# png, jpeg, tiff, and bmp are raster graphics (a.k.a. bitmaps)
|
5fc1c4328a043b7b002fecc482c32b831638af51
|
ed4c0c3ea985b90fbe0a102bfdf15f3447144925
|
/R/permTest_LR.R
|
d63dbbb44bbc9076f81b0fd567e285447f7faa67
|
[] |
no_license
|
RichieHonor/PermutateR
|
0ebcdaab06987b00b612641bddb1978f68a3d1f7
|
575a43812d013920fdd0f7a3c31eedda3daa23c7
|
refs/heads/main
| 2023-07-12T10:07:14.469170
| 2023-06-25T13:52:11
| 2023-06-25T13:52:11
| 331,747,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,984
|
r
|
permTest_LR.R
|
#' Perform a permutation test using the likelihood ratio test
#'
#' This function performs a permutation test on a model object using a likelihood
#' ratio test. It works by generating a specified number of replicated
#' data frames and fitting the supplied model object to each one. The permutation
#' test is based on the test statistic output of a likelihood ratio test.
#' This function will not work on interactions; to run a permutation test on an
#' interaction with a likelihood ratio test, use permTest_LR_int(). When there is
#' no interaction, the null model lacks the variable of interest and only needs
#' to be fitted once. Therefore, only 1 model is fitted on each iteration, making
#' it twice as fast as the perm_Test_LR_int function.
#'
#' @param Model_Object A statistical model object.
#' @param Variable A character string naming the variable on which the
#' permutation test is run.
#' @param Test_Statistic A character string of the desired test statistic with
#' which to conduct the permutation test.
#' @param Replication The number of simulations in the permutation test.
#' @param OutputData Should the simulated test statistics be output?
#' @return A list of two items. The first is a data frame of results of the
#' permutation test. The second is a histogram displaying the sampling
#' distribution of the simulated test statistics, with a red line displaying the
#' test statistic of the original (non-randomized) data. If OutputData=TRUE, then
#' a vector of simulated test statistics will also be included in the output list.
#' @export
permTest_LR <- function(Model_Object, Variable, Test_Statistic, Replication,
                        UseAllAvailableCores = TRUE, OutputData = FALSE) {
  # Data frame containing only the rows and columns the model actually uses.
  data2 <- model.frame(Model_Object, drop.unused.levels = TRUE)
  # Refit the supplied model on this minimal data frame.
  fit_True <- update(Model_Object, data = data2)
  # Null model: the same fit with the variable of interest removed.
  NewFormula <- paste0("~.-", Variable)
  fit_Null <- update(fit_True, as.formula(NewFormula))
  # Observed (non-permuted) test statistic from the likelihood ratio test.
  AnovaOutput <- as.data.frame(anova(fit_Null, fit_True))
  # FIX: indexing a data frame with an unknown column name raises an error
  # instead of returning NULL, so the original `is.null(Real_TS)` check was
  # unreachable. Validate the requested column before extracting it.
  if (!Test_Statistic %in% colnames(AnovaOutput)) {
    message(paste("Error: Test_Statistic :", Test_Statistic,
                  " is not part of the model object output\nPlease pick one of:"))
    print(colnames(AnovaOutput))
    stop("Test_Statistic not valid")
  }
  Real_TS <- AnovaOutput[2, Test_Statistic]
  # Generate `Replication` data frames with the variable permuted.
  Data_Frames <- replicate(Replication, new_data(data2, Variable), simplify = FALSE)
  # Fit each permuted data set and extract its simulated test statistic;
  # only one model per iteration is refitted (the null model is fixed).
  if (UseAllAvailableCores) {
    random_TS <- unlist(parallel::mclapply(Data_Frames, model_extract,
                                           Model_Object.ME = fit_True,
                                           Null_Model.ME = fit_Null,
                                           Test_Statistic.ME = Test_Statistic))
  } else {
    random_TS <- unlist(lapply(Data_Frames, model_extract,
                               Model_Object.ME = fit_True,
                               Null_Model.ME = fit_Null,
                               Test_Statistic.ME = Test_Statistic))
  }
  # Two-sided permutation p-value: fraction of simulated statistics whose
  # absolute value exceeds the observed one.
  p_Val <- length(random_TS[abs(random_TS) > abs(Real_TS)]) / length(random_TS)
  out_P <- paste("The simulated p-value value is:", p_Val, sep = " ")
  # Sampling distribution of the simulated statistics; red line = observed
  # value. All ggplot2 functions are namespace-qualified for consistency.
  p <- ggplot2::ggplot() +
    ggplot2::geom_histogram(ggplot2::aes(x = random_TS), bins = 50) +
    ggplot2::geom_vline(ggplot2::aes(xintercept = Real_TS), colour = "red")
  if (OutputData) {
    return(list(out_P, p, random_TS))
  }
  list(out_P, p)
}
|
719448c3cc31804fdfd6a38240fd0279f83af70b
|
6e129f11d1db6c19a4390bdf180ce08a630c87a2
|
/R/ypr_ml.r
|
4bab933ff511aa83f6c53c16ba367cee2167b11f
|
[] |
no_license
|
quang-huynh/ICES_MSY
|
4bd549238bb94f85318a5ff666336e8cda1b0c62
|
c2ea56d69809d5027c29e1de87c11a537241be64
|
refs/heads/master
| 2021-01-19T13:30:59.245464
| 2017-02-14T09:00:55
| 2017-02-14T09:00:55
| 88,094,269
| 1
| 0
| null | 2017-04-12T20:39:57
| 2017-04-12T20:39:57
| null |
UTF-8
|
R
| false
| false
| 3,271
|
r
|
ypr_ml.r
|
# YPR - function to calculate F-01 from life history parameters from natural mortality M,
# growth parameters (von Bertalanffy: Linf, K, t0; length-weight: a, b), and Lc.
#
# Maxage of YPR is currently set to be age at which 1% of cohort survives given M
# maxF is the maximum F for searching for F01
# nsteps is the increment of F in the YPR search
YPR <- function(Linf, K, t0, M, a, b, Lc, maxage = -log(0.01)/M, maxF = 2, nsteps = 0.01,
                graph = TRUE) {
  # Yield-per-recruit analysis with knife-edge selectivity at length Lc.
  # Returns the YPR curve over a grid of F values plus the F0.1 reference
  # point (the F where the YPR slope drops to 10% of the slope at the origin).
  maxage <- as.integer(maxage)
  f_grid <- seq(0, maxF, nsteps)  # fishing mortalities to evaluate
  # Length at first capture -> age via inverse von Bertalanffy, clamped to [1, maxage]
  age_c <- round(-log(1 - Lc/Linf)/K + t0, 0)
  age_c[age_c < 1] <- 1
  age_c[age_c > maxage] <- maxage
  ages <- seq_len(maxage)
  len_at_age <- Linf * (1 - exp(-K * (ages - t0)))  # von Bertalanffy growth
  wt_at_age <- a * len_at_age^b                     # length-weight conversion
  # Knife-edge vulnerability: fully vulnerable from age_c onward
  sel <- ifelse(ages < age_c, 0, 1)
  # Equilibrium yield per recruit for a single fishing mortality f
  ypr_at <- function(f) {
    surv <- cumprod(c(1, exp(-(M + sel[-maxage] * f))))  # survivorship-at-age
    (1 - exp(-f)) * sum(surv * wt_at_age * sel)          # (1-e^-F) * biomass/recruit
  }
  ypr <- vapply(f_grid, ypr_at, numeric(1))
  # F0.1 search via the 10%-of-origin-slope rule
  n <- length(ypr)
  slope <- numeric(n)
  slope[1] <- (ypr[2] - ypr[1]) / (f_grid[2] - f_grid[1])
  slope[2:(n - 1)] <- (ypr[3:n] - ypr[2:(n - 1)]) / (f_grid[3:n] - f_grid[2:(n - 1)])
  gap <- abs(slope - 0.1 * slope[1])
  gap[is.na(gap)] <- 10e10
  F01 <- f_grid[which.min(gap)]
  output <- list(summary = data.frame(frates = f_grid, YPR = ypr), F01 = F01)
  if (graph) {
    plot(YPR ~ frates, output$summary, xlab = "Fishing Mortality (F)", ylab = "YPR", typ = "l")
    points(F01, ypr[which.min(gap)], col = "red", pch = 16)
  }
  return(output)
}
# MLZ - wrapper that combines the mean-length mortality estimator (gh) with
# YPR to obtain the F0.1 reference point and current F relative to it.
#
# maxage defaults to the age at which 1% of a cohort survives given M;
# maxF is the maximum F searched for F0.1; nsteps is the F increment of the
# YPR grid.
MLZ <- function(year, mlen, ss, K, Linf, t0, Lc, nbreaks, styrs, stZ,
                M, a, b, maxage = -log(0.01)/M, maxF = 2, nsteps = 0.01, graph = TRUE) {
  # Estimate total mortality Z from mean lengths; recent F = Z - M
  Z.estimator <- gh(year, mlen, ss, K, Linf, Lc, nbreaks, styrs, stZ, graph)
  Z.recent <- Z.estimator$summary$Estimate[nbreaks + 1]
  F.benchmark <- Z.recent - M
  if (F.benchmark <= 0) stop("F = Z - M results in F < 0.")
  # Obtain F0.1 from the yield-per-recruit analysis
  YPR.analysis <- YPR(Linf, K, t0, M, a, b, Lc, maxage, maxF, nsteps, graph)
  F01 <- YPR.analysis$F01
  if (F01 == maxF) warning("F-01 was the maximum allowed F. Does F-01 exist?")
  # Status table.
  # FIX: the original built a 4-column data frame (F, F01, F, F01) and then
  # assigned only 3 names, which errors at runtime; the intended third
  # column is the ratio of current F to the F0.1 proxy.
  Fstatus <- data.frame(F.benchmark, F01, F.benchmark/F01)
  names(Fstatus) <- c("F", "F-01", "F/Fmsy")
  output <- list(Zestimator = Z.estimator, YPR = YPR.analysis, Status = Fstatus)
  return(output)
}
|
43ac9180c36a7af793abac5bd0fa71b7d278cbda
|
2f93019f313ea7485baea129b0d6b62c35f7b438
|
/OfflineAndOnlinePreprocessing.R
|
0ea577e41ee00379fae8e8c9f3717345a779bab6
|
[] |
no_license
|
ivry216/dwdsome-modeling
|
47db7eb2c82f22702b7a2402fcf7d0db93848d59
|
a5482670b4a89b7a2003f86eaed5bd86d564a77f
|
refs/heads/master
| 2020-04-14T12:27:07.043686
| 2019-01-31T20:14:12
| 2019-01-31T20:14:12
| 163,840,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,095
|
r
|
OfflineAndOnlinePreprocessing.R
|
# Columns we are interested in
columns_to_process <- c("temperature", "specific_conductance", "dissolved_oxygen", "orp", "ph")

# Working copies of the offline (grab-sample) and online (sensor) data for plotting
offline_data_list_plt <- offline_data_list
online_data_list_plt <- data_to_postporcess

# Make online data suitable: tag each list element with its line number,
# keep only timestamp + measured columns, and turn Line into a factor
for (i in seq(online_data_list_plt)) {
  online_data_list_plt[[i]] <- online_data_list_plt[[i]] %>%
    mutate(Line = i) %>%
    select(one_of(c("timestamp", columns_to_process, "Line"))) %>%
    mutate(Line = as.factor(Line))
}

# Merge all lines into single long data frames
online_data_plt <- do.call("rbind", online_data_list_plt)
offline_data_plt <- do.call("rbind", offline_data_list_plt)

# Sensor calibration dates (UTC); drawn as vertical reference lines below
callibration_points <- list(
  as.POSIXct("16.06.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("07.07.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("21.07.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("04.08.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("11.08.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("18.08.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("31.08.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("19.09.2017", format = "%d.%m.%Y", tz = "UTC"),
  as.POSIXct("09.10.2017", format = "%d.%m.%Y", tz = "UTC")
)

# Diagnostic plots (disabled; flip to TRUE to render). Relies on sis_clean,
# sis_amiini, and sis_hypo from the surrounding workspace.
if (FALSE) {
  # pH time series: online points per line, offline/reference series overlaid
  p <- ggplot(NULL, mapping = aes_string(x = "timestamp", y = "ph", color = "Line")) +
    geom_point(data = online_data_plt, alpha = 0.5, size = 1) +
    geom_line(data = sis_clean, color = "black") +
    geom_point(data = sis_clean, color = "black", fill = "grey", size = 2, shape = 21) +
    geom_line(data = sis_amiini, color = "cyan") +
    geom_point(data = sis_amiini, color = "black", fill = "cyan", size = 2, shape = 21) +
    geom_line(data = sis_hypo, color = "magenta") +
    geom_point(data = sis_hypo, color = "black", fill = "magenta", size = 2, shape = 21) +
    geom_line(data = offline_data_plt, color = "black") +
    geom_point(data = offline_data_plt, color = "black", shape = 15) +
    scale_color_manual(values=c("red3", "yellow3", "limegreen", "royalblue3")) +
    scale_x_datetime(date_labels = "%d/%m") +
    facet_grid(Line ~ .) +
    labs(x = "Time", y = "pH")
  print(p)
  # Single-day (30 July) pH detail, one panel per line
  p <- ggplot(NULL, mapping = aes_string(x = "timestamp", y = "ph", color = "Line")) +
    geom_point(data = online_data_plt[day(online_data_plt$timestamp) == 30 & month(online_data_plt$timestamp) == 7,]) +
    facet_wrap(~Line) +
    scale_x_datetime(date_breaks = "1 day", date_labels = "%d/%m") +
    scale_color_manual(values=c("red3", "yellow3", "limegreen", "royalblue3")) +
    labs(x = "Time", y = "pH")
  print(p)
  # Dissolved oxygen series with calibration dates as dashed vertical lines
  p <- ggplot(NULL, mapping = aes_string(x = "timestamp", y = "dissolved_oxygen", color = "Line")) +
    geom_point(data = online_data_plt, alpha = 0.3, size = 1)
  for (vline_data in callibration_points) {
    p <- p + geom_vline(xintercept = vline_data, color = "slategray", linetype= 4, size = 1)
  }
  p <- p + scale_color_manual(values=c("red3", "yellow3", "limegreen", "royalblue3")) +
    scale_x_datetime(date_labels = "%d/%m") +
    labs(x = "Time", y = "Dissolved Oxygen")
  print(p)
}
|
7f32138597a617dd7f919404dc7e370501504687
|
a607b44335be39a267f5b78908189d5605c10145
|
/R/assess_DD.R
|
7409d7ed0e89e4eecfa82172f8c1b32457268051
|
[] |
no_license
|
tcarruth/MSEtool
|
75d4c05b44b84bb97e8f9f85d4dfa7f4246453d5
|
c95c7bcfe9bf7d674eded50e210c3efdc7c2725f
|
refs/heads/master
| 2021-03-27T20:44:23.407068
| 2020-10-13T15:20:26
| 2020-10-13T15:20:26
| 116,047,693
| 2
| 4
| null | 2020-02-27T23:59:15
| 2018-01-02T19:06:42
|
R
|
UTF-8
|
R
| false
| false
| 17,262
|
r
|
assess_DD.R
|
#' Delay - Difference Stock Assessment in TMB
#'
#' A simple delay-difference assessment model using a
#' time-series of catches and a relative abundance index and coded in TMB. The model
#' can be conditioned on either (1) effort and estimates predicted catch or (2) catch and estimates a predicted index.
#' In the state-space version, recruitment deviations from the stock-recruit relationship are estimated.
#'
#' @param x An index for the objects in \code{Data} when running in closed loop simulation.
#' Otherwise, equals to 1 when running an assessment.
#' @param Data An object of class \linkS4class{Data}.
#' @param condition A string to indicate whether to condition the model on catch or effort (ratio of catch and index).
#' @param AddInd A vector of integers or character strings indicating the indices to be used in the model. Integers assign the index to
#' the corresponding index in Data@@AddInd, "B" (or 0) represents total biomass in Data@@Ind, "VB" represents vulnerable biomass in
#' Data@@VInd, and "SSB" represents spawning stock biomass in Data@@SpInd.
#' @param SR Stock-recruit function (either \code{"BH"} for Beverton-Holt or \code{"Ricker"}).
#' @param rescale A multiplicative factor that rescales the catch in the assessment model, which
#' can improve convergence. By default, \code{"mean1"} scales the catch so that time series mean is 1, otherwise a numeric.
#' Output is re-converted back to original units.
#' @param start Optional list of starting values. Entries can be expressions that are evaluated in the function. See details.
#' @param fix_h Logical, whether to fix steepness to value in \code{Data@@steep} in the assessment model.
#' @param fix_sd Logical, whether the standard deviation of the data in the likelihood (index for conditioning on catch or
#' catch for conditioning on effort). If \code{TRUE}, the SD is fixed to value provided in \code{start} (if provided), otherwise,
#' value based on either \code{Data@@CV_Cat} or \code{Data@@CV_Ind}.
#' @param fix_tau Logical, the standard deviation of the recruitment deviations is fixed. If \code{TRUE},
#' tau is fixed to value provided in \code{start} (if provided), otherwise, equal to 1.
#' @param dep The initial depletion in the first year of the model. A tight prior is placed on the model objective function
#' to estimate the equilibrium exploitation rate that corresponds to the initial depletion. Due to this tight prior, this F
#' should not be considered to be an independent model parameter.
#' @param LWT A vector of likelihood weights for each survey.
#' @param integrate Logical, whether the likelihood of the model integrates over the likelihood
#' of the recruitment deviations (thus, treating it as a random effects/state-space variable).
#' Otherwise, recruitment deviations are penalized parameters.
#' @param silent Logical, passed to \code{\link[TMB]{MakeADFun}}, whether TMB
#' will print trace information during optimization. Used for diagnostics of model convergence.
#' @param opt_hess Logical, whether the hessian function will be passed to \code{\link[stats]{nlminb}} during optimization
#' (this generally reduces the number of iterations to convergence, but is memory and time intensive and does not guarantee an increase
#' in convergence rate). Ignored if \code{integrate = TRUE}.
#' @param n_restart The number of restarts (calls to \code{\link[stats]{nlminb}}) in the optimization procedure, so long as the model
#' hasn't converged. The optimization continues from the parameters from the previous (re)start.
#' @param control A named list of parameters regarding optimization to be passed to
#' \code{\link[stats]{nlminb}}.
#' @param inner.control A named list of arguments for optimization of the random effects, which
#' is passed on to \code{\link[TMB]{newton}} via \code{\link[TMB]{MakeADFun}}.
#' @param ... Additional arguments (not currently used).
#' @return An object of \code{\linkS4class{Assessment}} containing objects and output from TMB.
#' @details
#' To provide starting values for \code{DD_TMB}, a named list can be provided for \code{R0} (virgin recruitment),
#' \code{h} (steepness), and \code{q} (catchability coefficient) via the \code{start} argument (see example).
#'
#' For \code{DD_SS}, additional start values can be provided for \code{omega} and \code{tau}, the standard
#' deviation of the catch and recruitment variability, respectively.
#' @note Similar to many other assessment
#' models, the model depends on assumptions such as stationary productivity and
#' proportionality between the abundance index and real abundance.
#' Unsurprisingly the extent to which these assumptions are
#' violated tends to be the biggest driver of performance for this method.
#' @author T. Carruthers & Z. Siders. Zach Siders coded the TMB function.
#' @references
#' Carruthers, T, Walters, C.J,, and McAllister, M.K. 2012. Evaluating methods that classify
#' fisheries stock status using only fisheries catch data. Fisheries Research 119-120:66-79.
#'
#' Hilborn, R., and Walters, C., 1992. Quantitative Fisheries Stock Assessment: Choice,
#' Dynamics and Uncertainty. Chapman and Hall, New York.
#' @describeIn DD_TMB Observation-error only model
#' @section Required Data:
#' \itemize{
#' \item \code{DD_TMB}: Cat, Ind, Mort, L50, vbK, vbLinf, vbt0, wla, wlb, MaxAge
#' \item \code{DD_SS}: Cat, Ind, Mort, L50, vbK, vbLinf, vbt0, wla, wlb, MaxAge
#' }
#' @section Optional Data:
#' \itemize{
#' \item \code{DD_TMB}: steep
#' \item \code{DD_SS}: steep, CV_Cat
#' }
#' @import TMB
#' @importFrom stats nlminb
#' @examples
#' \donttest{
#' #### Observation-error delay difference model
#' res <- DD_TMB(Data = DLMtool::Red_snapper)
#'
#' # Provide starting values
#' start <- list(R0 = 1, h = 0.95)
#' res <- DD_TMB(Data = DLMtool::Red_snapper, start = start)
#'
#' summary(res@@SD) # Parameter estimates
#'
#' ### State-space version
#' ### Set recruitment variability SD = 0.3 (since fix_tau = TRUE)
#' res <- DD_SS(Data = Red_snapper, start = list(tau = 0.3))
#' }
#' @seealso \link{plot.Assessment} \link{summary.Assessment} \link{retrospective} \link{profile} \link{make_MP}
#' @useDynLib MSEtool
#' @export
# Observation-error-only delay-difference assessment. Thin wrapper around the
# shared implementation DD_() with state_space = FALSE: sigma is estimated (or
# implied by the data), tau is fixed, and no recruitment deviations are
# estimated (DD_ maps log_rec_dev out of the parameter list when
# state_space = FALSE). See the roxygen block above for argument details.
DD_TMB <- function(x = 1, Data, condition = c("catch", "effort"), AddInd = "B", SR = c("BH", "Ricker"), rescale = "mean1",
                   start = NULL, fix_h = TRUE, dep = 1, LWT = NULL, silent = TRUE, opt_hess = FALSE, n_restart = ifelse(opt_hess, 0, 1),
                   control = list(iter.max = 5e3, eval.max = 1e4), ...) {
  condition <- match.arg(condition)  # "catch" (default) or "effort"
  DD_(x = x, Data = Data, state_space = FALSE, condition = condition, AddInd = AddInd, SR = SR, rescale = rescale, start = start,
      fix_h = fix_h, dep = dep, LWT = LWT, fix_sd = FALSE,
      fix_tau = TRUE, integrate = FALSE, silent = silent, opt_hess = opt_hess, n_restart = n_restart,
      control = control, inner.control = list(), ...)
}
# Tag the function with class "Assess" (the class is presumably checked
# elsewhere in the package to identify assessment functions -- not visible here).
class(DD_TMB) <- "Assess"
#' @rdname DD_TMB
#' @useDynLib MSEtool
#' @export
# State-space delay-difference assessment. Thin wrapper around DD_() with
# state_space = TRUE, so log-recruitment deviations are estimated (penalized,
# or integrated out as random effects when integrate = TRUE). Unlike DD_TMB,
# fix_sd/fix_tau/integrate are exposed to the caller here.
DD_SS <- function(x = 1, Data, condition = c("catch", "effort"), AddInd = "B", SR = c("BH", "Ricker"), rescale = "mean1",
                  start = NULL, fix_h = TRUE, fix_sd = FALSE, fix_tau = TRUE, dep = 1, LWT = NULL,
                  integrate = FALSE, silent = TRUE, opt_hess = FALSE, n_restart = ifelse(opt_hess, 0, 1),
                  control = list(iter.max = 5e3, eval.max = 1e4), inner.control = list(), ...) {
  condition <- match.arg(condition)  # "catch" (default) or "effort"
  DD_(x = x, Data = Data, state_space = TRUE, condition = condition, AddInd = AddInd, SR = SR, rescale = rescale, start = start,
      fix_h = fix_h, dep = dep, LWT = LWT, fix_sd = fix_sd,
      fix_tau = fix_tau, integrate = integrate, silent = silent, opt_hess = opt_hess, n_restart = n_restart,
      control = control, inner.control = inner.control, ...)
}
# Tag the function with class "Assess" (same mechanism as DD_TMB above).
class(DD_SS) <- "Assess"
# Shared implementation behind DD_TMB (state_space = FALSE) and DD_SS
# (state_space = TRUE). Builds the TMB data and parameter lists from the S4
# Data object, optimizes the delay-difference model via MakeADFun/nlminb, and
# packages everything into an Assessment object. Internal; argument semantics
# are documented on the exported wrappers.
#
# Fixes relative to the previous version:
#  * start$U_equilibrium was read through the misspelled name
#    `start$U_equililbrium`, which is always NULL, so a user-supplied starting
#    value was silently ignored and the default was used instead. Now read
#    correctly (and indexed with [1] like the other start entries).
#  * `=` assignments replaced with `<-`; scalar `|` replaced with `||`.
DD_ <- function(x = 1, Data, state_space = FALSE, condition = c("catch", "effort"), AddInd = "B", SR = c("BH", "Ricker"), rescale = "mean1", start = NULL,
                fix_h = TRUE, fix_sd = TRUE, fix_tau = TRUE, dep = 1, LWT = NULL,
                integrate = FALSE, silent = TRUE, opt_hess = FALSE, n_restart = ifelse(opt_hess, 0, 1),
                control = list(iter.max = 5e3, eval.max = 1e4), inner.control = list(), ...) {
  dependencies <- "Data@Cat, Data@Ind, Data@Mort, Data@L50, Data@vbK, Data@vbLinf, Data@vbt0, Data@wla, Data@wlb, Data@MaxAge"
  dots <- list(...)
  # Entries of `start` may be expressions; evaluate them in this environment.
  start <- lapply(start, eval, envir = environment())
  condition <- match.arg(condition)
  SR <- match.arg(SR)

  # Life history: asymptotic weight, length- and weight-at-age (von
  # Bertalanffy + allometric W-L), and age of 50% vulnerability (>= 1).
  Winf <- Data@wla[x] * Data@vbLinf[x]^Data@wlb[x]
  age <- 1:Data@MaxAge
  la <- Data@vbLinf[x] * (1 - exp(-Data@vbK[x] * ((age - Data@vbt0[x]))))
  wa <- Data@wla[x] * la^Data@wlb[x]
  a50V <- iVB(Data@vbt0[x], Data@vbK[x], Data@vbLinf[x], Data@L50[x])
  a50V <- max(a50V, 1)

  # Model years: either supplied via `yind` in ..., or from the first
  # non-missing catch observation onwards.
  if(any(names(dots) == "yind")) {
    yind <- eval(dots$yind)
  } else {
    ystart <- which(!is.na(Data@Cat[x, ]))[1]
    yind <- ystart:length(Data@Cat[x, ])
  }
  Year <- Data@Year[yind]
  C_hist <- Data@Cat[x, yind]

  # Assemble index observations/SDs/units, one column per entry of AddInd.
  Ind <- lapply(AddInd, Assess_I_hist, Data = Data, x = x, yind = yind)
  I_hist <- do.call(cbind, lapply(Ind, getElement, "I_hist"))
  I_sd <- do.call(cbind, lapply(Ind, getElement, "I_sd"))
  I_units <- do.call(cbind, lapply(Ind, getElement, "I_units"))
  if(is.null(I_hist)) stop("No indices found.", call. = FALSE)
  nsurvey <- ncol(I_hist)

  if(condition == "effort") {
    # Effort is derived as catch/index, rescaled to a mean of 1.
    if(nsurvey > 1) stop("Only one index time series can be used when conditioning on effort.", call. = FALSE)
    E_hist <- C_hist/I_hist[, 1]
    if(any(is.na(E_hist))) stop("Missing values in catch and index in Data object.")
    E_rescale <- 1/mean(E_hist)
    E_hist <- E_hist * E_rescale
  } else {
    E_hist <- rep(1, length(yind))
  }

  # Delay-difference growth/survival parameters (Hilborn & Walters 1992).
  ny <- length(C_hist)
  k <- ceiling(a50V) # get age nearest to 50% vulnerability (ascending limb)
  k[k > Data@MaxAge/2] <- ceiling(Data@MaxAge/2) # to stop stupidly high estimates of age at 50% vulnerability
  Rho <- (wa[k + 2] - Winf)/(wa[k + 1] - Winf)
  Alpha <- Winf * (1 - Rho)
  S0 <- exp(-Data@Mort[x]) # get So survival rate
  wk <- wa[k]

  if(rescale == "mean1") rescale <- 1/mean(C_hist)
  if(dep <= 0 || dep > 1) stop("Initial depletion (dep) must be between > 0 and <= 1.")
  if(is.null(LWT)) LWT <- rep(1, nsurvey)
  if(length(LWT) != nsurvey) stop("LWT needs to be a vector of length ", nsurvey)

  # Which observation SDs are fixed rather than estimated. All operands are
  # scalar here, so short-circuit || is appropriate.
  fix_sigma <- condition == "effort" || nsurvey > 1 || fix_sd
  fix_omega <- condition == "catch" || fix_sd

  data <- list(model = "DD", S0 = S0, Alpha = Alpha, Rho = Rho, ny = ny, k = k,
               wk = wk, C_hist = C_hist, dep = dep, rescale = rescale, I_hist = I_hist, I_units = I_units, I_sd = I_sd,
               E_hist = E_hist, SR_type = SR, condition = condition, I_lambda = LWT,
               nsurvey = nsurvey, fix_sigma = as.integer(fix_sigma), state_space = as.integer(state_space))
  LH <- list(LAA = la, WAA = wa, maxage = Data@MaxAge, A50 = k)

  # Starting values: user-supplied entries take precedence, otherwise defaults
  # are filled in from the Data object (or generic fallbacks) below.
  params <- list()
  if(!is.null(start)) {
    if(!is.null(start$R0) && is.numeric(start$R0)) params$R0x <- log(start$R0[1] * rescale)
    if(!is.null(start$h) && is.numeric(start$h)) {
      if(SR == "BH") {
        h_start <- (start$h[1] - 0.2)/0.8  # map (0.2, 1) -> (0, 1) for logit
        params$transformed_h <- logit(h_start)
      } else {
        params$transformed_h <- log(start$h[1] - 0.2)
      }
    }
    if(!is.null(start$q_effort) && is.numeric(start$q_effort)) params$log_q_effort <- log(start$q_effort[1])
    # Bug fix: previously read the misspelled `start$U_equililbrium` (always
    # NULL), silently discarding a user-supplied starting value.
    if(!is.null(start$U_equilibrium) && is.numeric(start$U_equilibrium)) params$U_equilibrium <- start$U_equilibrium[1]
    if(!is.null(start$omega) && is.numeric(start$omega)) params$log_omega <- log(start$omega[1])
    if(!is.null(start$sigma) && is.numeric(start$sigma)) params$log_sigma <- log(start$sigma[1])
    if(!is.null(start$tau) && is.numeric(start$tau)) params$log_tau <- log(start$tau[1])
  }
  if(is.null(params$R0x)) {
    params$R0x <- ifelse(is.null(Data@OM$R0[x]), log(4 * mean(data$C_hist)), log(1.5 * rescale * Data@OM$R0[x]))
  }
  if(is.null(params$transformed_h)) {
    h_start <- ifelse(is.na(Data@steep[x]), 0.9, Data@steep[x])
    if(SR == "BH") {
      h_start <- (h_start - 0.2)/0.8
      params$transformed_h <- logit(h_start)
    } else {
      params$transformed_h <- log(h_start - 0.2)
    }
  }
  if(is.null(params$log_q_effort)) params$log_q_effort <- log(1)
  # Nonzero equilibrium U only makes sense when the stock starts depleted.
  if(is.null(params$U_equilibrium)) params$U_equilibrium <- ifelse(dep < 1, 0.1, 0)
  if(is.null(params$log_omega)) {
    params$log_omega <- max(0.05, sdconv(1, Data@CV_Cat[x]), na.rm = TRUE) %>% log()
  }
  if(is.null(params$log_sigma)) params$log_sigma <- max(0.05, sdconv(1, Data@CV_Ind[x]), na.rm = TRUE) %>% log()
  if(is.null(params$log_tau)) {
    params$log_tau <- ifelse(is.na(Data@sigmaR[x]), 0.6, Data@sigmaR[x]) %>% log()
  }
  params$log_rec_dev <- rep(0, ny - k)

  info <- list(Year = Year, data = data, params = params, I_hist = I_hist, LH = LH,
               rescale = rescale, control = control, inner.control = inner.control)
  if(condition == "effort") info$E_rescale <- E_rescale

  # Map out (fix) parameters that are not estimated in this configuration.
  map <- list()
  if(condition == "catch") map$log_q_effort <- factor(NA)
  if(fix_h) map$transformed_h <- factor(NA)
  if(dep == 1) map$U_equilibrium <- factor(NA)
  if(fix_omega) map$log_omega <- factor(NA)
  if(fix_sigma) map$log_sigma <- factor(NA)
  if(fix_tau) map$log_tau <- factor(NA)
  if(!state_space) map$log_rec_dev <- factor(rep(NA, ny - k))

  # Treat recruitment deviations as random effects when integrating.
  random <- NULL
  if(integrate) random <- "log_rec_dev"

  obj <- MakeADFun(data = info$data, parameters = info$params, random = random,
                   map = map, hessian = TRUE, DLL = "MSEtool", inner.control = inner.control, silent = silent)
  mod <- optimize_TMB_model(obj, control, opt_hess, n_restart)
  opt <- mod[[1]]
  SD <- mod[[2]]
  report <- obj$report(obj$env$last.par.best)

  Yearplusone <- c(Year, max(Year) + 1)
  Yearplusk <- c(Year, max(Year) + 1:k)

  # Names for the data-likelihood components of the NLL breakdown.
  if(condition == "catch") {
    NLL_name <- paste0("Index_", 1:nsurvey)
  } else {
    NLL_name <- "Catch"
  }
  # If the optimizer failed (opt is an error string), fall back to the
  # reported NLL (not available when integrating).
  nll_report <- ifelse(is.character(opt), ifelse(integrate, NA, report$nll), opt$objective)

  Assessment <- new("Assessment", Model = ifelse(state_space, "DD_SS", "DD_TMB"),
                    Name = Data@Name, conv = !is.character(SD) && SD$pdHess,
                    B0 = report$B0, R0 = report$R0, N0 = report$N0,
                    SSB0 = report$B0, VB0 = report$B0, h = report$h,
                    U = structure(report$U, names = Year),
                    B = structure(report$B, names = Yearplusone),
                    B_B0 = structure(report$B/report$B0, names = Yearplusone),
                    SSB = structure(report$B, names = Yearplusone),
                    SSB_SSB0 = structure(report$B/report$B0, names = Yearplusone),
                    VB = structure(report$B, names = Yearplusone),
                    VB_VB0 = structure(report$B/report$B0, names = Yearplusone),
                    R = structure(report$R, names = Yearplusk),
                    N = structure(report$N, names = Yearplusone),
                    Obs_Catch = structure(C_hist, names = Year),
                    Obs_Index = structure(I_hist, dimnames = list(Year, paste0("Index_", 1:nsurvey))),
                    Catch = structure(report$Cpred, names = Year),
                    Index = structure(report$Ipred, dimnames = list(Year, paste0("Index_", 1:nsurvey))),
                    NLL = structure(c(nll_report, report$nll_comp, report$prior, report$penalty),
                                    names = c("Total", NLL_name, "Dev", "Prior", "Penalty")),
                    info = info, obj = obj, opt = opt, SD = SD, TMB_report = report,
                    dependencies = dependencies)

  if(state_space) {
    # Recruitment deviations start k years after the first model year.
    YearDev <- seq(Year[1] + k, max(Year))
    Assessment@Dev <- structure(report$log_rec_dev, names = YearDev)
    Assessment@Dev_type <- "log-Recruitment deviations"
  }

  if(Assessment@conv) {
    # MSY reference points are only meaningful for a converged model.
    ref_pt <- get_MSY_DD(info$data, report$Arec, report$Brec)
    report <- c(report, ref_pt)
    Assessment@UMSY <- report$UMSY
    Assessment@MSY <- report$MSY
    Assessment@BMSY <- Assessment@SSBMSY <- Assessment@VBMSY <- report$BMSY
    Assessment@U_UMSY <- structure(report$U/report$UMSY, names = Year)
    Assessment@B_BMSY <- Assessment@SSB_SSBMSY <- Assessment@VB_VBMSY <- structure(report$B/report$BMSY, names = Yearplusone)
    Assessment@TMB_report <- report
    if(state_space) {
      if(integrate) {
        SE_Dev <- sqrt(SD$diag.cov.random)
      } else {
        SE_Dev <- sqrt(diag(SD$cov.fixed)[names(SD$par.fixed) == "log_rec_dev"])
      }
      Assessment@SE_Dev <- structure(SE_Dev, names = YearDev)
    }
  }
  return(Assessment)
}
# MSY reference points for the delay-difference model: maximize equilibrium
# yield over exploitation rate U (searched on the logit scale) given the
# stock-recruit parameters Arec/Brec and the growth/survival terms in TMB_data.
# Returns a list with UMSY, MSY, and BMSY (= MSY/UMSY).
get_MSY_DD <- function(TMB_data, Arec, Brec) {
  # Negative equilibrium yield as a function of logit(U); negated so that
  # optimize(), a minimizer, finds the yield maximum.
  neg_equilibrium_yield <- function(logit_U) {
    U_exploit <- ilogit(logit_U)
    surv <- TMB_data$S0 * (1 - U_exploit)
    # Spawners (biomass) per recruit under the delay-difference dynamics.
    spr <- (surv * TMB_data$Alpha/(1 - surv) + TMB_data$wk)/(1 - TMB_data$Rho * surv)
    # Equilibrium recruitment from the stock-recruit relationship.
    if(TMB_data$SR_type == "BH") R_eq <- (Arec * spr - 1)/(Brec * spr)
    if(TMB_data$SR_type == "Ricker") R_eq <- log(Arec * spr)/(Brec * spr)
    -(U_exploit * spr * R_eq)
  }
  fit <- optimize(neg_equilibrium_yield, interval = c(-50, 6))
  UMSY <- ilogit(fit$minimum)
  MSY <- -fit$objective
  list(UMSY = UMSY, MSY = MSY, BMSY = MSY/UMSY)
}
|
8c4fd9a0db42399628071bb1cd8c18cab808e5dd
|
6f29719e2a8eaeee1c0e424fa3a62a105eb4fc34
|
/run_analysis.R
|
922c81d19e658811a5841e2563f26251b34d5ebb
|
[] |
no_license
|
tengiao/GetCleanData_CourseProj
|
682509c9bdf90a1ba8071e48556a8d274be1cf49
|
bb3b1bfed188d9014bf9442799b8394119dc86d4
|
refs/heads/master
| 2020-05-27T04:19:12.844964
| 2015-04-26T23:10:15
| 2015-04-26T23:10:15
| 34,354,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,546
|
r
|
run_analysis.R
|
## Course: Getting and Cleaning Data
## Coursework: Peer Assessment / Course Project
## Reads the UCI HAR dataset from ./UCI HAR Dataset/, merges test/train sets,
## keeps mean/std features, relabels activities, and writes per-subject,
## per-activity feature averages to meanStdDataAvrg.txt.

#####################################################################
#### 1. Merges the training and the test sets to create one data set.
#####################################################################
## Piece together data on test subjects, activity labels and feature data.
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
testData <- X_test
testData$Activity <- Y_test[, 1]      # activity code per observation
testData$Subjects <- subject_test[, 1]  # subject id per observation
## Piece together data on train subjects, activity labels and feature data.
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
trainData <- X_train
trainData$Activity <- Y_train[, 1]
trainData$Subjects <- subject_train[, 1]
## Merge test and training sets.
allData <- rbind(testData, trainData)

###############################################################################################
#### 2. Extracts only the measurements on the mean and standard deviation for each measurement.
###############################################################################################
features <- read.table("./UCI HAR Dataset/features.txt")
# NOTE(review): in a regex, "mean()|std()" is equivalent to "mean|std" (the
# empty groups match nothing), so this also matches e.g. "meanFreq" columns --
# confirm this is the intended selection.
meanStdCols <- grep("mean()|std()", features[, 2]) # Returns a numeric vector of positions in features.
# Data frame containing activity data with non-descriptive variable names.
meanStdData <- data.frame(allData[, meanStdCols], "Subjects" = allData$Subjects, "Activity" = allData$Activity)

###############################################################################
#### 3. Uses descriptive activity names to name the activities in the data set.
###############################################################################
actLabels <- read.table("./UCI HAR Dataset/activity_labels.txt", colClasses = c("integer", "character"))
library(dplyr)
actLabels <- dplyr::rename(actLabels, Activity = V1, ActivityNames = V2) # If plyr is also loaded. Conflict between the same function "rename".
meanStdData$Activity <- actLabels$ActivityNames[match(meanStdData$Activity, actLabels$Activity)] # Match and replace with activity description.

##########################################################################
#### 4. Appropriately labels the data set with descriptive variable names.
##########################################################################
featCols <- features[, 2]
newVarNames <- as.character(featCols[meanStdCols]) # Get variable names of mean/sd features.
names(meanStdData) <- append(newVarNames, c("Subjects", "Activity")) # Assign new variable names for data set.

######################################################################################################################################################
#### 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
######################################################################################################################################################
# NOTE(review): assumes subject ids are exactly 1..n with no gaps -- holds for
# the UCI HAR data (subjects 1-30) but would silently miss ids otherwise.
subjects <- 1:length(unique(meanStdData$Subjects)) # List of subject names (in numbers).
activity <- sort(actLabels$ActivityNames) # List of activity names.
numofFeat <- length(newVarNames) # Number of mean/sd features.
# Instantiate new data frame for mean values.
meanStdData.mean <- NULL
# Subset data frame rows conditioned on each combination of "Subjects" and "Activity".
# NOTE(review): growing via rbind() in a loop is O(n^2); acceptable for the
# 30 x 6 combinations here. Note c(i, j) coerces the row to character because
# j is a character activity name.
for (i in subjects) {
  for (j in activity) {
    # Subset dataset and exclude "Subjects" and "Activity" columns.
    meanStdData.sub <- meanStdData[(meanStdData$Subjects == i & meanStdData$Activity == j), 1:numofFeat]
    # Calculate column means and return a 79-elements vector.
    colMeansVal <- as.data.frame(colMeans(meanStdData.sub))
    # Add each vector of column means as rows to new data set.
    newrow <- append(colMeansVal[, 1], c(i, j))
    meanStdData.mean <- rbind(meanStdData.mean, as.vector(newrow))
  }
}
meanStdData.mean <- as.data.frame(meanStdData.mean)
names(meanStdData.mean) <- names(meanStdData) # Add back variable names.
# Save table as meanStdDataAvrg.txt
write.table(meanStdData.mean, file = "meanStdDataAvrg.txt", row.names = FALSE)
|
d73f8175b1fc49c604aa5bb600fc73e31e6c9682
|
c49aa09f1f83ee8f8c9d1e716ae38381ed3fafca
|
/feature_selection/ex_8/sig_examine.R
|
9c0be0d95dde5241f5d3d4d358f001a14392d319
|
[] |
no_license
|
whtbowers/multiomics
|
de879d61f15aa718a18dc866b1e5ef3848e27c42
|
81dcedf2c491107005d184f93cb6318865d00e65
|
refs/heads/master
| 2020-04-11T03:25:40.635266
| 2018-09-24T08:51:06
| 2018-09-24T08:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 754
|
r
|
sig_examine.R
|
# Report counts of significant factors before and after elastic-net (EMN)
# selection, for each combination of data set (prot/gene), comparison
# (tb_ltbi/tb_od), and step-1 filter (BH/LFC).
# NOTE(review): hard-coded setwd() makes the script machine-specific --
# consider relative paths, but kept here to preserve behaviour.
setwd("/home/whb17/Documents/project3/project_files/feature_selection/ex_9/")

# Bug fix: tic()/toc() come from the 'tictoc' package, which was never
# attached -- the script only ran if the interactive session happened to have
# it loaded. Attach it explicitly.
library(tictoc)

tic()

step1s <- c('_BH', '_LFC')
sets <- c('prot', 'gene')
comps <- c('_tb_ltbi', '_tb_od')

for (set in sets) {
  for (comp in comps) {
    for (step1 in step1s) {
      # Factor lists before and after elastic-net filtering (lambda+1se).
      sig.factors <- read.csv(paste0("../../data/ex_9/feat_sel/", set, comp, step1, "_sig_factors.csv"), header = TRUE, row.names = 1)
      sig.emn.factors <- read.csv(paste0("../../data/ex_9/feat_sel/", set, comp, step1, "_EMN_sig_factors.csv"), header = TRUE, row.names = 1)
      print(paste0("Number of factors for ", set, " and ", comp, " with ", step1, ": ", nrow(sig.factors), ". After elastic net using lambda+1se as threshold: ", nrow(sig.emn.factors)))
    }
  }
}

toc()
|
a31b5998f44f8aa7ddd922b5cab3eed6bcf311ec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TreeSearch/tests/test-tree-rearrange.R
|
4e4c1724ba9474e3afd661443c776106b8d7954d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,774
|
r
|
test-tree-rearrange.R
|
# ape provides read.tree()/rtree() used throughout these tests.
library(ape)
context("Arboriculture: Specified tree rearrangements")

# Fixture trees of 5-11 tips. tree6/tree6b are put in preorder via Preorder();
# the others are flagged as preorder by setting the 'order' attribute directly.
tree5a <- read.tree(text='(a, (b, (c, (d, e))));')
tree5b <- read.tree(text='((a, b), (c, (d, e)));')
tree6 <- Preorder(read.tree(text="((a, (b, (c, d))), (e, f));"))
tree6b <- Preorder(read.tree(text="((a, (b, c)), (d, (e, f)));"))
tree8 <- read.tree(text="(((a, (b, (c, d))), (e, f)), (g, h));")
tree11 <- read.tree(text="((((a, b), (c, d)), e), ((f, (g, (h, i))), (j, k)));")
attr(tree5a, 'order') <- attr(tree5b, 'order') <- attr(tree8, 'order') <- attr(tree11, 'order') <- 'preorder'
test_that("NNI works", {
  # Fully pectinate (caterpillar) tree.
  trComb <- read.tree(text = "(((((1,2),3),4),5),6);")
  # Pin the pre-3.6.0 RNG so the randomly chosen NNI move is reproducible.
  suppressWarnings(RNGversion("3.5.0")) # Until we can require R3.6.0
  set.seed(0)
  nniComb <- NNI(trComb)
  # An NNI move preserves the leaf set and the number of internal nodes...
  expect_equal(nniComb$tip.label, trComb$tip.label)
  expect_equal(nniComb$Nnode, trComb$Nnode)
  # ...and with this seed it swaps tips 1 and 3.
  expect_equal(nniComb, read.tree(text = "(((((3,2),1),4),5),6);"))
})
test_that("TBR can swap over root", {
  # TBR with edge/merge pairs that span the root still yields the expected topologies.
  expect_equal(TBR(tree5a, 1, c(7, 1)), read.tree(text='(a, (d, (e, (c, b))));'))
  expect_equal(TBR(tree5a, 2, c(5, 1)), read.tree(text='(a, (c, (b, (d, e))));'))
  expect_equal(TBR(tree5b, 1, c(7, 1)), read.tree(text='((a, b), (d, (c, e)));'))
  expect_equal(TBR(tree5b, 4, c(7, 1)), read.tree(text='((a, b), (d, (c, e)));'))
})
test_that("TBR works", {
  tree <- tree8
  # The ### lines below are retained, deliberately disabled expectations
  # (single mergeEdges value form).
  ### expect_equal(TBR(tree, 3, 1 ), read.tree(text="((a, ((b, (c, d)), (e, f))), (g, h));"))
  ### expect_warning(expect_identical(TBR(tree, 3, 2), tree))
  ### expect_warning(expect_identical(TBR(tree, 3, 3), tree))
  ### expect_warning(expect_identical(TBR(tree, 3, 4), tree))
  ### expect_warning(expect_identical(TBR(tree, 3, 44), tree))
  ### expect_equal(TBR(tree, 3, 5 ), read.tree(text="((((a, b), (c, d)), (e, f)), (g, h));"))
  ### expect_equal(TBR(tree, 3, 6 ), read.tree(text="(((b, (a, (c, d))), (e, f)), (g, h));"))
  ### expect_equal(TBR(tree, 3, 7 ), read.tree(text="(((b, ((a, c), d)), (e, f)), (g, h));"))
  ### expect_equal(TBR(tree, 3, 8 ), read.tree(text="(((b, (c, (a, d))), (e, f)), (g, h));"))
  ### expect_equal(TBR(tree, 3, 9 ), read.tree(text="(((b, (c, d)), (a, (e, f))), (g, h));"))
  ### expect_equal(TBR(tree, 3, 10), read.tree(text="(((b, (c, d)), ((a, e), f)), (g, h));"))
  ### expect_equal(TBR(tree, 3, 11), read.tree(text="(((b, (c, d)), (e, (a, f))), (g, h));"))
  ### expect_equal(TBR(tree, 3, 12), read.tree(text="(((b, (c, d)), (e, f)), (a, (g, h)));"))
  ### expect_equal(TBR(tree, 3, 13), read.tree(text="(((b, (c, d)), (e, f)), ((g, a), h));"))
  ### expect_equal(TBR(tree, 3, 14), read.tree(text="(((b, (c, d)), (e, f)), (g, (a, h)));"))

  tree <- tree8
  # Breaking edge 6 and reattaching at each candidate edge pair; warnings are
  # expected when the requested move cannot change the tree.
  expect_equal(TBR(tree, 6, c(1 , 6)), read.tree(text="((((a, b), (e, f)), (c, d)), (g, h));"))
  expect_equal(TBR(tree, 6, c(1 , 7)), read.tree(text="((((a, b), (e, f)), (c, d)), (g, h));"))
  expect_equal(TBR(tree, 6, c(1 , 8)), read.tree(text="((((a, b), (e, f)), (c, d)), (g, h));"))
  expect_equal(TBR(tree, 6, c(2 , 6)), TBR(tree, 6, c(2 , 7)))
  expect_equal(TBR(tree, 6, c(2 , 6)), TBR(tree, 6, c(2 , 8)))
  expect_equal(TBR(tree, 6, c(2 , 6)), read.tree(text="((((a, b), (c, d)), (e, f)), (g, h));"))
  expect_equal(TBR(tree, 6, c(3 , 6)), read.tree(text="(((((c, d), a), b), (e, f)), (g, h));"))
  expect_warning(expect_identical(TBR(tree, 6, c(4 , 6)), tree))
  expect_warning(expect_identical(TBR(tree, 8, c(6 , 8)), tree))
  expect_warning(expect_identical(TBR(tree, 6, c(5 , 6)), tree))
  expect_warning(expect_identical(TBR(tree, 6, c(6 , 6)), tree))
  expect_warning(expect_identical(TBR(tree, 6, c(6 , 7)), tree))
  expect_warning(expect_identical(TBR(tree, 6, c(6 , 8)), tree))
  expect_equal(TBR(tree, 6, c(9 , 6)), read.tree(text="(((a, b), ((c, d), (e, f))), (g, h));"))
  expect_equal(TBR(tree, 6, c(10, 6)), read.tree(text="(((a, b), (((c, d), e), f)), (g, h));"))
  expect_equal(TBR(tree, 6, c(11, 6)), read.tree(text="(((a, b), (((c, d), f), e)), (g, h));"))
  expect_equal(TBR(tree, 6, c(12, 6)), read.tree(text="(((a, b), (e, f)), ((c, d), (g, h)));"))
  expect_equal(TBR(tree, 6, c(13, 6)), read.tree(text="(((a, b), (e, f)), (((c, d), g), h));"))
  expect_equal(TBR(tree, 6, c(14, 6)), read.tree(text="(((a, b), (e, f)), (((c, d), h), g));"))
  expect_warning(expect_identical(TBR(tree, 6, c(6, 15)), tree))
  expect_equal(TBR(tree, 4, c(1, 5)), read.tree(text="(((a, (e, f)), (b, (c, d))), (g, h));"))
  expect_equal(TBR(tree, 4, c(1, 6)), read.tree(text="(((a, (e, f)), (b, (c, d))), (g, h));"))
  expect_equal(TBR(tree, 4, c(1, 7)), read.tree(text="(((a, (e, f)), (c, (b, d))), (g, h));"))
  expect_equal(TBR(tree, 4, c(1, 8)), read.tree(text="(((a, (e, f)), (d, (b, c))), (g, h));"))

  # Larger tree; edge lengths assigned here but TBR is called on tree11 itself.
  tree <- tree11
  tree$edge.length = rep(1, 20)
  expect_equal(TBR(tree11, 11, c(8, 17)), read.tree(text='((j, k), (e, ((a, b), (c, (d, (i, (h, (g, f))))))));'))
  expect_equal(TBR(tree11, 11, c(2, 11)), read.tree(text='((j, k), (e, (((a, b), (c, d)), (f, (g, (i, h))))));'))
  expect_warning(TBR(tree11, 10, c(2, 11)))
  expect_equal(TBR(tree11, 10, c(3, 11)), read.tree(text='(e, ((c, d), ((a, b), ((j, k), (f, (g, (h, i)))))));'))
})
test_that("RootedTBR fails", {
  # For reference, the fixtures used below:
  # tree8 <- read.tree(text="(((a, (b, (c, d))), (e, f)), (g, h));")
  # tree11 <- read.tree(text="((((a, b), (c, d)), e), ((f, (g, (h, i))), (j, k)));")
  # Where the move does not cross the root, RootedTBR agrees with TBR.
  expect_equal(TBR(tree8, 4, c(3, 7)), RootedTBR(tree8, 4, c(3, 7)))
  expect_equal(TBR(tree8, 4, c(1, 5)), RootedTBR(tree8, 4, c(1, 5)))
  # Edges adjacent to the root cannot be broken without disturbing the root.
  expect_warning(RootedTBR(tree5a, edgeToBreak = 1))
  expect_warning(RootedTBR(tree5a, edgeToBreak = 2))
  expect_equal(RootedTBR(tree5a, edgeToBreak = 3, mergeEdges=6), read.tree(text='(a, (c, (b, (d, e))));'))
  # Random rooted TBR moves should never warn on a valid tree.
  expect_silent(replicate(100, RootedTBR(tree5a)))
  expect_warning(RootedTBR(tree8, 4, c(13, 6)))
  # Four-tip trees have no valid rooted TBR move.
  expect_warning(RootedTBR(read.tree(text='((a, b), (c, d));')))
})
test_that("RootedSPR fails", {
  # Degenerate or root-adjacent requests should warn rather than error.
  expect_warning(RootedSPR(read.tree(text='((a, b), (c, d));')))
  expect_warning(RootedSPR(tree8, edgeToBreak=1))
  expect_warning(RootedSPR(tree8, edgeToBreak=13))
  expect_warning(RootedSPR(tree8, edgeToBreak=14))
  # Trees built specifically to trigger the warning paths below.
  warnTree1 <- read.tree(text='((a, (b, (c, d))), (e, (f, (g, h))));')
  warnTree2 <- read.tree(text='((a, (b, (c, d))), (((e, f), g), h));')
  attr(warnTree1, 'order') <- attr(warnTree2, 'order') <- 'preorder'
  expect_warning(RootedSPR(warnTree1, 3))
  expect_warning(RootedSPR(warnTree1, 10))
  expect_warning(RootedSPR(warnTree2, 9))
  expect_warning(RootedSPR(warnTree2, 8))
})
test_that("SPR is special case of TBR", {
  # Interactive debugging helpers (unused by the assertions below).
  #library(devtools); library(testthat); library(ape); load_all()
  Plot <- function (x) {plot(x); nodelabels(cex=0.8); edgelabels()}
  # An SPR move equals a TBR move whose mergeEdges pair repeats the broken edge.
  expect_equal(SPR(tree11, 3, 9), TBR(tree11, 3, c(3, 9)))
  expect_equal(SPR(tree11, 12, 9), TBR(tree11, 12, c(12, 9)))
  # Re-root the SPR result so it is comparable with the TBR result.
  expect_equal(root(SPR(tree11, 1, 14), letters[1:5], resolve.root=TRUE), TBR(tree11, 1, c(1, 14)))
  expect_error(SPR(tree11, 1, 6))
})
test_that("TBR move lister works", {
  # `tree6` / `tree6b` are six-leaf fixtures defined earlier in the file.
  edge <- tree6$edge
  parent <- edge[, 1]
  child <- edge[, 2]
  # Each row of `moves` is a (broken edge, reconnection edge) pair.
  moves <- TBRMoves(parent, child)
  expect_equal(rep(2:10, c(7, 5, 6, 4, 6, 6, 4, 6, 6)), moves[, 1])
  expect_equal(c(4:10, 6:10, 2, 6:10, 2, 8:10,
                 rep(c(2:4, 8:10), 2), 4:7, 2:7, 2:7), moves[, 2])
  # retainRoot = TRUE removes moves that would change the root position.
  rootedMoves <- TBRMoves(parent, child, retainRoot=TRUE)
  expect_equal(matrix(c(2,4, 2,5, 2,6, 2,7,
                        3,6, 3,7,
                        4,2, 4,6, 4,7,
                        5,2,
                        6,2, 6,3, 6,4,
                        7,2, 7,3, 7,4), ncol=2, byrow=TRUE), rootedMoves)
  edge <- tree6b$edge
  parent <- edge[, 1]
  child <- edge[, 2]
  rootedMoves <- TBRMoves(parent, child, retainRoot=TRUE)
  expect_equal(matrix(c(2,2,4,5,7,7 ,9,10,
                        4,5,2,2,9,10,7,7 ), ncol=2), rootedMoves)
  # AllTBR returns one rearranged tree per root-retaining move... here 4.
  expect_equal(length(AllTBR(parent, child, retainRoot=TRUE)), 4)
})
test_that("CollapseNodes works", {
  # Argument order and node range are validated.
  expect_error(CollapseNode(1:5, tree8))
  expect_error(CollapseNode(tree8, 1))
  suppressWarnings(RNGversion("3.5.0")) # Until we can require R3.6.0
  set.seed(1)  # rtree() draws a random topology; seed pins the fixture
  tree <- rtree(7)
  # Collapsing nothing is the identity.
  expect_equal(tree, CollapseNode(tree, integer(0)))
  no1213 <- CollapseNode(tree, c(12, 13))
  expect_equal(no1213$edge, matrix(c(8, 9, 9, 8, 10, 11, 11, 10, 10, 10,
                                     9, 1, 2, 10, 11, 3:7), ncol=2))
  # Edge lengths of collapsed edges are added onto their children.
  el <- tree$edge.length
  expect_equal(no1213$edge.length, c(el[1:7], el[8] + c(c(el[9] + el[10:11]), el[12])))
  no11 <- CollapseEdge(tree, 5L)
  expect_equal(no11$edge, matrix(c(8, 9, 9, 8, 10, 10, 10, 11, 12, 12, 11,
                                   9, 1, 2, 10, 3, 4, 11, 12, 5:7), ncol=2))
})
|
ca256545a0324538dcb3f4ad55b13c576ae0d36c
|
2b4051506c86c25d511e522a9c1627ff1735ba7a
|
/R/step_opls.R
|
42126552e6957d2027909f001d55e95a04819c60
|
[
"BSD-3-Clause"
] |
permissive
|
CVUA-RRW/tidySpectR
|
5b3f8d4d03b321d4d54339a802d09424b1262930
|
2a10beae84c34bf37ba265fdfb76ceb7eb263654
|
refs/heads/main
| 2023-04-03T10:03:26.964521
| 2021-04-09T13:12:52
| 2021-04-09T13:12:52
| 331,905,727
| 0
| 1
|
BSD-3-Clause
| 2021-01-26T15:45:45
| 2021-01-22T10:00:48
|
R
|
UTF-8
|
R
| false
| false
| 7,145
|
r
|
step_opls.R
|
#' Applies OPLS noise filtering on numeric data
#'
#' `step_opls_denoise` creates a 'specification' of a recipe
#' step that will filter the first orthogonal component of the OPLS
#' transformation on the columns.
#'
#' @param recipe A recipe object. The step will be added to the
#' sequence of operations for this recipe.
#' @param ... One or more selector functions to choose which
#' variables are affected by the step. See [selections()]
#' for more details. For the `tidy` method, these are not
#' currently used.
#' @param role Not used by this step since no new variables are
#' created.
#' @param trained A logical to indicate if the quantities for
#' preprocessing have been estimated.
#' @param outcome When a single outcome is available, character
#' string or call to [dplyr::vars()] can be used to specify a single outcome
#' variable.
#' @param Wortho A vector of weights for the first orthogonal component. This is
#' `NULL` until computed by [prep.recipe()].
#' @param Portho A vector of loadings for the first orthogonal component. This is
#' `NULL` until computed by [prep.recipe()].
#' @param skip A logical. Should the step be skipped when the
#' recipe is baked by [bake.recipe()]? While all operations are baked
#' when [prep.recipe()] is run, some operations may not be able to be
#' conducted on new data (e.g. processing the outcome variable(s)).
#' Care should be taken when using `skip = TRUE` as it may affect
#' the computations for subsequent operations
#' @param id A character string that is unique to this step to identify it.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` (the
#' selectors or variables selected), `value` (the
#' standard deviations and means), and `statistic` for the type of value.
#'
#' @importFrom recipes add_step rand_id ellipse_check step bake prep
#' @importFrom recipes printer terms_select check_type is_trained sel2char
#' @importFrom tibble tibble as_tibble
#' @importFrom generics tidy required_pkgs
#'
#' @export
#' @details
#' Orthogonal Projection to Latent Structures (OPLS) separates the predictor
#' variation that is correlated with the response from the variation that is
#' orthogonal to it. This makes it possible to remove systematic variation
#' that is not correlated with the response.
#'
#' The OPLS algorithm is implemented only for binary outcomes!
#'
#' OPLS calculation uses the implementation of the R package:
#' \url{https://bioconductor.org/packages/release/bioc/html/ropls.html}
#'
#'
#' @references
#' Trygg, J., & Wold, S. (2002). Orthogonal projections to latent structures
#' (O-PLS). Journal of Chemometrics, 16(3), 119–128. doi:10.1002/cem.695
#' \url{https://onlinelibrary.wiley.com/doi/abs/10.1002/cem.695}
#'
#' Thévenot, E. A., Roux, A., Xu, Y., Ezan, E., & Junot, C. (2015). Analysis
#' of the Human Adult Urinary Metabolome Variations with Age, Body Mass Index,
#' and Gender by Implementing a Comprehensive Workflow for Univariate and OPLS
#' Statistical Analyses. Journal of Proteome Research, 14(8), 3322–3335.
#' doi:10.1021/acs.jproteome.5b00354
#' \url{https://pubs.acs.org/doi/10.1021/acs.jproteome.5b00354}
#'
#' @examples
#' library(ropls)
#' library(tidymodels)
#' library(tidySpectR)
#'
#' data(sacurine)
#' attach(sacurine)
#'
#' genderFc <- sampleMetadata[, "gender"]
#'
#' urinedata <- dataMatrix %>%
#' cbind(genderFc) %>%
#' as_tibble() %>%
#' add_column(id = rownames(dataMatrix), .before = 1) %>%
#' select(-id)
#'
#' rec <- recipe(urinedata, genderFc ~.) %>%
#' step_normalize(all_predictors()) %>%
#' step_opls_denoise(all_predictors(), outcome = "genderFc")
#' tidy(rec)
#' rec %>% prep() %>% bake(NULL)
step_opls_denoise <-
  function(recipe,
           ...,
           role = NA,
           trained = FALSE,
           outcome = NULL,
           Wortho = NULL,
           Portho = NULL,
           skip = FALSE,
           id = rand_id("opls_denoise")){
    # `outcome` is mandatory: prep() fits the OPLS model against it, so fail
    # fast here rather than deep inside prep().
    if (is.null(outcome)) {
      rlang::abort("`outcome` should select one column.")
    }
    # Capture the selector expressions for later resolution in prep().
    # (Fixed: was `terms = ...`; use `<-` for assignment, `=` is for arguments.)
    terms <- ellipse_check(...)
    # Register an untrained step; Wortho/Portho stay NULL until prep().
    add_step(
      recipe,
      step_opls_denoise_new(
        terms = terms,
        role = role,
        trained = trained,
        outcome = outcome,
        Wortho = Wortho,
        Portho = Portho,
        skip = skip,
        id = id
      )
    )
  }
# Internal constructor: bundles the step's fields into a `step` object with
# subclass "opls_denoise".  Called by step_opls_denoise() with trained = FALSE
# and again by prep.step_opls_denoise() with trained = TRUE and the fitted
# Wortho / Portho matrices.
step_opls_denoise_new <-
  function(terms, role, trained, outcome, Wortho, Portho, skip, id){
    step(
      subclass = "opls_denoise",
      terms = terms,
      role = role,
      trained = trained,
      outcome = outcome,
      Wortho = Wortho,
      Portho = Portho,
      skip = skip,
      id = id
    )
  }
#' @importFrom ropls opls getWeightMN getLoadingMN
#' @importFrom dplyr select
#' @importFrom utils capture.output
#' @export
# Fit the OPLS model on the training data and return the trained step.
# Estimates one predictive and one orthogonal component; only the orthogonal
# weights (Wortho) and loadings (Portho) are kept, since bake() needs nothing
# else to remove the orthogonal variation.
prep.step_opls_denoise <- function(x, training, info = NULL, ...){
  # Resolve the selectors to concrete column names and require numeric columns.
  col_names <- terms_select(x$terms, info)
  check_type(training[, col_names])
  predictors <- training[, col_names]
  outcomes <- select(training, x$outcome) %>% as.matrix()
  # ropls::opls() prints progress to the console; capture.output() silences it.
  invisible(
    capture.output(
      model <- opls(predictors, outcomes, predI = 1, orthoI = 1)
    )
  )
  Wortho <- getWeightMN(model, orthoL = TRUE)
  Portho <- getLoadingMN(model, orthoL = TRUE)
  step_opls_denoise_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    outcome = x$outcome,
    Wortho = Wortho,
    Portho = Portho,
    skip = x$skip,
    # BUG FIX: was `id = x$identify`. The step object has no `identify`
    # element and `$` partial matching cannot match a longer name, so the
    # trained step's id was silently set to NULL.
    id = x$id
  )
}
#' @importFrom dplyr bind_cols
#' @importFrom tibble as_tibble
#' @export
# Apply the trained OPLS noise filter: project the data onto the first
# orthogonal component and subtract that projection from the original values.
bake.step_opls_denoise <- function(object, new_data, ...){
  # Columns the OPLS model was trained on (row names of the weight matrix).
  trained_vars <- rownames(object$Wortho)
  values <- as.matrix(new_data[, trained_vars])
  # Orthogonal scores, then remove the reconstructed orthogonal variation.
  ortho_scores <- values %*% object$Wortho
  denoised <- values - ortho_scores %*% t(object$Portho)
  # Keep the untouched columns and append the denoised ones in their place.
  untouched <- new_data[, !(colnames(new_data) %in% trained_vars), drop = FALSE]
  as_tibble(bind_cols(untouched, as_tibble(denoised)))
}
#' @export
# One-line summary for print(recipe); `printer()` (recipes internal) lists the
# affected variables, truncated to `width` characters.
print.step_opls_denoise <-
  function(x, width = max(20, options()$width - 30), ...) {
    cat("OPLS denoising for ", sep = "")
    printer(rownames(x$Wortho), x$terms, x$trained, width = width)
    # Return the step invisibly so printing composes cleanly in pipelines.
    invisible(x)
}
#' @rdname step_opls_denoise
#' @param x A `step_opls_denoise` object.
#' @export
# Tidy method: one row per (variable, statistic) pair once trained; a
# placeholder row per selector before training.
tidy.step_opls_denoise <- function(x, ...) {
  if (is_trained(x)) {
    vars <- rownames(x$Wortho)
    # BUG FIX: `terms` previously had length n while `statistic`/`value` had
    # length 2n; tibble() does not recycle non-scalar vectors, so this errored.
    # Repeat the variable names once for the weights block and once for the
    # loadings block, matching the order of c(x$Wortho, x$Portho).
    # NOTE(review): the "weigths" typo is kept deliberately — it is a runtime
    # label that downstream code may match on.
    res <- tibble(terms = rep(vars, times = 2),
                  statistic = rep(c("orthogonal weigths", "orthogonal loadings"), each = length(vars)),
                  value = c(x$Wortho, x$Portho))
  } else {
    term_names <- sel2char(x$terms)
    res <- tibble(terms = term_names,
                  statistic = rlang::na_chr,
                  value = rlang::na_dbl)
  }
  res$id <- x$id
  res
}
#' @rdname required_pkgs.step
#' @export
# Declares the package(s) that must be attached for this step to bake, e.g.
# when the recipe runs on a parallel worker.
required_pkgs.step_opls_denoise <- function(x, ...) {
  c("tidySpectR")
}
|
a0e941445bcd0293c1f6ae7654900b64acc34be5
|
ddd3c938592e80c2c7712fc5577ae6d280365fd0
|
/logoplotFastqs/man/consensusMatrix_function.Rd
|
3c8f5d83c0d7cde3f9bd12140cf94ffd7b3b9db4
|
[
"MIT"
] |
permissive
|
jjlinscientist/logoplot_fastqs
|
90d7b4fb4f0b6c290029b05e6413f530a3dccbbc
|
f767e3511c39eee3899ed2559cae243f72a0e4eb
|
refs/heads/master
| 2022-12-07T07:25:19.240480
| 2020-09-04T07:23:50
| 2020-09-04T07:23:50
| 261,117,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 866
|
rd
|
consensusMatrix_function.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logoplot_functions.R
\name{consensusMatrix_function}
\alias{consensusMatrix_function}
\title{Wrapper around consensusMatrix()}
\usage{
consensusMatrix_function(file, right_align = FALSE)
}
\arguments{
\item{file}{A fastq file}
\item{right_align}{Logical statement to 'right justify' all sequences found in the fastq file. defaults to FALSE}
}
\value{
Returns a matrix of DNA base frequencies
}
\description{
Wrapper around consensusMatrix() from the ShortRead package to import sequences from a fastq file and (optionally) align them at the 5' (left) or 3' (right) DNA base
depends on packages Biostrings, data.table, ShortRead
}
\examples{
consensusMatrix_function('path/to/file.fastq', right_align = TRUE)
}
\author{
Justin Lin, \email{justin.jnn.lin@gmail.com}
}
\keyword{utilities}
|
dac9e16c322a0f54c20cad53b91be75daf6b3474
|
2d382240f37086ffa606944acb1e99eabfb9a5ce
|
/run_analysis.R
|
6bac1219cd717cf464b9a1e0e0f96f861a2c08a3
|
[] |
no_license
|
chennycool/cleandata
|
1d6ff16920ca6cb9e2a4b91d157ff8917a1eb6ab
|
0665729fe1ab0d9213c9ae6a55cc3110d24076de
|
refs/heads/master
| 2021-01-10T13:37:26.744143
| 2015-09-27T21:45:40
| 2015-09-27T21:45:40
| 43,264,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,862
|
r
|
run_analysis.R
|
## Download and unpack the UCI HAR dataset into ./cleandata.
## NOTE: on Windows, method = "curl" does not work for this URL; use
## method = "auto" with mode = "wb" (binary), since the target is a .zip file.
if (!file.exists("./cleandata")) {
  dir.create("./cleandata")
}
## BUG FIX: setwd() used to be inside the if-block above, so when ./cleandata
## already existed the archive was downloaded into the current directory
## instead.  Change directory unconditionally.
setwd("./cleandata")
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "Dataset.zip", method = "auto", mode = "wb")
## Unzip the archive; this creates the folder "UCI HAR Dataset"
unzip("Dataset.zip")
setwd("./UCI HAR Dataset")
## You should create one R script called run_analysis.R that does the following.
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean
#    and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second,
#    independent tidy data set with the average of each variable
#    for each activity and each subject.
## Read the readme.txt for details
## Read data from the files.  X_* are the 561 feature measurements, Y_* the
## activity codes, subject_* the participant ids, one row per window sample.
test_set<- read.table("./test/X_test.txt")
test_label<- read.table("./test/Y_test.txt")
train_set<- read.table("./train/X_train.txt")
train_label<- read.table("./train/Y_train.txt")
test_subject<- read.table("./test/subject_test.txt")
train_subject<- read.table("./train/subject_train.txt")
feature_name<- read.table ("features.txt")
activity_label<- read.table ("activity_labels.txt")
## Inspect the structures interactively (no effect on the pipeline).
str(test_set)
str(test_label)
str(train_set)
str(train_label)
str(test_subject)
str(train_subject)
str(feature_name)
str(activity_label)
## Step 1: stack train on top of test (same row order in all three objects,
## so the subjects, labels and measurements stay aligned).
merge_set<- rbind (train_set, test_set)
merge_label<- rbind (train_label, test_label)
merge_subject<- rbind (train_subject, test_subject)
## Name and merge all variables into one wide data frame.
names(merge_set)<- feature_name$V2
names(merge_label)<- c("Activity_ID")
names(merge_subject)<- c("Subject")
all_data<- cbind (merge_subject, merge_label, merge_set)
## Step 2: keep only features whose name contains "mean()" or "std()"
## (this deliberately excludes meanFreq() and angle(...Mean) variables).
MoS_feature<- feature_name$V2[grep("mean\\(\\)|std\\(\\)", feature_name$V2)]
## subset data by MoS
sub_col<- c("Subject", "Activity_ID", as.character(MoS_feature))
MoS_data<- subset(all_data, select=sub_col)
## Step 3: attach descriptive activity names via the lookup table.
names(activity_label)<- c("Activity_ID", "Activity")
MoS_data<- merge (MoS_data, activity_label, by="Activity_ID")
str(MoS_data)
## Step 4: expand the terse feature names into descriptive variable names.
## remove ()
names(MoS_data)<- gsub("\\()", "", names(MoS_data))
## Capital M in mean
names(MoS_data)<- gsub("mean", "Mean", names(MoS_data))
## Capital S in std
names(MoS_data)<- gsub("std", "Std", names(MoS_data))
## ^t is replaced by time
names(MoS_data)<- gsub("^t", "time", names(MoS_data))
## ^f is replaced by frequency
names(MoS_data)<- gsub("^f", "frequency", names(MoS_data))
## Acc is replaced by Accelerometer
names(MoS_data)<- gsub("Acc", "Accelerometer", names(MoS_data))
## Gyro is replaced by Gyroscope
names(MoS_data)<- gsub("Gyro", "Gyroscope", names(MoS_data))
## Mag is replaced by Magnitude
names(MoS_data)<- gsub("Mag", "Magnitude", names(MoS_data))
## BodyBody is replaced by Body
names(MoS_data)<- gsub("BodyBody", "Body", names(MoS_data))
## remove -
names(MoS_data)<- gsub("-", "", names(MoS_data))
##check names
names(MoS_data)
## Step 5: average every measurement column per (Subject, Activity), then
## sort and write the tidy data set one directory up.
tidy_data<- aggregate(. ~Subject + Activity, MoS_data, mean)
tidy_data<- tidy_data[order(tidy_data$Subject, tidy_data$Activity_ID), ]
setwd("..")
write.table(tidy_data, file="tidydata.txt", row.name=FALSE)
|
128b05fda387a9ea575e7e488f9a2a364fe89f8a
|
0f5ac1a14815e057f2032ef0c56e6b147962bcc6
|
/helpers.R
|
5071351ec00940501477e68e12fe3fcd14ca6754
|
[] |
no_license
|
fdabl/Covid-Forecast
|
6fa7561c69d3131199f0997e7ce14b7363cee0fe
|
c829fca997f08e84bc5738622b846a7bafab4b03
|
refs/heads/master
| 2022-12-18T21:05:13.643754
| 2020-09-28T10:28:35
| 2020-09-28T10:28:35
| 256,978,009
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,482
|
r
|
helpers.R
|
library('jsonlite')
library('reticulate')
library('RColorBrewer')
# Convert the bounds p = c(lower, upper) of a Uniform(lower, upper)
# distribution into its c(mean, sd).  The sd of a uniform on an interval of
# width w is w / sqrt(12).
uni2norm <- function(p) {
  interval_width <- p[2] - p[1]
  c(mean(p[1:2]), interval_width / sqrt(12))
}
# Inverse of uni2norm(): the bounds c(lower, upper) of the uniform
# distribution that has the requested mean and standard deviation.
norm2uni <- function(mean, sd = 0.10) {
  half_width <- sd * sqrt(12) / 2
  c(mean - half_width, mean + half_width)
}
# Default intervention-strength priors: each pair is the (lower, upper) bound
# of a uniform prior on alpha, converted below to (mean, sd) via uni2norm().
ALPHAS <- list(
  c(0.1,0.6), c(0.3,0.90), c(0.6,0.95),
  c(0.6,0.95), c(0.6,0.95), c(0.6,0.95), c(0.5,0.70)
)
ALPHAS <- lapply(ALPHAS, uni2norm)
# Day (offset from startdate) on which each alpha above takes effect.
DAYALPHAS <- c(8, 15, 20, 23, 28, 32, 72)
# Add alphas: extend the schedule with 8 extra change points, spaced from 60
# days after the last fixed one up to 15 days before today, each with a fixed
# (0.75, 0.075) normal prior.  NOTE: this depends on Sys.Date(), so the
# configuration changes from day to day.
todate <- Sys.Date()
startdate <- as.Date('3/1/20', tryFormats = '%m/%d/%y')
total_days <- as.numeric(todate - startdate)
last_day <- DAYALPHAS[length(DAYALPHAS)]
additional_days <- round(seq(last_day + 60, total_days - 15, length.out = 8))
additional_alphas <- lapply(seq(length(additional_days)), function(i) c(0.75, 0.075))
# The third to last date is doubled
ALPHAS <- c(ALPHAS, additional_alphas)
DAYALPHAS <- c(DAYALPHAS, additional_days)
# Shared plotting constants.
LINESIZE <- 0.95
INTERVENTION_COLOR <- '#ADADAD'
# reticulate needs an interpreter before source_python() below.
use_python('/usr/bin/python3')
# Setup Python Environment
# system('apt-get install python3-tk')
# # For Shinyapps.io
# virtualenv_create(envname = 'python_env', python = 'python3')
# virtualenv_remove(envname = "python_env", packages = "pip")
# print(py_discover_config())
# virtualenv_install(
#   'python_env',
#   packages = c('numpy==1.18.5', 'h5py', 'scipy==1.4.1', 'tqdm', 'requests', 'lxml', 'selenium')
# )
# Exposes run_dashboard_wrapper() (the Covid-SEIR model driver) to R.
source_python('bin/dashboard_wrapper.py')
# config <- fromJSON('Covid-SEIR/configs/netherlands_dashboard.json')
# config <- fromJSON('config.json')
# config$single_run <- TRUE
# res <- run_dashboard_wrapper(toJSON(config, auto_unbox = TRUE))
# Plot the model's forecast for one quantity (`type`, e.g. 'infected',
# 'hospitalizedcum', 'ICU', 'dead') as a median line with 40%/90% ribbons and
# the observations as points.  `cols` = c(light ribbon colour, dark line
# colour).  If show_intervention, overlay the single-run ("what-if") mean with
# an uncertainty band scaled from the ensemble quantiles.
plot_predictions <- function(
  config, model, type, cols, ylab, title,
  show_intervention = FALSE
) {
  res <- model$data
  # Columns produced by the Python wrapper: time index, ensemble mean, the
  # 5/30/50/70/95% quantiles, and the observed data series.
  dat <- res[[type]]
  colnames(dat) <- c('Time', 'Mean', 'p5', 'p30', 'p50', 'p70', 'p95', 'Observed')
  dat <- data.frame(dat)
  start <- as.Date(config[['startdate']], tryFormats = '%m/%d/%y')
  dat$Date <- start + dat$Time - 1
  p <- ggplot(dat, aes(x = Date, y = p50)) +
    geom_ribbon(aes(ymin = p5, ymax = p95, fill = '90% CI'), alpha = 0.50) +
    geom_ribbon(aes(ymin = p30, ymax = p70, fill = '40% CI'), alpha = 0.75) +
    geom_point(aes(y = Observed), color = 'gray30', size = 0.50) +
    geom_line(aes(y = p50, color = 'Median'), size = LINESIZE) +
    ggtitle(title) +
    ylab(ylab) +
    scale_colour_manual(
      name = '',
      values = c('Median' = cols[2]),
      labels = c('Median')
    ) +
    scale_fill_manual(
      name = '',
      values = c('90% CI' = cols[1], '40% CI' = cols[2])
    ) +
    theme_bw() +
    theme(
      legend.position = 'top',
      legend.text = element_text(size = 10),
      plot.title = element_text(size = 16, hjust = 0.50),
      axis.text = element_text(size = 12),
      axis.title = element_text(size = 14)
    )
  if (show_intervention) {
    # Make the plotting of the intervention line work for infected
    # (the single-run output stores the cumulative series under a different key)
    if (type == 'infected') {
      type <- 'infected_cum'
    }
    preddat <- model$single_data[[type]]
    # Second column of the single-run output is the deterministic mean.
    dat$Mean <- preddat[, 2]
    p <- p +
      geom_line(data = dat, aes(x = Date, y = Mean, color = 'Intervention'), size = LINESIZE) +
      # Band width borrows the *relative* ensemble spread around the median.
      geom_ribbon(
        data = dat,
        aes(ymin = Mean * (1 - (p50 - p5) / p50), ymax = Mean * (1 + (p95 - p50) / p50)),
        alpha = 0.35, fill = INTERVENTION_COLOR
      ) +
      scale_colour_manual(
        name = '',
        values = c('Intervention' = INTERVENTION_COLOR, 'Median' = cols[2]),
        labels = c('Intervention', 'Median')
      )
  }
  # NOTE(review): the axis limits below use the *global* `startdate`, not the
  # local `start` parsed from `config` — confirm this is intentional.
  upper_xlim <- startdate + 8 * 30
  # Highest value visible inside the x-window, across the 95% quantile and
  # the scaled intervention band, so nothing is clipped vertically.
  upper_ylim <- with(dat, {
    xlim <- as.numeric(upper_xlim) - as.numeric(startdate)
    max(
      c(
        max(p95[seq(xlim)], na.rm = TRUE),
        max((Mean * (1 + (p95 - p50) / p50))[seq(xlim)], na.rm = TRUE)
      )
    )
  })
  p <- p + guides(
    color = guide_legend(order = 1),
    fill = guide_legend(order = 2)
  ) +
    scale_y_continuous(
      limits = c(0, upper_ylim),
      breaks = scales::pretty_breaks(n = 5)
    ) +
    scale_x_date(
      limits = c(startdate, upper_xlim),
      breaks = scales::pretty_breaks(n = 10)
    )
  # saveRDS(p, 'ggsaved.RDS')
  p
}
# Plot the posterior of the intervention strength alpha (as a percentage)
# over time, with 40%/90% ribbons; optionally overlay the user's single-run
# intervention schedule.
plot_interventions <- function(config, model, cols, ylab, title, show_intervention = FALSE) {
  res <- model$data
  alpha <- res[['alpha']][['posterior']]
  # Clamp to [0, 1]: quantile interpolation can stray outside valid
  # proportions.
  sanitize <- function(x) {
    y <- x
    y[y < 0] <- 0
    y[y > 1] <- 1
    y
  }
  dat <- cbind(alpha[, 1], alpha[, -1])
  colnames(dat) <- c('Time', 'p5', 'p30', 'p50', 'p70', 'p95')
  dat <- data.frame(dat)
  # NOTE(review): p95 is not sanitized here while p5..p70 are — confirm
  # whether that is deliberate.
  dat$p5 <- sanitize(dat$p5)
  dat$p30 <- sanitize(dat$p30)
  dat$p50 <- sanitize(dat$p50)
  dat$p70 <- sanitize(dat$p70)
  start <- as.Date(config[['startdate']], tryFormats = '%m/%d/%y')
  dat$Date <- start + dat$Time - 1
  # Values are proportions; multiply by 100 to plot percentages.
  p <- ggplot(dat, aes(x = Date, y = p50 * 100)) +
    geom_ribbon(aes(ymin = p5 * 100, ymax = p95 * 100, fill = '90% CI'), alpha = 0.50) +
    geom_ribbon(aes(ymin = p30 * 100, ymax = p70 * 100, fill = '40% CI'), alpha = 0.75) +
    geom_line(aes(y = p50 * 100, color = 'Median'), size = LINESIZE) +
    ggtitle(title) +
    ylab(ylab) +
    scale_colour_manual(
      name = '',
      values = c('Median' = cols[2]),
      labels = c('Median')
    ) +
    scale_fill_manual(
      name = '',
      values = c('90% CI' = cols[1], '40% CI' = cols[2])
    ) +
    theme_bw() +
    theme(
      legend.position = 'top',
      legend.text = element_text(size = 10),
      plot.title = element_text(size = 16, hjust = 0.50),
      axis.text = element_text(size = 12),
      axis.title = element_text(size = 14)
    )
  if (show_intervention) {
    preddat <- model$single_data[['alpha']]
    # preddat <- data.frame('Time' = preddat[, 1], 'Mean' = preddat[, 2])
    # preddat$Date <- start + preddat$Time - 1
    # Single-run stores 1 - alpha; invert and clamp before plotting.
    dat$Mean <- sanitize(1 - preddat[, 2])
    p <- p +
      geom_line(data = dat, aes(x = Date, y = Mean * 100, color = 'Intervention')) +
      geom_ribbon(
        data = dat,
        aes(ymin = sanitize(Mean * (1 - (p50 - p5) / p50)) * 100,
            ymax = sanitize(Mean * (1 + (p95 - p50) / p50)) * 100
        ),
        alpha = 0.35, fill = INTERVENTION_COLOR, size = LINESIZE
      ) +
      scale_colour_manual(
        name = '',
        values = c('Intervention' = INTERVENTION_COLOR, 'Median' = cols[2]),
        labels = c('Intervention', 'Median')
      )
  }
  # NOTE(review): axis limits use the global `startdate`, not the local
  # `start` parsed from `config` — confirm intended.
  p + guides(
    colour = guide_legend(order = 1),
    fill = guide_legend(order = 2)
  ) +
    scale_y_continuous(
      breaks = seq(0, 100, 20)
    ) +
    scale_x_date(
      limits = c(startdate, startdate + 9 * 30),
      breaks = scales::pretty_breaks(n = 10)
    )
}
# Draw the four standard forecast panels (cases, hospitalizations, ICU,
# mortalities) in a 2x2 grid.  Each spec is: quantity key, c(ribbon colour,
# line colour), y-axis label, panel title.
plot_all <- function(data, model, has_intervened) {
  panel_specs <- list(
    list('infected', c('#FFE4E1', '#F08080'),
         'Confirmed Cases', 'Cumulative Confirmed Cases'),
    list('hospitalizedcum', c('#B0E0E6', '#4682B4'),
         'Hospitalized Cases', 'Cumulative Hospitalized Cases'),
    list('ICU', c('#FFDAB9', '#F4A460'),
         'Intensive Care Cases', 'Intensive Care Cases'),
    list('dead', c('#C0C0C0', '#808080'),
         'Mortalities', 'Cumulative Mortalities')
  )
  panels <- lapply(panel_specs, function(spec) {
    plot_predictions(data, model, spec[[1]], spec[[2]],
                     spec[[3]], spec[[4]], has_intervened)
  })
  gridExtra::grid.arrange(panels[[1]], panels[[2]], panels[[3]], panels[[4]],
                          nrow = 2, ncol = 2)
}
# Refine an intervention schedule by inserting a midpoint between every pair
# of consecutive change points: each original (alpha, day) is followed by the
# element-wise mean of it and its successor.  The final original entry is
# dropped, matching the historical behavior.  Returns list(alphas, days).
add_alphas <- function(alphas, dayalphas) {
  n <- length(alphas)
  refined_alphas <- list()
  refined_days <- c()
  for (i in seq_len(n - 1)) {
    lo <- alphas[[i]]
    hi <- alphas[[i + 1]]
    midpoint <- c(mean(c(lo[1], hi[1])), mean(c(lo[2], hi[2])))
    refined_alphas <- c(refined_alphas, list(lo), list(midpoint))
    refined_days <- c(refined_days,
                      dayalphas[i],
                      round(mean(dayalphas[i:(i + 1)])))
  }
  list(refined_alphas, refined_days)
}
# Build the JSON configuration string for the Covid-SEIR Python model from the
# Shiny `input` object.  `single_run = TRUE` produces the deterministic
# "what-if" run; otherwise the ensemble calibration config is built.
# NOTE(review): `posterior_alphas` is currently unused (only referenced in
# commented-out code below) — confirm before removing the parameter.
create_config <- function(input, posterior_alphas = NULL, single_run = FALSE) {
  # If user shows the alphas, use the alpha input
  # Otherwise use the global variables (defined above)
  # The global variables are also what is shown as default input
  if (input$show_alpha) {
    nr_int <- seq(input$nr_interventions)
    alpha_mean_prior <- paste0('alpha_mean_', nr_int)
    alpha_sd_prior <- paste0('alpha_sd_', nr_int)
    dayalphas_prior <- paste0('day_', nr_int)
    # Local ALPHAS/DAYALPHAS shadow the globals for the rest of this call.
    ALPHAS <- lapply(seq(nr_int), function(i) {
      # Since input is in %, not in proportions
      c(input[[alpha_mean_prior[i]]], input[[alpha_sd_prior[i]]]) / 100
    })
    startdate <- as.Date('3/1/20', tryFormats = '%m/%d/%y')
    # Convert calendar dates to day offsets from the simulation start.
    DAYALPHAS <- sapply(dayalphas_prior, function(day) {
      input[[day]] - startdate
    })
  }
  # If the user has intervened (single_run = TRUE), add the intervention alphas and
  # the days on which the intervention took place to ALPHAS and DAYALPHAS
  if (single_run) {
    nr_int <- seq(input$nr_interventions_forecast)
    alphas_inter <- paste0('alpha_intervention_', nr_int)
    dayalphas_inter <- paste0('day_intervention_', nr_int)
    # NOTE(review): ALPHAS_INTER / DAYALPHAS_INTER are computed but the lines
    # that would append them are commented out below — dead code at present.
    ALPHAS_INTER <- lapply(alphas_inter, function(alpha) {
      c(1 - input[[alpha]], 0.10)
    })
    DAYALPHAS_INTER <- sapply(dayalphas_inter, function(day) input[[day]])
    startdate <- as.Date('3/1/20', tryFormats = '%m/%d/%y')
    # Add intervention alphas
    # ALPHAS <- c(posterior_alphas, ALPHAS_INTER)
    # DAYALPHAS <- c(DAYALPHAS, DAYALPHAS_INTER - as.numeric(startdate))
    # ALPHAS <- posterior_alphas
  }
  # Debug output to the server log.
  print(ALPHAS)
  print(DAYALPHAS)
  # Full model configuration; keys mirror the Covid-SEIR JSON schema.
  json <- list(
    'worldfile' = FALSE,
    'country' = 'res/corona_dataNL_main.txt',
    'dt' = 0.1,
    't_max' = 360,
    'startdate' = '3/1/20',
    'time_delay' = 12,
    'population' = 17e6,
    'nr_prior_samples' = 100,
    'nr_forecast_samples' = 1500,
    'esmda_iterations' = input$esmda_iterations,
    'N' = list(
      'type' = 'uniform',
      'min' = 20000,
      'max' = 80000
    ),
    'sigma' = 0.20,
    'gamma' = 0.50,
    'R0' = list(
      'type' = 'normal',
      'mean' = input$R0_mean,
      'stddev' = input$R0_sd
    ),
    'm' = 0.9,
    'delayHOS' = list(
      'type' = 'normal',
      'mean'= input$delayHOS_mean,
      'stddev'= input$delayHOS_sd,
      'smooth_sd'= input$delayHOS_xi,
      'smooth_sd_sd'= 0
    ),
    'delayHOSREC' = list(
      'type' = 'normal',
      'mean' = input$delayHOS_mean,
      'stddev' = input$delayHOSREC_sd,
      'smooth_sd'= input$delayHOSREC_xi,
      'smooth_sd_sd'= 0
    ),
    'delayHOSD' = list(
      'type' = 'normal',
      'mean' = input$delayHOSD_mean,
      'stddev'= input$delayHOSD_sd,
      'smooth_sd' = input$delayHOSD_xi,
      'smooth_sd_sd' = 0
    ),
    'delayREC' = 12, #input$delayREC,
    # Not in the table
    'delayICUCAND' = list(
      'type' = 'normal',
      'mean' = 0, # input$delayICUCAND_mean,
      'stddev' = 0, # input$delayICUCAND_sd,
      'smooth_sd' = 0,
      'smooth_sd_sd' = 0
    ),
    'delayICUD' = list(
      'type' = 'normal',
      'mean' = input$delayICUD_mean,
      'stddev' = input$delayICUD_sd,
      'smooth_sd' = input$delayICUD_xi,
      'smooth_sd_sd' = 0
    ),
    'delayICUREC' = list(
      'type' = 'normal',
      'mean' = input$delayICUREC_mean,
      'stddev' = input$delayICUREC_sd,
      'smooth_sd' = input$delayICUREC_xi,
      'smooth_sd_sd' = 0
    ),
    # h
    'hosfrac' = list(
      'type' = 'normal',
      'mean' = input$hosfrac_mean,
      'stddev' = input$hosfrac_sd
    ),
    # CFR_hos
    'dfrac' = list(
      'type' = 'normal',
      'mean' = input$dfrac_mean,
      'stddev' = input$dfrac_sd
    ),
    # f_icu (CFR of IC Patients)
    'icudfrac' = list(
      'type' = 'normal',
      'mean' = input$icudfrac_mean,
      'stddev' = input$icudfrac_sd
    ),
    'ICufrac' = 0.30, # does not matter (because estimated from the data, see icufracfile)
    'calibration_mode' = c('hospitalizedcum', 'ICU', 'dead'),
    'observation_error' = c(100.0, 20.0, 30000.0),
    'hist_time_steps' = c(30, 35, 40, 60),
    'p_values' = c(0.05, 0.3, 0.5, 0.7, 0.95),
    'alpha_normal' = TRUE, #single_run,
    'alpha' = ALPHAS,
    'dayalpha' = DAYALPHAS,
    'icufracscale' = list(
      'type' = 'normal',
      'mean' = 1,
      'stddev' = 0.10
    ),
    'icufracfile' = 'output/netherlands_dashboard_icufrac.txt',
    # 'icufracfile' = '../bin/output/netherlands_dashboard_icufrac.txt',
    'icdatafile' = 'res/icdata_main.txt',
    'single_run' = single_run,
    'output_base_filename' = 'netherlands_dashboard',
    'ACCC_timeinterval' = 14000,
    # 'ACCC_timestart' = hammer_date, # Set only in the single-run, so hammer does not run in ensemble
    'ACCC_step' = 0.04,
    'ACCC_maxstep' = 0,
    'ACCC_step_sd' = 0.01,
    'ACCC_low' = 200,
    'ACCC_slope' = 20,
    'ACCC_cliplow' = 0.01,
    'ACCC_cliphigh' = 0.99,
    'ACCC_scale' = 1,
    # 'hammer_ICU' = input$hammer_ICU,
    'hammer_slope' = 10e6, # Effectively not used
    'hammer_release' = 500,
    # 'hammer_alpha' = norm2uni(input$hammer_alpha),
    'YMAX' = 150e3,
    'XMAX' = 240,
    'plot' = list(
      'legendloc' = 'best',
      'legendloczoom' = 'lower left',
      'legendfont' = 'x-small',
      'y_axis_log' = FALSE,
      'hindcast_plume' = TRUE,
      'xmaxalpha' = 240,
      'casename' = 'Netherlands',
      'daily' = FALSE,
      'figure_size' = c(10.0, 4.0)
    )
  )
  # auto_unbox collapses length-1 vectors to JSON scalars, as the Python
  # side expects.
  toJSON(json, pretty = TRUE, auto_unbox = TRUE)
}
|
c36509738b0e5c726c9ef668e760e99e98f56791
|
3ea5d5735196bfd21bec8ee61b96ff9ab124a97b
|
/man/chord.Rd
|
c898ae3fc68a308d9a3264dd3ba4e5e0b2bd2ffd
|
[
"MIT"
] |
permissive
|
yeukyul/jsplot
|
f5664c1e57b282910f0d65bcb6639021817b95c0
|
4cda852599c17fbb9f94de8461fe706d13470dd2
|
refs/heads/master
| 2021-01-19T22:15:06.478683
| 2017-04-23T02:27:57
| 2017-04-23T02:27:57
| 88,781,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 236
|
rd
|
chord.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chord.R
\name{chord}
\alias{chord}
\title{<Add Title>}
\usage{
chord(obj, width = NULL, height = NULL, elementId = NULL)
}
\description{
<Add Description>
}
|
e8384451842f8252c3814466478bba037014cbbf
|
03a8b5eb4204f4f07f443ce7742c0190a43d8f91
|
/_archive/WFA8000x/_Example R codes/Multi-state darter jags/Multispecies jag estimator.R
|
719d3b8c76fd6059395e9cc4d1c04ee46a4e7625
|
[] |
no_license
|
mcolvin/WFA8000-Research-Credits
|
b6b18c63905f0fa2284cca4b10eb5f3b0ef3b8ce
|
96dae9706cb690500e6705e587ccc03d9f64148d
|
refs/heads/master
| 2022-02-21T07:52:45.223428
| 2022-01-25T17:04:55
| 2022-01-25T17:04:55
| 93,789,059
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,180
|
r
|
Multispecies jag estimator.R
|
## >>>>>> NOTE REQUIRES R PACKAGE R2JAGS
## AND
## >>>>>> JAGS SOFTWARE INSTALLED (available: http://mcmc-jags.sourceforge.net/)
##
# library() errors immediately if R2jags is missing; require() only warns and
# returns FALSE, which would let the script fail later with a confusing error.
library(R2jags)
### THE JAGS MODEL
# Multi-state, two-species occupancy model written in the JAGS modelling
# language (held in an R function body for R2jags).  States: 1 = dominant (D)
# only, 2 = subordinate (S) only without young-of-year (YOY), 3 = S only with
# YOY, 4 = both species without YOY, 5 = both with YOY, 6 = unoccupied.
# DO NOT restyle: this is JAGS syntax, not executable R.
jag.model<- function ()
{
### the site loop begins
for(ii in 1:nobs) {
# state 1 only occupied by dominant species (D)
Psi[ii,1] <- psiD[ii]*(1-psiSD[ii])
#state 2 only occupied by subordinate species (S) no YOY
Psi[ii,2] <- (1-psiD[ii])*psiSd[ii]*(1- psiYOY[ii])
#state 3 only occupied by subordinate species (S) YOY
Psi[ii,3] <- (1-psiD[ii])*psiSd[ii]*psiYOY[ii]
#state 4 occupied by both species no YOY
Psi[ii,4] <- psiD[ii]*psiSD[ii]*(1- psiDYOY[ii])
#state 5 occupied by both species with YOY
Psi[ii,5] <- psiD[ii]*psiSD[ii]*psiDYOY[ii]
#state 6 unoccupied
Psi[ii,6] <- (1-psiD[ii])*(1-psiSd[ii])
### Model occupancy as function of covariates cobble
# psiSD = S occupancy given D present; psiSd = S occupancy given D absent.
# max/min clamps keep the logit back-transforms strictly inside (0, 1).
logit(psiD.z[ii])<- eta.D[patch.no[ii]]
psiD[ii]<-max(0.0001,min(0.9999, psiD.z[ii]))
logit(psiSd.z[ii])<- eta.Sd[patch.no[ii]]
psiSd[ii]<-max(0.0001,min(0.9999, psiSd.z[ii]))
logit(psiSD.z[ii])<- eta.SD[patch.no[ii]]
psiSD[ii]<-max(0.0001,min(0.9999, psiSD.z[ii]))
logit(psiYOY.z[ii])<- eta.YOY[patch.no[ii]]
psiYOY[ii]<-max(0.0001,min(0.9999, psiYOY.z[ii]))
logit(psiDYOY.z[ii])<- eta.YOY[patch.no[ii]] + eta.D.YOY.eff
psiDYOY[ii]<-max(0.0001,min(0.9999, psiDYOY.z[ii]))
#categorical 1 draw from a multinomial
Occ[ii] ~ dcat(Psi[ii,])
for (jj in 1:k) {
## detection probabilities indexed by site (ii), visit (jj), true state, observed state
# NOTE(review): the true-state-5 row sums to 1 + (1-pD)*pS*pDYOY, i.e. > 1
# (the (1-pD)*pS mass appears in both observed states 2 and 3).  dcat
# renormalises, but confirm this allocation is intended.
p[ii,jj,1,1] <- pD[ii,jj]
p[ii,jj,1,2] <- 0
p[ii,jj,1,3] <- 0
p[ii,jj,1,4] <- 0
p[ii,jj,1,5] <- 0
p[ii,jj,1,6] <- 1- pD[ii,jj]
p[ii,jj,2,1] <- 0
p[ii,jj,2,2] <- pS[ii,jj]
p[ii,jj,2,3] <- 0
p[ii,jj,2,4] <- 0
p[ii,jj,2,5] <- 0
p[ii,jj,2,6] <- 1 - pS[ii,jj]
p[ii,jj,3,1] <- 0
p[ii,jj,3,2] <- pS[ii,jj]*(1-pYOY[ii,jj])
p[ii,jj,3,3] <- pS[ii,jj]*pYOY[ii,jj]
p[ii,jj,3,4] <- 0
p[ii,jj,3,5] <- 0
p[ii,jj,3,6] <- 1-(pS[ii,jj])
p[ii,jj,4,1] <- pD[ii,jj]*(1-pS[ii,jj])
p[ii,jj,4,2] <- (1-pD[ii,jj])*pS[ii,jj]
p[ii,jj,4,3] <- 0
p[ii,jj,4,4] <- pD[ii,jj]*pS[ii,jj]
p[ii,jj,4,5] <- 0
p[ii,jj,4,6] <- (1-pD[ii,jj])*(1-pS[ii,jj])
p[ii,jj,5,1] <- pD[ii,jj]*(1-pS[ii,jj])
p[ii,jj,5,2] <- (1-pD[ii,jj])*pS[ii,jj]
p[ii,jj,5,3] <- (1-pD[ii,jj])*pS[ii,jj]*pDYOY[ii,jj]
p[ii,jj,5,4] <- pD[ii,jj]*pS[ii,jj]*(1-pDYOY[ii,jj])
p[ii,jj,5,5] <- pD[ii,jj]*pS[ii,jj]*pDYOY[ii,jj]
p[ii,jj,5,6] <- (1-pD[ii,jj])*(1-pS[ii,jj])
p[ii,jj,6,1] <- 0
p[ii,jj,6,2] <- 0
p[ii,jj,6,3] <- 0
p[ii,jj,6,4] <- 0
p[ii,jj,6,5] <- 0
p[ii,jj,6,6] <- 1
# reading in the response variable: 1 for dominant present, 2 for subordinate present
# 3 for both present, and 4 for both absent
# the first k columns of the dataset are capture histories
detect[ii,jj] ~ dcat(p[ii,jj,Occ[ii], ])
### Model detection as function of covariates (intercept-only at present)
logit(pD.z[ii,jj]) <- beta.pD
logit(pS.z[ii,jj]) <- beta.pS
logit(pYOY.z[ii,jj]) <- beta.pYOY
logit(pDYOY.z[ii,jj]) <-beta.pYOY
### THIS JUST KEEPS THINGS IN BOUNDS IGNORE
pD[ii,jj] <-max(0.0001,min(0.9999, pD.z[ii,jj]))
pS[ii,jj] <-max(0.0001,min(0.9999, pS.z[ii,jj]))
pYOY[ii,jj] <-max(0.0001,min(0.9999, pYOY.z[ii,jj]))
pDYOY[ii,jj] <-max(0.0001,min(0.9999, pDYOY.z[ii,jj]))
}
}
### PRIOR DISTRIBUTION FOR DETECTION PARMS
## detection more two parms except for rSD
# NOTE(review): beta.D.YOY.eff is given a prior but never used in the
# likelihood (eta.D.YOY.eff is used instead) — confirm which is intended.
beta.pD ~ dnorm(0,0.37)
beta.pS ~ dnorm(0,0.37)
beta.pYOY ~ dnorm(0,0.37)
beta.D.YOY.eff ~ dnorm(0,0.37)
# Patch-level random effects on the occupancy logits.
for(xx in 1:no.ptch) {
eta.D[xx] ~ dnorm(D.bar,D.tau)
eta.SD[xx] ~ dnorm(SD.bar,SD.tau)
eta.Sd[xx] ~ dnorm(Sd.bar,Sd.tau)
eta.YOY[xx] ~ dnorm(YOY.bar,YOY.tau)
}
# Hyperpriors: mean on the logit scale, uniform prior on the random-effect SD.
D.bar ~ dnorm(0,0.37)
D.tau <- pow(D.ss,-2)
D.ss ~ dunif(0,6)
SD.bar ~ dnorm(0,0.37)
SD.tau <- pow(SD.ss,-2)
SD.ss ~ dunif(0,6)
Sd.bar ~ dnorm(0,0.37)
Sd.tau <- pow(Sd.ss,-2)
Sd.ss ~ dunif(0,6)
YOY.bar ~ dnorm(0,0.37)
YOY.tau <- pow(YOY.ss,-2)
YOY.ss ~ dunif(0,6)
eta.D.YOY.eff ~ dnorm(0,0.37)
# Derived mean occupancy probabilities at the hypermeans.
logit(pred.psi.D)<- D.bar
logit(pred.psi.SD)<- SD.bar
logit(pred.psi.Sd)<- Sd.bar
logit(pred.psi.YOYd) <- YOY.bar
logit(pred.psi.YOYD) <- YOY.bar + eta.D.YOY.eff
## estimate species interaction factor
SIF <- pred.psi.D*pred.psi.SD/(pred.psi.D*(pred.psi.D*pred.psi.SD + (1-pred.psi.D)*pred.psi.Sd))
}
# Driver script: load the detection histories and run the model above.
# NOTE(review): hard-coded user-specific path; anyone else must edit this.
setwd("C:/Users/peterjam/Desktop")
dater<-read.csv("multistate.dater.csv")
# Keep rows with fewer than 5 missing values (i.e. at least one observed visit).
dater$drop<- rowSums(is.na(dater)) < 5
dater<-subset(dater,drop == T)
no.ptch<-nrow(as.data.frame(table(dater$patch.no)))
nobs = nrow(dater); k = 5;
# First k columns are the per-visit detection states; column 8 is patch.no.
detect<-c(dater[ ,1],dater[ ,2],dater[ ,3],dater[ ,4],dater[ ,5])
detect<-array(detect,dim=c(nobs,k))
patch.no<-dater[,8]
#these are the parameters we want to monitor
params<- c("pred.psi.D", "pred.psi.SD", "pred.psi.Sd", "pred.psi.YOYd","pred.psi.YOYD","eta.D.YOY.eff","SIF")
## the data for jags
jdata<- list(nobs=nobs, k=k, no.ptch = no.ptch, patch.no =patch.no, detect =detect)
# the initial values (Occ starts every site in state 5, "both species + YOY",
# which is consistent with any observed detection history)
inits<-function(){list(beta.D.YOY.eff = 0, beta.pD = 0, beta.pS = 0, beta.pYOY = 0,
eta.D.YOY.eff = 0, D.bar = 0, D.ss = 1, SD.bar = 0, SD.ss = 1, Sd.bar = 0, Sd.ss = 1,
YOY.bar = 0, YOY.ss = 1, Occ = rep(5,nobs))}
#Invoking jags
ZZ<-jags(data =jdata, inits=inits, parameters.to.save=params, model.file=jag.model,
n.thin=1, n.chains=2, n.burnin=200, n.iter=10000)
### this provides summary data
ZZ$BUGSoutput$summary
ZZ$BUGSoutput$DIC
ZZ$BUGSoutput$pD
|
324602683732e103fc277cdeb3518403e02697c2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/celestial/examples/sph2car.Rd.R
|
a0f7ab08bf1ac8a61da14c800e77b80df55975ed
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
sph2car.Rd.R
|
library(celestial)
### Name: sph2car
### Title: Transforms 3D spherical coordinates to cartesian coordinates
### Aliases: sph2car
### Keywords: transform
### ** Examples
# Convert (longitude = 45 deg, latitude = 0 deg, radius = sqrt(2)) to cartesian.
print(sph2car(45,0,sqrt(2),deg=TRUE))
|
9355991b09e88cf0686c392af18d3d2a75b56358
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/causaleffect/examples/zzaux.effect.Rd.R
|
4a7cf47609ae1912e705ee23b8e390e8b786526b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 668
|
r
|
zzaux.effect.Rd.R
|
library(causaleffect)
### Name: aux.effect
### Title: Identify a causal effect using surrogate experiments
### Aliases: aux.effect
### ** Examples
library(igraph)
# simplify = FALSE to allow multiple edges
f <- graph.formula(W -+ Z, Z -+ X, X -+ Y, W -+ Y, # Observed edges
W -+ Y, Y -+ W, Z -+ Y, Y -+ Z, Z -+ X, X -+ Z, simplify = FALSE)
# Here the bidirected edges are set to be unobserved in graph g
# This is denoted by giving them a description attribute with the value "U"
# The first 4 edges correspond to the observed edges, the rest are unobserved
f <- set.edge.attribute(f, "description", 5:10, "U")
# Identify the effect of X on Y using a surrogate experiment on Z.
aux.effect(y = "Y", x = "X", z = "Z", G = f)
|
9028169983979922b91cf1222194b9839e47d1a3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/berryFunctions/examples/dupes.Rd.R
|
421fb20a048dc26544dc9e34b1ceb58d649d77cc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
dupes.Rd.R
|
library(berryFunctions)
### Name: dupes
### Title: Duplicate lines in file
### Aliases: dupes
### Keywords: IO character file
### ** Examples
# Locate duplicated lines in a bundled demo file; tofile=FALSE prints to console.
file <- system.file("extdata/doublelines.txt", package="berryFunctions")
dupes(file, tofile=FALSE)
dupes(file, tofile=FALSE, ignore.empty=TRUE)
## These are skipped by rcmd check (opening external places is not allowed):
## Not run: dupes(file)
# a template file (dupes.ods) for libreOffice Calc is available here:
system.file("extdata", package="berryFunctions")
## Not run: system2("nautilus", system.file("extdata/dupes.ods", package="berryFunctions"))
# To open folders with system2:
# "nautilus" on linux ubuntu
# "open" or "dolphin" on mac
# "explorer" or "start" on windows
|
6c08ae47e493ef24a64524787c0d65e5025da2d7
|
8b30b242ec7b79f6db2357b9314c72b0104154f9
|
/man/var_lin.Rd
|
a3c50f25f76ae4e3fe8aa554de83e388478f50ad
|
[] |
no_license
|
ericofrs/CompositeRegressionEstimation
|
e8314cefa4c9207edf0484f5f3efdf48ddfe263e
|
2c1f98840180f21d387b5f4d387b1d45edbebda2
|
refs/heads/master
| 2022-10-11T23:25:17.801929
| 2020-06-11T16:51:17
| 2020-06-11T16:51:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 948
|
rd
|
var_lin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AK3.R
\name{var_lin}
\alias{var_lin}
\title{Gives the variance of an array Y that is a linear transformation AX of an array X from the coefficients of A and Sigma=Var[X]}
\usage{
var_lin(A, Sigma)
}
\arguments{
\item{A}{An array of dimension a_1 x ... x a_n x b_1 x ... x b_p containing the coefficients of the linear transformation}
\item{Sigma}{An array of dimension b_1 x ... x b_p x b_1 x ... x b_p, the variance of X}
}
\value{
The variance of Y = AX, computed from the coefficient array A and the variance-covariance array Sigma.
}
\description{
Gives the variance of an array Y that is a linear transformation AX of an array X from the coefficients of A and Sigma=Var[X]
}
\examples{
a=c(2,4);b=c(3,10,8);A<-array(rnorm(prod(a)*prod(b)),c(a,b));
dimnames(A)[1:2]<-lapply(a,function(x){letters[1:x]});names(dimnames(A))[1:2]<-c("d1","d2");
Sigma=array(drop(stats::rWishart(1,df=prod(b),diag(prod(b)))),rep(b,2));
var_lin(A,Sigma)
}
|
f5534765c8f0bb729a580b7a4e1c00c1598852a4
|
4c1f8f2ec6de103116e598eaa9585151fb604e42
|
/R/pcaPlot.stm.R
|
c2bd6be8e6bcff3c63e177525c5c18f04378732e
|
[] |
no_license
|
ABindoff/stmQuality
|
aedb852378e1ac778ced04aa49490c315498313f
|
142e21263f82efa02000d3eebf11552458cc8890
|
refs/heads/master
| 2021-01-24T10:48:25.617879
| 2018-02-28T02:18:08
| 2018-02-28T02:18:08
| 123,064,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
pcaPlot.stm.R
|
#' Visualise models returned by `stm::manyTopics`
#'
#' Runs a PCA on columns 3 to 5 of `x` (the model-quality metrics) and plots
#' the first two principal components with loadings, colouring and encircling
#' the points by the number of topics `K`.
#'
#' @param x A data-frame returned by `extractFit`
#' @return A `ggplot` object.
#' @export
#' @import ggplot2
#' @import ggfortify
#' @import ggalt
pcaPlot.stm <- function(x){
  # Fix: use TRUE rather than the reassignable shorthand T for prcomp/autoplot flags.
  autoplot(prcomp(x[,3:5], center = TRUE, scale. = TRUE), data = x, colour = "K",
           group = "K",
           loadings = TRUE,
           loadings.label = TRUE) +
    geom_encircle(aes(colour = K))
}
|
c32abab906c0aef623d0af18c50193e35f4c22ae
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.migration/man/importexport_list_jobs.Rd
|
a00bf21e4bb388c71895eccb6de788986e6f0429
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,106
|
rd
|
importexport_list_jobs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importexport_operations.R
\name{importexport_list_jobs}
\alias{importexport_list_jobs}
\title{This operation returns the jobs associated with the requester}
\usage{
importexport_list_jobs(MaxJobs, Marker, APIVersion)
}
\arguments{
\item{MaxJobs}{}
\item{Marker}{}
\item{APIVersion}{}
}
\value{
A list with the following syntax:\preformatted{list(
Jobs = list(
list(
JobId = "string",
CreationDate = as.POSIXct(
"2015-01-01"
),
IsCanceled = TRUE|FALSE,
JobType = "Import"|"Export"
)
),
IsTruncated = TRUE|FALSE
)
}
}
\description{
This operation returns the jobs associated with the requester. AWS
Import/Export lists the jobs in reverse chronological order based on the
date of creation. For example if Job Test1 was created 2009Dec30 and
Test2 was created 2010Feb05, the ListJobs operation would return Test2
followed by Test1.
}
\section{Request syntax}{
\preformatted{svc$list_jobs(
MaxJobs = 123,
Marker = "string",
APIVersion = "string"
)
}
}
\keyword{internal}
|
fdd5bb395c8a8fe84e117d51d0af64d78d27cfe1
|
72d03ec10b4955bcc7daac5f820f63f3e5ed7e75
|
/input/gcam-data-system/socioeconomics-processing-code/level1/L101.Population.R
|
690d1757c9f9ea62d9cc8234fcfaae15ea156507
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bgmishra/gcam-core
|
54daddc3d037571bf745c4cf0d54c0d7a77f493f
|
bbfb78aeb0cde4d75f307fc3967526d70157c2f8
|
refs/heads/master
| 2022-04-17T11:18:25.911460
| 2020-03-17T18:03:21
| 2020-03-17T18:03:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,831
|
r
|
L101.Population.R
|
# L101.Population.R — builds historical and future population tables by GCAM
# region and writes them as level-1 outputs.
# NOTE(review): `R`, `Scen`, `Scen_R`, `base_pop_scen` and the X_* year vectors
# are defined in the sourced header/assumption files and are not visible here.
if( !exists( "SOCIOPROC_DIR" ) ){
if( Sys.getenv( "SOCIOPROC" ) != "" ){
SOCIOPROC_DIR <- Sys.getenv( "SOCIOPROC" )
} else {
stop("Could not determine location of socioeconomics processing scripts, please set the R var SOCIOPROC_DIR to the appropriate location")
}
}
# Universal header file - provides logging, file support, etc.
source(paste(SOCIOPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(SOCIOPROC_DIR,"/../_common/headers/SOCIO_header.R",sep=""))
logstart( "L101.Population.R" )
adddep(paste(SOCIOPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
adddep(paste(SOCIOPROC_DIR,"/../_common/headers/SOCIO_header.R",sep=""))
printlog( "Historical and future population by GCAM region" )
# -----------------------------------------------------------------------------
# 1. Read data
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "COMMON_ASSUMPTIONS", "unit_conversions", extension = ".R" )
sourcedata( "SOCIO_ASSUMPTIONS", "A_socioeconomics_data", extension = ".R" )
iso_GCAM_regID <- readdata( "COMMON_MAPPINGS", "iso_GCAM_regID" )
GCAM3_population <- readdata( "SOCIO_LEVEL0_DATA", "GCAM3_population" )
L100.Pop_thous_ctry_Yh <- readdata( "SOCIO_LEVEL1_DATA", "L100.Pop_thous_ctry_Yh" )
L100.Pop_thous_SSP_ctry_Yfut <- readdata( "SOCIO_LEVEL1_DATA", "L100.Pop_thous_SSP_ctry_Yfut" )
# -----------------------------------------------------------------------------
# 2. Perform computations
#Historical population by region: map each iso code to its GCAM region, then sum.
L100.Pop_thous_ctry_Yh[[R]] <- iso_GCAM_regID[[R]][ match( L100.Pop_thous_ctry_Yh$iso, iso_GCAM_regID$iso ) ]
L101.Pop_thous_R_Yh <- aggregate( L100.Pop_thous_ctry_Yh[ c( X_Maddison_historical_years, X_UN_historical_years ) ],
by=as.list( L100.Pop_thous_ctry_Yh[ R ] ), sum )
#Future population in the SSP scenarios
L100.Pop_thous_SSP_ctry_Yfut[[R]] <- iso_GCAM_regID[[R]][ match( L100.Pop_thous_SSP_ctry_Yfut$iso, iso_GCAM_regID$iso ) ]
L101.Pop_thous_SSP_R_Yfut <- aggregate( L100.Pop_thous_SSP_ctry_Yfut[ c( X_future_years ) ],
by=as.list( L100.Pop_thous_SSP_ctry_Yfut[ Scen_R ] ), sum )
#Future population in the GCAM-SSP (paP) scenarios
#for now use SSP population for both SSP and gSSP; revisit this after consulting GCAM-China team
L101.Pop_thous_gSSP_R_Yfut <- L101.Pop_thous_SSP_R_Yfut
# Scenario names are prefixed with "g" on the first four characters (e.g. SSP1 -> gSSP1).
L101.Pop_thous_gSSP_R_Yfut[[Scen]] <- paste0( "g", substr(L101.Pop_thous_gSSP_R_Yfut[[Scen]],1, 4))
L101.Pop_thous_Scen_R_Yfut <- merge( L101.Pop_thous_SSP_R_Yfut, L101.Pop_thous_gSSP_R_Yfut, all = T)
printlog( "Downscaling GCAM 3.0 population to country on the basis of UN historical data and base SSP in future years")
#Population by GCAM 3.0 region - downscale to country according to actual shares in the historical periods, and SSPbase in the future periods
L101.Pop_thous_ctry_Y <- L100.Pop_thous_ctry_Yh
L101.Pop_thous_ctry_Y[ X_future_years ] <- L100.Pop_thous_SSP_ctry_Yfut[
match( paste( L101.Pop_thous_ctry_Y$iso, base_pop_scen ),
paste( L100.Pop_thous_SSP_ctry_Yfut$iso, L100.Pop_thous_SSP_ctry_Yfut[[Scen]] ) ),
X_future_years ]
L101.Pop_thous_ctry_Y$region_GCAM3 <- iso_GCAM_regID$region_GCAM3[ match( L101.Pop_thous_ctry_Y$iso, iso_GCAM_regID$iso ) ]
L101.Pop_thous_SSPbase_RG3_Y <- aggregate( L101.Pop_thous_ctry_Y[ c( X_historical_years, X_future_years ) ],
by=as.list( L101.Pop_thous_ctry_Y[ "region_GCAM3"] ), sum )
#Calculate shares of each country within its region over the historical time series
L101.Popshares_ctryRG3_Y <- L101.Pop_thous_ctry_Y[ c( "iso", "region_GCAM3", X_historical_years, X_future_years ) ]
L101.Popshares_ctryRG3_Y[ c( X_historical_years, X_future_years ) ] <-
L101.Pop_thous_ctry_Y[ c( X_historical_years, X_future_years ) ] / L101.Pop_thous_SSPbase_RG3_Y[
match( L101.Pop_thous_ctry_Y$region_GCAM3, L101.Pop_thous_SSPbase_RG3_Y$region_GCAM3 ),
c( X_historical_years, X_future_years ) ]
#Interpolate the GCAM population data to all historical and future years
L101.Pop_thous_GCAM3_RG3_Y <- gcam_interp( GCAM3_population, c( historical_years, future_years ) )[ c( "region_GCAM3", X_historical_years, X_future_years ) ]
# If 2100 is requested but absent from the GCAM 3.0 data, extrapolate it from
# the SSPbase 2095->2100 growth ratio by GCAM 3.0 region.
if( "X2100" %in% X_future_years && "X2100" %!in% names( L101.Pop_thous_GCAM3_RG3_Y ) ){
printlog( "Extending GCAM 3.0 scenario to 2100 using SSPbase population ratios by GCAM 3.0 region")
L101.Pop_thous_GCAM3_RG3_Y$X2100 <- L101.Pop_thous_GCAM3_RG3_Y$X2095 *
L101.Pop_thous_SSPbase_RG3_Y$X2100[ match( L101.Pop_thous_GCAM3_RG3_Y$region_GCAM3, L101.Pop_thous_SSPbase_RG3_Y$region_GCAM3 ) ] /
L101.Pop_thous_SSPbase_RG3_Y$X2095[ match( L101.Pop_thous_GCAM3_RG3_Y$region_GCAM3, L101.Pop_thous_SSPbase_RG3_Y$region_GCAM3 ) ]
}
#Multiply these population numbers by the shares of each country within GCAM region
L101.Pop_thous_GCAM3_ctry_Y <- L101.Popshares_ctryRG3_Y
L101.Pop_thous_GCAM3_ctry_Y[ c( X_historical_years, X_future_years ) ] <-
L101.Popshares_ctryRG3_Y[ c( X_historical_years, X_future_years ) ] * L101.Pop_thous_GCAM3_RG3_Y[
match( L101.Popshares_ctryRG3_Y$region_GCAM3, L101.Pop_thous_GCAM3_RG3_Y$region_GCAM3 ),
c( X_historical_years, X_future_years ) ]
printlog( "Aggregating by GCAM regions")
L101.Pop_thous_GCAM3_ctry_Y[[R]] <- iso_GCAM_regID[[R]][ match( L101.Pop_thous_GCAM3_ctry_Y$iso, iso_GCAM_regID$iso ) ]
L101.Pop_thous_GCAM3_R_Y <- aggregate( L101.Pop_thous_GCAM3_ctry_Y[ c( X_historical_years, X_future_years ) ],
by=as.list( L101.Pop_thous_GCAM3_ctry_Y[ R ] ), sum )
L101.Pop_thous_GCAM3_ctry_Y <- L101.Pop_thous_GCAM3_ctry_Y[ c( "iso", X_historical_years, X_future_years ) ]
# -----------------------------------------------------------------------------
# 3. Output
#Add comments to tables
comments.L101.Pop_thous_R_Yh <- c( "Population by region over the historical time period","Unit = million persons" )
comments.L101.Pop_thous_Scen_R_Yfut <- c( "Population by region and gSSP SSP in future periods","Unit = million persons" )
comments.L101.Pop_thous_GCAM3_R_Y <- c( "GCAM 3.0 population by region in historical and future years","Unit = thousand persons" )
comments.L101.Pop_thous_GCAM3_ctry_Y <- c( "GCAM 3.0 population by country in historical and future years","Unit = thousand persons" )
writedata( L101.Pop_thous_R_Yh, domain="SOCIO_LEVEL1_DATA", fn="L101.Pop_thous_R_Yh", comments=comments.L101.Pop_thous_R_Yh )
writedata( L101.Pop_thous_Scen_R_Yfut, domain="SOCIO_LEVEL1_DATA", fn="L101.Pop_thous_Scen_R_Yfut", comments=comments.L101.Pop_thous_Scen_R_Yfut )
writedata( L101.Pop_thous_GCAM3_R_Y, domain="SOCIO_LEVEL1_DATA", fn="L101.Pop_thous_GCAM3_R_Y", comments=comments.L101.Pop_thous_GCAM3_R_Y )
writedata( L101.Pop_thous_GCAM3_ctry_Y, domain="SOCIO_LEVEL1_DATA", fn="L101.Pop_thous_GCAM3_ctry_Y", comments=comments.L101.Pop_thous_GCAM3_ctry_Y )
# Every script should finish with this line
logstop()
|
dbe9780e36d91b0f05762ae2f86c657adaa6f728
|
cd4ac3306dc49b99b0c3b69affbe115068ff5447
|
/server.R
|
3ee45b4c88dff7514d05a58801ab3c03ffec1898
|
[] |
no_license
|
sammerk/abiturma_he16
|
8c6b33337fb5f484c0fc45c0258fab3ad54b0cbe
|
97071217b3b56e09b15160f5e5f7c5b268e500f6
|
refs/heads/master
| 2021-01-20T09:42:49.318614
| 2017-05-09T09:33:06
| 2017-05-09T09:33:06
| 68,576,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56,756
|
r
|
server.R
|
library(ggplot2)
library(dplyr)
library(tidyr)
library(shiny)
library(shinyBS)
library(feather)
library(scrypt)
library(forcats)
# Ip-Test ##################################################################################
# Fixed timezone so timestamped file names are consistent — exact offset choice
# (GMT+4) is not documented here; confirm with owner.
Sys.setenv(TZ='GMT+4')
# NOTE(review): `input` and renderPrint() are referenced OUTSIDE shinyServer(),
# where no `input` object exists — this line looks dead or broken; confirm intent.
ipID <- renderPrint(print(input$ipid))
# Data Import ##############################################################################
# Course-rating data (long format: one row per rated item); commented lines are
# the superseded demo-file readers, kept for reference.
#kursdata <- read.table("data/kursdata_fr16_b_demo.csv", sep = ";", header = TRUE, na.strings = c("NA")) # Import of Data
kursdata <- data.table::fread("data/data_dynamic/kursdata_inkrementiert.csv", sep = ";", header = TRUE, na.strings = c("NA"))
# Free-text answers; the `ftk` column is later used as an image path in renderImage().
#freitextdata <- read.table("data/freitextdata_fr16_demo.csv", sep = ";", header = T, na.strings = c("NA"))
freitextdata <- data.table::fread("data/data_dynamic/freitextdata_inkrementiert.csv", sep = ";", header = TRUE, na.strings = c("NA"), stringsAsFactors = T)
# Likert-scale response data.
#likertdata1 <- read.table("data/likertdata_fr16_2_demo.csv", sep = ";", header = T, na.strings = c("NA"))
likertdata1 <- data.table::fread("data/data_dynamic/likertdata_inkrementiert.csv", sep = ";", header = T, na.strings = c("NA"))
# Login table with scrypt-hashed passwords (checked via scrypt::verifyPassword).
#pw_data2 <- read_feather("Stuff/kl_pw.feather")
pw_data <- tbl_df(data.table::fread("data/data_kl/data_pw_scrypted.csv", sep = ";", na.strings = "NA"))
##########################################################################################
# Custom Functions ###
##########################################################################################
# Function for mean (jitter plot panel) ##################################################
# Adds a single summary-statistic layer (e.g. the mean) drawn as an
# accent-coloured geom on top of the jitter plot.
stat_sum_single <- function(fun, geom="point", ...) {
  layer_args <- list(fun.y = fun, colour = "#DC5C40", geom = geom, size = 3.0, ...)
  do.call(stat_summary, layer_args)
}
# Functions for usage tracking of jitter Navbarpage #######################################
# Input fields whose values are logged for the jitter-plot page.
fields <- c("qualdim", "darstell", "scaling", "groupin", "ipid", "q1_inf_stars", "q1_sic_stars")
outputDir <- "responses_jitter"
# Persist one usage record as a uniquely named CSV in `outputDir`.
saveData <- function(data) {
  record <- t(data)
  # Timestamp + content hash gives a collision-free file name.
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir, out_name),
            row.names = FALSE, quote = TRUE)
}
# Read every logged CSV in `outputDir` and stack them into one data.frame.
loadData <- function() {
  csv_paths <- list.files(outputDir, full.names = TRUE)
  do.call(rbind, lapply(csv_paths, read.csv, stringsAsFactors = FALSE))
}
# Functions for usage tracking of likert Navbarpage #####################################
# Input fields whose values are logged for the likert page.
fields_l <- c("likertfragen", "ipid", "groupinl", "likert_inf_stars", "likert_sic_stars")
outputDir_l <- "responses_likert"
# Persist one likert usage record as a uniquely named CSV in `outputDir_l`.
saveData_l <- function(data_l) {
  record <- t(data_l)
  # Timestamp + content hash gives a collision-free file name.
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_l, out_name),
            row.names = FALSE, quote = TRUE)
}
# Read every logged likert CSV and stack them into one data.frame.
loadData_l <- function() {
  csv_paths <- list.files(outputDir_l, full.names = TRUE)
  do.call(rbind, lapply(csv_paths, read.csv, stringsAsFactors = FALSE))
}
# Functions for usage tracking of freitext Navbarpage ####################################
# Input fields whose values are logged for the free-text page.
fields_f <- c("sort_freitexte", "ipid", "frei_inf_stars")
outputDir_f <- "responses_freitext"
# Persist one free-text usage record as a uniquely named CSV in `outputDir_f`.
saveData_f <- function(data_f) {
  record <- t(data_f)
  # Timestamp + content hash gives a collision-free file name.
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_f, out_name),
            row.names = FALSE, quote = TRUE)
}
# Read every logged free-text CSV and stack them into one data.frame.
loadData_f <- function() {
  csv_paths <- list.files(outputDir_f, full.names = TRUE)
  do.call(rbind, lapply(csv_paths, read.csv, stringsAsFactors = FALSE))
}
# Functions for usage tracking of qualidim2 Navbarpage ################################################################
# Input fields whose values are logged for the qualdim2 page.
fields_q2 <- c("qualdim2", "ipid", "q2_inf_stars", "q2_sic_stars")
outputDir_q2 <- "responses_qualdim2"
# Persist one qualdim2 usage record as a uniquely named CSV in `outputDir_q2`.
saveData_q2 <- function(data_q2) {
  record <- t(data_q2)
  # Timestamp + content hash gives a collision-free file name.
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_q2, out_name),
            row.names = FALSE, quote = TRUE)
}
# Read every logged qualdim2 CSV and stack them into one data.frame.
# BUG FIX: previously listed files from `outputDir_f` (the freitext directory),
# a copy-paste error — records written by saveData_q2 go to `outputDir_q2` and
# could therefore never be read back.
loadData_q2 <- function() {
  # Read all the files into a list
  files_q2 <- list.files(outputDir_q2, full.names = TRUE)
  data_q2 <- lapply(files_q2, read.csv, stringsAsFactors = FALSE)
  # Concatenate all data together into one data.frame
  data_q2 <- do.call(rbind, data_q2)
  data_q2
}
# Function for Feedback recording of jitter navbarpage #####################################
# Feedback fields recorded for the jitter page.
fields_fb_q1 <- c("q1_inf_stars", "q1_sic_stars")
outputDir_fb_q1 <- "responses_fb_q1"
# Persist one jitter-page feedback record as a uniquely named CSV.
saveData_fb_q1 <- function(data_fb_q1) {
  record <- t(data_fb_q1)
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_fb_q1, out_name),
            row.names = FALSE, quote = TRUE)
}
# Function for Feedback recording of likert navbarpage #####################################
# Feedback fields recorded for the likert page.
fields_fb_likert <- c("likert_inf_stars", "likert_sic_stars")
outputDir_fb_likert <- "responses_fb_likert"
# Persist one likert-page feedback record as a uniquely named CSV.
saveData_fb_likert <- function(data_fb_likert) {
  record <- t(data_fb_likert)
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_fb_likert, out_name),
            row.names = FALSE, quote = TRUE)
}
# Function for Feedback recording of jitter v2 navbarpage #####################################
# Feedback fields recorded for the jitter-v2 (qualdim2) page.
fields_fb_q2 <- c("q2_inf_stars", "q2_sic_stars")
outputDir_fb_q2 <- "responses_fb_q2"
# Persist one jitter-v2 feedback record as a uniquely named CSV.
saveData_fb_q2 <- function(data_fb_q2) {
  record <- t(data_fb_q2)
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_fb_q2, out_name),
            row.names = FALSE, quote = TRUE)
}
# Function for Feedback recording of logout navbarpage #####################################
# Global feedback fields recorded on the logout page.
fields_fb_logout <- c("glob_fb_likert_inf", "glob_fb_q1_inf", "glob_fb_q2_inf",
                      "glob_fb_frei_inf", "glob_fb_erk", "glob_fb_abiturma")
outputDir_fb_logout <- "responses_fb_logout"
# Persist one logout-page feedback record as a uniquely named CSV.
saveData_fb_logout <- function(data_fb_logout) {
  record <- t(data_fb_logout)
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_fb_logout, out_name),
            row.names = FALSE, quote = TRUE)
}
# Function for Feedback recording of freitextpage #####################################
# Feedback fields recorded for the free-text page.
fields_fb_frei <- c("frei_inf_stars")
outputDir_fb_frei <- "responses_fb_frei"
# Persist one free-text-page feedback record as a uniquely named CSV.
saveData_fb_frei <- function(data_fb_frei) {
  record <- t(data_fb_frei)
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_fb_frei, out_name),
            row.names = FALSE, quote = TRUE)
}
# Function for recording of helpbutton-clicks #####################################
# Help buttons whose clicks are logged.
fields_help <- c("darstellmodalbt", "jitterqualdimmodalbt", "scalemodalbt",
                 "help_table_bt", "help_scale_bt", "help_gmean",
                 "help_q2_grandmean", "help_darst_st", "help_q2_groupmean",
                 "help_restq2")
outputDir_help <- "responses_help"
# Persist one help-click record as a uniquely named CSV.
saveData_help <- function(data_help) {
  record <- t(data_help)
  out_name <- sprintf("%s_%s.csv", as.integer(Sys.time()), digest::digest(record))
  write.csv(x = record, file = file.path(outputDir_help, out_name),
            row.names = FALSE, quote = TRUE)
}
#######################################################################################################################
#### #######################################################################################
#### #######################################################################################
#### Shiny internal #######################################################################################
#### #######################################################################################
#### #######################################################################################
#### #######################################################################################
#######################################################################################################################
shinyServer(function(input, output, session) {
#####################################################################################################################
# Hide loading message ############
#####################################################################################################################
# Hide the loading message when the rest of the server function has executed
# hide(id = "loading-content", anim = TRUE, animType = "fade")
# show("app-content")
#####################################################################################################################
# Test Fingerprinting ############
#####################################################################################################################
#output$testtext <- renderText(paste(" fingerprint: ", input$fingerprint, " ip: ", input$ipid))
#####################################################################################################################
# User Import ############
#####################################################################################################################
user <- reactive({
return(input$username)
})
#####################################################################################################################
# Log in Backend ############
#####################################################################################################################
## Login Feedback for user with busyindicator
# reactive value of valid login
login_true <- reactive({verifyPassword(as.character(pw_data[pw_data$Login == user(),"Passwort_scrypted"]),
as.character(input$passw)) == TRUE
})
observeEvent(input$loginBtn, {
# When the button is clicked, wrap the code in a call to `withBusyIndicatorServer()`
withBusyIndicatorServer("loginBtn", {
if (login_true() == FALSE) {
stop("Login Passwort Kombi ist falsch!")
}
})
})
## Hide further tabs before sucessful login
observe({
hide(selector = c("#navbarpage li a[data-value=einzelfragen]",
"#navbarpage li a[data-value=qualdim_v1]",
"#navbarpage li a[data-value=qualdim_v2]",
"#navbarpage li a[data-value=freitext_antw]"))
})
observeEvent(input$loginBtn, {
if(login_true() == TRUE)
show(selector = c("#navbarpage li a[data-value=einzelfragen]",
"#navbarpage li a[data-value=qualdim_v1]",
"#navbarpage li a[data-value=qualdim_v2]",
"#navbarpage li a[data-value=freitext_antw]"))
})
# Show hide fuctions of LoginAlerts and plots
observeEvent(login_true(),{
if(login_true() == F){
hide("likert-plot-container")
hide("qualdimplot1-container")
hide("plot-container")
hide("freitext-container")
show("loginwarning")
show("loginwarning2")
show("loginwarning3")
show("loginwarning4")}
if(login_true() == T){
hide("loginwarning")
hide("loginwarning2")
hide("loginwarning3")
hide("loginwarning4")}
})
observeEvent(input$gofreitext,{
show("freitext-container")
show("frei_form", anim = T, animType = "fade", time = 1)
})
#####################################################################################################################
# Freitext Backend ############
#####################################################################################################################
## Freitextdaten subsetten ########################################################################################
freitextdata2 <-
eventReactive(input$gofreitext, {
if(input$sort_freitexte == "abst"){
freitextdata1 <-
freitextdata%>%
filter(kursleiterin == user(),
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/2_",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/winter",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/2_NA",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/winterNA",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/NA")%>% ### Aktualisieren
arrange(desc(score))
}
if(input$sort_freitexte == "nsort" | is.null(input$sort_freitexte) == TRUE){
freitextdata1 <-
freitextdata%>%
filter(kursleiterin == user(),
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/2_",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/winter",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/2_NA",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/winterNA",
ftk != "rawdata_fruehjahr17_3_charge1e/freitextbilder/NA") ### Aktualisieren
}
return(freitextdata1)
})
##
max_freitextplots <- 70
## Freitextplotschleife #############################################################################################
freitextplotgone <- eventReactive(input$gofreitext,{
plot_output_list <- lapply(1:length((freitextdata2()$ftk)), function(i) {
plotname <- paste("freitextplot", i, sep="")
plotOutput(plotname, height = "165px")
})
# Convert the list to a tagList - this is necessary for the list of items
# to display properly.
do.call(tagList, plot_output_list)
})
# Call renderPlot for each one. Plots are only actually generated when they
# are visible on the web page.
for (i in 1:max_freitextplots) {
# Need local so that each item gets its own number. Without it, the value
# of i in the renderPlot() will be the same across all instances, because
# of when the expression is evaluated.
local({
my_i <- i
plotname <- paste("freitextplot", my_i, sep="")
output[[plotname]] <- renderImage({
path1 <- as.character(freitextdata2()$ftk[my_i])
list(src = path1,
alt = paste("alt text"),
width = "100%"
)
})
})
}
## Rendering FreitextUI #############################################################################################
output$freitextplots <- renderUI({
if(login_true() == T)
freitextplotgone()
})
## Usage tracking freitextplot #####################################################################################
# Whenever a field is filled, aggregate all form data
formData_f <- reactive({
data_f <- sapply(fields_f, function(x) paste(input[[x]], collapse = "-"))
data_f$systtime <- paste(Sys.time())
data_f$user <- user()
data_f
})
# When the Submit button is clicked, save the form data
observeEvent(input$gofreitext, {
saveData_f(formData_f())
})
######################################################################################################################
# Qualitätsdimensionen Backend ############
#####################################################################################################################
# Create reactive dataset for Plot of Quality Dimensions with grouping-variables ####################################
kursdata1 <-
reactive({ kursdata2 <-
kursdata%>%
filter(variable %in% input$qualdim)%>%
mutate(gmgroup = ifelse(kursleiterin == user(), "Deine Kurse", "abiturma gesamt"),
mggroup = paste("Dein Kurs in ", Kursort, "\n", "Kursbeginn: ", Kursbeginn, ", ", Uhrzeit, sep = ""),
mgxgmgroup = ifelse(kursleiterin == user(), paste("Dein Kurs in ", Kursort, ", Kursbeginn: ", "\n",
Kursbeginn, ", ", Uhrzeit, sep = "")
, "abiturma gesamt"),
variable = factor(variable))
return(kursdata2)
})
# Extract course count for adaptive plot height ####################################################################
# Number of courses taught by the logged-in user.
# NOTE(review): this reactive is re-defined twice further down under the same
# name; only the last definition (on input$goqualdim) takes effect, so this
# assignment is dead code — consider removing the duplicates.
n_lev_mg <- eventReactive(input$golikert, {nlevdata <- kursdata1()%>% #Count of teached courses by actual user
    filter(kursleiterin == user())
  return(nlevels(as.factor(nlevdata$kurs)))})
# Main Jitter-Plot ##################################################################################################
# Builds the quality-dimension plot. The data subset, y-variable (raw /
# grand-mean-centred / person-centred) and facetting depend on the grouping
# (input$groupin) and scaling (input$scaling) options; geoms are layered on top
# according to input$darstell. The if-chain below deliberately OVERWRITES `p`,
# so the ORDER of the branches matters — do not reorder them.
qualdimplotgone <- eventReactive(input$goqualdim,{
  # Data fetching ###################################################################################################
  kursdata3 <- kursdata1()
  # Alerts: ask the user to complete the selection before plotting ##################################################
  if(is.null(input$qualdim) == TRUE | is.null(input$darstell) == TRUE | "noscale" %in% input$scaling) {
    createAlert(session, "jitteralert1", "JitterAlert1", title = "Bitte Auswahl treffen!",
                content = "<ul>Bitte wähle
                <li>mindestens eine <b>Qualitätsdimension</b></li>
                <li>mindestens eine <b>Darstellungsoption</b> und</li>
                <li>genau eine <b>Skalierungsoption</b> aus.</li>
                Drücke dann nochmals den <i>Plot-Button.</i>
                </ul>", append = FALSE)
  }
  if(is.null(input$qualdim) == FALSE & is.null(input$darstell) == FALSE & !"noscale" %in% input$scaling) {
    closeAlert(session, "JitterAlert1")
  }
  # Setting up plot-data and plot-variables in dependence of input$groupin and input$scaling ########################
  # | | no group | gmean | kurse | gmean & kurse |
  # |-------|------------|:---------:|-----------|---------------|
  # | raw | #jitter_10 | #jitter_1 | #jitter_2 | #jitter_3 |
  # | grand | #jitter_11 | #jitter_4 | #jitter_5 | #jitter_6 |
  # | group | #jitter_12 | #jitter_7 | #jitter_8 | #jitter_9 |
  #
  # Plotsyntax if there are no scaling and no grand mean grouping options active
  # NOTE(review): if the alert above fired, the code still continues and builds a
  # plot on an incomplete selection — consider halting with req()/validate().
  p <- ggplot(kursdata1()[kursdata1()$kursleiterin == user() & kursdata1()$variable%in%input$qualdim,], # jitter_2
              aes(x=variable, y= value)) + geom_blank() + facet_wrap(~ gmgroup, ncol = 3)
  # Plotsyntax if there are GRANDmean scaling and no grand mean grouping options active
  if ("std" %in% input$scaling)
    p <- ggplot(kursdata3[kursdata3$kursleiterin == user() & kursdata3$variable%in%input$qualdim,], # jitter_11
                aes(x=variable, y= value.std)) + geom_blank() + facet_wrap(~ gmgroup, ncol = 3)
  # Plotsyntax if there are GROUPmean ( = person-mean) scaling and no grand mean grouping options active # jitter_12
  if ("gstd" %in% input$scaling)
    p <- ggplot(kursdata3[kursdata3$kursleiterin == user() & kursdata3$variable%in%input$qualdim,],
                aes(x=variable, y= value.pstd)) + geom_blank() + facet_wrap(~ gmgroup, ncol = 3)
  # Plotsyntax if only GRAND mean grouping options active # jitter_1
  if ("gmean" %in% input$groupin)
    p <- ggplot(kursdata3[kursdata3$variable%in%input$qualdim,],
                aes(x=variable, y= value)) + geom_blank() + facet_wrap(~ gmgroup, ncol = 3)
  # Plotsyntax if there are GRAND mean scaling AND grand mean grouping options active # jitter_4
  if ("gmean" %in% input$groupin & "std" %in% input$scaling)
    p <- ggplot(kursdata1(),
                aes(x=variable, y= value.std)) + geom_blank() + facet_wrap(~ gmgroup, ncol = 3)
  # Plotsyntax if there are GROUP mean scaling AND grand mean grouping options active # jitter_7
  if ("gmean" %in% input$groupin & "gstd" %in% input$scaling)
    p <- ggplot(kursdata3[kursdata3$variable%in%input$qualdim,],
                aes(x=variable, y= value.pstd)) + geom_blank() + facet_wrap(~ gmgroup, ncol = 3)
  # Grouping options (not grand mean): per-course facets replace the own-vs-all facets
  if ("kurse" %in% input$groupin)
    p <- p + facet_wrap(~ mggroup, ncol = 3)
  if ("kurse" %in% input$groupin & "gmean" %in% input$groupin)
    p <- p + facet_wrap(~ mgxgmgroup, ncol = 3)
  # Setting up geoms in dependence of input$darstell ################################################################
  if("boxplot" %in% input$darstell)
    p <- p + geom_boxplot(width = 0.35, colour = "#3997FA", fill = NA, outlier.shape = NA, size = 1.1)
  if("jitter" %in% input$darstell)
    p <- p + geom_jitter(shape = 1, position = position_jitter(w= 0.05, h = .0), alpha = 1/2, size = 2)
  if("ci" %in% input$darstell)
    p <- p + stat_summary(fun.data = "mean_cl_boot", colour = "#DC5C40", size = 1.3, geom = "errorbar", width = .08)
  if("mean" %in% input$darstell)
    p <- p + stat_sum_single(mean)  # helper presumably defined elsewhere in this file — TODO confirm
  # Axis labels in dependence of input$scaling ######################################################################
  if("std" %in% input$scaling)
    p <- p + geom_hline(yintercept = 0, colour = "#A51E37") +
      #annotate("text", x=0.6, y = -0.3, label="abiturma gesamt = 0", colour = "#A51E37", hjust = 0) +
      ylab("Mittelwert abiturma = 0 (je Dimension)") +
      xlab("") +
      theme(axis.title.y = element_text(size = rel(1), colour = "#A51E37"),
            strip.text.x = element_text(size = 11),
            axis.text.x = element_text(angle = 45, hjust = 1, colour = "black", size = 11)) +
      coord_cartesian(ylim=c(-4, 3))
  if("gstd" %in% input$scaling)
    p <- p + geom_hline(yintercept = 0, colour = "#A51E37") +
      #annotate("text", x=0.6, y = -0.55, label="Mittelwert je\nTeilnehmerIn = 0", colour = "#A51E37", hjust = 0, vjust = 0) +
      ylab("Mittelwert je\nTeilnehmerIn = 0") +
      xlab("") +
      theme(axis.title.y = element_text(size = rel(1), colour = "#A51E37"),
            strip.text.x = element_text(size = 11),
            axis.text.x = element_text(angle = 45, hjust = 1, colour = "black", size = 11)) +
      coord_cartesian(ylim=c(-4, 3))
  if("raw" %in% input$scaling)
    p <- p + theme(axis.title.y = element_text(size = rel(1)),
                   strip.text.x = element_text(size = 11),
                   axis.text.x = element_text(angle = 55, hjust = 1, colour = "black", size = 11)) +
      ylim(1,7) + labs(x = "", y = "1 = trifft überhaupt nicht zu...\n...7 = trifft vollständig zu")
  if("noscale" %in% input$scaling )
    p <- p + theme(axis.text.y = element_blank())
  p <- p + theme(panel.grid.major.x = element_blank(),
                 panel.grid.minor = element_blank())
  p
})
# Extract course count for adaptive plot height ####################################################################
# NOTE(review): duplicate definition of n_lev_mg (also defined above on
# input$golikert); it is immediately overwritten by the definition right below,
# which listens on input$goqualdim — only that last one ever takes effect.
n_lev_mg <- eventReactive(input$golikert, {nlevdata <- kursdata1()%>% #Count of teached courses by actual user
    filter(kursleiterin == user())
  return(nlevels(as.factor(nlevdata$kurs)))})
# Adaptive Plotheight for Jitter-Plot ###############################################################################
# Effective definition: number of distinct courses taught by the current user,
# refreshed when the quality-dimension plot button is pressed.
n_lev_mg <- eventReactive(input$goqualdim, {nlevdata <- kursdata1()%>% # Count of teached courses by actual user
    filter(kursleiterin == user())
  return(nlevels(as.factor(nlevdata$kurs)))})
# Adaptive height (in px) for the jitter plot: 360 px per facet row, with
# three facets per row. When the grand-mean panel is shown next to the
# per-course panels there is one extra facet.
# Fix: the original fed scalar conditions through nested ifelse() calls with
# return() hidden inside the branch arguments — ifelse() is a vectorized
# function and relying on its lazy branch evaluation for control flow is
# fragile. Rewritten as a plain if / else if chain with identical results.
height_jitter <- eventReactive(input$goqualdim, {
  nlevmg <- as.numeric(n_lev_mg())  # number of courses taught by the user
  if (isolate("kurse" %in% input$groupin & "gmean" %in% input$groupin)) {
    360 * ceiling((nlevmg + 1) / 3)   # per-course facets + 1 grand-mean facet
  } else if (isolate("kurse" %in% input$groupin)) {
    360 * ceiling(nlevmg / 3)         # per-course facets only
  } else {
    360                               # single facet row
  }
})
# # Qualdimplot as svg ###############################################################################
#
# list_of_qualdimplot <- eventReactive(input$goqualdim, {
# width_q1 <- session$clientData$output_qualdimplot_svg_width
# height_q1 <- session$clientData$output_qualdimplot_svg_height
# mysvgwidth_q1 <- width_q1/96
# mysvgheight_q1 <- height_q1/96*ifelse("kurse" %in% input$groupin & "gmean" %in% input$groupin,
# ceiling((as.numeric(n_lev_mg())+1)/3),
# ifelse("kurse" %in% input$groupin & !"gmean" %in% input$groupin,
# ceiling((as.numeric(n_lev_mg()))/3),
# 1
# )
# )
#
# # A temp file to save the output.
# # This file will be removed later by renderImage
#
# outfile_q1 <- tempfile(fileext='.svg')
#
# #This actually save the plot in a image
# ggsave(file=outfile_q1, plot=qualdimplotgone(), width=mysvgwidth_q1, height=mysvgheight_q1)
#
# # Return a list containing the filename
# list(src = normalizePath(outfile_q1),
# contentType = 'image/svg+xml',
# width = width_q1,
# height = height_q1*ifelse("kurse" %in% input$groupin & "gmean" %in% input$groupin,
# ceiling((as.numeric(n_lev_mg())+1)/3),
# ifelse("kurse" %in% input$groupin & !"gmean" %in% input$groupin,
# ceiling((as.numeric(n_lev_mg()))/3),
# 1
# )
# ),
# alt = "My svg Histogram")
# })
#
# output$qualdimplot_svg <- renderImage({
# list_of_qualdimplot()
# })
# Reveal the plot container / feedback panel when the plot button is pressed.
observeEvent(input$goqualdim, {
  show("qualdimplot1-container")
  show("q1_star_wellpanel", anim = TRUE, animType = "fade", time = 1)
})
# Render the quality-dimension plot, but only for a logged-in user.
output$qualdimplot <- renderPlot({
  if (login_true() == TRUE) {
    qualdimplotgone()
  }
}, res = 72)
# Wrap the plot in a UI element whose height tracks the number of facet rows.
output$jitter.plot.ui <- renderUI({
  plotOutput("qualdimplot", height = as.numeric(height_jitter()))
})
# Usage tracking jitterplot #########################################################################################
# Snapshot of the jitter-plot form inputs plus a timestamp and the user id.
formData <- reactive({
  vals <- sapply(fields, function(x) paste(input[[x]], collapse = "-"))
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Persist the record whenever the plot button is clicked.
observeEvent(input$goqualdim, {
  saveData(formData())
})
#####################################################################################################################
# Backend Feedbackforms ############
#####################################################################################################################
## Feedback for likert page #########################################################################################
# Show the rating panel and clear any previous stars each time the likert plot is drawn.
observeEvent(input$golikert, {
  show("likert_star_wellpanel", anim = TRUE, animType = "fade", time = 1)
  reset("likert_inf_stars")
  reset("likert_sic_stars")
})
# Aggregate the likert feedback fields with timestamp and user id.
formData_fb_likert <- reactive({
  vals <- sapply(fields_fb_likert, function(x) input[[x]])
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Save on submit, then clear the star inputs.
observeEvent(input$likert_fb_btn, {
  saveData_fb_likert(formData_fb_likert())
  reset("likert_inf_stars")
  reset("likert_sic_stars")
})
## Feedback for Qualdim 1 page ######################################################################################
# Clear the star inputs after submitting and whenever a new plot is drawn.
observeEvent(input$qualdim1_fb_btn, {
  reset("q1_inf_stars")
  reset("q1_sic_stars")
})
observeEvent(input$goqualdim, {
  reset("q1_inf_stars")
  reset("q1_sic_stars")
})
# Aggregate the qualdim-1 feedback fields with timestamp and user id.
formData_fb_q1 <- reactive({
  vals <- sapply(fields_fb_q1, function(x) input[[x]])
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Save on submit.
observeEvent(input$qualdim1_fb_btn, {
  saveData_fb_q1(formData_fb_q1())
})
## Feedback for Qualdim 2 page ######################################################################################
# Clear the form after submitting and whenever a new plot is drawn.
observeEvent(input$qualdim2_fb_btn, {
  reset("q2_form")
})
observeEvent(input$goqualdim2, {
  reset("q2_form")
})
# Aggregate the qualdim-2 feedback fields with timestamp and user id.
formData_fb_q2 <- reactive({
  vals <- sapply(fields_fb_q2, function(x) input[[x]])
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Save on submit.
observeEvent(input$qualdim2_fb_btn, {
  saveData_fb_q2(formData_fb_q2())
})
## Feedback for Freitext page #######################################################################################
# Clear the star input after submitting and whenever new free-text output is drawn.
observeEvent(input$frei_fb_btn, {
  reset("frei_inf_stars")
})
observeEvent(input$gofreitext, {
  reset("frei_inf_stars")
})
# Aggregate the free-text feedback fields with timestamp and user id.
formData_fb_frei <- reactive({
  vals <- sapply(fields_fb_frei, function(x) input[[x]])
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Save on submit.
observeEvent(input$frei_fb_btn, {
  saveData_fb_frei(formData_fb_frei())
})
# Write help button usage ##################################
# Aggregate the help-button usage fields with timestamp and user id.
formData_help <- reactive({
  vals <- sapply(fields_help, function(x) input[[x]])
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Saving of this form happens in the logout handler below (and on every
# plot button, to also capture users who never log out).
# observeEvent(input$logout_btn, { ## code transferred to the logout handler
#   saveData_help(formData_help()) ##
# }) ##
## Feedback for logout page #########################################################################################
# Aggregate the logout feedback fields with timestamp and user id.
formData_fb_logout <- reactive({
  vals <- sapply(fields_fb_logout, function(x) input[[x]])
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# On logout: persist help usage and logout feedback, then terminate the app
# and close the browser window.
observeEvent(input$logout_btn, {
  saveData_help(formData_help())
  saveData_fb_logout(formData_fb_logout())
  js$closeWindow()
  stopApp()
})
# Also persist the help-usage form on every plot button, so users who close
# the tab without logging out are still tracked.
observeEvent(input$golikert, {
  saveData_help(formData_help())
})
observeEvent(input$goqualdim, {
  saveData_help(formData_help())
})
# observeEvent(input$goqualdim2, {
#   saveData_help(formData_help())
# })
observeEvent(input$gofreitext, {
  saveData_help(formData_help())
})
#####################################################################################################################
# Einzelfragen Backend ############
#####################################################################################################################
# Define item-label vectors for subsetting ##################################################
# Each vector lists the exact questionnaire wording of one item block; the
# likert panel filters `likertdata1$variable` against these strings, so they
# must match the data byte-for-byte — do not edit the strings.
# Learning success (le1–le4)
items_lern <- c(
  "Du hast im Kurs etwas Nützliches gelernt.", #le1
  "Dein Interesse am Mathematik-Abistoff ist durch den Kurs gestiegen.", #le2
  "Du hast die Inhalte des Kurses verstanden.", #le3
  "Du fandest den Kurs herausfordernd und wurdest zum Denken angeregt.") #le4
# Instructor enthusiasm (en1–en4)
items_enth <- c(
  "Der/die Kursleiter/in unterrichtet mit Begeisterung.", #en1
  "Der/die Kursleiter/in ist dynamisch und unterrichtet voller Energie.", #en2
  "Der/die Kursleiter/in lockert den Unterricht durch Humor auf.", #en3
  "Der/die Kursleiter/in hält Dein Interesse während des Kurses durch seinen/ihren Unterrichtsstil aufrecht.") #en4
# Organisation / clarity (or1–or5)
items_orga <- c(
  "Die Erklärungen des Kursleiters/ der Kursleiterin sind verständlich.", #or1
  "Der/die Kursleiter/in ist gut vorbereitet und erklärt die Inhalte sorgfältig.", #or2
  "Du hast im Kurs einen Überblick über alle Abi-relevanten Themen erhalten.", #or3
  "Du hast im Kurs die Bearbeitung Abi-relevanter Aufgabentypen geübt." , #or4
  "Du hast durch den Kurs Wissenslücken schließen können.") #or5
# Group interaction (gi1–gi4)
items_grup <- c(
  "Der/die Kursleiter/in ermutigt die Teilnehmenden, an den Diskussionen im Kurs teilzunehmen.", #gi1
  "Die Kursteilnehmer/innen werden eingeladen, eigene Ideen und Lösungswege mitzuteilen.", #gi2
  "Die Kursteilnehmer/innen werden ermutigt, Fragen zu stellen.", #gi3
  "Die Kursteilnehmer/innen werden ermutigt, eigene Lösungswege zu formulieren und/oder die vorgetragenen Lösungen kritisch zu hinterfragen.") #gi4
# Individual rapport (ir1–ir3)
items_indi <- c(
  "Der/die Kursleiter/in ist den Teilnehmenden gegenüber stets freundlich.", #ir1
  "Der/die Kursleiter/in gibt den Teilnehmenden das Gefühl, jederzeit um Hilfe bitten zu können.", #ir2
  "Der/die Kursleiter/in interessiert sich aufrichtig für die Teilnehmenden.") #ir3
# Recommendation (em1–em3)
items_empf <- c(
  "Würdest Du den Kursleiter/die Kursleiterin einem Freund/einer Freundin weiterempfehlen?", #em1
  "Hättest Du dir für diesen Kurs einen anderen Kursleiter/eine andere Kursleiterin gewünscht?", #em2
  "Kurz vor dem Abitur bieten wir einen weiterführenden Kurs an, um Euch bestmöglich auf die Prüfung vorzubereiten. Wirst Du den abiturma Kurs im Frühjahr wieder besuchen?") #em3
# Create reactive dataset for likert panel, subset to the selected item block ########################################
# Fires on the likert plot button. Builds percentage frequencies of each answer
# value per item and comparison group ("Deine Kurse" vs "abiturma gesamt").
# Fix: when input$likertfragen == "nolikert" the original fell through with
# `likertdata2` undefined, so the user saw an "object not found" error instead
# of only the alert. We now show the alert and halt this reactive with req().
likertdata3 <-
  eventReactive(input$golikert, {
    if (input$likertfragen == "nolikert") {
      createAlert(session, "likertalert1", "LikertAlert1", title = "Bitte Auswahl treffen!",
                  content = "<ul>Bitte wähle aus, welche Einzelfragen dargestellt werden sollen.
                  Drücke dann nochmals den <i>Plot-Button.</i></ul>", append = FALSE)
      req(FALSE)  # silently stop; downstream outputs keep their previous state
    }
    closeAlert(session, "LikertAlert1")
    # Common base: drop the Herbst '15 wave and tag the user's own courses.
    likertbase <- likertdata1 %>%
      filter(Kursbeginn != "Herbst '15") %>%
      mutate(gmgroup = ifelse(kursleiterin == user(), "Deine Kurse", "abiturma gesamt"))
    likertdata2 <- likertbase  # fallback if an unexpected block value arrives
    if ("Lernerfolg" %in% input$likertfragen) {
      likertdata2 <- likertbase %>% filter(variable %in% items_lern)
    }
    if ("Organisation" %in% input$likertfragen) {
      likertdata2 <- likertbase %>% filter(variable %in% items_orga)
    }
    if ("Enthusiasmus" %in% input$likertfragen) {
      likertdata2 <- likertbase %>% filter(variable %in% items_enth)
    }
    if ("Interaktion" %in% input$likertfragen) {
      # one over-long item label gets a manual line break for the facet strip
      likertdata2 <- likertbase %>%
        filter(variable %in% items_grup) %>%
        mutate(variable = fct_recode(variable,
                                     "Die Kursteilnehmer/innen werden ermutigt, eigene Lösungswege zu formulieren und/oder\ndie vorgetragenen Lösungen kritisch zu hinterfragen." = "Die Kursteilnehmer/innen werden ermutigt, eigene Lösungswege zu formulieren und/oder die vorgetragenen Lösungen kritisch zu hinterfragen."))
    }
    if ("Beziehung" %in% input$likertfragen) {
      likertdata2 <- likertbase %>% filter(variable %in% items_indi)
    }
    if ("Weiterempfehlung" %in% input$likertfragen) {
      likertdata2 <- likertbase %>% filter(variable %in% items_empf)
    }
    # Percentage of answers per value within each item x group cell.
    likertdata2b <- likertdata2 %>%
      filter(!is.na(value)) %>%
      group_by(value, gmgroup, variable) %>%
      summarize(Freq = n()) %>%
      ungroup() %>%
      group_by(gmgroup, variable) %>%
      mutate(Freq_per = Freq / sum(Freq, na.rm = TRUE) * 100) %>%
      ungroup()
    # Hide the "abiturma gesamt" comparison group unless requested.
    if (!"gmean" %in% input$groupinl) {
      likertdata2b <- likertdata2b %>%
        filter(gmgroup == "Deine Kurse")
    }
    likertdata2b
  })
## reactive Data frame with info for plot title
# Rows of the current user's most recent course wave; the likert chart title
# pulls Kursort/Kursbeginn from the first row of this subset.
likertdata_with_userinfo <- eventReactive(input$golikert, {
  likertdata1%>%
    mutate(Kursbeginn = lubridate::ymd(Kursbeginn))%>%
    filter(kursleiterin == user())%>%
    filter(Kursbeginn == max(Kursbeginn, na.rm = T))
})
## Debug #################################################################################################
output$glimpse_likertdata3 <- renderPrint({glimpse(likertdata_with_userinfo())})
#output$user_p <- renderPrint({login_true()})
#output$pw_conf <- renderPrint({input$likertfragen})
# Create Einzelplots #################################################################################################
# Colorpalette: one diverging colour per likert answer value 1–7
cbPalette <- c("#A51E37", "#D8516A", "#FF849D", "#D9D9D9", "#95C3DF", "#497793", "#002A46")
# Einzelplot with reactive grouping
# Horizontal stacked-bar chart of answer percentages, one facet per item; the
# title shows the user's course city and most recent start date (first row of
# likertdata_with_userinfo()).
einzelplot_s <- eventReactive(input$golikert, {
  likertplot <- ggplot(likertdata3(), aes(x=gmgroup, y = Freq_per, fill = value)) + geom_bar(stat='identity') +
    coord_flip() + facet_wrap(~variable, ncol =1) +
    ggtitle(paste("Deine Kurse in ", likertdata_with_userinfo()$Kursort[1],"\n Kursbeginn: ", likertdata_with_userinfo()$Kursbeginn[1], sep = "")) +
    scale_fill_manual(limits = c("1 = trifft überhaupt nicht zu", "2","3" ,"4","5" ,"6","7 = trifft vollständig zu"),
                      labels = c("1 = trifft überhaupt nicht zu", "2","3" ,"4","5" ,"6","7 = trifft vollständig zu"), values = cbPalette) +
    guides(fill = guide_legend(nrow = 1)) +
    theme(legend.title=element_blank(), legend.position = "top",
          axis.title.y = element_blank(),
          axis.title.x = element_blank(),
          strip.text.x = element_text(size = 9, lineheight=1.1),
          plot.background = element_blank(),
          panel.background = element_rect(fill = '#FAFAFA'),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          plot.title = element_text(lineheight=1.1))
  likertplot
})
# Render the likert chart as an SVG temp file sized to the client viewport.
# The /96 converts px to inches for ggsave; the 1.35 factor up-scales the
# drawing, and the "Beziehung" block (only 3 items) gets 70% of the height.
list_of_likertplot <- eventReactive(input$golikert, {
  width_l <- session$clientData$output_einzelplot_width
  height_l <- session$clientData$output_einzelplot_height
  mysvgwidth_l <- width_l/96*1.35
  mysvgheight_l <- height_l/96*1.35*ifelse(input$likertfragen == "Beziehung",0.7,1)
  # A temp file to save the output.
  # This file will be removed later by renderImage
  outfile_l <- tempfile(fileext='.svg')
  # This actually saves the plot as an image
  ggsave(file=outfile_l, plot=einzelplot_s(), width=mysvgwidth_l, height=mysvgheight_l)
  # Return a list containing the filename, as expected by renderImage()
  list(src = normalizePath(outfile_l),
       contentType = 'image/svg+xml',
       width = width_l,
       height = height_l,
       alt = "My svg Histogram")
})
# Reveal the (initially hidden) plot container once a logged-in user plots.
observeEvent(input$golikert,{
  if(login_true() == T)
    show("likert-plot-container") # to override initial "hidden" value
})
output$einzelplot <- renderImage({
  list_of_likertplot()
})
# Usage tracking likertplot ###########################################################################################
# Snapshot of the likert form inputs plus a timestamp and the user id.
formData_l <- reactive({
  vals <- sapply(fields_l, function(x) input[[x]])
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Persist the record whenever the likert plot button is clicked.
observeEvent(input$golikert, {
  saveData_l(formData_l())
})
#####################################################################################################################
# Qualdim2 Backend ############
#####################################################################################################################
# Reveal the plot container and the feedback form on the qualdim2 plot button.
observeEvent(input$goqualdim2,{
  show("plot-container")
  show("q2_form", anim = TRUE, animType = "fade", time = 1)
})
# Builds one of four preset views of the quality-dimension data (course
# comparison / overview / strengths / grand-mean comparison), renders it to an
# SVG temp file and returns the list renderImage() expects.
qualdim2plotgone <- eventReactive(input$goqualdim2,{
  # Data fetching: quality-dimension data with the three grouping labels #############################################
  kursdata8 <-
    kursdata%>%
    mutate(gmgroup = ifelse(kursleiterin == user(), "Deine Kurse", "abiturma gesamt"),
           mggroup = paste("Dein Kurs in ", Kursort, "\n", "Kursbeginn: ", Kursbeginn, ", ", Uhrzeit, sep = ""),
           mgxgmgroup = ifelse(kursleiterin == user(), paste("Dein Kurs in ", Kursort, ", Kursbeginn: ", "\n",
                                                             Kursbeginn, ", ", Uhrzeit, sep = "")
                               , "abiturma gesamt"),
           variable = factor(variable))
  # Alerts ###################################################################################################
  # NOTE(review): if "noqualdim2" is selected, none of the branches below runs
  # and `p` stays undefined, so ggsave() errors — consider halting with req()
  # after the alert, as elsewhere in this file.
  if(input$qualdim2 == "noqualdim2") {
    createAlert(session, "qualdim2alert1", "Qualdim2Alert1", title = "Bitte Auswahl treffen!",
                content = "<ul>Bitte Auswahl treffen. Drücke dann nochmals den <i>Plot-Button.</i>
                </ul>", append = FALSE)
  }
  if(input$qualdim2 != "noqualdim2") {
    closeAlert(session, "Qualdim2Alert1")
  }
  # Plot for course comparison: per-course boxplots + dodged points ##########################################
  if ("kurse" %in% input$qualdim2){
    p <- ggplot(kursdata8%>%filter(gmgroup == "Deine Kurse"),
                aes(x=variable, y= value, fill = mggroup)) + geom_boxplot(outlier.shape = NA) +
      geom_point(pch = 21, position = position_jitterdodge(jitter.width = 0.2))
    p <- p + theme(axis.text.x = element_text(angle = 15, hjust = 1, colour = "black", size = 7),
                   panel.grid.minor = element_blank(),
                   panel.grid.major.x = element_blank(),
                   legend.title=element_blank(),
                   legend.text = element_text(size = 6, lineheight = 1.3),
                   axis.title.y = element_text(colour = "#A51E37", size = 7, lineheight = 1.3)) +
      coord_cartesian(ylim=c(0.8, 7.2)) + labs(x = "", y = "1 = trifft überhaupt nicht zu...\n...7 = trifft vollständig zu") + scale_y_continuous(breaks=c(1:7)) +
      guides(fill=guide_legend(
        keywidth=0.15,
        keyheight=0.3,
        default.unit="inch")
      )
  }
  # Plot for overview: one boxplot per dimension over all own courses ########################################
  if ("ueber" %in% input$qualdim2){
    p <- ggplot(kursdata8%>%filter(gmgroup == "Deine Kurse"),
                aes(x=variable, y= value)) + geom_boxplot(width = 0.35, colour = "#3997FA", fill = NA, outlier.shape = NA, size = 1.1) +
      geom_jitter(pch = 21, width = 0.1)
    p <- p + theme(axis.text.x = element_text(angle = 15, hjust = 1, colour = "black", size = 7),
                   panel.grid.minor = element_blank(),
                   panel.grid.major.x = element_blank(),
                   axis.title.y = element_text(colour = "#A51E37", size = 7, lineheight = 1.3)) +
      coord_cartesian(ylim=c(0.8, 7.2)) + labs(y = "1 = trifft überhaupt nicht zu...\n...7 = trifft vollständig zu", x = "") + scale_y_continuous(breaks=c(1:7))
  }
  # Plot for strengths: person-centred values (value.pstd), zero line = each
  # participant's own mean #####################################################################################
  if ("staerken" %in% input$qualdim2){
    p <- ggplot(kursdata8%>%filter(gmgroup == "Deine Kurse"),
                aes(x=variable, y= value.pstd, fill = mggroup)) + geom_boxplot(outlier.shape = NA) +
      geom_point(pch = 21, position = position_jitterdodge(jitter.width = 0.2)) +
      geom_hline(yintercept = 0, colour = "#A51E37")
    p <- p + theme(axis.text.x = element_text(angle = 15, hjust = 1, colour = "black", size = 7),
                   axis.title.y = element_text(colour = "#A51E37", size = 7),
                   panel.grid.minor = element_blank(),
                   panel.grid.major.x = element_blank(),
                   legend.title=element_blank(),
                   legend.text = element_text(size = 6, lineheight = 1.2)) +
      coord_cartesian(ylim=c(-4, 3)) + labs(x = "", y = "Mittelwert je TeilnehmerIn = 0") + scale_y_continuous(breaks=c(-4:3)) +
      guides(fill=guide_legend(
        keywidth=0.15,
        keyheight=0.3,
        default.unit="inch")
      )
  }
  # Plot for grand-mean comparison: grand-mean-centred values (value.std) with
  # bootstrap CI around the mean, zero line = abiturma overall ################################################
  if ("gmean" %in% input$qualdim2){
    p <- ggplot(kursdata8%>%filter(gmgroup == "Deine Kurse"),
                aes(x=variable, y= value.std)) +
      stat_summary(fun.data = "mean_cl_boot", colour = "#DC5C40", size = 1.3, geom = "errorbar", width = .08) +
      stat_summary(fun.data = "mean_cl_boot", geom = "point", colour = "red", size = 3) +
      geom_jitter(pch = 21, width = 0.1) +
      geom_hline(yintercept = 0, colour = "#A51E37")
    p <- p + theme(axis.title.y = element_text(colour = "#A51E37", size = 7),
                   axis.text.x = element_text(angle = 15, hjust = 1, colour = "black", size = 7),
                   panel.grid.minor = element_blank(),
                   panel.grid.major.x = element_blank(),
                   legend.title=element_blank(),
                   legend.text = element_text(size = 7)) + labs(x = "", y = "Mittelwert abiturma = 0 (je Dimension)") +
      guides(fill=guide_legend(
        keywidth=0.1,
        keyheight=0.15,
        default.unit="inch")
      )
  }
  # Create .svg as tempfile ############################################################################
  # fetch clientdata; /96 converts px to inches for ggsave
  width <- session$clientData$output_qualdim2plot_width
  height <- session$clientData$output_qualdim2plot_height
  mysvgwidth <- width/96
  mysvgheight <- height/96
  pixelratio <- session$clientData$pixelratio
  # A temp file to save the output.
  qualdimplot2_temp <- tempfile(fileext='.svg')
  # Generate the svg
  ggsave(file=qualdimplot2_temp, plot=p, width=mysvgwidth*pixelratio, height=mysvgheight*pixelratio)
  # Return a list containing the filename, as expected by renderImage()
  list(src = normalizePath(qualdimplot2_temp),
       contentType = 'image/svg+xml',
       width = width,
       height = height,
       alt = "My svg Histogram")
})
# Call of qualidim2plotgone() with action button ###################################################################
# Render the qualdim2 SVG, but only for a logged-in user.
output$qualdim2plot <- renderImage({
  if(login_true() == T)
    qualdim2plotgone()
})
# Reaktive Interpretationshilfe erstellen ######################################################################
# .md String nicht-reaktiv generieren
# output$interpretationshilfe.ui <-
# # observeEvent(input$goqualdim2, {
# renderUI({
#
# if("kurse" %in% input$qualdim2)
# interpret <- withMathJax(includeMarkdown("Stuff/Helpstuff/interpret_kurse.md"))
# if("ueber" %in% input$qualdim2)
# interpret <- withMathJax(includeMarkdown("Stuff/Helpstuff/interpret_ueber.md"))
# if("staerken" %in% input$qualdim2)
# interpret <- withMathJax(includeMarkdown("Stuff/Helpstuff/interpret_staerken.md"))
# if("gmean" %in% input$qualdim2)
# interpret <- withMathJax(includeMarkdown("Stuff/Helpstuff/interpret_gmean.md"))
#
#
# interpret
# })
# })
## Debug outputs for the two tracking forms (development only)
output$debug1 <- renderPrint({formData_q2()})
output$debug2 <- renderPrint({formData_l()})
# # Help rendering #########################################################################################
#
# output$darstell_help_im <- renderImage({
#
# filename_help_im <- normalizePath(file.path('./Stuff/Helpstuff/darstell_modal_im2.png'))
#
# # Return a list containing the filename and alt text
# list(src = filename_help_im,
# alt = paste("Image number"),
#
# height = "100%",
# style= "display: block; margin-left: auto; margin-right: auto;")
#
# }, deleteFile = FALSE)
# Usage tracking qualdimplot2 #########################################################################################
# Snapshot of the qualdim2 form inputs plus a timestamp and the user id.
formData_q2 <- reactive({
  vals <- sapply(fields_q2, function(x) paste(input[[x]], collapse = "-"))
  vals$systtime <- paste(Sys.time())
  vals$user <- user()
  vals
})
# Persist the record whenever the qualdim2 plot button is clicked.
observeEvent(input$goqualdim2, {
  saveData_q2(formData_q2())
})
})
|
92308aaf3baf93686fd21268f341f63b23e6ee83
|
9686cd5ce83ec31b74b1440d65378da3faadba2c
|
/format_SAS.R
|
4209977981a7dae2195e54e018e7c564472bc779
|
[] |
no_license
|
ptlabadie/JumpPrediction
|
0dd523a4640ae7b86699b266f6967f425789b38e
|
068b70c6442e086e5fcdde6c97ff4f40bbd3367e
|
refs/heads/master
| 2022-12-15T17:02:14.438039
| 2020-09-08T21:09:25
| 2020-09-08T21:09:25
| 275,644,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,972
|
r
|
format_SAS.R
|
##############################################################################
#Format SAS Data
##############################################################################
# Loads TAQ quote data exported from SAS, cleans bid/ask quotes and derives
# liquidity and technical measures; the result is saved as formatted.RData.
library('tidyverse')
library('dplyr')
library('sas7bdat')
library('hms')
library('highfrequency')
library('zoo')
# NOTE(review): install-on-load is fragile for non-interactive runs; prefer
# declaring these dependencies up front like the libraries above.
if(!require(tbl2xts)) install.packages("tbl2xts")
if(!require(rmsfuns)) install.packages("rmsfuns")
rm(list=ls())  # NOTE(review): wipes the entire global environment — avoid in scripts
#dev.off()
set.seed(123)
######DOUBLE CHECK SAS OUTPUT DATA FILES
#Import data (absolute local path — only works on the original machine)
data <- read.sas7bdat("C:/Users/Patrick/Documents/Current Semester/SAS/taq_data.sas7bdat")
#Format dates/times; SAS stores dates as days since 1960-01-01
data$timeround <- as.hms(data$timeround)
data$TIME_M <- as.hms(data$TIME_M)
data$DATE <- as.Date(data$DATE,origin="1960-01-01")
#Sort data by ticker, date, then time
data <- data[order(data$SYM_ROOT, data$DATE, data$timeround), ]
#Create time format reference - must work with original nonformatted values
# (first 392 rows — presumably one trading day of intraday bars; TODO confirm)
time_ref <- as.data.frame(cbind('original' = data$timeround, 'formatted' = data$timeround))
time_ref$formatted <- as.hms(time_ref$formatted)
time_ref <- time_ref[1:392,1:2]
#No negative prices, but several 0 bids/asks
#Best to repeat a neighbouring value for missing numbers
#Recode invalid quotes to NA
# NOTE(review): BID uses the threshold < 10 while ASK uses == 0, although the
# intent stated above is "recode 0 to NA" — confirm whether < 10 is a
# deliberate stub-quote filter or should match the ASK rule.
data <- data %>%
  mutate(BID=replace(BID, BID<10, NA))
data <- data %>%
  mutate(ASK=replace(ASK, ASK==0, NA))
#Fill NA quotes within each ticker
# NOTE(review): .direction = 'up' fills an NA with the NEXT non-missing value
# (from below), not the previous one — confirm whether 'down' (last
# observation carried forward) was intended.
data <- data %>%
  group_by(SYM_ROOT) %>%
  fill(BID, .direction = 'up') %>%
  ungroup()
data <- data %>%
  group_by(SYM_ROOT) %>%
  fill(ASK, .direction = 'up') %>%
  ungroup()
#Create midpoint price from bid and ask
midpoint <- (data$BID + data$ASK)/2
data <- cbind(data,midpoint)
#Create ln returns
data <- data %>%
group_by(SYM_ROOT) %>%
mutate(ln_ret = log((midpoint)/lag(midpoint))) %>%
ungroup()
#Drop counter & factor id
data$n <- NULL
data$X_FREQ_ <- NULL
#Sort data by Tickr, Date then time
data <- data[order(data$SYM_ROOT, data$DATE, data$timeround), ]
###################################################################################################
#Technical and Liquidity Measures
###################################################################################################
##Liquidity Measures
#Bid/Ask Spread
data$ba_spread <- data$ASK - data$BID
#Cumulative Return: Current return compared to yesterday's closing price
data <- data %>%
group_by(SYM_ROOT, DATE) %>%
mutate(cum_ret = cumsum(ln_ret)) %>%
ungroup()
#Fill missing start of day values with ln_ret
data$cum_ret <- ifelse(is.na(data$cum_ret), data$ln_ret, data$cum_ret)
#Depth Imbalance
data <- data %>%
group_by(SYM_ROOT, DATE) %>%
mutate(depth_imb = 2*(cumsum(ASKSIZ) - cumsum(BIDSIZ))/(cumsum(ASK) + cumsum(BID))) %>%
ungroup()
#Quoted Spreads
data$qs <- (data$ASK - data$BID)/data$midpoint
##Technical Measures
#Price rate of Change
data$PROC <- (data$midpoint - lag(data$midpoint))/lag(data$midpoint)
# Moving Average - 15 minute
data <- data %>%
group_by(SYM_ROOT) %>%
mutate(MA = rollapply(midpoint, FUN = sum, width = 15, fill = NA, align = 'right')) %>%
ungroup()
# Bias
data$BIAS <- (data$midpoint - data$MA)/data$MA
# Osciallator
data$OS <- (data$MA - lag(data$MA)) /lag(data$MA)
# Volume Rate of Change
data$mid_vol <- (data$BIDSIZ + data$ASKSIZ)/2
data$VROC <- (data$mid_vol - lag(data$mid_vol))/lag(data$mid_vol)
###VROC has infinite values
data$VROC <- NULL
#Save R.file
save(data, file = 'formatted.RData')
# #Check dates and values by stocks
# AAPL <- data %>%
# filter(SYM_ROOT == 'AAPL')
# MSFT <- data %>%
# filter(SYM_ROOT == 'MSFT')
# FB <- data %>%
# filter(SYM_ROOT == 'FB')
# GOOG <- data %>%
# filter(SYM_ROOT == 'GOOG')
#
#
# summary(MSFT$BID)
# summary(AAPL$BID)
# summary(FB$BID)
# summary(GOOG$BID)
#
# summary(MSFT$ASK)
# summary(AAPL$ASK)
# summary(FB$ASK)
# summary(GOOG$ASK)
|
9cfc6f0c4ebb43cd807cb76af7cb18b109043966
|
9a5fb9eab63a84d626c23b017bbeacfff334f6a4
|
/app.R
|
0eac9fc18d4fe6bf94169b491c75cbd03d26c713
|
[] |
no_license
|
th1460/CMG
|
1f704aab8fa2ecc4b8dc2c1e8ca23f7539863abc
|
c5995193f400930bae13622c2f3fd6ebfba9244f
|
refs/heads/master
| 2020-06-30T13:00:15.781538
| 2019-08-06T23:23:34
| 2019-08-06T23:23:34
| 200,833,784
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,580
|
r
|
app.R
|
library(shiny)
library(shinymaterial)
library(DT)
library(plotly)
library(DBI)
library(dplyr)
library(glue)
# canal
conn <- dbConnect(RSQLite::SQLite(), dbname = "dados.sqlite")
if(!dbExistsTable(conn, "cenarios")){
dbWriteTable(conn, "cenarios", value = tibble(cenario = character(),
desc = character(),
custos = numeric()))
}
ui <- material_page(
material_side_nav(
tags$h4("Painel de controle"),
material_text_box(input_id = "name", label = "Nome do cenário"),
material_number_box(input_id = "fator",
label = "Fator retorno veículo (1 ou 2)",
min_value = 1,
max_value = 2,
initial_value = 1),
material_number_box(input_id = "dura",
label = "Duração (meses)",
min_value = 1,
max_value = Inf,
initial_value = 1),
material_number_box(input_id = "exte",
label = "Extensão (km)",
min_value = 1,
max_value = Inf,
initial_value = 1),
material_dropdown(input_id = "porte",
label = "Porte",
choices = c("Pequeno", "Médio", "Grande")),
material_dropdown(input_id = "relevo",
label = "Relevo",
choices = c("Plano", "Montanhoso", "Ondulado")),
material_dropdown(input_id = "classe",
label = "Classe",
choices = c("0", "I-A", "I-B", "II", "III")),
material_dropdown(input_id = "canteiro",
label = "Padrão do canteiro",
choices = c("Permanente", "Provisório")),
material_slider(input_id = "pavi_a",
label = "Pavimento tipo A (%)",
min_value = 0,
max_value = 100,
initial_value = 0,
step_size = 1),
material_slider(input_id = "pavi_b",
label = "Pavimento tipo B (%)",
min_value = 0,
max_value = 100,
initial_value = 0,
step_size = 1),
material_slider(input_id = "pavi_c",
label = "Pavimento tipo C (%)",
min_value = 0,
max_value = 100,
initial_value = 0,
step_size = 1),
material_slider(input_id = "pavi_d",
label = "Pavimento tipo D (%)",
min_value = 0,
max_value = 100,
initial_value = 0,
step_size = 1),
material_slider(input_id = "pavi_e",
label = "Pavimento tipo E (%)",
min_value = 0,
max_value = 100,
initial_value = 0,
step_size = 1),
material_slider(input_id = "pavi_f",
label = "Pavimento tipo F (%)",
min_value = 0,
max_value = 100,
initial_value = 0,
step_size = 1),
material_slider(input_id = "pavi_g",
label = "Pavimento tipo G (%)",
min_value = 0,
max_value = 100,
initial_value = 0,
step_size = 1)
),
title = "Custo Médio Gerencial",
nav_bar_color = "indigo",
material_tabs(
tabs = c("Cenário" = "resumo",
"Comparação de cenários" = "cenario")
),
material_tab_content(
tab_id = "resumo",
dataTableOutput("resultado"),
tags$br(),
plotlyOutput("pieplot"),
material_button(input_id = "enviar", label = "Enviar")
),
material_tab_content(
tab_id = "cenario",
dataTableOutput("cenarios"),
plotlyOutput("plotcenarios"),
material_button(input_id = "limpar", label = "Limpar")
)
)
server <- function(input, output, session) {
source("cmg.R")
tabela <- reactive({
custos <-
cmg(porte = input$porte,
relevo = input$relevo,
classe = input$classe,
padrao = input$canteiro,
k = input$fator,
d = input$dura,
et = input$exte,
peca = input$pavi_a/100,
pecb = input$pavi_b/100,
pecc = input$pavi_c/100,
pecd = input$pavi_d/100,
pece = input$pavi_e/100,
pecf = input$pavi_f/100,
pecg = input$pavi_g/100)
tabela <-
tibble(cenario = input$name,
desc = c("Mobilização e desmobilização (CM1)",
"Administração local (CM2)",
"Canteiro de obras (CM3)",
"Terraplenagem, drenagem e OAC, obras complementares, sinalização e proteção ambiental (CM4)",
"Pavimentação, aquisição e transporte de material betuminoso (CM5)",
"Custo médio gerencial (CMG)"),
custos = c(custos$Parcial$CM1,
custos$Parcial$CM2,
custos$Parcial$CM3,
custos$Parcial$CM4,
custos$Parcial$CM5,
custos$CMG))
})
output$resultado <- renderDataTable({
tabela() %>%
datatable(rownames = FALSE,
colnames = c("Cenário", "Custos", "Valores"),
options = list(dom = "t", columnDefs = list(list(className = 'dt-center', targets = 2)))) %>%
formatCurrency(columns = "custos", digits = 2, mark = ".", dec.mark = ",", currency = "R$")
})
output$pieplot <- renderPlotly({
tabela() %>%
slice(-6) %>%
plot_ly(labels = ~desc,
values = ~custos,
hoverinfo = "percent+label",
type = "pie") %>%
layout(showlegend = FALSE)
})
observeEvent(input$enviar, {
dbWriteTable(conn, "cenarios", value = tabela(), append = TRUE)
showNotification("Enviado",
action = a(href = "javascript:location.reload();", "Atualizar tabela")
)
}, ignoreInit = TRUE)
output$cenarios <- renderDataTable({
datatable(tbl(conn, "cenarios") %>% as_tibble(),
options = list(dom = "tip", pageLength = 6, columnDefs = list(list(className = 'dt-center', targets = 2))),
colnames = c("Cenário", "Custos", "Valores (R$)"),
rownames = FALSE) %>%
formatCurrency(columns = "custos", digits = 2, mark = ".", dec.mark = ",", currency = "R$")
})
output$plotcenarios <- renderPlotly({
tbl(conn, "cenarios") %>%
filter(desc != "Custo médio gerencial (CMG)") %>%
as_tibble() %>%
mutate(desc = factor(desc, levels = c("Mobilização e desmobilização (CM1)",
"Administração local (CM2)",
"Canteiro de obras (CM3)",
"Terraplenagem, drenagem e OAC, obras complementares, sinalização e proteção ambiental (CM4)",
"Pavimentação, aquisição e transporte de material betuminoso (CM5)"))) %>%
ggplot(aes(cenario, custos/1000000, fill = desc, text = glue("{desc} <br> Cenário: {cenario} <br> R${format(custos, big.mark = '.', decimal.mark = ',')}"))) +
geom_col() +
labs(x = "Cenários", y = "Custos em milhões (R$)", fill = "") +
guides(fill = FALSE) +
theme_minimal()
ggplotly(tooltip = "text") %>%
layout(showlegend = FALSE)
})
observeEvent(input$limpar, {
dbWriteTable(conn, "cenarios", value = tibble(cenario = character(),
desc = character(),
custos = numeric()),
overwrite = TRUE)
showNotification("Enviado",
action = a(href = "javascript:location.reload();", "Atualizar tabela")
)
}, ignoreInit = TRUE)
}
shinyApp(ui = ui, server = server)
|
e718533f37b881a08c7acce88fd71158392c2ce8
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RESS/R/readessentia.R
|
28ace10c66898e684a35a341a11c00dcbf5484cb
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 399
|
r
|
readessentia.R
|
read.essentia <- function(file) {
pipedline <- pipe(sprintf("bash %s",file), open = "r")
colspec <- TRUE
if (grepl("-notitle", file)) {
colspec <- FALSE
}
t2 <- read.csv(pipedline, header = colspec, sep = ",",
quote = "\"'", comment.char = "#", blank.lines.skip = FALSE,
allowEscapes = TRUE, skip = 0)
rm(colspec)
close(pipedline)
return(t2)
}
|
e8a72f84f4889793bd925583527993d383d116ee
|
d60ed73758ee30f0d68af60119c0edd0684a52a7
|
/Ejercicios_Jose.R
|
46eb388fd67fca0f22237985a68d47cbf63bb20b
|
[] |
no_license
|
JoseVictorio/Ejercicios_Progra
|
dd657e9fec08fcb42413e2350e535bc835e5e6ce
|
42b87a25c2926830d3c0538daa812422c450c07f
|
refs/heads/main
| 2023-06-25T16:11:01.126124
| 2021-07-10T20:31:14
| 2021-07-10T20:31:14
| 381,551,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,279
|
r
|
Ejercicios_Jose.R
|
#1RA PARTE ----
#1 ----
#1.a
a <- (0.3*0.15)/((0.3*0.15) + (0.2*0.8) + (0.5*0.12))
round(a, 2)
#1.b
b <- (5^6 / factorial(6)) * (exp(1))^(-5)
round(b, 2)
#1.c
comb <- factorial(20) / (factorial(7) * (factorial(20-7)))
pot <- (0.4^7)*(0.6^13)
c <- comb * pot
round(c, 2)
#2 ----
#2.a Suma de primeros n numeros
(1000*(1000+1))/2 #(n*(n+1))/2
sum(1:1000)
#2.b Suma de serie de potencias
(1-(2^(10+1)))/(1-2) #(1-(r^(n+1)))/(1-r)
#3 ----
grupo
#3.a ¿Cuántos elementos tiene?
table(grupo)
length(grupo)
#3.b ¿En qué posiciones del vector está la letra “A”?
which(grupo == "A")
#4 ----
nota
#4.a
names(nota) = NULL
sum(nota)
#4.b
mean(nota)
#4.c
which(nota > 7.0)
#4.d
sort(nota, decreasing = T)
#4.e
which(nota == max(nota))
#5 ----
#a
sum(nota[1:10])
#b
sum(grupo == "C")
table(grupo) #Comprobacion
#c
#Aprobado >= 5.5
sum(nota >= 5.5)
#d
#Con tibble me complique.
library(tidyverse)
df_notas <- tibble(grupo, nota)
df_notas
c_aprob <- dplyr::filter(df_notas, grupo == "B" & nota >= 6)
dim(c_aprob)[1] #Cantidad de aprobados
table(c_aprob)
#Con vectores
names(nota) <- grupo
nota
nota[grupo == "B" & nota >= 5.5]
length(nota[ grupo == "B" & nota >= 5.5])
sum(grupo == "B" & nota >= 5.5)
#e
x <- length(nota[ grupo == "C" & nota >= 5.5])
y <- length(nota[grupo == "C"])
(x/y)*100
#f
names(nota) <- NULL #Si borramos el nombre del vector no sale
names(nota) <- grupo
which(nota == min(nota))
which(nota == max(nota))
which.max(nota)
which.min(nota)
#g
nota[grupo %in% c("A", "B") & nota >= 5.5] %>% mean()
#6 ----
quantile(nota, 0.66) #Percentil 66 de toda la muestra
quantile(nota[grupo == "C"], 0.66)
#El 66% de la muestra tiene una nota de ""
#7 ----
(sum(nota <= 4.9)/length(nota))*100
(sum(nota >= 4.9)/length(nota))*100
#8 ----
boxplot(nota[grupo == "A"])
boxplot(nota[grupo == "B"])
boxplot(nota[grupo == "C"])
boxplot(nota[grupo == "D"])
boxplot(nota[grupo == "E"])
table(grupo)
#9 ----
conc
#a
max(conc)
#b
sum(conc >= 40.0)
#c
mean(conc)
#d
sort(conc) %>%
head(10)
#e >>> Falta comprobar
library(magrittr)
conc
length(conc)
max(conc)
ind_max <- which(conc == max(conc))
hora_Data <- (24)/length(conc)
hour <- ind_max*hora_Data
min <- round((hour - floor(hour))*60, 0)
min
hour %<>% floor()
hour
paste(hour, ":", min, "hrs")
#11
A <- cbind(c(1:10),
seq(2, 20, by = 2),
seq(3, 30, by = 3),
seq(4, 40, by = 4),
seq(5, 50, by = 5))
B <- cbind(c(0, 1, 0, 0, 1),
c(1, 0, 1, 1, 0),
c(0, 1, 0, 0, 1),
c(1, 0, 1, 0, 1),
c(0, 1, 0, 1, 0))
(A %*% B) - (A %*% (t(B)))
#B <- matrix(B, nrow = 5, ncol = 5, byrow = T)
#12
x <- cbind(c(1:5), c(1, -1, 0, 1, 2))
y <- cbind(c(0, 0, 1, 1, 3))
(solve(t(x) %*% x)) %*% t(x) %*% y
#2DA PARTE ----
#1----
x <- c(1, 2, 3, 4, 5, 6, 7, 8, 9 , 10)
y <- c(1, 4, 6, 8, 25, 36, 49, 61, 81, 100)
plot(x, y, type = "o")
#2----
A <- cbind(c(1:4), seq(2, 8, by = 2), seq(3, 12, by = 3))
A
#3----
i <- diag(1, 3, 3)
i
#4----
#Con matriz
m_nula <- matrix(rep(0, 16), nrow = 4, ncol = 4)
m_nula
#Con function
matrix_nula <- function(k) {
nula <- diag(k);
for (i in 1:k) {
nula[i, i] = 0
};
return(nula)
}
matrix_nula(4)
#5----
diag(c(0, 2, 3, 4))
B <- diag(4)
B[1, 1] = 0
B[2, 2] = 2
B[3, 3] = 3
B[4, 4] = 4
B
#6----
A
A_T <- matrix(A, nrow = 3, ncol = 4, byrow = T)
A_T
t(A) #Funcion transpuesta de A
#7----
#8----
P <- matrix(c(1, -2, 1, 2, 4, 0, 3, -2, 1), nrow = 3, ncol = 3)
P %*% P
pot_m <- function(m, n) {
pot = m;
for (i in 2:n) {
pot <- pot %*% m
};
return(pot)
}
pot_m(P, 6)
#9----
solve()
#13----
data(co2)
means = aggregate(co2, FUN=mean)
year = as.vector(time(means))
co2 = as.vector(means)
co2
lag(co2)
dif <- co2 - lag(co2)
dif
dif_2020_2019 <- 2.64
year_2020 <- 2020
library(ggplot2)
library(plotly)
plot(x = year, y = dif,
type = "b",
pch = 16,
xlab = "Año",
ylab = "CO2 aumento por año",
xlim = c(1960, 2020),
ylim = c(0.2, 2.7))
points(x = year_2020, y = dif_2020_2019, pch = 4, col = "red")
#14----
rainfall <- read_csv("data/rainfall.csv")
rainfall
result <- rainfall %>%
dplyr::filter(sep >= 180 | oct >= 180 |
nov >= 180 | dec >= 180 |
jan >= 180 | feb >= 180 |
mar >= 180 | apr >= 180 |
may >= 180) %>%
select(name)
result2 <- as.vector(result[[1]])
rainf <- read_csv("data/rainfall.csv") %>%
print()
rainf2 <- select(rainf, sep:name)
rainf2
pp180 <- gather(data = rainf2, key = "mes", value = "pp", 1:9) %>%
filter(pp >= 180) %>%
collect %>% .[[1]]
#3RA PARTE ----
library(skimr)
cod_lambayeq <- read_csv("data/listRaingauge.csv") %>%
dplyr::filter(NOM_EST == "LAMBAYEQUE") %>%
select(CODIGO) %>%
collect %>% .[[1]]
data_lambayeq <- read_csv("data/raingaugeDataset.csv") %>%
dplyr::select(date, qc00000301) %>%
mutate(date = as.Date(date, format = "%d/%m/%Y")) %>%
rename(pp = qc00000301)
#a ----
data_lambayeq %>%
dplyr::select(pp) %>%
is.na() %>%
sum()
#
stat_data <- skim(data_lambayeq)
#
as_tibble(stat_data)
#summary(data_lambayeq)
#b ----
|
6b52597c30e0dc73f5338b778be5bdd67ad62ba2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/VIM/examples/countInf.Rd.R
|
74942d60702b95c0709700f001b1256248a619b0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 238
|
r
|
countInf.Rd.R
|
library(VIM)
### Name: countInf
### Title: Count number of infinite or missing values
### Aliases: countInf countNA
### Keywords: utilities
### ** Examples
data(sleep, package="VIM")
countInf(log(sleep$Dream))
countNA(sleep$Dream)
|
c7eab25edacc9fbfb245c78a8fa18d14594ff903
|
d61bc937531a128437d6e53d65636ce3dd1f8648
|
/Install_R_Dependancies.R
|
ea93078840a8096a2b735f2166ead0b97ef50606
|
[] |
no_license
|
sysbiolux/DCcov
|
9080e74321d4c6f9cbdfebe28d85161cbf7d522e
|
e0dce02ca88626fc534ce6ff34b946d1c22d13a5
|
refs/heads/main
| 2023-08-21T22:01:33.801504
| 2021-11-25T15:00:57
| 2021-11-25T15:00:57
| 341,609,209
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 644
|
r
|
Install_R_Dependancies.R
|
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
if (!requireNamespace("edgeR", quietly = TRUE))
BiocManager::install("edgeR",version ="3.30.3" )
if (!requireNamespace("DESeq2", quietly = TRUE))
BiocManager::install("DESeq2",version ="1.28.1")
if (!requireNamespace("tidyverse", quietly = TRUE))
install.packages("tidyverse")
if (!requireNamespace("FactoMineR", quietly = TRUE))
install.packages("FactoMineR",)
if (!requireNamespace("tidyverse", quietly = TRUE))
install.packages("tidyverse")
library('devtools')
#networkD3==0.4
devtools::install_github('christophergandrud/networkD3')
|
6b2648a235c84c264d7324181fd50b260ed7e8a1
|
942b499a16ff485928e6a005ce641a204bab251a
|
/man/calculate.gold.Rd
|
458645bb75f1db5553ad8027692813f967824bfc
|
[] |
no_license
|
dcourvoisier/doremi
|
c16032fbfd8254f03d52643ebd01df1e1ce63d84
|
f2adc5037fef7e621eec4c32990896f3279e67ab
|
refs/heads/master
| 2022-01-26T21:38:26.947001
| 2022-01-13T09:42:00
| 2022-01-13T09:42:00
| 175,785,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,052
|
rd
|
calculate.gold.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doremi.R
\name{calculate.gold}
\alias{calculate.gold}
\title{Calculation of derivatives using the GOLD method}
\usage{
calculate.gold(signal, time, embedding = 3, n = 2)
}
\arguments{
\item{signal}{is a vector containing the data from which the derivative is estimated.}
\item{time}{is a vector containing the time values corresponding to the signal. Arguments signal and time must have the same length.}
\item{embedding}{is an integer indicating the number of points to consider for derivative calculation. Embedding must be greater than 1 because at least
two points are needed for the calculation of the first derivative and at least 3 for the calculation of the second derivative.}
\item{n}{is the maximum order of derivative to estimate.}
}
\value{
Returns a list containing the following elements:
dtime- contains the time values in which the derivative was calculated. That is, the moving average of the input time over embedding points.
dsignal- is a data.frame containing n+1 columns and the same number of rows as the signal.
The column k is the k-1 order derivative of the signal over embedding points.
embedding- number of points used for the derivative calculation.
n - the maximum derivative order calculated n
}
\description{
\code{calculate.gold} estimates the derivatives of a variable using the Generalized Orthogonal Local Derivative (GOLD)
method described in \doi{10.1080/00273171.2010.498294}{Deboeck (2010)}. The code available on this paper was extracted and adapted for non constant time steps.
This method allows calculating over a number of measurement points (called the embedding number) derivatives with uncorrelated errors.
}
\examples{
#In the following example the derivatives for the function y(t) = t^2 are calculated.
#The expected results are:
#y'(t) = 2t and y''(t) = 2
time <- c(1:500)/100
signal <- time^2
result <- calculate.gold(signal = signal, time = time, embedding = 5)
}
\keyword{derivative}
\keyword{embedding-number}
|
06e98ded93d2f8d604fc9c61b66257250cf407cd
|
a01e939f9cb6a9e70ecd372178006d85131777bd
|
/Plot1.R
|
82ca9fe83bd975c1056a052caf51e3d0e2b041fa
|
[] |
no_license
|
jodanmo/ExData_Plotting1
|
4c8e173c4ac2a4b6b24cbd02e40003e18a5851a5
|
4980c7415b92c42801d051f2cc07fd69902e5a81
|
refs/heads/master
| 2021-01-17T23:08:05.800097
| 2015-06-03T17:53:41
| 2015-06-03T17:53:41
| 36,812,661
| 0
| 0
| null | 2015-06-03T15:28:11
| 2015-06-03T15:28:11
| null |
UTF-8
|
R
| false
| false
| 596
|
r
|
Plot1.R
|
#read in data
power <- read.table("household_power_consumption.txt",
sep = ";",
stringsAsFactors= FALSE,
header=TRUE,
na.strings= "?")
#transform dates
library(lubridate)
power$Date <- dmy(power$Date)
#subset dates to 2007-02-01 and 2007-02-02
library(dplyr)
startDate <- ymd("2007-02-01")
endDate <- ymd("2007-02-02")
power<- filter(power, Date >= startDate, Date <= endDate)
#create plot
png(filename = "Plot1.png")
hist(power$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", col="red")
dev.off()
|
06b14c443a96dc58491410e74ef993794c5b1fe0
|
73d6b9e8adbd873875ed51751abc7182133b3e8a
|
/man/fill_missing_dates_and_create_daily_counts_for_mtl_data.Rd
|
f8b05783cd20eaf1182a63665227f0d75efedacc
|
[
"MIT"
] |
permissive
|
SimonCoulombe/covidtwitterbot
|
cc307e395312e5bbbff09b36c6dda9aef3e1747b
|
128749bfd4d0fcec5e3970f60f87424bff8cd50f
|
refs/heads/master
| 2023-04-19T10:03:03.008866
| 2021-04-26T16:26:36
| 2021-04-26T16:26:36
| 311,527,367
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 375
|
rd
|
fill_missing_dates_and_create_daily_counts_for_mtl_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_mtl_data.R
\name{fill_missing_dates_and_create_daily_counts_for_mtl_data}
\alias{fill_missing_dates_and_create_daily_counts_for_mtl_data}
\title{Title}
\usage{
fill_missing_dates_and_create_daily_counts_for_mtl_data(mtl_data)
}
\arguments{
\item{rls_data}{}
}
\value{
}
\description{
Title
}
|
b71906b831fc6cda4072f6dd504173cd50d7ba56
|
3d255512db3b6f6ccbf5384e307489627458ce39
|
/Classification average.R
|
267d06a129e1a1bbbcd3f49025d24ec0c39e6db7
|
[] |
no_license
|
XWeiZhou/Data-analysis
|
f0e8bc21793da665b8c3027930ef75928b1ba46a
|
f28b568b505b581b003a5b47930ba97837e2819d
|
refs/heads/master
| 2021-07-06T03:34:46.560554
| 2021-02-05T15:18:22
| 2021-02-05T15:18:22
| 221,002,668
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
Classification average.R
|
library(dplyr)
Example_data<- read.csv("C:/Example-Groups.csv",header=T)
Example_data_group_means <- Example_data %>%
dplyr::group_by(Site,Year) %>%
dplyr::summarise(sum=sum(Value),avg=mean(Value),sd=sd(Value))
|
6d8a761255eb342597fb34d4535ed7fbc249e307
|
5e4273c9870b9dc46708dc9e88bf631db565c707
|
/tests/test-yapa.R
|
a04c7e7c29250d06d5929c0af7b9a80a414b6502
|
[
"MIT"
] |
permissive
|
alexpavlakis/pollagg
|
28f03b4f2ad775a5c4f30466fe8b312801b2bc6e
|
f29b64d4fa87277b19b8a6d22695b51451407b03
|
refs/heads/master
| 2022-12-16T13:01:37.688375
| 2020-09-24T19:35:36
| 2020-09-24T19:35:36
| 295,246,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 621
|
r
|
test-yapa.R
|
library(pollagg)
# Fit a model
y <- matrix(c(10, 900, 50, 50), ncol = 2, byrow = TRUE)
n <- rowSums(y)
fit <- yapa(y = y, n = n, dates = NULL, iter = 1000, chains = 3)
# Test model fit
if(!all(dim(fit$params$theta) == c(1500, 2, 2))) {
stop("yapa is not returning results as expected")
}
# Test plot
p <- plot(fit)
if(!"ggplot" %in% class(p)) {
stop("plot.yapafit is not returning an object of class ggplot2")
}
# Test summary
sf <- summary(fit)
if(!identical(names(sf), c("delta", "polls", "pct", "trend"))) {
stop("summary.fit is not summarising the four output data.frames (delta, polls, pct, and trend)")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.