blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7369da95ca97bf05f7b629632374700f9e2ceace
|
5358afe2a4683d2d56b17353d2bc4a1a088cf8cd
|
/chapter3.R
|
aedf40b0959a5f1e846a155b7f3ef5b50b1c1b08
|
[] |
no_license
|
jhirx/IODS-project
|
2af17b8f56638771285d639ba66e6606dd154a4e
|
93b856cbfbdcd6077f24d12cf8a851126151c9b5
|
refs/heads/master
| 2020-08-31T13:30:45.158269
| 2019-12-09T16:49:16
| 2019-12-09T16:49:16
| 218,700,989
| 0
| 0
| null | 2019-10-31T06:41:39
| 2019-10-31T06:41:35
| null |
UTF-8
|
R
| false
| false
| 3,129
|
r
|
chapter3.R
|
# rs 17.11.2019 jouni hirvionen
# Chapter 3 data wrangling (IODS course): join the student-mat and
# student-por datasets, average duplicated numeric columns, derive
# alcohol-use variables, and write the combined table out.
# (Comments translated to English from Finnish.)
#install.packages("dplyr")
# read the data in
#d1=read.table("d:/yliopisto/IODS-project/data/student-mat.csv",sep=";",header=TRUE)
#d2=read.table("d:/yliopisto/IODS-project/data/student-por.csv",sep=";",header=TRUE)
#=merge(d1,d2,by=c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet"))
#readtest <- read.csv("~/IODS-project/data/learning2014.csv")
#first test
# home readtest <- read.csv("d:/yliopisto/IODS-project/data/student-mat.csv",sep=";",header=TRUE)
readtest <- read.csv("~/IODS-project/data/student-mat.csv", sep = ";", header = TRUE)
str(readtest)
readtest2 <- read.csv("~/IODS-project/data/student-por.csv", sep = ";", header = TRUE)
str(readtest2)
# read the data in for the actual analysis
math <- read.table("~/IODS-project/data/student-mat.csv", sep = ";", header = TRUE)
por <- read.table("~/IODS-project/data/student-por.csv", sep = ";", header = TRUE)
colnames(math)
colnames(por)
library(dplyr)
# columns used as the join key (identify the same student in both datasets)
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
# inner join: keep only students present in both datasets
math_por <- inner_join(math, por, by = join_by, suffix = c(".math", ".por"))
# see the new column names
colnames(math_por)
# glimpse at the data
glimpse(math_por)
print(nrow(math_por)) # 382 students
# checking the joined data
str(math_por)
dim(math_por)
colnames(math_por) # FIX: was colnames(math_po) — undefined object, errored at run time
# create a new data frame with only the joined columns
alc <- select(math_por, one_of(join_by))
# columns that were not used for joining the data
notjoined_columns <- colnames(math)[!colnames(math) %in% join_by]
notjoined_columns
# for every column name not used for joining...
for(column_name in notjoined_columns) {
  # select two columns from 'math_por' with the same original name
  two_columns <- select(math_por, starts_with(column_name))
  # select the first column vector of those two columns
  first_column <- select(two_columns, 1)[[1]]
  # if that first column vector is numeric...
  if(is.numeric(first_column)) {
    # take a rounded average of each row of the two columns and
    # add the resulting vector to the alc data frame
    alc[column_name] <- round(rowMeans(two_columns))
  } else { # else if it's not numeric...
    # add the first column vector to the alc data frame
    alc[column_name] <- first_column
  }
}
glimpse(alc)
# average of alcohol consumption
# access the 'tidyverse' packages dplyr and ggplot2
library(ggplot2)
# define a new column alc_use by combining weekday and weekend alcohol use
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# initialize a plot of alcohol use
g1 <- ggplot(data = alc, aes(x = alc_use, fill = sex))
# define the plot as a bar plot and draw it
g1 + geom_bar()
# define a new logical column 'high_use'
alc <- mutate(alc, high_use = alc_use > 2)
# initialize a plot of 'high_use'
g2 <- ggplot(alc, aes(high_use))
# draw a bar plot of high_use by sex
g2 + facet_wrap("sex") + geom_bar()
glimpse(alc)
# Observations: 382
# Variables: 35
write.csv(alc, file = "~/IODS-project/data/alc_table.csv")
|
964786567f0184416e7a697bab6301b111cca10c
|
8ae7107d0d5d51dda8f8b4d5d9e67879f54223ea
|
/dotplot_Fig4i/dot.grah.R
|
ef90a58114a106900af8eeb1249d1a67c91f9b4a
|
[] |
no_license
|
biolchen/TNF-and-Ferroptotic-Cell-Death
|
f7016a4eaccd71d9ab52fa41a77fc65cee3a2756
|
1c5b200f08c28645e932d8999eff9c198b528532
|
refs/heads/main
| 2023-08-22T17:56:50.642062
| 2021-10-11T08:59:24
| 2021-10-11T08:59:24
| 415,839,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,860
|
r
|
dot.grah.R
|
library(ggplot2)
# Dot plot of selected ligand-receptor interactions (CellPhoneDB-style output):
# dot size encodes -log10(p-value), colour encodes log2 of the mean expression.
pvals <- read.table("pvalues.txt", header = TRUE, sep = "\t", check.names = 0,
                    comment.char = "", quote = "")
means <- read.table("means.txt", header = TRUE, sep = "\t", check.names = 0,
                    comment.char = "", quote = "")
# Drop the first 11 columns, keeping the remainder for plotting.
pvals <- pvals[, -(1:11)]
means <- means[, -(1:11)]
pair_ids <- pvals$interacting_pair
# Interaction pairs and cluster pairs to display.
wanted_pairs <- c("TNF_TNFRSF1A","TNF_TNFRSF1B","TGFB1_TGFbeta receptor1","TGFB1_TGFbeta receptor2","IL1 receptor_IL1A","IL1 receptor_IL1B","IL6 receptor_IL6","IL7 receptor_IL7","IL10 receptor_IL10","IL17 receptor AC_IL17A","IL15 receptor_IL15","PDGFA_PDGFRA","FGF1_FGFR1")
wanted_clusters <- c("macrophages|macrophages","macrophages|non-sensitive","macrophages|sensitive","non-sensitive|macrophages","non-sensitive|non-sensitive","non-sensitive|sensitive","sensitive|macrophages","sensitive|non-sensitive","sensitive|sensitive")
row_idx <- match(wanted_pairs, pair_ids)
pval_sel <- pvals[row_idx, wanted_clusters]
mean_sel <- means[row_idx, wanted_clusters]
plot_grid <- expand.grid(wanted_pairs, wanted_clusters)
p_values <- unlist(pval_sel)
# Zero p-values would give an infinite -log10; floor them just below 0.001.
p_values[p_values == 0] <- 0.0009
mean_values <- unlist(as.data.frame(mean_sel))
# Zero means would give -Inf on the log2 scale; map them to 1 (log2 = 0).
mean_values[mean_values == 0] <- 1
plot_df <- cbind(plot_grid, p_values, log2(mean_values))
colnames(plot_df) <- c("pair", "clusters", "pvalue", "mean")
dot_palette <- colorRampPalette(c("black", "blue", "yellow", "red"), alpha = TRUE)(n = 399)
dot_plot <- ggplot(plot_df, aes(x = clusters, y = pair)) +
  geom_point(aes(size = -log10(pvalue), color = mean)) +
  scale_color_gradientn("Log2 mean (Molecule 1, Molecule 2)", colors = dot_palette) +
  labs(x = "", y = "") +
  theme_bw() +
  theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
  theme(axis.text.x = element_text(size = 16, angle = 90, hjust = 1),
        axis.text.y = element_text(size = 16),
        legend.title = element_text(size = 16))
ggsave(dot_plot, filename = "ligand.pdf", width = 14, height = 10)
|
6ff18f8d158aa811239d7e72901123607486b7c8
|
f6058fd58749cbca65d7a69bafe87d817d84af41
|
/scripts/r/seg.distortion.project/bedtools.processing.R
|
2cefd2a212ca91d9f0d2f8957e21f70a9999f4c5
|
[] |
no_license
|
alexcoulton/phd
|
ac9eca4e5f3202912150151097755d752343ec27
|
b6c402c7e85e1a75d97bc2fc0caa5b35b77d7b2d
|
refs/heads/master
| 2023-07-05T22:00:43.246138
| 2021-08-13T11:48:44
| 2021-08-13T11:48:44
| 345,630,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,149
|
r
|
bedtools.processing.R
|
# ____________________________________________________________________________
# PROCESSING BEDTOOLS EXTRACTION FILE ####
# Read bedtools-extracted gene sequences; record names look like "chr:start-end".
fafile = ReadFasta("rotation1scripts_v4/original_data/IWGSC/bed.tools.genes.only.extraction.fa")
fafile[] = lapply(fafile, as.character)
# coordinate portion after the ":" (i.e. "start-end")
fafile$coords = lapply(strsplit(fafile[, 1], ":"), function(x) x[[2]])
gfffile = read.table("rotation1scripts_v4/original_data/IWGSC/iwgsc.genes.only.gff3")
fafile[] = lapply(fafile, as.character)
fafile$startcoord = lapply(strsplit(fafile$coords, "-"), function(x) x[[1]])
fafile$endcoord = lapply(strsplit(fafile$coords, "-"), function(x) x[[2]])
fafile$genename = ""
# NOTE(review): assumes gff rows align one-to-one, in order, with the fasta
# records — confirm before reuse.
fafile$genename = gfffile$V9
fafile$chr = lapply(strsplit(fafile$name, ":"), function(x) x[[1]])
fafile$name = paste(fafile$chr, fafile$genename)
fafilenew = fafile[, 1:2]
writefasta(fafilenew, "rotation1scripts_v4/original_data/IWGSC/iwgsc.full.gene.sequences.fa")
fafilenew2 = ReadFasta("rotation1scripts_v4/processed_data/fasta/unique.seg.dist.genes.v2.fasta")
fafilenew2 = convert.to.character.data.frame(fafilenew2)
# gene IDs: first whitespace-separated token with its last two characters trimmed
list.of.seg.genes = unlist(lapply(lapply(strsplit(fafilenew2$name, " "), function(x) x[[1]]), function(x) substr(x, 1, (nchar(x)-2))))
# FIX: collect the matching rows per gene and bind once with do.call(rbind, ...)
# instead of growing the data frame with rbind() inside the loop (O(n^2)) and
# the dummy-first-row newdf() hack that then had to be removed.
seg.gene.hits = lapply(list.of.seg.genes, function(g){
    hits = grep(g, fafilenew$name)
    print(hits) # keep the original diagnostic output
    fafilenew[hits, , drop = FALSE]
})
fafile.seg.genes.extracted = do.call(rbind, seg.gene.hits)
# second whitespace token with a 3-character prefix stripped
fafile.seg.genes.extracted$name = unlist(lapply(lapply(strsplit(fafile.seg.genes.extracted$name, " "), function(x) x[[2]]), function(x) substr(x, 4, nchar(x))))
# drop the 12-character suffix and append a ".1" transcript suffix
fafile.seg.genes.extracted$name = paste(unlist(lapply(fafile.seg.genes.extracted$name, function(x) substr(x, 1, (nchar(x)-12)))), ".1", sep = "")
writefasta(fafile.seg.genes.extracted, "rotation1scripts_v4/processed_data/fasta/unique.seg.dist.genes.full.genonmic.sequence.v3.fa")
blastfilenew = read.table("bioinf/blast/genes.vs.paragon.genome/results.blast/unique.seg.dist.genes.cs.x.para.vs.para.genome.v3.outputfmt6.word_sizev3.blast")
blastfilenew2 = grab.best.groups.of.hits(blastfilenew)
# Length of the longest open reading frame on either strand of `seq`.
# Relies on the project helper max_orf(). Returns the maximum length with
# attributes "max.orf.seq" (the ORF sequence) and "for.or.rev" (strand).
max.orf.len = function(seq){
    max.orf = max_orf(seq, reverse.strand = TRUE)
    g = as.numeric(c(max.orf$ORF.Forward$ORF.Max.Len, max.orf$ORF.Reverse$ORF.Max.Len))
    g2 = max(g)
    # FIX: which(g == g2) has length 2 when both strands tie, which makes the
    # original if() conditions error on R >= 4.3 (length > 1 condition).
    # which.max() returns the first maximum, so the forward strand wins ties —
    # the same effective behaviour the original had on older R versions.
    if (which.max(g) == 1) {
        for.or.rev = "forward"
        attr(g2, "max.orf.seq") = max.orf$ORF.Forward$ORF.Max.Seq
    } else {
        for.or.rev = "reverse"
        attr(g2, "max.orf.seq") = max.orf$ORF.Reverse$ORF.Max.Seq
    }
    attr(g2, "for.or.rev") = for.or.rev
    return(g2)
}
# Longest-ORF length for every extracted sequence (attributes are dropped by unlist()).
orf.lengths = unlist(lapply(bedfa$sequence, max.orf.len))
# Annotate a bedtools extraction data frame with probe names by matching each
# record's genomic start coordinate (offset by +2001) against the start/end
# columns (V9/V10) of a BLAST results table.
add.probe.names.to.bedtools.ex.file = function(bed.tools.df, blastdf){
    out = bed.tools.df
    out[] = lapply(out, as.character)
    # "chr:start-end" -> "start-end"
    out$coords = lapply(strsplit(out[, 1], ":"), function(parts) parts[[2]])
    out[] = lapply(out, as.character)
    # split "start-end" into its two coordinates
    out$startcoord = lapply(strsplit(out$coords, "-"), function(parts) parts[[1]])
    out$endcoord = lapply(strsplit(out$coords, "-"), function(parts) parts[[2]])
    out$genename = ""
    out$startcoord = as.numeric(out$startcoord)
    out$endcoord = as.numeric(out$endcoord)
    # rows of blastdf whose hit start or end equals startcoord + 2001
    hit.rows = unlist(lapply(out$startcoord, function(sc){
        which(blastdf$V9 == (sc + 2001) | blastdf$V10 == (sc + 2001))
    }))
    out$probenames = as.character(blastdf$V1[hit.rows])
    return(out)
}
# Attach probe names (from the `newblast` results) to the extracted sequences.
bedfa2 = add.probe.names.to.bedtools.ex.file(bedfa, newblast)
# Keep only sequences whose longest ORF exceeds 1000 bp (orf.lengths computed above).
bedfa2 = bedfa2[which(orf.lengths > 1000), ] #NEED TO ADD PROBE NAMES TO bedfa; SEE CODE ABOVE
# NOTE(review): max.orf.len() is re-run per sequence here just to pull out the
# "max.orf.seq" attribute — the lengths themselves were already computed above.
bedfa2$max.orf.seq = unlist(lapply(bedfa2$sequence, function(x) attr(max.orf.len(x), "max.orf.seq")))
# Columns 7:8 are taken as probenames + max.orf.seq — TODO confirm the column order.
fastadf = bedfa2[, 7:8]
colnames(fastadf) = c("header", "sequence")
# prepend ">" to each header (p() is a project string-concat helper — presumably paste0-like; verify)
fastadf$header = unlist(lapply(fastadf$header, function(x) p(">", x)))
writefasta(fastadf, "rotation1scripts_v4/processed_data/fasta/bedtools.genomic.orf.bigger.1000.fa")
# ____________________________________________________________________________
# bedtools blast ####
# NOTE(review): the first read.table() result is immediately overwritten by the
# second — the first line appears to be dead code; confirm which file is wanted.
b = read.table("bioinf/blast/genes.vs.paragon.genome/results.blast/bedtools.orf.1000.cs.x.para.vs.para.genome.v3.outputfmt6.word_sizev3.blast")
b = read.table("bioinf/blast/probe.vs.genes.blast/results.blast/bedtools.orfs.refined.cs.x.para.vs.genes.blast")
|
384de27e97dc78b0275b89601caac830768212e9
|
082c09005c40c587ee4bca23b9572794133006ae
|
/Projects Exhibit/D1 CapStoneProj_Hotels/JamesGangavarapu My Project Code.R
|
b2d6233a097f2bd2b4efc2d2a5c96635dc8355dd
|
[] |
no_license
|
jamesrohan/Projects_SampleCode_Exhibit_For_Employers
|
e381def4012f43fef72b84d98294591340c661f6
|
bd1b5765ddcda3f6f130fd6f4d98cc81e739ebd2
|
refs/heads/master
| 2021-01-23T07:09:38.935342
| 2017-09-05T16:22:03
| 2017-09-05T16:22:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,009
|
r
|
JamesGangavarapu My Project Code.R
|
############## 1 Reading CSV file into R #################
# NOTE(review): setwd() to an absolute personal path makes this script
# non-portable; prefer running from the project root with relative paths.
setwd("C:/Users/Nikki/Desktop/James/Data Science Internship/Assignments/Projects/CapStoneProj_Hotels")
# (paste() around a single string is a no-op here)
hotels.df <- read.csv(paste("Cities42.csv", sep=""))
# NOTE(review): attach() is discouraged — several later calls (e.g. the
# aggregate(..., list(CityName), ...) forms) depend on the attached columns.
attach(hotels.df)
############ 2 Summary Stats ############
summary(hotels.df)
library(psych)
describe(hotels.df)
#################### 3 4 5 6 7 #############################
# See if room rent is affected by binary factors such as IsWeekend+IsNewYearEve+IsMetroCity+IsTouristDestination
# +FreeWifi+FreeBreakfast+HasSwimmingPool
# Boxplot of room rent split by the tourist-destination flag.
boxplot(hotels.df$RoomRent~hotels.df$IsTouristDestination,horizontal = TRUE,main = "Room Rent vs Tourist Desti",
xlab = "Room Rent"
,col = (c("green","red")), ylab = "Tourist Desti")
#
# Trimmed data set: drop rooms renting above 100000 so the boxplots are readable.
hotels_RoomRentLessThan.df <- hotels.df[ which(hotels.df$RoomRent <= 50000*2) , ]
View(hotels_RoomRentLessThan.df)
# For each candidate predictor: boxplot of rent by level (trimmed data),
# then a histogram and scatterplot on the full data.
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$IsWeekend,horizontal = TRUE,
main = "Room Rent vs IsWeekend",xlab = "Room Rent",col = (c("green","red")), ylab = "IsWeekend")
hist(hotels.df$IsWeekend)
plot(hotels.df$IsWeekend,hotels.df$RoomRent)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$IsNewYearEve,horizontal = TRUE,
main = "Room Rent vs IsNewYearEve",xlab = "Room Rent",col = (c("green","red")), ylab = "IsNewYearEve")
hist(hotels.df$IsNewYearEve)
plot(hotels.df$IsNewYearEve,hotels.df$RoomRent)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$IsMetroCity,horizontal = TRUE,
main = "Room Rent vs Metro City",xlab = "Room Rent",col = (c("green","red")), ylab = "Metro City")
hist(hotels.df$IsMetroCity)
plot(hotels.df$IsMetroCity,hotels.df$RoomRent)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$IsTouristDestination,horizontal = TRUE,
main = "Room Rent vs IsTouristDestination",xlab = "Room Rent",col = (c("green","red")), ylab = "IsTouristDestination")
hist(hotels.df$IsTouristDestination)
plot(hotels.df$IsTouristDestination,hotels.df$RoomRent)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$FreeWifi,horizontal = TRUE,
main = "Room Rent vs Free Wifi",xlab = "Room Rent",col = (c("green","red")), ylab = "Free Wifi")
hist(hotels.df$FreeWifi)
plot(hotels.df$FreeWifi,hotels.df$RoomRent)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$FreeBreakfast,horizontal = TRUE,
main = "Room Rent vs Free Breakfast",xlab = "Room Rent",col = (c("green","red")), ylab = "Free Breakfast")
hist(hotels.df$FreeBreakfast)
plot(hotels.df$FreeBreakfast,hotels.df$RoomRent)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$HasSwimmingPool,horizontal = TRUE,
main = "Room Rent vs Swimming Pool",xlab = "Room Rent",col = (c("green","red")), ylab = "Has Swimming Pool")
hist(hotels.df$HasSwimmingPool)
plot(hotels.df$HasSwimmingPool,hotels.df$RoomRent)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$StarRating,horizontal = TRUE,
main = "Room Rent vs Star Rating",xlab = "Room Rent",col = (c("green","red")), ylab = "Star Rating")
hist(hotels.df$StarRating)
plot(hotels.df$StarRating,hotels.df$RoomRent)
# NOTE(review): the main= title below says "Star Rating" but the plot shows Airport distance (copy-paste).
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$Airport,horizontal = TRUE,
main = "Room Rent vs Star Rating",xlab = "Room Rent",col = (c("green","red")), ylab = "Airport Dist")
hist(hotels.df$Airport)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$CityName,horizontal = TRUE,
main = "Room Rent vs City",xlab = "Room Rent",col = (c("green","red")), ylab = "City")
plot(hotels.df$CityName)
boxplot(hotels_RoomRentLessThan.df$RoomRent~hotels_RoomRentLessThan.df$CityRank,horizontal = TRUE,
main = "Room Rent vs CityRank",xlab = "Room Rent",col = (c("green","red")), ylab = "CityRank")
hist(hotels.df$CityRank)
# Rent vs airport distance and star rating, on linear and log-log scales.
plot(hotels.df$Airport,hotels.df$RoomRent)
plot(hotels.df$Airport,hotels.df$RoomRent,log = 'xy')
plot(hotels.df$StarRating,hotels.df$RoomRent)
plot(hotels.df$StarRating,hotels.df$RoomRent,log = 'xy')
# Mean room rent grouped by each candidate factor, using the formula
# interface of aggregate() throughout for consistency.
IsWeekend.df <- aggregate(RoomRent ~ IsWeekend, data=hotels.df, mean)
IsWeekend.df
plot(IsWeekend.df)
IsNewYearEve.df <- aggregate(RoomRent ~ IsNewYearEve, data=hotels.df, mean)
IsNewYearEve.df
IsMetroCity.df <- aggregate(RoomRent ~ IsMetroCity, data=hotels.df, mean)
IsMetroCity.df
IsTouristDestination.df <- aggregate(RoomRent ~ IsTouristDestination, data=hotels.df, mean)
IsTouristDestination.df
FreeWifi.df <- aggregate(RoomRent ~ FreeWifi, data=hotels.df, mean)
FreeWifi.df
HasSwimmingPool.df <- aggregate(RoomRent ~ HasSwimmingPool, data=hotels.df, mean)
HasSwimmingPool.df
# FIX: use the bare column name in the formula instead of hotels.df$StarRating
# (mixing $-access with data= produced an awkward grouping column name).
StarRating.df <- aggregate(RoomRent ~ StarRating, data=hotels.df, mean)
StarRating.df
#AirpotDist.df <- aggregate(RoomRent ~ Airport, data=hotels.df, mean)
#AirpotDist.df
# FIX: the original aggregate(RoomRent, list(CityName), data=..., mean) calls
# only worked because of attach(); the formula interface is explicit and
# keeps a readable grouping column name instead of "Group.1".
CityName.df <- aggregate(RoomRent ~ CityName, data=hotels.df, mean)
CityName.df
CityRank.df <- aggregate(RoomRent ~ CityRank, data=hotels.df, mean)
CityRank.df
library(car)
scatterplotMatrix(formula = ~ FreeWifi+HasSwimmingPool+CityRank,
cex=0.6,data=hotels.df)
#RoomRent+IsWeekend+IsNewYearEve+Population+CityRank+IsMetroCity+IsTouristDestination
#RoomRent+IsWeekend+IsNewYearEve+Population+CityRank+IsMetroCity+IsTouristDestination
############ 8 Corrgram ##################
library(corrgram) # install if needed
corrgram(hotels.df, order=FALSE, lower.panel=panel.shade,
upper.panel=panel.pie, text.panel=panel.txt,
main="Corrgram of MBASalaries")
################ 9 Variance-Covariance Matrix ###################3
cov(hotels.df)
var(hotels.df)
######################################################################
######################################################################
######################################################################
######################################################################
######################## Final Report Code ###########################
######################################################################
######################################################################
######################################################################
######################################################################
####### ###### #####
# # # # ## # # # ###### ##### #### ##### ##### # # #### ##### ######
# # ## # # # # # # # # # # # # # # # # # # # #
##### # # # # # # # ###### ##### # # # # # # # # # # # # #####
# # # # # ###### # # # # ##### # # ##### # # # # # # #
# # # ## # # # # # # # # # # # # # # # # # # #
# # # # # # ###### # # ###### # #### # # # ##### #### ##### ######
######################################################################
######################################################################
######################################################################
######################################################################
######################## Final Report Code ###########################
######################################################################
######################################################################
######################################################################
######################################################################
# Summary statistics restricted to tourist-destination hotels.
summary(hotels.df[which(hotels.df$IsTouristDestination=='1') ,])
library(psych)
describe(hotels.df)
# Comparing the hotel price based on city rank
library(lattice)
bwplot(CityRank ~ RoomRent, data=hotels.df, horizontal=TRUE,
xlab = "Room Rent")
# Instead, use the data set trimmed to rents below the cap defined earlier
bwplot(CityRank ~ RoomRent, data=hotels_RoomRentLessThan.df, horizontal=TRUE,
xlab = "Room Rent")
# Taking the log (base 2) of room rents to tame the right skew
logRoomRent.df <- log2(hotels.df$RoomRent)
hotels.df$LogRoomRent <- logRoomRent.df
View(hotels.df)
bwplot(CityRank ~ LogRoomRent, data=hotels.df, horizontal=TRUE,
xlab = "Log Room Rent")
# Comparing the hotel price based on city rank, conditioned on metro-city flag
bwplot(CityRank ~ LogRoomRent | IsMetroCity, data=hotels.df, horizontal=TRUE,
xlab = "LogRoom Rent")
# Comparing the hotel price based on city rank, conditioned on tourist-destination flag
bwplot(CityRank ~ LogRoomRent | IsTouristDestination , data=hotels.df, horizontal=TRUE,
xlab = "LogRoom Rent")
### Hypothesis H0: The average price of room rents is equal for different cities based on rank.
### Hypothesis H1: The average price of room rents is not equal for different cities based on rank.
# Room rent based on whether it is a metro city or not
bwplot(IsMetroCity ~ LogRoomRent , data=hotels.df, horizontal=TRUE,
xlab = "LogRoom Rent")
# Percentage of hotels in metro vs non-metro cities
IsMetro.df <- xtabs(~IsMetroCity,data = hotels.df)
prop.table(IsMetro.df)*100
bwplot(FreeWifi ~ LogRoomRent | IsMetroCity , data=hotels.df, horizontal=TRUE, notch = TRUE,
xlab = "LogRoom Rent")
# Room rent based on HasSwimmingPool, conditioned on IsMetroCity
bwplot(HasSwimmingPool ~ LogRoomRent | IsMetroCity , data=hotels.df, horizontal=TRUE, notch = TRUE,
xlab = "Log Room Rent")
# Room rent with respect to date
bwplot( LogRoomRent ~ Date , data=hotels.df, horizontal=FALSE,
xlab = "Dates")
# with respect to weekend or not
bwplot(IsWeekend ~ LogRoomRent , data=hotels.df, horizontal=TRUE,
xlab = "Log Room Rent")
# with respect to new year's eve
bwplot(IsNewYearEve ~ LogRoomRent , data=hotels.df, horizontal=TRUE,
xlab = "Log Room Rent")
# with respect to tourist destination (original comment said "new year" — copy-paste)
bwplot(IsTouristDestination ~ LogRoomRent , data=hotels.df, horizontal=TRUE,
xlab = "Log Room Rent")
# tourist destination conditioned on metro-city flag
bwplot(IsTouristDestination ~ LogRoomRent | IsMetroCity , data=hotels.df, horizontal=TRUE,
xlab = "Log Room Rent")
# Distance to airport vs LogRoomRent
hist(hotels.df$Airport)
plot(hotels.df$Airport,hotels.df$LogRoomRent, log = 'xy')
plot(hotels.df$Airport[hotels.df$Airport<=20],hotels.df$LogRoomRent[hotels.df$Airport<=20])
# FIX: aggregate on the data-frame column LogRoomRent; the original formula
# used lowercase logRoomRent, which resolved to the standalone global vector
# created earlier and only worked by accident.
airportDist.df <- aggregate(LogRoomRent ~ Airport, data=hotels.df, mean)
airportDist.df
plot(airportDist.df)
# FIX: regression orientation — the plot has Airport on x and LogRoomRent on y,
# so the fitted line must be LogRoomRent ~ Airport (the original was inverted).
abline(lm(LogRoomRent ~ Airport, data = airportDist.df))
library(car)
scatterplotMatrix(formula = ~ Airport+LogRoomRent,
cex=0.6,data=hotels.df)
# Free Wi-Fi vs room rent
hist(hotels.df$FreeWifi)
HotelWithFreeWifi.df <- xtabs(~hotels.df$FreeWifi)
prop.table(HotelWithFreeWifi.df)*100
bwplot(FreeWifi ~ LogRoomRent , data=hotels.df, horizontal=TRUE,
xlab = "Log Room Rent")
library(car)
scatterplotMatrix(formula = ~ FreeWifi+LogRoomRent,
cex=0.6,data=hotels.df)
# Free breakfast vs room rent
bwplot(FreeBreakfast ~ LogRoomRent , data=hotels.df, horizontal=TRUE,
xlab = "Log Room Rent")
HotelWithFreeBreakfast.df <- xtabs(~hotels.df$FreeBreakfast)
prop.table(HotelWithFreeBreakfast.df)*100
library(car)
scatterplotMatrix(formula = ~ FreeBreakfast+LogRoomRent,
cex=0.6,data=hotels.df)
# Hotel capacity vs room rent (same column fix as above)
hotelCap.df <- aggregate(LogRoomRent ~ HotelCapacity, data=hotels.df, mean)
hotelCap.df
plot(hotelCap.df)
abline(lm(LogRoomRent ~ HotelCapacity, data = hotelCap.df))
hist(hotels.df$HotelCapacity)
library(car)
scatterplotMatrix(formula = ~ HotelCapacity+LogRoomRent,
cex=0.6,data=hotels.df)
#########################Hypothesis Testing#############################
#2.3.1
library(car)
scatterplotMatrix(formula = ~ CityRank+LogRoomRent,
cex=0.6,data=hotels.df)
# NOTE(review): t.test() requires the grouping variable to have exactly two
# levels — if CityRank has more than two distinct values this call errors; confirm.
t.test(hotels.df$LogRoomRent~hotels.df$CityRank, alternative = c("two.sided"), var.equal= TRUE )
# NOTE(review): chi-square on a table of summed log-rents is statistically
# questionable (chisq.test expects counts) — review the methodology.
RoomRent_CityRank <- xtabs(hotels.df$LogRoomRent~hotels.df$CityRank)
RoomRent_CityRank
chisq.test(RoomRent_CityRank)
#2.3.2
library(car)
scatterplotMatrix(formula = ~ IsMetroCity+LogRoomRent,
cex=0.6,data=hotels.df)
describe(hotels.df$IsMetroCity)
describe(hotels.df$LogRoomRent)
# Welch two-sample t-test: log rent by metro-city flag (binary, so valid).
t.test(hotels.df$LogRoomRent~hotels.df$IsMetroCity, alternative = c("two.sided"), var.equal= FALSE)
#2.3.3
library(car)
scatterplotMatrix(formula = ~ HotelCapacity+LogRoomRent,
cex=0.6,data=hotels.df)
RoomRent_HotelCap <- xtabs(hotels.df$LogRoomRent~hotels.df$HotelCapacity)
RoomRent_HotelCap
chisq.test(RoomRent_HotelCap)
#2.3.4
library(car)
scatterplotMatrix(formula = ~ IsTouristDestination+LogRoomRent,
cex=0.6,data=hotels.df)
describe(hotels.df$LogRoomRent)
describe(hotels.df$IsTouristDestination)
t.test(hotels.df$LogRoomRent~hotels.df$IsTouristDestination, alternative = c("two.sided"), var.equal= FALSE)
############## Linear Models #######################
# Full model: all candidate predictors of log room rent.
lm1 <- lm(LogRoomRent ~ IsWeekend+IsNewYearEve+CityRank+
IsMetroCity+IsTouristDestination
+Airport+FreeWifi+FreeBreakfast+HotelCapacity+
HasSwimmingPool+StarRating,data = hotels.df)
summary(lm1)
# Reduced model: drops IsWeekend and IsMetroCity.
lm2 <- lm(LogRoomRent ~ IsNewYearEve+CityRank+IsTouristDestination
+Airport+FreeBreakfast+HotelCapacity+HasSwimmingPool
+FreeWifi+StarRating,data = hotels.df)
summary(lm2)
confint(lm1)
confint(lm2)
# NOTE(review): var() on the whole data frame errors if any column (e.g.
# CityName) is non-numeric — restrict to numeric columns; see section 9 above.
var(hotels.df)
cor(hotels.df$LogRoomRent,hotels.df$Population)
cor(hotels.df$LogRoomRent, hotels.df$HotelCapacity)
library(corrgram)
corrgram(hotels.df, order=FALSE, lower.panel=panel.shade,
upper.panel=panel.pie, text.panel=panel.txt,
main="Corrgram of Hotel Data")
# Hotel-attribute-only model.
lm3<- lm(LogRoomRent ~ HotelCapacity+HasSwimmingPool
+StarRating,data = hotels.df)
summary(lm3)
# Reduced model without HotelCapacity.
lm4 <- lm(LogRoomRent ~ IsNewYearEve+CityRank+IsTouristDestination
+Airport+FreeBreakfast+HasSwimmingPool
+FreeWifi+StarRating,data = hotels.df)
summary(lm4)
|
3558a7bc32239e47b4a918a34a698ba698481a2e
|
3236820dba5b12fd922cc5178a1ed2aed45a3eb1
|
/BayesS_ingenuo.R
|
21a8df04193453c7419c6c069c4dbbf06cf11a23
|
[] |
no_license
|
PauOrtiz/R
|
378ef008c9be74fca52db6546679875f0302f43e
|
d75ee532807888d141d06736a99ce7a300873c18
|
refs/heads/master
| 2020-03-19T07:05:56.066824
| 2018-10-02T14:51:12
| 2018-10-02T14:51:12
| 136,084,895
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
BayesS_ingenuo.R
|
# Naive Bayes classification of the iris dataset using e1071.
# FIX: guard install.packages() so the package is installed only when missing,
# instead of being reinstalled on every run of the script.
if (!requireNamespace("e1071", quietly = TRUE)) install.packages("e1071")
library(e1071)
data(iris)
View(iris)
# Fit a naive Bayes classifier predicting Species from all other columns.
clasificador <- naiveBayes(Species ~ ., data = iris)
clasificador
# Confusion table: predicted species vs actual species (column 5).
predicción <- table(predict(clasificador, iris), iris[, 5])
predicción
# Class prior counts learned by the model.
clasificador$apriori
# Mosaic plot of the confusion table.
plot(predicción, col = hcl(c(120, 10, 44)))
|
b2909667a78f7f530809f10cd2e0271f9102a80b
|
53090103043ba3e5b92aad4c42c9f85774656719
|
/DavidGrinan/ejercicios 4-8/machine-learning-ex7/ex7/ex7.R
|
6d2202216f1b7f5b3bf771b861768b0a732dcb4e
|
[] |
no_license
|
nanirg/SharedCourseraR
|
2dc0b767eed463e4dc55dcd48b1023a2fc5d151b
|
4db5e52e60f5c241b33b1824246723be34e9ff30
|
refs/heads/master
| 2021-01-17T21:22:18.602596
| 2017-03-10T12:37:33
| 2017-03-10T12:37:33
| 84,178,499
| 0
| 1
| null | 2017-03-07T10:01:00
| 2017-03-07T09:10:37
|
Matlab
|
UTF-8
|
R
| false
| false
| 920
|
r
|
ex7.R
|
# Exercise 7: K-means clustering (Coursera ML exercise ported to R).
# External dependencies: the R.matlab package for reading .mat files, plus the
# sourced helper scripts, all expected in the working directory.
library("R.matlab")

data <- readMat("ex7data2.mat")
X <- data$X

# Assign each example to the closest of K = 3 hand-picked centroids
source("findClosestCentroids.R")
K <- 3
initial_centroids <- matrix(c(3, 3, 6, 2, 8, 5), 3, 2, byrow = TRUE)
idx <- findClosestCentroids(X, initial_centroids)

###### Compute Means: recompute centroids as the mean of assigned points
source("computeCentroids.R")
centroids <- computeCentroids(X, idx, K)

#### K means clustering for a fixed number of iterations
K <- 3
max_iters <- 10
source("runkMeans.R")
kMean <- runkMeans(X, initial_centroids, max_iters, FALSE)
centroids <- kMean$centroids  # fixed typo: was kMean$centriods (returned NULL)
idx <- kMean$idx

########## pixel compression: cluster image pixels into K = 16 colours
data <- readMat("bird_small.mat")
A <- data$A
A <- A / 255  # scale RGB values to [0, 1]
img_size <- dim(A)
X <- matrix(A, img_size[1] * img_size[2], 3)
K <- 16
max_iters <- 10
source("kMeansInitCentroids.R")
initial_centroids <- kMeansInitCentroids(X, K)
kMean <- runkMeans(X, initial_centroids, max_iters)
centroids <- kMean$centroids
idx <- kMean$idx
######## compress
#########
|
b0c68222d9337c3439d242dc59b5b38f7781c8f0
|
b9db037ee7bc2ebf9c228ad1f66fecabccfa70be
|
/man/connected_matrix.Rd
|
85c6f9eb333a36b349f804865ef90fe6e7dcfa44
|
[] |
no_license
|
IsaakBM/prioritizr
|
924a6d8dcc7c8ff68cd7f5a2077de2fa1f300fe7
|
1488f8062d03e8736de74c9e7803ade57d6fcc29
|
refs/heads/master
| 2020-12-10T06:23:19.437647
| 2019-12-22T00:04:20
| 2019-12-22T00:04:20
| 233,524,401
| 1
| 0
| null | 2020-01-13T06:13:19
| 2020-01-13T06:13:18
| null |
UTF-8
|
R
| false
| true
| 3,581
|
rd
|
connected_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connected_matrix.R
\name{connected_matrix}
\alias{connected_matrix}
\alias{connected_matrix.Raster}
\alias{connected_matrix.SpatialPolygons}
\alias{connected_matrix.SpatialLines}
\alias{connected_matrix.SpatialPoints}
\alias{connected_matrix.default}
\title{Connected matrix}
\usage{
connected_matrix(x, ...)
\method{connected_matrix}{Raster}(x, directions = 4L, ...)
\method{connected_matrix}{SpatialPolygons}(x, ...)
\method{connected_matrix}{SpatialLines}(x, ...)
\method{connected_matrix}{SpatialPoints}(x, distance, ...)
\method{connected_matrix}{default}(x, ...)
}
\arguments{
\item{x}{\code{\link[raster]{Raster-class}} or
\code{\link[sp]{Spatial-class}} object. Note that if \code{x} is a
\code{\link[raster]{Raster-class}} object then it must have only one
layer.}
\item{...}{not used.}
\item{directions}{\code{integer} If \code{x} is a
\code{\link[raster]{Raster-class}} object, the number of directions
in which cells should be connected: 4 (rook's case), 8 (queen's case),
16 (knight and one-cell queen moves), or "bishop" to connect cells with
one-cell diagonal moves.}
\item{distance}{\code{numeric} If \code{x} is a
\code{\link{SpatialPoints-class}} object, the distance that planning units
have to be within in order to qualify as being connected.}
}
\value{
\code{\link[Matrix]{dsCMatrix-class}} object.
}
\description{
Create a matrix showing which planning units are spatially connected to
each other.
}
\details{
This function returns a \code{\link[Matrix]{dsCMatrix-class}} sparse
matrix. Cells along the off-diagonal indicate if two planning units are
connected. Cells along the diagonal are zero to reduce memory consumption.
Note that for \code{\link[raster]{Raster-class}} arguments to \code{x},
pixels with \code{NA} have zeros in the returned object to reduce
memory consumption and be consistent with \code{\link{boundary_matrix}},
and \code{\link{connectivity_matrix}}.
}
\examples{
# load data
data(sim_pu_raster, sim_pu_polygons, sim_pu_lines, sim_pu_points)
# create connected matrix using raster data
## crop raster to 9 cells
r <- crop(sim_pu_raster, c(0, 0.3, 0, 0.3))
## make connected matrix
cm_raster <- connected_matrix(r)
# create connected matrix using polygon data
## subset 9 polygons
ply <- sim_pu_polygons[c(1:2, 10:12, 20:22), ]
## make connected matrix
cm_ply <- connected_matrix(ply)
# create connected matrix using polygon line
## subset 9 lines
lns <- sim_pu_lines[c(1:2, 10:12, 20:22), ]
## make connected matrix
cm_lns <- connected_matrix(lns)
## create connected matrix using point data
## subset 9 points
pts <- sim_pu_points[c(1:2, 10:12, 20:22), ]
# make connected matrix
cm_pts <- connected_matrix(pts, distance = 0.1)
# plot data and the connected matrices
\donttest{
par(mfrow = c(4,2))
## plot raster and connected matrix
plot(r, main = "raster", axes = FALSE, box = FALSE)
plot(raster(as.matrix(cm_raster)), main = "connected matrix", axes = FALSE,
box = FALSE)
## plot polygons and connected matrix
plot(ply, main = "polygons", axes = FALSE, box = FALSE)
plot(raster(as.matrix(cm_ply)), main = "connected matrix", axes = FALSE,
     box = FALSE)
## plot lines and connected matrix
plot(lns, main = "lines", axes = FALSE, box = FALSE)
plot(raster(as.matrix(cm_lns)), main = "connected matrix", axes = FALSE,
     box = FALSE)
## plot points and connected matrix
plot(pts, main = "points", axes = FALSE, box = FALSE)
plot(raster(as.matrix(cm_pts)), main = "connected matrix", axes = FALSE,
     box = FALSE)
}
}
|
0e78effd05d9962bf93effeb0f84c0919e53f549
|
3afb3135c2db4945f2c9b8695e63ab51da725e27
|
/R/FastExpected.R
|
3d7e42a8c14a41c05781899468e52e153f7bcc39
|
[] |
no_license
|
YeeJeremy/rcss
|
96aef85de8258659dea09d301f22ce3acde249c1
|
8ae215b6e74cbd5d8a288706057b363e4955bba9
|
refs/heads/master
| 2020-12-25T08:38:28.263763
| 2018-01-18T14:31:31
| 2018-01-18T14:31:31
| 59,882,147
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
FastExpected.R
|
## Copyright 2017 <Jeremy Yee> <jeremyyee@outlook.com.au>
## Expected value function using conditional expectation matrices
################################################################################
## Thin R wrapper around the compiled routine '_rcss_FastExpected' in the
## rcss package.
## NOTE(review): arguments are forwarded in the order
## (grid, value, r_index, disturb, weight, smooth), which differs from this
## function's formal argument order -- presumably this matches the C++
## signature; confirm against the package's src/ before changing anything.
FastExpected <- function(grid, value, disturb, weight, r_index, smooth = 1) {
    .Call('_rcss_FastExpected', PACKAGE = 'rcss', grid, value, r_index,
          disturb, weight,smooth)
}
|
34acea1faf9b88148ccc727d8ff7b74003798f3a
|
41be39c34b0437295d56e6d279560128041cad78
|
/to_cluster/anominate_example.R
|
31f7590df4f5787654c189e461ea5fed8f81979c
|
[] |
no_license
|
saudiwin/ARP_Research
|
1172e48c1b20694a814927af4ab6bbc0d97a41ba
|
c71f20bcd9ecdd95cf2a6814c394ce9082507444
|
refs/heads/master
| 2022-06-13T21:03:25.399989
| 2022-06-07T11:31:55
| 2022-06-07T11:31:55
| 61,546,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
anominate_example.R
|
# Alpha-NOMINATE example: ideal-point estimation for the 112th Senate.
# NOTE(review): setwd() plus a local .dta file makes this script
# machine-specific; confirm the path before running elsewhere.
setwd("~/Box Sync/Measurement/2014")
library(anominate)
library(foreign)
sen <- read.dta("sen112kh.dta")
# Drop the first 9 metadata columns; the remaining columns are roll-call votes
senvotes <- sen[,-c(1:9)]
rownames(senvotes) <- paste(sen$name,sen$lstate,sep=" ")
# Build a rollcall object: 1 = yea, 6 = nay, 0 = not in legislature, 7/9 = missing
senvotes.rc <- rollcall(senvotes, yea=1, nay=6, notInLegis=0, missing=c(7,9), legis.names=paste(sen[,9],sen[,5]))
# One-dimensional alpha-NOMINATE fit
sen.nominate <- anominate(senvotes.rc, dims=1)
|
2ab0a93460d158f511ad667492988c8d6774056c
|
8f7d16a98769da225b1c40046a7fbc08383ab143
|
/Simulation_Ideas/geneModel_code/sim_functions.R
|
1bfafac13662f175e35ef79bcf1600b00b01e061
|
[] |
no_license
|
hheiling/deconvolution
|
de95073d9733f57499e67300438dbc6cbdb3c72b
|
aa27b899061d8b576be56fda7825db0f41bac223
|
refs/heads/master
| 2021-07-12T02:45:45.267918
| 2020-09-14T19:35:07
| 2020-09-14T19:35:07
| 201,944,813
| 0
| 1
| null | 2020-03-10T14:13:03
| 2019-08-12T14:18:34
|
HTML
|
UTF-8
|
R
| false
| false
| 31,550
|
r
|
sim_functions.R
|
# Functions to simulate gene counts, isoform probability distributions, and exon set counts
#-----------------------------------------------------------------------------#
# Create gene-level output for all J cell types #
#-----------------------------------------------------------------------------#
# Create gene_level output for all three cell types
# Simulate gene-level counts for all J cell types.
#   total_cts:  n x J matrix of per-sample total read counts
#   gene_alpha: named numeric vector of Dirichlet alpha parameters, one per gene
#   seed:       RNG seed (set once for the whole simulation)
# Returns a list with the per-gene probability matrix (p_mat, genes x n*J),
# the simulated count matrix (ct_mat), the gene names, and one negative
# binomial dispersion per sample (theta).
# NOTE(review): rdirichlet() and str_c() come from packages attached elsewhere
# (e.g. gtools/MCMCpack and stringr); neither is loaded in this file.
gene_level = function(total_cts, gene_alpha, seed){
  set.seed(seed)
  # Define variables
  n = nrow(total_cts)
  J = ncol(total_cts)
  # Sample labels: columns are grouped by cell type, then sample index
  CT_names = rep(1:J, each = n)
  ref_num = rep(1:n, times = J)
  gene_names = names(gene_alpha)
  # Matrix of counts for each gene
  count = matrix(NA, nrow = length(gene_alpha), ncol = n*J)
  # Sample from UCLA Dirichlet distribution
  # (one gene-probability vector per sample; alpha presumably estimated from
  # the UCLA reference data -- confirm upstream)
  prob = t(rdirichlet(n = n*J, alpha = gene_alpha))
  rownames(prob) = gene_names
  colnames(prob) = str_c("CT",CT_names,":","ref_",ref_num)
  # Using above prob vec, sample total_cts from multinomial distribution
  for(j in 1:J){
    for(i in 1:n){
      count[,i+n*(j-1)] = rmultinom(n = 1, size = total_cts[i,j], prob = prob[,i+n*(j-1)])
    }
  }
  rownames(count) = rownames(prob)
  colnames(count) = colnames(prob)
  # Vector of dispersion parameters
  # For each sample, assign a constant dispersion parameter (applies to all genes)
  ## Parameterization of interest: mu + mu^2 / theta
  theta = sample(90:120, n*J, replace = TRUE)
  names(theta) = colnames(prob)
  gene_output = list(p_mat = prob, ct_mat = count, gene_names = names(gene_alpha),
                     theta = theta)
  return(gene_output)
}
#-----------------------------------------------------------------------------#
# Select genes for differential expression and differential isoform usage #
#-----------------------------------------------------------------------------#
# Randomly select genes for differential expression (diffExp) and differential
# isoform usage (diffUsg) among genes that are (a) expressed above the
# per-sample 25th percentile in at least 90% of samples and (b) have at most
# 15 annotated transcripts.
#   CT1_counts:   genes x n count matrix for the reference cell type
#   nTE_filtered: data frame with columns geneId and nT (transcript count)
#   num_diff:     total genes to flag; split evenly between diffExp and diffUsg
#   seed:         RNG seed
# Returns a genes x 2 0/1 indicator matrix with columns "diffExp", "diffUsg".
diff_genes = function(CT1_counts, nTE_filtered, num_diff = 200, seed){
  set.seed(seed)
  # Specify vars
  n = ncol(CT1_counts)
  num_genes = nrow(CT1_counts)
  gene_names = rownames(CT1_counts)
  # Check inputs
  if(num_diff %% 2 != 0){
    # Note: will split num_diff into diff expression and diff usage.
    stop("Number of genes selected for differential expression must be divisible by 2")
  }
  counts_subset = CT1_counts[which(gene_names %in% nTE_filtered$geneId),]
  # Genes with a manageable number of transcripts (<= 15 isoforms)
  genes_nT_limit = nTE_filtered$geneId[which(nTE_filtered$nT <= 15)]
  # Find genes of interest from CT1 output with counts above first p25 of counts (wrt 1000 genes of interest)
  q1 = apply(counts_subset, 2, function(x) quantile(x, probs = 0.25))
  above_q1 = matrix(NA, nrow = nrow(counts_subset), ncol = ncol(counts_subset))
  for(j in 1:ncol(counts_subset)){
    above_q1[,j] = ifelse(counts_subset[,j] > q1[j], 1, 0)
  }
  # Keep genes above the per-sample cutoff in at least 90% of the samples
  gene_expCut = rownames(counts_subset)[which(rowSums(above_q1) >= n*0.9)]
  # Select genes for differential expression
  gene_choices = intersect(gene_expCut,genes_nT_limit)
  cat("number of gene_choices after expression level and isoform number restriction: ",
      length(gene_choices), "\n")
  all_diff = sample(gene_choices, num_diff, replace = FALSE)
  # Half the flagged genes get differential expression, the rest get
  # differential isoform usage
  diffExp = sample(all_diff, num_diff/2, replace = FALSE)
  diffUsg = all_diff[-which(all_diff %in% diffExp)]
  genes_diff = matrix(NA, nrow = num_genes, ncol = 2)
  genes_diff[,1] = ifelse(gene_names %in% diffExp, 1, 0)
  genes_diff[,2] = ifelse(gene_names %in% diffUsg, 1, 0)
  colnames(genes_diff) = c("diffExp","diffUsg")
  rownames(genes_diff) = gene_names
  return(genes_diff)
}
#-----------------------------------------------------------------------------#
# Apply fold changes to genes for differential expression for specified genes
# from diff_genes() output
#-----------------------------------------------------------------------------#
# Apply log2 fold changes to the genes flagged for differential expression in
# a single cell type, then rescale the affected genes so that their share of
# that cell type's library is unchanged.
#   gene_counts:    genes x (n*J) count matrix, samples grouped by cell type
#   n:              number of samples per cell type
#   J:              number of cell types
#   CT_diffExp:     the one cell type (in 1..J) whose counts are modified
#   diff_genes_mat: 0/1 indicator matrix from diff_genes() with rownames and
#                   a "diffExp" column
#   propUp:         proportion of flagged genes that are up-regulated
#   seed:           RNG seed
# Returns gene_counts with the CT_diffExp block replaced by the fold-changed,
# proportion-adjusted, rounded counts.
diff_exp = function(gene_counts, n, J, CT_diffExp = 2, diff_genes_mat, propUp, seed){
  set.seed(seed)
  # Specify vars
  num_genes = nrow(gene_counts)
  all_genes = rownames(gene_counts)
  # Checks (length first: a vector CT_diffExp would make the %in% test a
  # non-scalar if() condition, which errors in R >= 4.2)
  if(length(CT_diffExp) > 1){
    stop("Function only set up to handle one cell type with diff expression")
  }else if(!(CT_diffExp %in% 1:J)){
    stop("CT_diffExp must be a number in 1 to J")
  }
  # Genes selected for differential expression.
  # drop = FALSE keeps a 1-row matrix (and its rownames) if only one gene
  # is flagged.
  diff_idx = which(diff_genes_mat[,"diffExp"] == 1)
  gene_choices = rownames(diff_genes_mat[diff_idx, , drop = FALSE])
  num_diffExp = sum(diff_genes_mat[,"diffExp"])
  num_upExp = round(num_diffExp*propUp)
  num_downExp = num_diffExp - num_upExp
  # Initialize log2 fold change matrix to be all 0
  fc = matrix(0, nrow = num_genes, ncol = 1)
  # Up-regulated genes get log2 FC in [log2(1.6), log2(2)], down-regulated
  # genes get the mirrored negative range.
  up_diffExp = sample(gene_choices, num_upExp, replace = FALSE)
  # setdiff also handles propUp = 0 correctly (the original negative-index
  # form returned an empty vector when nothing was up-regulated)
  down_diffExp = setdiff(gene_choices, up_diffExp)
  fc[which(all_genes %in% up_diffExp),1] = runif(n = num_upExp, min = log2(1.6), max = log2(2))
  fc[which(all_genes %in% down_diffExp),1] = runif(n = num_downExp, min = -log2(2),
                                                   max = -log2(1.6))
  rownames(fc) = all_genes
  colnames(fc) = paste0("CT",CT_diffExp)
  # Apply fold change matrix to gene counts of cell type of interest:
  # multiply the CT_diffExp counts by 2^(fc + rnorm(., mean = 0, sd = 0.05))
  gene_cts = gene_counts[,(1+n*(CT_diffExp-1)):(n*CT_diffExp), drop = FALSE]
  fc_rand = matrix(fc, nrow = nrow(fc), ncol = n) + matrix(rnorm(n = nrow(fc)*n, mean = 0, sd = 0.05), nrow = nrow(fc))
  gene_cts_fc = gene_cts * 2^(fc_rand)
  # Ratio of the flagged genes' library share before vs. after fold change;
  # used below to keep their overall proportion of counts unchanged.
  propA = colSums(gene_cts[which(all_genes %in% gene_choices), , drop = FALSE]) / colSums(gene_cts)
  propB = colSums(gene_cts_fc[which(all_genes %in% gene_choices), , drop = FALSE]) / colSums(gene_cts_fc)
  prop_R = propA / propB
  print("proportion ratio")
  print(prop_R)
  # Rescale only the fold-changed rows
  gene_cts_fc[which(fc != 0),] = gene_cts_fc[which(fc != 0), , drop = FALSE] *
    matrix(prop_R, nrow = num_diffExp, ncol = n, byrow = TRUE)
  gene_counts_new = gene_counts
  gene_counts_new[,(1+n*(CT_diffExp-1)):(n*CT_diffExp)] = round(gene_cts_fc)
  colnames(gene_counts_new) = colnames(gene_counts)
  rownames(gene_counts_new) = rownames(gene_counts)
  return(gene_counts_new)
}
#-----------------------------------------------------------------------------#
# Determine Ending Fold Change b/w CT ref and CT j #
#-----------------------------------------------------------------------------#
# Compute the realised per-sample and average fold change between the new
# (fold-changed) and original counts for the genes flagged as differentially
# expressed.
#   gene_counts_new:  counts for cell type j after the fold change was applied
#   gene_counts_orig: counts for cell type j before the fold change
#   diff_genes_mat:   0/1 indicator matrix with a "diffExp" column
# Returns a list with the per-sample fold-change matrix (fc_indiv) and its
# row means (fc_avg).
calc_diffExp = function(gene_counts_new, gene_counts_orig, diff_genes_mat){
  # Rows associated with genes selected for differential expression.
  # drop = FALSE keeps 1-row matrices when exactly one gene is flagged;
  # without it rowMeans() below errors on the resulting vector.
  rows = which(diff_genes_mat[,"diffExp"] == 1)
  # Original gene counts for CT j before fold change applied
  orig = gene_counts_orig[rows, , drop = FALSE]
  # New gene counts for CT j after fold change and proportion adjustment
  new = gene_counts_new[rows, , drop = FALSE]
  fc_indiv = new / orig
  fc_avg = rowMeans(fc_indiv)
  return(list(fc_indiv = fc_indiv, fc_avg = fc_avg))
}
#-----------------------------------------------------------------------------#
# Simulate exon set negative binomial means #
#-----------------------------------------------------------------------------#
# Isoform and exon set information
# Note: gene clusters chosen so number isoforms I >= 3 for all genes
# iso_dist = "uniform": all probabilities will be close to 1/I (I = number isoforms)
# Note: this type only uses max of alphaRange
# iso_dist = "outlier": one probability will be relatively high and the remaining prob
# will be approx. evenly distributed among the remaining I-1 isoforms
# Note: this type uses both min and max of alphaRange
# iso_dist = "paired": two probabilities will be relatively high and the remaining
# probs will be approx. evenly distributed among the remaining I-2 isoforms
# Note: this type uses both min and max of alphaRange
# Simulate isoform usage and exon-set negative binomial means for each gene
# cluster of interest.
#   genes_info:   genes x n matrix of simulated gene-level counts (rows match
#                 nTE_filtered$geneId, in order)
#   nTE_filtered: data frame with columns clustID and geneId
#   iso_dist:     per-gene label ("uniform", "outlier", or "paired") naming
#                 the shape of the Dirichlet over isoforms; assumed to be
#                 named by gene (names(iso_dist) is used for lookup)
#   alphaRange:   c(min, max) Dirichlet alpha values used to build each shape
#   EffLen_info:  per-cluster list carrying X (exon sets x isoforms effective
#                 length matrix), candiIsoform, and ExonSetLabels
#   seed:         RNG seed
# Returns, per cluster: the alpha vector (iso_alpha), the isoform probability
# matrix rho (isoforms x samples), the exon-set NB mean matrix mu, and the
# exon set labels.
# NOTE(review): the default 'seed = seed' is self-referential and errors if
# seed is not supplied explicitly -- effectively a required argument.
# NOTE(review): rdirichlet() and str_c() come from packages attached elsewhere.
iso_exon_info = function(genes_info, nTE_filtered, iso_dist,
                         alphaRange, EffLen_info,
                         seed = seed){
  set.seed(seed)
  # names of 1,000 clusters of interest used in simulation
  clust_names = nTE_filtered$clustID
  # names of genes of interest
  ## note: rows of genes_info matrix restricted to genes in nTE_filtered object
  gene_names = rownames(genes_info)
  # Number samples
  n = ncol(genes_info)
  # Check
  if(length(gene_names) != length(clust_names)){
    stop("nrow of genes_info does not match nrow of nTE_filtered")
  }
  # Check iso_dist
  iso_dist_options = unique(iso_dist)
  if(!all(iso_dist_options %in% c("uniform","outlier","paired"))){
    stop("iso_dist elements must be one of 'uniform', 'outlier', or 'paired'")
  }
  output = list()
  for(clust in clust_names){
    # name of gene associated with cluster
    gene = nTE_filtered$geneId[which(clust_names == clust)]
    # vector of counts for gene simulated in gene_level() function for n samples
    gene_ct = genes_info[which(gene_names == gene),]
    # Effective length matrix - ExI (num exon sets x num isoforms)
    X = EffLen_info[[clust]]$X
    # number isoforms
    I = ncol(X)
    # dirichlet alpha parameters for isoforms:
    # "uniform" -> all equal; "outlier" -> last isoform dominant;
    # "paired" -> last two isoforms dominant
    dir_dist = iso_dist[which(names(iso_dist) == gene)]
    if(dir_dist == "uniform"){
      alpha = rep(alphaRange[2], times = I)
    }else if(dir_dist == "outlier"){
      alpha = c(rep(alphaRange[1], times = (I-1)), alphaRange[2])
    }else if(dir_dist == "paired"){
      alpha = c(rep(alphaRange[1], times = (I-2)), rep(alphaRange[2], times = 2))
    }
    # isoform probability matrix - Ixn (col = isoform probability vector associated with sample i)
    rho = t(rdirichlet(n = n, alpha = alpha))
    candiIsoform = EffLen_info[[clust]]$candiIsoform
    rownames(rho) = colnames(candiIsoform)
    colnames(rho) = str_c("ref_",1:n)
    # scaling factor for gene (total gene count / total expected exon signal)
    r_g = numeric(n)
    # coefficient for mu_g = X_g %*% beta_g
    beta = matrix(NA, nrow = I, ncol = n)
    for(i in 1:n){
      r_g[i] = gene_ct[i] / sum(X %*% rho[,i])
      beta[,i] = rho[,i] * r_g[i]
    }
    # Find exon sets corresponding to rows of X
    exon_sets = EffLen_info[[clust]]$ExonSetLabels
    # negative binomial means for the exon sets within cluster
    # result: each col = neg bin means for sample i of n samples,
    # each row corresponds with (possible) exon sets
    mu = X %*% beta
    rownames(mu) = exon_sets
    colnames(mu) = str_c("ref_",1:n)
    output[[clust]] = list(iso_alpha = alpha, rho = rho, mu = mu, exon_sets = exon_sets)
  }
  return(output)
}
#-----------------------------------------------------------------------------#
# Simulate exon set negative binomial means - take 2 #
#-----------------------------------------------------------------------------#
# Isoform and exon set information
# Note: gene clusters chosen so number isoforms I >= 3 for all genes
# iso_dist = "uniform": all probabilities will be close to 1/I (I = number isoforms)
# Note: this type only uses max of alphaRange
# iso_dist = "outlier": one probability will be relatively high and the remaining prob
# will be approx. evenly distributed among the remaining I-1 isoforms
# Note: this type uses both min and max of alphaRange
# iso_dist = "paired": two probabilities will be relatively high and the remaining
# probs will be approx. evenly distributed among the remaining I-2 isoforms
# Note: this type uses both min and max of alphaRange
# Note: In this situation, the isoforms with the highest alpha are different
# than in the original iso_exon_info() function
# Variant of iso_exon_info() in which the "paired" shape puts the two large
# Dirichlet alphas on the FIRST two isoforms (iso_exon_info() puts them on the
# last two). See iso_exon_info() for argument details.
#
# Fixes relative to the original:
#   * the iso_dist validity check accepted "outlier3" even though no branch
#     below builds an alpha vector for it, so alpha would be silently reused
#     from the previous cluster iteration; "outlier3" is now rejected up front
#     (the error message already listed only the three supported shapes).
#   * the self-referential default 'seed = seed' (which errors whenever seed
#     is missing) is replaced by a plain required argument.
# NOTE(review): rdirichlet() and str_c() come from packages attached elsewhere
# (e.g. gtools/MCMCpack and stringr).
iso_exon_info2 = function(genes_info, nTE_filtered, iso_dist,
                          alphaRange, EffLen_info,
                          seed){
  set.seed(seed)
  # names of 1,000 clusters of interest used in simulation
  clust_names = nTE_filtered$clustID
  # names of genes of interest
  ## note: rows of genes_info matrix restricted to genes in nTE_filtered object
  gene_names = rownames(genes_info)
  # Number samples
  n = ncol(genes_info)
  # Check
  if(length(gene_names) != length(clust_names)){
    stop("nrow of genes_info does not match nrow of nTE_filtered")
  }
  # Check iso_dist: only shapes with a matching branch below are allowed
  iso_dist_options = unique(iso_dist)
  if(!all(iso_dist_options %in% c("uniform","outlier","paired"))){
    stop("iso_dist elements must be one of 'uniform', 'outlier', or 'paired'")
  }
  output = list()
  for(clust in clust_names){
    # name of gene associated with cluster
    gene = nTE_filtered$geneId[which(clust_names == clust)]
    # vector of counts for gene simulated in gene_level() function for n samples
    gene_ct = genes_info[which(gene_names == gene),]
    # Effective length matrix - ExI (num exon sets x num isoforms)
    X = EffLen_info[[clust]]$X
    # number isoforms
    I = ncol(X)
    # dirichlet alpha parameters for isoforms
    dir_dist = iso_dist[which(names(iso_dist) == gene)]
    if(dir_dist == "uniform"){
      alpha = rep(alphaRange[2], times = I)
    }else if(dir_dist == "outlier"){
      alpha = c(rep(alphaRange[1], times = (I-1)), alphaRange[2])
    }else if(dir_dist == "paired"){
      alpha = c(rep(alphaRange[2], times = 2), rep(alphaRange[1], times = (I-2)))
    }
    # isoform probability matrix - Ixn (col = isoform probability vector associated with sample i)
    rho = t(rdirichlet(n = n, alpha = alpha))
    candiIsoform = EffLen_info[[clust]]$candiIsoform
    rownames(rho) = colnames(candiIsoform)
    colnames(rho) = str_c("ref_",1:n)
    # scaling factor for gene
    r_g = numeric(n)
    # coefficient for mu_g = X_g %*% beta_g
    beta = matrix(NA, nrow = I, ncol = n)
    for(i in 1:n){
      r_g[i] = gene_ct[i] / sum(X %*% rho[,i])
      beta[,i] = rho[,i] * r_g[i]
    }
    # Find exon sets corresponding to rows of X
    exon_sets = EffLen_info[[clust]]$ExonSetLabels
    # negative binomial means for the exon sets within cluster
    # result: each col = neg bin means for sample i of n samples,
    # each row corresponds with (possible) exon sets
    mu = X %*% beta
    rownames(mu) = exon_sets
    colnames(mu) = str_c("ref_",1:n)
    output[[clust]] = list(iso_alpha = alpha, rho = rho, mu = mu, exon_sets = exon_sets)
  }
  return(output)
}
#-----------------------------------------------------------------------------#
# Simulate exon set negative binomial means - take 3 #
#-----------------------------------------------------------------------------#
# Isoform and exon set information
# Note: gene clusters chosen so number isoforms I >= 3 for all genes
# iso_dist = "uniform": all probabilities will be close to 1/I (I = number isoforms)
# Note: this type only uses max of alphaRange
# iso_dist = "outlier1": The first isoform (isoform 1 as determined by first column of
# knownIsoforms matrix) of the I isoforms will have the highest probability (by a significant
# margin) and the remaining isoforms will have small probabilities that are approx. uniform
# across these I-1 isoforms.
# Note: this type uses both min and max of alphaRange
# iso_dist = "outlier2": The second isoform (isoform 2 as determined by second column of
# knownIsoforms matrix) of the I isoforms will have the highest probability (by a significant
# margin) and the remaining isoforms will have small probabilities that are approx. uniform
# across these I-1 isoforms.
# Note: this type uses both min and max of alphaRange
# iso_dist = "outlier2": The third isoform (isoform 3 as determined by third column of
# knownIsoforms matrix) of the I isoforms will have the highest probability (by a significant
# margin) and the remaining isoforms will have small probabilities that are approx. uniform
# across these I-1 isoforms.
# Note: this type uses both min and max of alphaRange
# Variant of iso_exon_info() in which the "outlier" shapes name WHICH isoform
# gets the dominant Dirichlet alpha: "outlier1"/"outlier2"/"outlier3" put the
# large alpha on isoform 1, 2, or 3 respectively (isoform order taken from the
# knownIsoforms columns); all other isoforms share the small alpha.
# See iso_exon_info() for argument details.
# NOTE(review): the default 'seed = seed' is self-referential and errors if
# seed is not supplied explicitly -- effectively a required argument.
# NOTE(review): rdirichlet() and str_c() come from packages attached elsewhere.
iso_exon_info3 = function(genes_info, nTE_filtered, iso_dist,
                          alphaRange, EffLen_info,
                          seed = seed){
  set.seed(seed)
  # names of 1,000 clusters of interest used in simulation
  clust_names = nTE_filtered$clustID
  # names of genes of interest
  ## note: rows of genes_info matrix restricted to genes in nTE_filtered object
  gene_names = rownames(genes_info)
  # Number samples
  n = ncol(genes_info)
  # Check
  if(length(gene_names) != length(clust_names)){
    stop("nrow of genes_info does not match nrow of nTE_filtered")
  }
  # Check iso_dist
  iso_dist_options = unique(iso_dist)
  if(!all(iso_dist_options %in% c("uniform","outlier1","outlier2","outlier3"))){
    stop("iso_dist elements must be one of 'uniform', 'outlier1', 'outlier2', or 'outlier3'")
  }
  output = list()
  for(clust in clust_names){
    # name of gene associated with cluster
    gene = nTE_filtered$geneId[which(clust_names == clust)]
    # vector of counts for gene simulated in gene_level() function for n samples
    gene_ct = genes_info[which(gene_names == gene),]
    # Effective length matrix - ExI (num exon sets x num isoforms)
    X = EffLen_info[[clust]]$X
    # number isoforms
    I = ncol(X)
    # dirichlet alpha parameters for isoforms: large alpha placed on the
    # isoform named by the "outlierK" label, small alpha everywhere else
    dir_dist = iso_dist[which(names(iso_dist) == gene)]
    if(dir_dist == "uniform"){
      alpha = rep(alphaRange[2], times = I)
    }else if(dir_dist == "outlier1"){
      alpha = c(alphaRange[2], rep(alphaRange[1], times = (I-1)))
    }else if(dir_dist == "outlier2"){
      alpha = c(alphaRange[1], alphaRange[2], rep(alphaRange[1], times = (I-2)))
    }else if(dir_dist == "outlier3"){
      # I == 3 handled separately so no zero-length rep() tail is appended
      if(I == 3){
        alpha = c(rep(alphaRange[1], times=2), alphaRange[2])
      }else{ # I > 3
        alpha = c(rep(alphaRange[1], times=2), alphaRange[2], rep(alphaRange[1], times = (I-3)))
      }
    }
    # isoform probability matrix - Ixn (col = isoform probability vector associated with sample i)
    rho = t(rdirichlet(n = n, alpha = alpha))
    candiIsoform = EffLen_info[[clust]]$candiIsoform
    rownames(rho) = colnames(candiIsoform)
    colnames(rho) = str_c("ref_",1:n)
    # scaling factor for gene
    r_g = numeric(n)
    # coefficient for mu_g = X_g %*% beta_g
    beta = matrix(NA, nrow = I, ncol = n)
    for(i in 1:n){
      r_g[i] = gene_ct[i] / sum(X %*% rho[,i])
      beta[,i] = rho[,i] * r_g[i]
    }
    # Find exon sets corresponding to rows of X
    exon_sets = EffLen_info[[clust]]$ExonSetLabels
    # negative binomial means for the exon sets within cluster
    # result: each col = neg bin means for sample i of n samples,
    # each row corresponds with (possible) exon sets
    mu = X %*% beta
    rownames(mu) = exon_sets
    colnames(mu) = str_c("ref_",1:n)
    output[[clust]] = list(iso_alpha = alpha, rho = rho, mu = mu, exon_sets = exon_sets)
  }
  return(output)
}
#-----------------------------------------------------------------------------#
# Simulate exon set counts for 'other' genes #
#-----------------------------------------------------------------------------#
# Isoform and exon set information
# Note: gene clusters chosen so number isoforms I >= 3 for all genes
# E = number of singular exon sets
# iso_dist = "uniform": all probabilities will be close to 1/E
# Note: this type only uses max of alphaRange
# iso_dist = "outlier": one probability will be relatively high and the remaining prob
# will be approx. evenly distributed among the remaining E-1 isoforms
# Note: this type uses both min and max of alphaRange
# Distribute the simulated gene counts of the 'other' (background) genes over
# their singular exon sets via a Dirichlet-multinomial draw.
#   genes_info:      genes x n matrix of simulated gene-level counts (rows
#                    match nTE_other$geneId, in order)
#   nTE_other:       data frame with clustID / geneId for the 'other' clusters
#   exon_sets_other: per-cluster vector of singular exon set labels
#   iso_dist:        per-gene shape label, "uniform" or "outlier" (indexed by
#                    gene position, unlike iso_exon_info() which uses names)
#   alphaRange:      c(min, max) Dirichlet alpha values
#   seed:            RNG seed
# Returns, per cluster, the exon set labels and an E x n exon-set count matrix.
# NOTE(review): the default 'seed = seed' is self-referential and errors if
# seed is not supplied explicitly -- effectively a required argument.
# NOTE(review): rdirichlet() and str_c() come from packages attached elsewhere.
other_exonset_count = function(genes_info, nTE_other, exon_sets_other,
                               iso_dist = rep("uniform", times = nrow(nTE_other)),
                               alphaRange = c(20,50), seed = seed){
  set.seed(seed)
  # names of 'other' clusters
  clust_names = nTE_other$clustID
  # names of 'other' genes
  ## Note: rows of genes_info matrix restricted to genes in nTE_other object
  gene_names = rownames(genes_info)
  # Number samples
  n = ncol(genes_info)
  # Check
  if(length(gene_names) != length(clust_names)){
    stop("nrow of genes_info does not match nrow of nTE_other")
  }
  # Check iso_dist
  iso_dist_options = unique(iso_dist)
  if(!all(iso_dist_options %in% c("uniform","outlier"))){
    stop("iso_dist elements must be one of 'uniform' or 'outlier'")
  }
  output = list()
  for(clust in clust_names){
    # name of gene associated with cluster
    gene = nTE_other$geneId[which(clust_names == clust)]
    # vector of counts for gene simulated in gene_level() function for n samples
    gene_ct = genes_info[which(gene_names == gene),]
    # singular exon sets
    exon_sets = exon_sets_other[[clust]]
    # Distribute gene counts to singular exon sets according to iso_dist specification
    ## E = number singular exon sets
    E = length(exon_sets)
    ## dirichlet alpha parameters: "uniform" -> all equal;
    ## "outlier" -> last exon set dominant
    dir_dist = iso_dist[which(gene_names == gene)]
    if(dir_dist == "uniform"){
      alpha = rep(alphaRange[2], times = E)
    }else if(dir_dist == "outlier"){
      alpha = c(rep(alphaRange[1], times = (E-1)), alphaRange[2])
    }
    # exon set probability matrix - Exn (col = exon set probability vector associated with sample i)
    rho = t(rdirichlet(n = n, alpha = alpha))
    rownames(rho) = exon_sets
    colnames(rho) = str_c("ref_",1:n)
    # Determine exon set counts from multinomial distribution
    exon_set_cts = matrix(NA, nrow = E, ncol = n)
    for(i in 1:n){
      exon_set_cts[,i] = rmultinom(n = 1, size = gene_ct[i], prob = rho[,i])
    }
    rownames(exon_set_cts) = exon_sets
    colnames(exon_set_cts) = colnames(genes_info)
    output[[clust]] = list(exon_sets = exon_sets, exon_set_cts = exon_set_cts)
  }
  return(output)
}
#-----------------------------------------------------------------------------#
# Create Pure CT Reference Count Files #
#-----------------------------------------------------------------------------#
# Write pure cell-type reference count files to disk.
#   exonInfo_1000:  list of length J; element ct is the iso_exon_info()-style
#                   output (per-cluster NB mean matrix mu and exon set labels)
#   exonInfo_other: list of length J; element ct is the other_exonset_count()
#                   output (exon set counts already drawn)
#   theta:          per-sample NB dispersion vector of length n*J
#   file_labels:    character vector of n*J output file basenames, grouped by
#                   cell type in the same order as theta
#   folder:         output directory
#   seed:           RNG seed
# Side effect: writes one "<label>.txt" file (count, exon-set label) per
# sample. Counts for the clusters of interest are drawn from NB(mu, theta);
# counts for the 'other' clusters are copied as previously simulated.
# NOTE(review): rnegbin() comes from MASS, which is attached elsewhere.
# NOTE(review): counts_record/ES_labels grow via rbind()/c() inside the
# cluster loop -- quadratic in the number of clusters, but apparently
# acceptable at this scale.
counts_output = function(exonInfo_1000, exonInfo_other, theta, file_labels,
                         folder, seed){
  set.seed(seed)
  # Checks
  # Define variables
  ## Number cell types
  J = length(exonInfo_1000)
  ## Number samples per cell type
  n = length(theta) / J
  # --- Clusters of interest: draw NB counts from the per-cluster means ---
  output_1000 = list()
  for(ct in 1:J){
    ct_info = exonInfo_1000[[ct]]
    ct_theta = theta[(1 + n*(ct-1)):(n*ct)]
    for(clust in names(ct_info)){
      mu = ct_info[[clust]]$mu
      exon_sets = ct_info[[clust]]$exon_sets
      # Initialize ES_labels (exon set labels) and record of counts (counts_record) in first cluster
      if(names(ct_info)[1] == clust){
        ES_labels = exon_sets
        counts_record = matrix(NA, nrow = length(exon_sets), ncol = n)
        for(i in 1:n){
          counts_record[,i] = rnegbin(n = length(mu[,i]), mu = mu[,i], theta = ct_theta[i])
        }
      }else{ # End IF
        ES_labels = c(ES_labels, exon_sets)
        counts_subset = matrix(NA, nrow = length(exon_sets), ncol = n)
        for(i in 1:n){
          counts_subset[,i] = rnegbin(n = length(mu[,i]), mu = mu[,i], theta = ct_theta[i])
        }
        counts_record = rbind(counts_record, counts_subset)
      } # End ELSE of IF-ELSE
    } # End clust for-loop
    rownames(counts_record) = ES_labels
    output_1000[[ct]] = list(ES_labels = ES_labels, counts = counts_record)
  } # End ct for-loop
  # --- 'Other' clusters: counts were already drawn; just stack them ---
  output_other = list()
  for(ct in 1:J){
    ct_info = exonInfo_other[[ct]]
    for(clust in names(ct_info)){
      exon_sets = ct_info[[clust]]$exon_sets
      # Initialize ES_labels (exon set labels) and record of counts (counts_record) in first cluster
      if(names(ct_info)[1] == clust){
        ES_labels = exon_sets
        counts_record = ct_info[[clust]]$exon_set_cts
      }else{ # End IF
        ES_labels = c(ES_labels, exon_sets)
        counts_record = rbind(counts_record, ct_info[[clust]]$exon_set_cts)
      } # End ELSE of IF-ELSE
    } # End clust for-loop
    output_other[[ct]] = list(ES_labels = ES_labels, counts = counts_record)
  } # End ct for-loop
  # --- Combine both sources and write one two-column file per sample ---
  for(ct in 1:J){
    ct_files = file_labels[(1 + n*(ct-1)):(n*ct)]
    counts_combo = rbind(output_1000[[ct]]$counts, output_other[[ct]]$counts)
    ES_labels_all = c(output_1000[[ct]]$ES_labels, output_other[[ct]]$ES_labels)
    for(i in 1:n){
      df = data.frame(counts = counts_combo[,i], exons = ES_labels_all)
      write.table(df, file = sprintf("%s/%s.txt", folder, ct_files[i]),
                  row.names = F, col.names = F)
    } # End i for-loop
  } # End ct for-loop
}
#-----------------------------------------------------------------------------#
# Simulate Mixture Count Files #
#-----------------------------------------------------------------------------#
# Build in-silico mixture count files from pure cell-type reference files.
#   set_mixSim:  list of length J; element j is the vector of pure count file
#                paths for cell type j (equal length M across types assumed)
#   out_folder:  directory for the mixture count files
#   file_labels: mix_rep x nrow(probs) matrix of output file basenames
#   total_cts:   mix_rep x nrow(probs) matrix of target total counts
#   probs:       matrix with J columns; each row is a mixture proportion vector
#   seed:        RNG seed (one pure sample is drawn at random per cell type)
# Side effect: writes one "<label>.txt" file per mixture replicate.
# NOTE(review): all pure files are assumed to list the same exon sets in the
# same order -- confirm upstream.
mix_creation = function(set_mixSim, out_folder, file_labels, total_cts, probs, seed){
  set.seed(seed)
  # Define variables
  ## Number mixture replicates to create
  mix_rep = nrow(total_cts)
  ## Number cell types
  J = ncol(probs)
  ## Number pure reference samples per cell type (assume equal across all cell types)
  M = length(set_mixSim[[1]])
  # Checks
  # Use a tolerance rather than exact equality: proportions such as
  # 0.2 + 0.1 + 0.7 need not sum to exactly 1 in floating point.
  if(any(abs(rowSums(probs) - 1) > 1e-8)){
    stop("rows of probs must add to 1")
  }
  # List of pure reference sample count data.frames
  df_list = list()
  for(j in 1:J){
    pure_files = set_mixSim[[j]]
    files_list = list()
    for(f in seq_along(pure_files)){
      df = read.table(file = pure_files[f], as.is = TRUE)
      colnames(df) = c("count","exons")
      files_list[[f]] = df
    }
    df_list[[j]] = files_list
  }
  # exon set labels (assumed identical across all pure reference samples)
  exon_sets = df_list[[1]][[1]]$exons
  # Number exon sets (assumed equal across all pure reference samples)
  E = length(exon_sets)
  for(k in 1:nrow(probs)){
    # Identify prob vector
    p = probs[k,]
    # Randomly select one counts file from each cell type
    pure_counts = matrix(NA, nrow = E, ncol = J)
    for(j in 1:J){
      counts_vec = df_list[[j]][[sample(1:M, size = 1)]]$count
      pure_counts[,j] = counts_vec
    }
    # Ratio of target total counts to each pure sample's total counts
    # (standardizes library size before taking mixture proportions)
    cts_Ratio = matrix(NA, nrow = mix_rep, ncol = J)
    for(rep in 1:mix_rep){
      cts_Ratio[rep,] = total_cts[rep,k] / colSums(pure_counts)
    }
    # Scale each pure sample to the target depth, take the desired proportion,
    # round, and sum across cell types to get the mixture counts
    mixture = list()
    for(rep in 1:mix_rep){
      mix_components = pure_counts * matrix(p, nrow = nrow(pure_counts), ncol = J, byrow = TRUE) *
        matrix(cts_Ratio[rep,], nrow = nrow(pure_counts), ncol = J, byrow = TRUE)
      mixture[[rep]] = rowSums(round(mix_components))
    }
    # Save mixture results in counts.txt files
    for(rep in 1:mix_rep){
      label = file_labels[rep,k]
      df_mix = data.frame(count = mixture[[rep]], exons = exon_sets)
      write.table(df_mix, file = sprintf("%s/%s.txt", out_folder, label), col.names = FALSE, row.names = FALSE)
    }
  }
}
mix_creation2 = function(set_mixSim, out_folder, file_labels, total_cts, probs, seed){
  # Create simulated bulk "mixture" count files by combining pure reference
  # cell-type count files in specified proportions.
  #
  # set_mixSim:  list of length J (cell types); element j is a vector of file
  #              paths to pure reference count files for cell type j, each with
  #              two whitespace-separated columns (count, exon-set label).
  # out_folder:  directory in which the mixture files are written.
  # file_labels: file_labels[k] is the output file stem for probability row k.
  # total_cts:   total_cts[k] is the target total count for mixture k.
  # probs:       matrix (mixtures x J cell types); each row must sum to 1.
  # seed:        RNG seed controlling which pure sample is drawn per cell type.
  #
  # Side effect: writes one "<label>.txt" counts file per row of probs.
  set.seed(seed)

  # Number of cell types, and pure reference samples per cell type
  # (assumed equal across all cell types).
  J = ncol(probs)
  M = length(set_mixSim[[1]])

  # Validate proportions with a tolerance: an exact `rowSums(probs) != 1`
  # comparison can spuriously fail for rows such as c(1/3, 1/3, 1/3).
  if(any(abs(rowSums(probs) - 1) > 1e-8)){
    stop("probs must add to 1")
  }

  # df_list[[j]][[f]]: count data.frame for pure sample f of cell type j.
  df_list = list()
  for(j in 1:J){
    pure_files = set_mixSim[[j]]
    files_list = list()
    for(f in seq_along(pure_files)){
      df = read.table(file = pure_files[f], as.is = TRUE)
      colnames(df) = c("count","exons")
      files_list[[f]] = df
    }
    df_list[[j]] = files_list
  }

  # Exon-set labels and their number (assumed identical across pure samples).
  exon_sets = df_list[[1]][[1]]$exons
  E = length(exon_sets)

  for(k in seq_len(nrow(probs))){
    # Mixture proportions for this combination.
    p = probs[k,]

    # Randomly select one pure counts file per cell type.
    pure_counts = matrix(NA, nrow = E, ncol = J)
    for(j in 1:J){
      pure_counts[,j] = df_list[[j]][[sample(1:M, size = 1)]]$count
    }

    # Depth ratio: standardize every pure sample to the target total count
    # before taking the desired proportion of it.
    cts_Ratio = total_cts[k] / colSums(pure_counts)

    # Scale each cell type's counts by its proportion and depth ratio, round,
    # then sum across cell types to obtain the mixture counts.
    # (Fixes the original bug where this matrix was assigned to the misspelled
    # name `mix_componenets`, so the rowSums() line hit an undefined object.)
    mix_components = pure_counts * matrix(p, nrow = E, ncol = J, byrow = TRUE) *
      matrix(cts_Ratio, nrow = E, ncol = J, byrow = TRUE)
    mixture = rowSums(round(mix_components))

    # Write the two-column (count, exon-set) mixture file.
    df_mix = data.frame(count = mixture, exons = exon_sets)
    write.table(df_mix, file = sprintf("%s/%s.txt", out_folder, file_labels[k]),
                col.names = FALSE, row.names = FALSE)
  }
}
#-----------------------------------------------------------------------------#
# Simulate Fragment Length Distribution Files #
#-----------------------------------------------------------------------------#
fragLens_out = function(total_reads, mean = 300, SD = 50, lenMin = 150, lenMax = 600,
                        out_folder, file_names, seed){
  # Simulate a fragment-length frequency distribution for every mixture
  # replicate x probability-combination cell and write it to a text file.
  #
  # total_reads: matrix (replicates x probability combos) of total read counts.
  # mean, SD:    parameters passed to fragLens_dist() (compiled helper in
  #              geneModel_code/fragLens_dist.cpp -- see comment above).
  # lenMin, lenMax: bounds on simulated fragment lengths.
  # out_folder:  output directory; file_names[rep, p] gives each file stem.
  # seed:        RNG seed for reproducibility.
  #
  # Side effect: writes nrow(total_reads) * ncol(total_reads) files.
  set.seed(seed)
  # seq_len() (rather than 1:nrow) degrades gracefully if total_reads has
  # zero rows or columns.
  for(rep in seq_len(nrow(total_reads))){
    for(p in seq_len(ncol(total_reads))){
      freq_dist = fragLens_dist(total_reads[rep,p], mean, SD, lenMin, lenMax)
      # Keep only fragment lengths that were actually observed.
      freq_dist = freq_dist[which(freq_dist[,1] > 0),]
      write.table(freq_dist, file = sprintf("%s/%s.txt",out_folder,file_names[rep,p]),
                  col.names = FALSE, row.names = FALSE)
    }
  }
}
|
ff6a35720dc8289549721a91fda6669bc53036c4
|
064ab8373ed236a7d73e1ea6f76b2547833eb65d
|
/shiny/outputs/app_output3_plotlydygraph.R
|
e8574d6f7478752581c7591f3463c7f0b5d417d8
|
[] |
no_license
|
daianemarcolino/dataVisualizationR
|
fe48692d199aff8c7e9f51dd6d1dc61e21330ae4
|
bc0ce2b94111e06fdcd4b61e7a001ffd7004ed57
|
refs/heads/master
| 2021-04-03T10:07:31.962888
| 2020-12-14T06:18:50
| 2020-12-14T06:18:50
| 124,937,457
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 723
|
r
|
app_output3_plotlydygraph.R
|
# Demo Shiny app: a plotly scatter plot and a dygraph time series, side by side.
library(shiny)
library(plotly)
library(dygraphs)
# UI: one row with two half-width (6 of 12 columns) output slots.
ui <- fluidPage(
  # outputs
  fluidRow(
    column(6, plotlyOutput("grafico1", height = 300)),
    column(6, dygraphOutput("grafico2", height = 300))
  )
)
# SERVER: renders the two outputs declared in the UI above.
server <- function(input, output) {
  # Interactive scatter of the built-in iris data, one symbol per species.
  output$grafico1 <- renderPlotly({
    plot_ly(data = iris, x = ~Sepal.Length, y = ~Petal.Length, symbol = ~Species)
  })
  # AirPassengers series with a range selector and an always-visible legend.
  output$grafico2 <- renderDygraph({
    dygraph(AirPassengers) %>%
      dySeries("V1", label = "AirPassengers", color = "#000000") %>%
      dyRangeSelector() %>%
      dyLegend(labelsSeparateLines = T, show = "always")
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
9e987c7642930bd1fafa571bc74ea8b0cf7ce765
|
7ac133f9871f201f7a956f3b239d8f0030907c06
|
/man/get_ROI.Rd
|
64976c7f72b5fc2fed53b4f7aa11a4e8634a4347
|
[
"MIT"
] |
permissive
|
gopalpenny/anem
|
1029318ca01a6172b365ddb7d2181135d909d92c
|
f2ba63622e0e67a08423b20c5f09a34b6433f6d0
|
refs/heads/master
| 2021-07-09T21:43:07.502292
| 2020-12-16T15:25:36
| 2020-12-16T15:25:36
| 219,404,991
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,842
|
rd
|
get_ROI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anem_potentials.R
\name{get_ROI}
\alias{get_ROI}
\title{Estimate the radius of influence of a well}
\usage{
get_ROI(..., method)
}
\arguments{
\item{...}{Variable parameters, depending on the method used. Single numbers or vectors (of equal length)}
\item{method}{String containing the name of the desired method}
}
\value{
A numeric value indicating the horizontal radius of influence of the
well.
}
\description{
Estimate the radius of influence of a well
}
\section{Methods}{
The methods below are taken from Fileccia (2015). Acque
Sotterranee - Italian Journal of Groundwater
(\url{http://www.doi.org/10.7343/AS-117-15-0144}).
The following strings can be input for the \code{method} variable, and must be accompanied
by the corresponding variables as part of the \code{...} input:
\describe{
\item{"cooper-jacob":}{\eqn{R=\sqrt{2.25 Tr t / S}}, for confined aquifer after short pumping period. (Cooper and Jacob, 1946)}
\item{"aravin-numerov":}{\eqn{R=\sqrt{1.9 Ksat h t / n}}, for unconfined aquifers (Aravin and Numerov, 1953)}
\item{"sichardt":}{\eqn{R=3000 s \sqrt{Ksat}}, Sichardt formula for unconfined aquifers (Cashman and Preene, 2001)}
}
Where:
\itemize{
\item R = radius of influence [m]
\item Tr = transmissivity [m^2/s]
\item t = time [s]
\item S = storage
\item h = height of the water table above substratum [m]
\item n = effective porosity
\item Ksat = saturated hydraulic conductivity [m/s]
\item s = drawdown in the borehole [m]
}
These inputs can be single numbers or vectors of equal length.
}
\examples{
get_ROI(Tr=0.01,t=3600*12,S=1,method="cooper-jacob")
get_ROI(Ksat=0.0001,h=50,t=3600*12,n=0.4,method="aravin-numerov")
get_ROI(Ksat=0.0001,s=10,method="sichardt")
}
|
9398b4c72a60b6cbd28e208dc9694c0a96547e31
|
96e54a2f183ac913cd533b22560dbb6f9de98e64
|
/man/linCorrKTS.Rd
|
1efb8691878abeb3069adb044f530f372fd82eb1
|
[] |
no_license
|
cran/KarsTS
|
fe9e7cb51abd77edc1cf461b92fe86e9c760b9a8
|
a61bf7a479a7eeba1d2af68ff0fab8041b3d3fe2
|
refs/heads/master
| 2021-08-16T07:19:03.010559
| 2021-01-14T19:50:05
| 2021-01-14T19:50:05
| 92,603,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
rd
|
linCorrKTS.Rd
|
\name{linCorrKTS}
\alias{linCorrKTS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
linCorrKTS: linear correlation plot
}
\description{
This function plots the autocorrelation and partial autocorrelation functions or the cross correlation function, depending on the number of input time series. It is used internally through the Linear correlation button in the Plots menu.
}
%- maybe also 'usage' for other objects documented here.
\author{
Marina Saez Andreu
}
|
d913309276bcb77d9da4af8f53a985f7ff0be24a
|
282e39150079403a324555bc5d20eb0cc801980a
|
/R/nearest_neighbors.R
|
8a1059688e8e6c5aba27ed0860e4a48cdfa2cb76
|
[] |
no_license
|
pipiku915/BTF
|
c3196b1b05eec01c7bc15fc2039a4dcca95c6697
|
2149443249c137a14f2ef3ffb3750bffb37ef3b9
|
refs/heads/master
| 2023-03-27T16:32:12.328162
| 2020-10-06T16:13:27
| 2020-10-06T16:13:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,137
|
r
|
nearest_neighbors.R
|
#'
#'
#'
nearest_neighbors <- function(U, k){
  # Return the row names of the k nearest neighbors of each row of U
  # (neighbor search delegated to FNN::get.knn).
  #
  # U: numeric matrix/data.frame whose row names identify samples.
  # k: number of neighbors to report per row.
  # Returns: the plyr::aaply result over the neighbor-index matrix -- an
  #   array (rows of U x k) of neighbor row names.
  #
  # require() returns FALSE instead of erroring when a package is missing
  # (and attaches packages as a side effect); fail fast explicitly instead.
  for (pkg in c("FNN", "plyr")) {
    if (!requireNamespace(pkg, quietly = TRUE)) {
      stop("package '", pkg, "' is required by nearest_neighbors()", call. = FALSE)
    }
  }
  labels <- row.names(U)
  # nn.index[i, ] holds the row indices of row i's k nearest neighbors.
  nn_idx <- FNN::get.knn(U, k = k)$nn.index
  rownames(nn_idx) <- labels
  # Map neighbor indices back to sample names, row by row.
  plyr::aaply(nn_idx, 1, function(idx) labels[idx])
}
# Cosine-similarity profile of one or more query samples against every
# reference row of U.
#
# query:     character vector of sample names (rows of U) to use as queries.
# U:         matrix (samples x features) or 3-D array; a 2-D input is
#            promoted to a one-slice 3-D array. The first dimension appears
#            to index repeated draws/replicates -- TODO confirm with callers.
# row_names: names for the second dimension of U.
# summary:   when TRUE (default T), return per-reference summary statistics
#            (mean/sd/2.5%/97.5% over the first dimension) as a data.frame
#            per query, sorted by mean; otherwise the raw similarity matrix.
# Returns a list with one element per query, named by `query`.
query_distance <- function(query, U, row_names, summary = T){
  require(magrittr)
  # Promote a single matrix to a one-slice array so the code below can
  # always iterate over the first dimension.
  if(length(dim(U)) < 3){
    U %<>% array(c(1, dim(U)))
  }
  plyr::alply(query, 1, function(q){
    # For each slice, compute the cosine similarity between the query row
    # and every row: sum(u_i * u_q) / (||u_i|| * ||u_q||).
    all_res <- plyr::aaply(U, 1, function(u){
      u %<>% magrittr::set_rownames(row_names)
      u %>%
        apply(1, function(uu){
          sum(uu*u[q, ]) / (sqrt(sum(uu^2))*sqrt(sum(u[q, ]^2)))
        })
    })
    # A single slice collapses to a vector; restore the slices x reference
    # matrix shape and its column names.
    if(length(dim(all_res)) < 2){
      all_res %<>% array(c(1, length(all_res))) %>% magrittr::set_colnames(row_names)
    }
    if(summary){
      # Summarise each reference column over slices; sort by mean similarity.
      all_res %>%
        plyr::adply(2, function(col){
          data.frame(mean = mean(col),
                     sd = sd(col),
                     lwr = quantile(col, 0.025),
                     upr = quantile(col, 0.975))
        }) %>%
        dplyr::mutate(query = q) %>%
        dplyr::rename(reference = X1) %>%
        dplyr::select(query, reference, mean, sd, lwr, upr) %>%
        dplyr::arrange(mean)
    } else {
      all_res
    }
  }) %>%
    magrittr::set_names(query)
}
# Frequency distribution of each sample's k nearest neighbors across the
# first dimension of U (presumably repeated draws/replicates -- TODO confirm).
#
# U:         3-D array (slices x samples x features).
# k:         number of neighbors per sample.
# row_names: sample names for the second dimension of U.
# Returns a long data.frame with columns (anchor, x = neighbor name,
# neighbor_k = neighbor rank, freq = fraction of slices), sorted by anchor,
# then rank, then decreasing frequency.
nearest_neighbor_distribution <- function(U, k, row_names){
  require(FNN)
  require(magrittr)
  require(plyr)
  # For every slice, find each sample's k nearest neighbors and map the
  # neighbor indices back to sample names:
  # interim_res[slice, anchor, rank] = neighbor name.
  interim_res <- plyr::aaply(1:nrow(U), 1, function(i){
    FNN::get.knn(U[i,,], k = k) %>%
      magrittr::extract2("nn.index") %>%
      magrittr::set_rownames(row_names) %>%
      plyr::aaply(1, function(x){
        magrittr::extract(row_names, x)
      })
  })
  # For each anchor sample, tabulate how often each name occurs at each
  # neighbor rank, normalised by the number of slices.
  plyr::adply(1:(dim(interim_res)[2]), 1, function(i){
    m <- interim_res[,i,]
    plyr::adply(1:ncol(m), 1, function(j){
      plyr::count(m[,j]) %>%
        dplyr::mutate(neighbor_k = j) %>%
        dplyr::mutate(anchor = row_names[i]) %>%
        dplyr::mutate(freq = freq / nrow(m))
    })
  }) %>%
    dplyr::select(anchor, x, neighbor_k, freq) %>%
    plyr::arrange(anchor, neighbor_k, 1/freq)
}
|
003ce03c05119db9f0a33d0555c1420df2575092
|
3f9583cc4d69830631728c9096c0d18c366ede30
|
/annotatedTreeReader.R
|
fead42a193742f393f2c0026bf6bf8377763701a
|
[
"Beerware"
] |
permissive
|
fbielejec/phylodiagrams
|
7a8c06049dfa93de680c3850d4962fd79324cf31
|
2c4bef81f108faabf3af28f476aa092c3384d9f8
|
refs/heads/master
| 2016-09-06T18:57:26.123010
| 2014-04-13T18:58:05
| 2014-04-13T18:58:05
| 18,737,023
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,992
|
r
|
annotatedTreeReader.R
|
#
# @author Marc A. Suchard
#
# A class for reading Newick formatted trees with BEAST-style annotations
#
strip.annotations <- function(text) {
  # Pull every BEAST-style "[&...]" annotation out of a Newick string.
  # Each annotation is replaced in the text by a numbered placeholder
  # ("[1]", "[2]", ...) and collected, in order, into a list.
  # Returns list(annotations = <list of annotation strings>, tree = <text>).
  pattern <- "\\[&.*?\\]"
  notes <- list()
  idx <- 1
  hit <- regexpr(pattern = pattern, text = text)
  while (hit[1] > 0) {
    notes[[idx]] <- regmatches(text, hit)
    # Substitute only the first (current) match with its placeholder.
    text <- sub(pattern, paste0("[", idx, "]"), text)
    idx <- idx + 1
    hit <- regexpr(pattern = pattern, text = text)
  }
  list(annotations = notes, tree = text)
}
.split.tree.names <- function(text) {
  # Reduce a NEXUS/BEAST tree header (e.g. "tree STATE_0 [&lnP=...]=") to
  # just the tree's name: drop any "[...]=" annotation chunk, then the
  # leading "tree" keyword.
  without_traits <- gsub(pattern = "\\[.*?\\]=", x = text, replacement = "")
  gsub(pattern = "^tree", x = without_traits, replacement = "")
}
.split.tree.traits <- function(text) {
  # Extract the first bracketed annotation from a tree header and return
  # its interior, i.e. "[&a=1]" -> "a=1" (leading "[&" and trailing "]"
  # are stripped).
  hit <- regexpr(pattern = "\\[.*?\\]", text)
  chunk <- regmatches(text, hit)
  substring(chunk, 3, nchar(chunk) - 1)
}
# Recursively parse a BEAST annotation value string: "{...}" becomes a
# (possibly nested) list of parsed elements, numeric-looking strings become
# numbers, and anything else is returned unchanged.
# NOTE(review): nested braced values (depth >= 1) are returned UNPARSED due
# to the acknowledged recursion bug (see the TODO below).
parse.value = function(text) {
  value = text
  if (length(grep("^\\{",value))) { # starts with {
    save = value
    # Drop the outer braces before splitting the contents.
    value = substring(value, 2, nchar(value)-1)
    depth = 0
    # Depth = length of the leading run of "{" in the remaining contents.
    r = regexpr(pattern="\\{+",value,perl=TRUE)
    match.length = attr(r, "match.length")
    if (match.length > 0) {
      depth = match.length
    }
    if (depth == 0) {
      split = ","
    } else {
      # Split only on commas sitting between "}...}" and "{...{" runs of the
      # detected depth (lookbehind/lookahead keep the braces in the parts).
      split = paste(
        "(?<=",rep("\\}",depth),")",
        ",",
        "(?=" ,rep("\\{",depth),")",
        sep="")
    }
    if (depth >= 1) {
      return(save) # TODO Still error in recursion
    }
    # Flat "{a,b,...}": parse each comma-separated element recursively.
    part = strsplit(value, split, perl=TRUE)[[1]]
    value = list()
    for (i in 1:length(part)) {
      value[[i]] = parse.value(part[i])
    }
    # TODO Unlist when simple array?
  } else {
    # Scalar: convert to numeric when possible (warnings suppressed because
    # as.numeric() warns on non-numeric strings).
    if (!is.na(suppressWarnings(as.numeric(value)))) { # is a number
      value = as.numeric(value)
    }
  }
  return(value)
}
parse.traits = function(text, header=FALSE) {
  # Parse a BEAST-style "key=value" annotation string into a named list.
  #
  # text:   annotation contents, e.g. 'rate=1.0,set={1,2}'. When header=TRUE,
  #         text still carries the "[&" prefix and "]" suffix, which are
  #         stripped first.
  # Values are converted by parse.value() (numbers become numeric, "{...}"
  # becomes a list); a key with no "=value" part maps to an empty string.
  if (header == TRUE) {
    text = substring(text,3,nchar(text)-1)
  }
  # Capture group 1: the key (possibly quoted). Capture group 3: the value,
  # which may be a braced set, a quoted string, or a bare token.
  pattern = "(\"[^\"]*\"+|[^,=\\s]+)\\s*(=\\s*(\\{[^=]*\\}|\"[^\"]*\"+|[^,]+))?"
  rgx = gregexpr(pattern,text,perl=TRUE)
  n = length(attr(rgx[[1]],"match.length"))
  traits = list()
  # (A duplicated, unused assignment of the capture names was removed here,
  # and the local that shadowed base::length was renamed.)
  cap_start = attr(rgx[[1]],"capture.start")
  cap_length = attr(rgx[[1]],"capture.length")
  for (i in 1:n) {
    s = cap_start[i,3]
    e = s + cap_length[i,3] - 1
    value = substring(text,s,e)
    s = cap_start[i,1]
    e = s + cap_length[i,1] - 1
    key = substring(text,s,e)
    traits[[key]] = parse.value(value)
  }
  return(traits)
}
# THE CODE BELOW COMES FROM 'ape'. MY GOAL IS TO DERIVE FROM THIS TO READ IN BEAST-STYLE ANNOTATIONS
# Build an ape-style "phylo" object from a Newick string `tp` carrying
# BEAST-style [&...] annotations. Derived from ape's tree.build (see the
# comment above); the parsed annotations are attached as obj$annotations.
annotated.tree.build =
function (tp) {
    # The three closures mutate the enclosing frame via <<- while the
    # structural skeleton of the tree is walked one character at a time.
    add.internal <- function() {
        # Open a new internal node as a child of the current node.
        edge[j, 1] <<- current.node
        edge[j, 2] <<- current.node <<- node <<- node + 1L
        index[node] <<- j
        j <<- j + 1L
    }
    add.terminal <- function() {
        # Attach the next tip; its "label:length" token is tpc[k].
        edge[j, 1] <<- current.node
        edge[j, 2] <<- tip
        index[tip] <<- j
        X <- unlist(strsplit(tpc[k], ":"))
        tip.label[tip] <<- X[1]
        edge.length[j] <<- as.numeric(X[2])
        k <<- k + 1L
        tip <<- tip + 1L
        j <<- j + 1L
    }
    go.down <- function() {
        # Close the current clade: record its label and branch length,
        # then move back up to the parent node.
        l <- index[current.node]
        X <- unlist(strsplit(tpc[k], ":"))
        node.label[current.node - nb.tip] <<- X[1]
        edge.length[l] <<- as.numeric(X[2])
        k <<- k + 1L
        current.node <<- edge[l, 1]
    }
    # Degenerate single-taxon tree (no comma): assemble the object directly.
    if (!length(grep(",", tp))) {
        obj <- list(edge = matrix(c(2L, 1L), 1, 2))
        tp <- unlist(strsplit(tp, "[\\(\\):;]"))
        obj$edge.length <- as.numeric(tp[3])
        obj$Nnode <- 1L
        obj$tip.label <- tp[2]
        if (tp[4] != "")
            obj$node.label <- tp[4]
        class(obj) <- "phylo"
        return(obj)
    }
    # Extract the [&...] annotations and parse them into key/value lists.
    # NOTE(review): new.tp.stripped (annotations replaced by [n] placeholders)
    # is computed but never used; tp.stripped below removes ALL bracketed
    # text from the raw string, so annotation ORDER is the only link between
    # obj$annotations and the tree structure -- confirm downstream usage.
    result = strip.annotations(tp)
    annotations = result$annotations
    new.tp.stripped = result$tree
    annotations = lapply(annotations, parse.traits, header=TRUE)
    tp.stripped = gsub("\\[.*?\\]","",tp)
    # Tokenize: tpc holds the "label:length" fragments, skeleton the
    # structural characters that drive the state machine below.
    tpc <- unlist(strsplit(tp.stripped, "[\\(\\),;]"))
    tpc <- tpc[nzchar(tpc)]
    tsp <- unlist(strsplit(tp.stripped, NULL))
    skeleton <- tsp[tsp %in% c("(", ")", ",", ";")]
    nsk <- length(skeleton)
    nb.node <- sum(skeleton == ")")
    nb.tip <- sum(skeleton == ",") + 1
    nb.edge <- nb.node + nb.tip
    node.label <- character(nb.node)
    tip.label <- character(nb.tip)
    edge.length <- numeric(nb.edge)
    edge <- matrix(0L, nb.edge, 2)
    current.node <- node <- as.integer(nb.tip + 1)
    edge[nb.edge, 2] <- node
    index <- numeric(nb.edge + 1)
    index[node] <- nb.edge
    j <- k <- tip <- 1L
    # Walk the skeleton: "(" opens a clade, "," closes a tip, ")" closes a
    # clade (possibly finishing a trailing tip first).
    for (i in 2:nsk) {
        if (skeleton[i] == "(")
            add.internal()
        if (skeleton[i] == ",") {
            if (skeleton[i - 1] != ")")
                add.terminal()
        }
        if (skeleton[i] == ")") {
            if (skeleton[i - 1] == ",") {
                add.terminal()
                go.down()
            }
            if (skeleton[i - 1] == ")")
                go.down()
        }
    }
    # Drop the placeholder root-edge row and assemble the phylo object.
    edge <- edge[-nb.edge, ]
    obj <- list(edge = edge, Nnode = nb.node, tip.label = tip.label)
    root.edge <- edge.length[nb.edge]
    edge.length <- edge.length[-nb.edge]
    if (!all(is.na(edge.length)))
        obj$edge.length <- edge.length
    if (is.na(node.label[1]))
        node.label[1] <- ""
    if (any(nzchar(node.label)))
        obj$node.label <- node.label
    if (!is.na(root.edge))
        obj$root.edge <- root.edge
    class(obj) <- "phylo"
    attr(obj, "order") <- "cladewise"
    obj$annotations = annotations
    obj
}
# Read one or more BEAST-annotated Newick trees from a file or text string,
# returning a "phylo" object (or "multiPhylo" when several trees are kept).
# Mirrors ape::read.tree but builds trees with annotated.tree.build() so
# [&...] annotations are retained.
# NOTE(review): the function name is misspelled ("annontated"); renaming it
# would break existing callers, so it is left as-is.
read.annontated.tree = function (file = "", text = NULL, tree.names = NULL, skip = 0,
    comment.char = "#", keep.multi = FALSE, ...)
{
    # Split a leading tree name (anything before the first "(") from the
    # Newick text itself; returns c(name, newick).
    unname <- function(treetext) {
        nc <- nchar(treetext)
        tstart <- 1
        while (substr(treetext, tstart, tstart) != "(" && tstart <=
            nc) tstart <- tstart + 1
        if (tstart > 1)
            return(c(substr(treetext, 1, tstart - 1), substr(treetext,
                tstart, nc)))
        return(c("", treetext))
    }
    if (!is.null(text)) {
        if (!is.character(text))
            stop("argument `text' must be of mode character")
        tree <- text
    }
    else {
        tree <- scan(file = file, what = "", sep = "\n", quiet = TRUE,
            skip = skip, comment.char = comment.char, ...)
    }
    if (identical(tree, character(0))) {
        warning("empty character string.")
        return(NULL)
    }
    # Strip whitespace and the BEAST rooting marker "[&R]", then cut the
    # input into individual ";"-terminated tree strings.
    tree <- gsub("[ \n\t]", "", tree)
    tree <- gsub("\\[&R\\]", "", tree)
    tree <- unlist(strsplit(tree, NULL))
    y <- which(tree == ";")
    Ntree <- length(y)
    x <- c(1, y[-Ntree] + 1)
    if (is.na(y[1]))
        return(NULL)
    STRING <- character(Ntree)
    for (i in 1:Ntree) STRING[i] <- paste(tree[x[i]:y[i]], sep = "",
        collapse = "")
    # Interleaved (name, newick) pairs from unname(): even slots are names.
    tmp <- unlist(lapply(STRING, unname))
    tmpnames <- tmp[c(TRUE, FALSE)]
    STRING <- tmp[c(FALSE, TRUE)]
    if (is.null(tree.names) && any(nzchar(tmpnames)))
        tree.names <- tmpnames
    colon <- grep(":", STRING)
    # Tree names may carry their own [&...] annotations; parse them.
    # NOTE(review): tree.traits is computed but never attached to the
    # return value -- confirm whether this is intentional.
    if (!is.null(tree.names)) {
        traits.text = lapply(tree.names, .split.tree.traits)
        tree.names = lapply(tree.names, .split.tree.names)
        tree.traits = lapply(traits.text, parse.traits)
    }
    # Trees WITH branch lengths (":") are handled; cladograms are not.
    if (!length(colon)) {
        # NOTE(review): the stop() makes the following lapply() unreachable.
        stop(paste("Annotated clado.build is not yet implemented.\n"))
        obj <- lapply(STRING, annotated.clado.build)
    }
    else if (length(colon) == Ntree) {
        obj <- lapply(STRING, annotated.tree.build)
    }
    else {
        obj <- vector("list", Ntree)
        obj[colon] <- lapply(STRING[colon], annotated.tree.build)
        nocolon <- (1:Ntree)[!1:Ntree %in% colon]
        obj[nocolon] <- lapply(STRING[nocolon], clado.build)
    }
    # Reject trees with singleton nodes, which "phylo" cannot represent.
    for (i in 1:Ntree) {
        ROOT <- length(obj[[i]]$tip.label) + 1
        if (sum(obj[[i]]$edge[, 1] == ROOT) == 1 && dim(obj[[i]]$edge)[1] >
            1)
            stop(paste("The tree has apparently singleton node(s): cannot read tree file.\n Reading Newick file aborted at tree no.",
                i))
    }
    # Unwrap a single tree unless the caller asked for a multiPhylo.
    if (Ntree == 1 && !keep.multi)
        obj <- obj[[1]]
    else {
        if (!is.null(tree.names)) {
            names(obj) <- tree.names
        }
        class(obj) <- "multiPhylo"
    }
    obj
}
# Read annotated trees from a NEXUS file (e.g. BEAST MCC/posterior output).
# Mirrors ape::read.nexus: locates the TREES block, applies the TRANSLATE
# table when present, and builds each tree with annotated.tree.build() so
# BEAST [&...] annotations survive in the result.
read.annotated.nexus = function (file, tree.names = NULL) {
  X <- scan(file = file, what = "", sep = "\n", quiet = TRUE)
  LEFT <- grep("\\[", X)
  RIGHT <- grep("\\]", X)
  # browser()
  # NOTE(review): ape's NEXUS comment-stripping block is deliberately
  # disabled below -- it would delete the [&...] annotations this reader
  # exists to keep.
  #
  # if (length(LEFT)) {
  # 	w <- LEFT == RIGHT
  # 	if (any(w)) {
  # 		s <- LEFT[w]
  # 		X[s] <- gsub("\\[[^]]*\\]", "", X[s])
  # 	}
  # 	w <- !w
  # 	if (any(w)) {
  # 		s <- LEFT[w]
  # 		X[s] <- gsub("\\[.*", "", X[s])
  # 		sb <- RIGHT[w]
  # 		X[sb] <- gsub(".*\\]", "", X[sb])
  # 		if (any(s < sb - 1))
  # 			X <- X[-unlist(mapply(":", (s + 1), (sb - 1)))]
  # 	}
  # }
  # Locate the TREES block and an optional TRANSLATE table inside it.
  endblock <- grep("END;|ENDBLOCK;", X, ignore.case = TRUE)
  semico <- grep(";", X)
  i1 <- grep("BEGIN TREES;", X, ignore.case = TRUE)
  i2 <- grep("TRANSLATE", X, ignore.case = TRUE)
  translation <- if (length(i2) == 1 && i2 > i1)
    TRUE
  else FALSE
  if (translation) {
    # TRANS: two-column matrix mapping tip numbers to (unquoted) tip labels.
    end <- semico[semico > i2][1]
    x <- X[(i2 + 1):end]
    x <- unlist(strsplit(x, "[,; \t]"))
    x <- x[nzchar(x)]
    TRANS <- matrix(x, ncol = 2, byrow = TRUE)
    TRANS[, 2] <- gsub("['\"]", "", TRANS[, 2])
    n <- dim(TRANS)[1]
  }
  start <- if (translation)
    semico[semico > i2][1] + 1
  else semico[semico > i1][1]
  end <- endblock[endblock > i1][1] - 1
  tree <- X[start:end]
  # browser()
  rm(X)
  # Re-join tree statements that were wrapped across several lines.
  tree <- tree[tree != ""]
  semico <- grep(";", tree)
  Ntree <- length(semico)
  if (Ntree == 1 && length(tree) > 1)
    STRING <- paste(tree, collapse = "")
  else {
    if (any(diff(semico) != 1)) {
      STRING <- character(Ntree)
      s <- c(1, semico[-Ntree] + 1)
      j <- mapply(":", s, semico)
      if (is.list(j)) {
        for (i in 1:Ntree) STRING[i] <- paste(tree[j[[i]]],
          collapse = "")
      }
      else {
        for (i in 1:Ntree) STRING[i] <- paste(tree[j[,
          i]], collapse = "")
      }
    }
    else STRING <- tree
  }
  rm(tree)
  # browser()
  # Keep only "tree <name> = <newick>" statements; split names from trees
  # and drop the "[&R]" rooting marker.
  STRING <- STRING[grep("^[[:blank:]]*tree.*= *", STRING, ignore.case = TRUE)]
  Ntree <- length(STRING)
  STRING <- gsub("\\[&R\\]", "", STRING)
  # TODO Parse out tree-level traits
  nms.trees <- sub(" *= *.*", "", STRING)
  nms.trees <- sub("^ *tree *", "", nms.trees, ignore.case = TRUE)
  STRING <- sub("^.*?= *", "", STRING)
  STRING <- gsub("\\s", "", STRING)
  # browser()
  # Only trees with branch lengths (":") are supported by this reader.
  colon <- grep(":", STRING)
  if (!length(colon)) {
    stop("annotated.clado.build is not yet implemented.\n")
    trees <- lapply(STRING, annotated.clado.build)
  } else if (length(colon) == Ntree) {
    # trees <- if (translation) {
    # browser()
    # stop("treeBuildWithTokens is not yet implemented.\n")
    # lapply(STRING, .treeBuildWithTokens)
    # }
    # else lapply(STRING, annotated.tree.build)
    trees <- lapply(STRING, annotated.tree.build)
    # browser()
  } else {
    # NOTE(review): ape's mixed clado/phylo handling is disabled; a mix of
    # trees with and without branch lengths is treated as an error.
    # trees <- vector("list", Ntree)
    # trees[colon] <- lapply(STRING[colon], annotated.tree.build)
    # nocolon <- (1:Ntree)[!1:Ntree %in% colon]
    # trees[nocolon] <- lapply(STRING[nocolon], annotated.clado.build)
    # if (translation) {
    # for (i in 1:Ntree) {
    # tr <- trees[[i]]
    # for (j in 1:n) {
    # ind <- which(tr$tip.label[j] == TRANS[, 1])
    # tr$tip.label[j] <- TRANS[ind, 2]
    # }
    # if (!is.null(tr$node.label)) {
    # for (j in 1:length(tr$node.label)) {
    # ind <- which(tr$node.label[j] == TRANS[,
    # 1])
    # tr$node.label[j] <- TRANS[ind, 2]
    # }
    # }
    # trees[[i]] <- tr
    # }
    # translation <- FALSE
    # }
    stop("Unknown error in read.annotated.nexus.\n")
  }
  # Reject trees with singleton nodes, which "phylo" cannot represent.
  for (i in 1:Ntree) {
    tr <- trees[[i]]
    if (!translation)
      n <- length(tr$tip.label)
    ROOT <- n + 1
    if (sum(tr$edge[, 1] == ROOT) == 1 && dim(tr$edge)[1] >
      1) {
      stop(paste("The tree has apparently singleton node(s): cannot read tree file.\n Reading NEXUS file aborted at tree no.",
        i, sep = ""))
    }
  }
  # Substitute TRANSLATE labels and wrap single/multiple results.
  if (Ntree == 1) {
    trees <- trees[[1]]
    if (translation) {
      trees$tip.label <- if (length(colon))
        TRANS[, 2]
      else TRANS[, 2][as.numeric(trees$tip.label)]
    }
  }
  else {
    if (!is.null(tree.names))
      names(trees) <- tree.names
    if (translation) {
      if (length(colon) == Ntree)
        attr(trees, "TipLabel") <- TRANS[, 2]
      else {
        for (i in 1:Ntree) trees[[i]]$tip.label <- TRANS[,
          2][as.numeric(trees[[i]]$tip.label)]
        trees <- .compressTipLabel(trees)
      }
    }
    class(trees) <- "multiPhylo"
    if (!all(nms.trees == ""))
      names(trees) <- nms.trees
  }
  trees
}
|
14803a955930bd08ea4b8a0c7284ec4de4456a3e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Biograph/examples/Trans.Rd.R
|
ffc011c3897b85ff7d712894f8ff461e0111aadf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
Trans.Rd.R
|
# Auto-extracted example for Biograph::Trans ("Transitions by age").
library(Biograph)
### Name: Trans
### Title: Transitions by age
### Aliases: Trans
### ** Examples

# Load the GLHS dataset shipped with Biograph and run the example calls.
data (GLHS)
y<- Parameters(GLHS)
z <- Trans (GLHS)
|
9ae302627a130c5189345c4ca5389e21dbfc6079
|
08659e4d7a1a3626d4587346e0617c9835fd829b
|
/vis/circlize.R
|
f501dee16a71d169a8b548e9999151b6751fa49b
|
[] |
no_license
|
wallingTACC/oncourse
|
ada4b459a0392d01fc24435b4ec7482721cce301
|
f6927fc67473c65bd5bdf36f063f4a1d065206a5
|
refs/heads/master
| 2021-01-19T04:33:25.185543
| 2020-01-22T16:55:39
| 2020-01-22T16:55:39
| 87,379,805
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,280
|
r
|
circlize.R
|
# Chord-diagram visualization of student transfer flows between institutions
# (FICE codes), built with the circlize package.
library(circlize)
library(data.table)
library(reshape2)
# NOTE(review): setwd() with a hard-coded absolute path makes the script
# machine-specific; consider relative paths instead.
setwd("/home/walling/dev/git/oncourse/vis")
# Read the transfer matrix; '#NULL!' cells become NA.
# NOTE(review): `colClass` relies on partial matching of fread's `colClasses`
# argument -- confirm, and spell it out.
data = fread("./data/fice_transfers.csv", na.strings = '#NULL!', colClass=c('character', rep('numeric', 113)))
# Drop last column 'Total'
data = data[,1:(ncol(data)-1)]
# Drop last 2 rows
data = data[1:(nrow(data)-2),]
# Clean up column names (strip a fixed 4-character prefix).
colnames(data) = substring(colnames(data), 5)
colnames(data)[1] = 'giving_fice'
# Form Matrix, then melt to long (giving, receiving, value) triples.
m = as.matrix(data[,2:ncol(data)])
m.w = melt(m)
# Remove NAs
m.w = m.w[!is.na(m.w$value),]
# Limit to flows greater than 50 (earlier comment said 10; 50 is used).
m.w = m.w[m.w$value > 50,]
# Plot
#chordDiagram(m.w)
# Advanced Plot: draw the grid track only, reserving room for custom labels.
chordDiagram(m.w, annotationTrack = "grid", preAllocateTracks = list(track.height = 0.1))
# Label each sector: narrow sectors (< 10 degrees of arc) get clockwise
# labels anchored at the sector start; wide sectors get inward-facing labels.
circos.trackPlotRegion(track.index = 1, panel.fun = function(x, y) {
  xlim = get.cell.meta.data("xlim")
  xplot = get.cell.meta.data("xplot")
  ylim = get.cell.meta.data("ylim")
  sector.name = get.cell.meta.data("sector.index")
  if(abs(xplot[2] - xplot[1]) < 10) {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "clockwise",
                niceFacing = TRUE, adj = c(0, 0.5))
  } else {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "inside",
                niceFacing = TRUE, adj = c(0.5, 0))
  }
}, bg.border = NA)
|
c46f9afe10bf16f124799539b0df7ba3a09dce76
|
eabb29bd2b31990492aa64a251c7bca135eca098
|
/app/r/deconstruct.r
|
1cfe91f4dda265a07a8cf4a22c5a4626ba6dd22d
|
[] |
no_license
|
keller-mark/mutation-visualization
|
f24add09b39cfc6ebd633823f435667cfb340e4a
|
55c221d10e8df2b51277f4c4d6cca9fa3bc70dec
|
refs/heads/master
| 2021-08-23T18:41:08.827060
| 2017-12-06T02:46:58
| 2017-12-06T02:46:58
| 111,315,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,050
|
r
|
deconstruct.r
|
#!/usr/bin/r
# Estimate mutational-signature exposures for ICGC somatic mutations:
# read the mutation table, build the trinucleotide-context matrix with
# deconstructSigs, then decompose each sample with SignatureEstimation.
library(deconstructSigs)
library(SignatureEstimation)
library(BSgenome.Hsapiens.UCSC.hg19)
# Columns needed to locate and describe each substitution.
selected_columns <- c("icgc_sample_id", "chromosome", "chromosome_start", "reference_genome_allele", "mutated_to_allele")
somatic_mutations_path <- "/app/data_temp/extracted_data.tsv"
# Read everything as factors; columns are coerced to proper types below.
somatic_mutations <- read.table(somatic_mutations_path, sep = '\t', header = TRUE, colClasses = c("factor"))
# Keep only the columns of interest.
somatic_mutations_stripped <- somatic_mutations[, which(names(somatic_mutations) %in% selected_columns)]
sample.name.to.id <- function(sample.name) {
  # Strip the "SA" prefix from an ICGC sample name and coerce the
  # remainder to numeric (vectorized; e.g. "SA123" -> 123).
  digits_only <- gsub("SA", "", as.character(sample.name))
  as.numeric(digits_only)
}
chr.prefix <- function(chr.input) {
  # Prepend the UCSC-style "chr" prefix to a chromosome identifier
  # (vectorized; e.g. 1 -> "chr1", "MT" -> "chrMT").
  # paste0() replaces the original paste(..., sep = "") idiom.
  paste0("chr", as.character(chr.input))
}
start.numeric <- function(start.input) {
  # Convert a (possibly factor-typed) genomic position to numeric.
  # Going through as.character() first avoids the classic factor-to-numeric
  # pitfall of converting the underlying level codes instead of the labels.
  as_text <- as.character(start.input)
  as.numeric(as_text)
}
# Coerce columns to the forms mut.to.sigs.input() expects:
# chromosome "N" -> "chrN", start position -> numeric, alleles -> character.
somatic_mutations_stripped[,2] <- sapply(somatic_mutations_stripped[,2], chr.prefix)
somatic_mutations_stripped[,3] <- sapply(somatic_mutations_stripped[,3], start.numeric)
somatic_mutations_stripped[,4] <- sapply(somatic_mutations_stripped[,4], as.character)
somatic_mutations_stripped[,5] <- sapply(somatic_mutations_stripped[,5], as.character)
# Drop mitochondrial rows.
somatic_mutations_stripped <- somatic_mutations_stripped[!(somatic_mutations_stripped$chromosome=="chrMT"),]
# Build the per-sample trinucleotide-context mutation matrix.
sigs.input <- mut.to.sigs.input(
  mut.ref = somatic_mutations_stripped,
  sample.id = "icgc_sample_id",
  chr = "chromosome",
  pos = "chromosome_start",
  ref = "reference_genome_allele",
  alt = "mutated_to_allele"
)
# Order context columns alphabetically, then compute context fractions and
# transpose so samples become columns.
sigs.input <- sigs.input[, order(colnames(sigs.input))]
tricontext.fractions <- t(getTriContextFraction(sigs.input, trimer.counts.method = 'default'))
sample.names <- colnames(tricontext.fractions)
# Decompose each sample against the 30 COSMIC signatures via quadratic
# programming; rows of the result are samples, columns are signatures.
signature.distributions <- data.frame(matrix(ncol = 30, nrow = 0))
for (i in 1:ncol(tricontext.fractions)) {
  signature.distributions[sample.names[i],] <- decomposeQP(tricontext.fractions[,i], signaturesCOSMIC)
}
#boxplot.matrix(as.matrix(signature.distributions))
# Persist the transposed (signatures x samples) table for the front end.
write.csv(t(signature.distributions), file = "/app/static/data/signature_distributions_t.csv")
|
761d39e268f8e9d4f2e5c462391f832c66c98b13
|
c3e82a48fee693238344b8cca636af49708a1f47
|
/man/convertIdx.Rd
|
eaa6c29313e65f071f8888e78a3e646188cdb75a
|
[
"BSD-3-Clause"
] |
permissive
|
Sai628/recharts
|
ad380cb9db981afe8584c60b1e06795e4cd13704
|
af204bd930d17a4cced8b3e94078981834a31f07
|
refs/heads/master
| 2021-01-23T11:32:51.108441
| 2017-06-02T09:43:01
| 2017-06-02T09:43:01
| 93,145,422
| 1
| 0
| null | 2017-06-02T08:36:17
| 2017-06-02T08:36:16
| null |
UTF-8
|
R
| false
| true
| 264
|
rd
|
convertIdx.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{convertIdx}
\alias{convertIdx}
\title{importFrom DT convertIdx}
\usage{
convertIdx(i, names, n = length(names), invert = FALSE)
}
\description{
importFrom DT convertIdx
}
|
c4fa0759d416778b89bf333981f2099537d00f3b
|
0e92c0b362b230341f9cc31207df8139dbc3ac18
|
/R/rasterFromCells.R
|
b3a3b5eacb5b248bd4575b02f5d114d7ea7f3623
|
[] |
no_license
|
cran/raster
|
b08740e15a19ad3af5e0ec128d656853e3f4d3c6
|
dec20262815cf92b3124e8973aeb9ccf1a1a2fda
|
refs/heads/master
| 2023-07-09T20:03:45.126382
| 2023-07-04T10:40:02
| 2023-07-04T10:40:02
| 17,699,044
| 29
| 35
| null | 2015-12-05T19:06:17
| 2014-03-13T06:02:19
|
R
|
UTF-8
|
R
| false
| false
| 700
|
r
|
rasterFromCells.R
|
# Author: Robert J. Hijmans
# Date : April 2009
# Version 0.9
# Licence GPL v3
rasterFromCells <- function(x, cells, values=TRUE) {
	# Return a raster cropped to the bounding box of the given cell numbers
	# of x, optionally filled with the corresponding cell numbers of x.
	x <- raster(x)
	# Drop NAs and out-of-range cell numbers; refuse empty input.
	valid_cells <- stats::na.omit(unique(cells))
	valid_cells <- valid_cells[valid_cells > 0 & valid_cells <= ncell(x)]
	if (length(valid_cells) == 0) {
		stop('no valid cells')
	}
	cell_cols <- colFromCell(x, valid_cells)
	cell_rows <- rowFromCell(x, valid_cells)
	half <- 0.5 * res(x)
	# Expand from cell centers to the outer edges of the extreme rows/cols.
	xmin <- xFromCol(x, min(cell_cols)) - half[1]
	xmax <- xFromCol(x, max(cell_cols)) + half[1]
	ymin <- yFromRow(x, max(cell_rows)) - half[2]
	ymax <- yFromRow(x, min(cell_rows)) + half[2]
	bbox <- extent(xmin, xmax, ymin, ymax)
	out <- crop(x, bbox)
	if (values) {
		# Fill each output cell with its cell number in the ORIGINAL raster.
		out <- setValues(out, cellsFromExtent(x, bbox))
	}
	out
}
|
0dc44d80ab845578fea5d9e994ab91ac26013a57
|
a2eec2ac0031397469ac9d3842aacff9a873082d
|
/man/subSpecials.Rd
|
b5504a149d9dd740cd6db661d1d96e9525cd680c
|
[] |
no_license
|
cran/useful
|
53f392ae3195e7ee964a6825010e6c0ace603637
|
1b9831ddb97ec68066c8cdf1eaa8b9f9f9eaa605
|
refs/heads/master
| 2021-01-19T01:43:59.075440
| 2018-10-08T15:00:03
| 2018-10-08T15:00:03
| 17,700,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,466
|
rd
|
subSpecials.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subspecials.r
\name{subSpecials}
\alias{subSpecials}
\title{Sub special characters out of character vectors.}
\usage{
subSpecials(..., specialChars = c("!", "(", ")", "-", "=", "*", "."))
}
\arguments{
\item{\dots}{Character vectors that will be altered by subbing the special characters with their escaped equivalents}
\item{specialChars}{The characters to be subbed out}
}
\value{
The provided vectors are returned with any of the defined specialChars subbed out for their escaped equivalents. Each vector is returned as an element of a list.
}
\description{
Converts each of the special characters to their escaped equivalents in each element of each vector.
}
\details{
Each element in the specialChar vector is subbed for its escaped equivalent in each of the elements of each vector passed in
}
\examples{
subSpecials(c("Hello", "(parens)", "Excited! Mark"))
subSpecials(c("Hello", "(parens)", "Excited! Mark"), specialChars=c("!", "("))
subSpecials(c("Hello", "(parens)", "Excited! Mark"),
c("This is a period. And this is an asterisk *"), specialChars=c("!", "("))
subSpecials(c("Hello", "(parens)", "Excited! Mark"),
c("This is a period. And this is an asterisk *"), specialChars=c("!", "(", "*"))
}
\seealso{
\code{\link{sub}} \code{\link{subOut}}
}
\author{
Jared P. Lander
www.jaredlander.com
}
\keyword{string}
\keyword{text}
|
f02bd9e7b8e6373d50dd12e8c12c8b5fd92c941e
|
2b5d480bce0ebf49e36578ee1421fa8941679ef1
|
/sample_data_Save.R
|
bea508ec91aedd73a886d2dd71e3c948d3698f8b
|
[] |
no_license
|
tfrance/Bias-Variance-Tradeoff
|
b7e4e141067c81a1db33341fd8ceb429b3c103fc
|
95d39e60e04f60dc11e5383c60b477a6f961ab49
|
refs/heads/master
| 2021-01-11T15:56:46.871149
| 2017-01-24T05:59:16
| 2017-01-24T05:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 518
|
r
|
sample_data_Save.R
|
# Generate noisy samples of a sine "signal" for bias/variance experiments:
# writes 10 CSV sample files and plots the last sample with the true curve.

# Output file stem.
# NOTE(review): hard-coded Windows path; also, paste() below joins the stem
# and index with a space, producing names like ".../sampledata 1" with no
# extension -- presumably paste0() was intended. Left unchanged so existing
# output file names are preserved.
filePathRoot <- 'C:\\Users\\deifen\\Documents\\Projects\\Bias and overfitting trade offs\\Project\\SampleData\\sampledata'
n <- 25   # NOTE(review): unused; signal()'s formal argument n shadows it
r <- 20   # points per sample
# Candidate x values spanning one full period of the sine.
vect <- seq(from= -pi, to =pi, by=.1)
# True underlying signal.
signal<-function(n)
  {sin(n)}
# Draw 10 independent noisy samples and save each to CSV.
for(i in 1:10)
{
  x <- sample(vect, r)
  noise<- rnorm(r, mean=0, sd=.5)   # Gaussian noise, sd = 0.5
  y <- signal(x) + noise
  Data <- data.frame(x, y)
  write.csv(Data, file=paste(filePathRoot, i, collapse=NULL), quote=FALSE, row.names=FALSE)
}
# Plot the last sample and overlay the noise-free signal.
# (A stray empty argument after `to=pi` in the curve() call was removed.)
plot(y ~ x, Data)
curve(signal(x), from=-pi, to=pi, xlab="x", ylab="y", add=TRUE)
|
b855e2ee6c9e6ad4f7b776425a002c26809a058a
|
d24e56684cfd13f3172fcc07b0831765ae0e2ec2
|
/scripts/tema1/03-NBL-install.R
|
24ee860fee32fb4749d7f3fd529293ef9ac09b71
|
[] |
no_license
|
ferreret/r-basic
|
e0664472888954668e0aaaf83a55a12fa0f4f775
|
d19138402cb3e58f28d98c6f85af72d96cbab565
|
refs/heads/master
| 2023-04-07T08:00:22.502135
| 2023-03-29T15:51:46
| 2023-03-29T15:51:46
| 287,297,847
| 0
| 0
| null | 2020-08-13T14:11:17
| 2020-08-13T14:11:16
| null |
UTF-8
|
R
| false
| false
| 166
|
r
|
03-NBL-install.R
|
# One-time setup: install and attach the packages used in this lesson.
# NOTE(review): scripts normally should not call install.packages() on
# every run; consider guarding with requireNamespace().
install.packages("tidyverse", dependencies = TRUE)
library(tidyverse)
install.packages("magic", dependencies = TRUE)
library(magic)
# Sanity check: print a 6x6 magic square.
magic(6)
# List everything currently installed.
installed.packages()
|
8bd28645e63397186cbbb85ebaf2563be564f902
|
8eb16684e6d6ef4d0cf2c5890aea6aca25316c1d
|
/tests/testthat/test-create_validation_package.R
|
d65da2ee6996280e068d5d49e432fff1d07945a4
|
[
"MIT"
] |
permissive
|
wfulp/valtools
|
0583f1934459c129a045f5a342df0ead5ca7b82b
|
595c343471b26a60a12b52fcdac3d220af813e2a
|
refs/heads/main
| 2023-05-02T04:06:39.137774
| 2021-05-20T19:38:49
| 2021-05-20T19:38:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
test-create_validation_package.R
|
# Creating a package in a temporary directory should yield a valid package
# skeleton that also contains the validation vignette directory.
test_that("Able to create package with validated package basics", {
  withr::with_tempdir({
    # Swallow console chatter from package creation; the captured text is
    # intentionally unused (local was previously misspelled "quite").
    console_output <- capture.output({
      vt_create_package(".", open = FALSE)
    })
    expect_true(devtools::is.package(devtools::as.package(".")))
    expect_true(dir.exists("vignettes/validation"))
  })
})
test_that("throws standard error when unable to create the package", {
withr::with_tempdir({
expect_error(
vt_create_package("temp_package"),
"Failed to create package. Error"
)
})
})
|
fbfc225e397ab472779d1f82c9737c0574b5ccf3
|
3fdfbd6728f19b4221129cc0d9637e2f1a4e0f3b
|
/man/sprintf_data_frame.Rd
|
b909288e01e39809c565e92b4181d298b9e2b967
|
[] |
no_license
|
billdenney/bsd.report
|
09e2b5642046ee51956d33199d6e1ae670b424ff
|
778f0298da10f6369bd7ffaa1a5c20d3f8f691fa
|
refs/heads/main
| 2023-02-19T22:49:25.273961
| 2023-02-15T21:54:41
| 2023-02-15T21:54:41
| 153,116,819
| 3
| 0
| null | 2023-02-15T22:57:48
| 2018-10-15T13:21:55
|
R
|
UTF-8
|
R
| false
| true
| 1,778
|
rd
|
sprintf_data_frame.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sprintf_data_frame.R
\name{sprintf_data_frame}
\alias{sprintf_data_frame}
\alias{sprintf_data_frame_single}
\title{Create new columns in a data.frame with sprintf results}
\usage{
sprintf_data_frame(data, ..., factor_out_if_factor_in = TRUE, ordered = NULL)
sprintf_data_frame_single(
data,
format,
factor_out_if_factor_in = TRUE,
ordered = NULL
)
}
\arguments{
\item{data}{the data to use for formatting}
\item{...}{a named list of character vectors. Names are new columns for
\code{data}, and values are sent to \code{format} in
\code{sprintf_data_frame_single}.}
\item{factor_out_if_factor_in}{If any of the input columns are factors, make
the output column a factor in the same order as the input column factors}
\item{ordered}{If \code{factor_out_if_factor_in} converts the output to a
factor, pass to \code{base::factor}. If \code{NULL}, then it is set to
\code{TRUE} if any of the input columns are ordered factors.}
\item{format}{A named character vector where the names are column names in
\code{data} and the values are sprintf format strings for the column.}
}
\value{
The data frame with columns added for the names of \code{...}.
A character vector with one element per row of \code{data}.
}
\description{
Create new columns in a data.frame with sprintf results
}
\section{Functions}{
\itemize{
\item \code{sprintf_data_frame_single()}: Generate a character vector based on sprintf
input formats
}}
\examples{
sprintf_data_frame(
data=mtcars,
cyl_mpg=c(mpg="\%g miles/gallon, ", cyl="\%g cylinders"),
disp_hp=c(disp="\%g cu.in. displacement, ", hp="\%g hp")
)
sprintf_data_frame_single(
data=mtcars,
format=c(mpg="\%g miles/gallon, ", cyl="\%g cylinders")
)
}
|
928f891fd76657eb3f4ff551df74c5ad4ffc44c5
|
29d9da8abadbffa3f2f083b5ead8b159a578f264
|
/Discrete generation anadromy model two sexes.R
|
84db071a3bd4ab2046b184424ea6cac741dd735a
|
[] |
no_license
|
kanead/Trout-anadromy-simulation
|
84ca8405de509df6f8e7201061151b3198d51a78
|
7bd13a90fddd86d923ea542b11defba926b93392
|
refs/heads/master
| 2020-03-18T14:37:24.675829
| 2018-05-25T13:08:45
| 2018-05-25T13:08:45
| 134,857,558
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 16,759
|
r
|
Discrete generation anadromy model two sexes.R
|
############################################################################################################
# Discrete generation, simple model of evolution of anadromy.
# The evolvable trait here is the "underlying threshold for residency", i.e. the condition value above which
# residency occurs, below which anadromy occurs.
# "Condition" here refers to a trait that varies across individuals randomly (variation is purely environmentally
# driven in this simple model, i.e. there is no genetic basis to condition).
############################################################################################################
rm(list=ls()) # clear all
## PARAMETERS:
nyears <- 10 # number of years for each simulation run
nreps<- 1 # number of simulations to run
mu_thresh <- 0 # initial mean genetic value of the treshold trait
h2 <- 0.5 # heritability of the threshold trait
Vp <- 1 # phenotypic variance of the threshold trait
Va <- Vp*h2 # additive genetic variance of the threshold trait
Ve <- Vp - Va # environmental variance of the threshold trait (Vp = Va + Ve)
mu_cond <- 0 # mean value of the condition trait
V_cond <- 1 # variance of the condition trait (in this case, all the phenotypic variance is environmental)
N_females <- 500 # Number of females
N_males <- 500 # Number of males
N_init <- N_females + N_males # initial population size (number of fry) is the sum of males and females
# NB**** If any of the following 4 parameters are changed, this will drive evolution towards one or other tactic
# NB**** (e.g. if S_anadromous is increased, then the pop will evolve towards a higher fraction of anadromous fish)
S_anadromousF <- 0.35 # fraction of female anadromous fish that survive to breeding age
S_anadromousM <- 0.35 # fraction of male anadromous fish that survive to breeding age
S_residentF <- 0.5 # fraction of female resident fish that survive to breeding age
S_residentM <- 0.5 # fraction of male resident fish that survive to breeding age
F_anadromous <- 10 # fecundity of anadromous fish (number of offspring)
F_resident <- 5 # fecundity of resident fish (number of offspring)
K <- 100 # Carrying capacity (this is a simple ceiling population size, above which individuals
# are randomly culled, to avoid the population sky-rocketing to huge sizes
# The following bits simply create empty vectors, in which the results of each model run can be stored:
sim <- c() # Store simulation number
year <- c() # Store year number
pop.size1 <- c() # Store population size of before survival and fecundity selection
pop.size1_F <- c() # Store population size of females before survival and fecundity selection
pop.size1_M <- c() # Store population size of males before survival and fecundity selection
pop.size2 <- c() # Store population size of after survival and fecundity selection
pop.size2_F <- c() # Store population size of females after survival and fecundity selection
pop.size2_M <- c() # Store population size of males after survival and fecundity selection
mean.thresh <- c() # Store realised mean genetic threshold across all individuals
va.realiz<- c() # Store realised variance in genetic thresholds across all individuals
frac.anadF <- c() # Store realised fraction of anadromous female fish (number from 0 to 1)
frac.anadM <- c() # Store realised fraction of anadromous male fish (number from 0 to 1)
real.surv.anadF <- c()# Store realised survival of anadromous female fish
real.surv.anadM <- c()# Store realised survival of anadromous male fish
real.surv.resF <- c() # Store realised survival of resident female fish
real.surv.resM <- c() # Store realised survival of resident male fish
###########################################################
### SIMULATION STARTS HERE!!!
###########################################################
### cycle over simulations
for (Sim in 1:nreps) {
## Initiate the population with N_init number of breeding values for the threshold trait. These are the individual-specific,
## genetic values for the evolvable trait
a_threshF <- rnorm(N_females, mu_thresh, (sqrt(Va)))
a_threshM <- rnorm(N_males, mu_thresh, (sqrt(Va)))
threshSize<-length(a_threshF) + length(a_threshM)
### cycle over years
for (Y in 1:nyears)
{
### additional random mortality if K exceeded, to keep population size in check
if (length(a_threshF) + length(a_threshM)>K)
{
surv <- ifelse(runif(threshSize,0,1)<(K/threshSize),1,0) # sum(surv==1)
# runif draws a random number from a uniform
#distribution, here between 0 and 1. If this number
#is less than the desired fraction of survivors (K/N),
#that individual survives, otherwise it dies
a_threshF <- a_threshF[surv==1] # subset the breeding values for only those "individuals" that survive
a_threshM <- a_threshM[surv==1] # subset the breeding values for only those "individuals" that survive
a_threshF <- na.omit(a_threshF)
a_threshM <- na.omit(a_threshM)
}
pop.size1 <- c(pop.size1,length(a_threshF) + length(a_threshM)) # calculate and store pop size ( = length of a_thresh vector)
IDF <- 1:length(a_threshF) # allocate arbitraty identities to each "individual"
IDM <- 1:length(a_threshM) + 500 # allocate arbitraty identities to each "individual"
e_threshF <- rnorm(length(a_threshF), 0, (sqrt(Ve))) # draw environmental deviations for the threshold trait
e_threshM <- rnorm(length(a_threshM), 0, (sqrt(Ve))) # draw environmental deviations for the threshold trait
z_threshF <- a_threshF + e_threshF # the phenotypic value (z) for each individual is the sum of the genetic
# (breeding) value and the environmental deviation#
z_threshM <- a_threshM + e_threshM
condF <- rnorm(length(a_threshF), mu_cond, sqrt(V_cond)) # draw random condition values for each individual
condM <- rnorm(length(a_threshM), mu_cond, sqrt(V_cond)) # draw random condition values for each individual
anadromousF <- ifelse(condF > z_threshF, 0, 1) # define migration tactics based on threshold model (if an individual's
# condition is > it's threshold value, it becomes resident (i.e. anadromous==0).
# otherwise it becomes anadromous (anadromous == 1))
anadromousM <- ifelse(condF > z_threshF, 0, 1)
frac.anadF <- c(frac.anadF, mean(anadromousF)) # calculate and store the fraction of anadromous fish in the pop at this timepoint
frac.anadM <- c(frac.anadM, mean(anadromousM)) # calculate and store the fraction of anadromous fish in the pop at this timepoint
anad_fishF <- IDF[anadromousF==1] # pull out the IDs for the anadromous fish
res_fishF <- IDF[anadromousF==0] # pull out the IDs for the resident fish
anad_fishM <- IDM[anadromousM==1] # pull out the IDs for the anadromous fish
res_fishM <- IDM[anadromousM==0] # pull out the IDs for the resident fish
# Calculate survival of anadromous fish by drawing a random number from uniform distribution between 0 and 1. If this number
# is < S_anadromous (i.e. the inputted expected survival of anadromous fish), then that individual survives, otherwise it dies
surv_anadF <- runif(length(anad_fishF)) < rep(S_anadromousF, length(anad_fishF))
surv_anadM <- runif(length(anad_fishM)) < rep(S_anadromousM, length(anad_fishM))
# Do same as above for the resident fish:
surv_resF <- runif(length(res_fishF)) < rep(S_residentF, length(res_fishF))
surv_resM <- runif(length(res_fishM)) < rep(S_residentM, length(res_fishM))
real.surv.anadF <- c(real.surv.anadF, mean(surv_anadF)) # Calculate and store the realised mean survival of anadromous fish, as a check
real.surv.anadM <- c(real.surv.anadM, mean(surv_anadM)) # Calculate and store the realised mean survival of anadromous fish, as a check
real.surv.resF <- c(real.surv.resF, mean(surv_resF)) # Calculate and store the realised mean survival of resident fish, as a check
real.surv.resM <- c(real.surv.resM, mean(surv_resM)) # Calculate and store the realised mean survival of resident fish, as a check
anad_fishF <- anad_fishF[surv_anadF] # Extract the IDs of the surviving anadromous fish
anad_fishM <- anad_fishM[surv_anadM] # Extract the IDs of the surviving anadromous fish
res_fishF <- res_fishF[surv_resF] # Extract the IDs of the resident anadromous fish
res_fishM <- res_fishM[surv_resM] # Extract the IDs of the resident anadromous fish
parents <- c(anad_fishF,anad_fishM,res_fishF,res_fishM) # Make a new vector of parent IDs by stringing together the anadromous and resident IDs
parents <- data.frame(parents)
parents$gender <- ifelse(parents > 500,"male", "female")
parents <- data.frame(parents)
head(parents)
# Make a vector of family sizes (number of offspring) for each parent, which will be used below in the mating and reproduction loop
# This vector is ordered the same way as the vector of parents' IDs (anadromous fish first, then residents).
family_size <- c( rep(F_anadromous, (length(anad_fishF)) + length(anad_fishM) )
, rep(F_resident, (length(res_fishF) + length(res_fishM))))
# check with this length(family_size) == length(parents$parents)
# Mating and reproduction:
# sum(parents$parents<0) & sum(parents$parents>500)>=2 # FALSE condition to test
if (sum(parents$parents<500,na.rm = T) & sum(parents$parents>500,na.rm=T)>=2) # if more than 1 individual, mating happens
{
# sum(parents$parents<500) + sum(parents$parents>500)
### mating
# npairs <- length(parents)%/%2 # number of mating pairs = no. of parents ÷ 2, and rounded down
motherPotential <- sample(parents$parents[parents$parents<500], replace=F) # randomly select n=npairs "mothers" out of the list of parents IDs
fatherPotential <- sample(parents$parents[parents$parents>500], replace=F) # randomly select n=npairs "fathers" out of the list of parents IDs
# we have to correct for the fact that the numbers of mothers and fathers won't match, this ifelse statement
# does that by sampling from the longer parent vector by the length of the smaller one
if(length(motherPotential) > length(fatherPotential)){
father<-fatherPotential;
mother<-sample(motherPotential,length(fatherPotential), replace = FALSE)
} else {
mother<-motherPotential;
father<-sample(fatherPotential,length(motherPotential), replace = FALSE)
}
a_thresh_fath <- a_threshM[match(father,IDM)] # extract the corresponding breeding values for these fathers from the vector a_thresh
a_thresh_moth <- a_threshF[match(mother,IDF)] # extract the corresponding breeding values for these mothers from the vector a_thresh
mid <- (a_thresh_fath + a_thresh_moth)/2 # calculate the mid-parental genetic value (mean of breeding values of each parent)
### breeding
BV <- c() # create an empty vector (BV= "breeding values") to store the new genetic values of the offspring
for (i in 1:length(mother)) # cycle over the n mothers
{
# Generate new genetic values for the offpring that are centred on the mid-parental values of their parents,
# plus a random deviation drawn from 0.5Va. This essentially generates genetic variation among siblings,
# with the expected genetic variation among siblings equal to half the population-wide additive genetic variance.
# This comes directly from quantitative genetic theory, see Chapter 4 of Roff 2009 book on "Modelling Evolution"
BVo <- rep(mid[i] , family_size[parents==mother[i]]) + rnorm(family_size[parents==mother[i]], 0, sqrt(0.5*Va))
# Add these offspring genetic values for each family to the vector BV:
BV <- c(BV, BVo)
}
idx <- sample.int(length(BV),size=length(BV)/2,replace=FALSE)
a_threshF <- BV[idx]
a_threshM <- BV[-idx]
# Now we replace the parental genetic values with a new list of offspring genetic values.
# This is because we here assume that all parents die immediately after reproduction, along with their
# genetic values!
### store results
sim <- c(sim,Sim)
year <- c(year,Y)
pop.size2 <- c(pop.size2,length(a_threshF) + length(a_threshM)) # store population size after survival and mating
mean.thresh <- c(mean.thresh,mean(c(a_threshF,a_threshM, na.rm=T))) # store realised mean genetic value.
va.realiz<- c(va.realiz,var(c(a_threshF,a_threshM, na.rm=T))) # store realised variance in genetic values.
} else
{
sim <- c(sim,Sim)
year <- c(year,Y)
pop.size2 <- c(pop.size2,0) # If the number of parents is <2, then simply store a 0 for pop size
mean.thresh <- c(mean.thresh,NA) # And store an NA here
va.realiz<- c(va.realiz, NA) # And store an NA here
}
} # close the year loop
} # close the simulation replicate loop
# Create a data frame called r1 ("results 1") to store all the results:
r1 <- data.frame(sim,year,pop.size1,pop.size2,mean.thresh,va.realiz,frac.anadF,frac.anadM,real.surv.anadF,real.surv.anadM
,real.surv.resF, real.surv.resM)
# Calculate the mean population size per year across all replicate simulations:
mN <- tapply(r1$pop.size1, r1$year, quantile, 0.5, na.rm=T)
lciN <- tapply(r1$pop.size1, r1$year, quantile, 0.05, na.rm=T) # calculate the lower confidence interval for this variable
uciN <- tapply(r1$pop.size1, r1$year, quantile, 0.95, na.rm=T) # calculate the upper confidence interval for this variable
# Calculate the mean fraction of anadromous fish per year across all replicate simulations:
mA <- tapply(r1$frac.anad, r1$year, quantile, 0.5, na.rm=T)
lcA <- tapply(r1$frac.anad, r1$year, quantile, 0.05, na.rm=T)
ucA <- tapply(r1$frac.anad, r1$year, quantile, 0.95, na.rm=T)
# Calculate the mean genetic threshold value per year across all replicate simulations:
mT <- tapply(r1$mean.thresh, r1$year, quantile, 0.5, na.rm=T)
lcT <- tapply(r1$mean.thresh, r1$year, quantile, 0.05, na.rm=T)
ucT <- tapply(r1$mean.thresh, r1$year, quantile, 0.95, na.rm=T)
# Calculate the mean realised survival of anadromous fish per year across all replicate simulations:
mSA <- tapply(r1$real.surv.anad, r1$year, quantile, 0.5, na.rm=T)
lcSA <- tapply(r1$real.surv.anad, r1$year, quantile, 0.05, na.rm=T)
ucSA <- tapply(r1$real.surv.anad, r1$year, quantile, 0.95, na.rm=T)
# Calculate the mean realised survival of resident fish per year across all replicate simulations:
mSR <- tapply(r1$real.surv.res, r1$year, quantile, 0.5, na.rm=T)
lcSR <- tapply(r1$real.surv.res, r1$year, quantile, 0.05, na.rm=T)
ucSR <- tapply(r1$real.surv.res, r1$year, quantile, 0.95, na.rm=T)
yr<- 1:nyears # create a vector of year IDs, for plotting purposes below
par(mfrow=c(2,2)) # open a 2 x 2 tiled plotting window
# Plot 1 = Population size against year:
plot(yr, mN, xlab='Year', ylab='Population size', type='l', lwd=1, ylim=c(0,250), cex.lab=1.2)
points(yr, lciN, type='l', lwd=1, lty=2, ylim=c(0,250))
points(yr, uciN, type='l', lwd=1, lty=2, ylim=c(0,250))
# Plot 2 = Survival or anadromous and resident fish against year:
plot(yr, mSA, xlab='Year', ylab='Survival anadromous/resident', type='l', lwd=1, ylim=c(0,1), cex.lab=1.2, col="blue")
points(yr, mSR, type='l', lwd=1, ylim=c(0,1), cex.lab=1.2, col="red")
legend("topright",legend=c("andadromous","resident"), col=c("blue","red"), lty=c(1,1))
# Plot 3 = Fraction of anadromous fish against year:
# ***NB This fraction will change as the population evolves!!
plot(yr, mA, xlab='Year', ylab='Fraction anadromous', type='l', lwd=1, ylim=c(0,1), cex.lab=1.2)
points(yr, lcA, type='l', lwd=1, lty=2, ylim=c(0,1))
points(yr, ucA, type='l', lwd=1, lty=2, ylim=c(0,1))
# Plot 4 = Mean genetic threshold against year:
# ***NB The mean genetic threshold will change as the population evolves!!
plot(yr, mT, xlab='Year', ylab='Mean genetic threshold', type='l', lwd=1, ylim=c(-5,5), cex.lab=1.2)
points(yr, lcT, type='l', lwd=1, lty=2, ylim=c(0,1))
points(yr, ucT, type='l', lwd=1, lty=2, ylim=c(0,1))
|
d0b2f3b1d348ffb9ec05ea8346679fb301d7b059
|
b3b1b011ab46f024467282baeff0f160e2e91e31
|
/tests/testthat/test-document.R
|
96021e20759aaf116685330232049324af308785
|
[
"Apache-2.0"
] |
permissive
|
schuemie/PatientLevelPrediction
|
5265629020a2406f9f96a4975aa3ab35c9663b92
|
0b59c97a53ab4c6aaf6236048d5bcc9363c2716e
|
refs/heads/master
| 2020-09-05T00:50:10.021513
| 2019-11-06T07:46:44
| 2019-11-06T07:46:44
| 88,721,641
| 0
| 1
| null | 2019-05-01T04:30:23
| 2017-04-19T08:40:26
|
R
|
UTF-8
|
R
| false
| false
| 5,798
|
r
|
test-document.R
|
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of PatientLevelPrediction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
library("testthat")
context("Document.R")
# Test unit for the document creation
test_that("document creation parameters", {
#test createPlpDocument inputs
expect_error(createPlpJournalDocument(plpResult=NULL))
expect_error(createPlpJournalDocument(plpResult=1:5))
plpResult <- list(1)
class(plpResult) <- 'plpModel'
# target name not character
expect_error(createPlpJournalDocument(plpResult=plpResult, targetName=1))
# outcomeName not character
expect_error(createPlpJournalDocument(plpResult=plpResult, targetName='target test', outcomeName=1))
# characterisationSettings not list
expect_error(createPlpJournalDocument(plpResult=plpResult, targetName='target test',
outcomeName='outcome test',characterisationSettings=1 ))
# includeTrain not logical
expect_error(createPlpJournalDocument(plpResult=plpResult, targetName='target test',
outcomeName='outcome test',characterisationSettings=list(),
includeTrain='Y'))
# includeTest not logical
expect_error(createPlpJournalDocument(plpResult=plpResult, targetName='target test',
outcomeName='outcome test',characterisationSettings=list(),
includeTrain=T, includeTest='Y'))
# includePredictionPicture not logical
expect_error(createPlpJournalDocument(plpResult=plpResult, targetName='target test',
outcomeName='outcome test',characterisationSettings=list(),
includeTrain=T, includeTest=T, includePredictionPicture='Y'))
# includeAttritionPlot not logical
expect_error(createPlpJournalDocument(plpResult=plpResult, targetName='target test',
outcomeName='outcome test',characterisationSettings=list(),
includeTrain=T, includeTest=T, includePredictionPicture=T,
includeAttritionPlot='Y'))
#set.seed(1234)
#data(plpDataSimulationProfile)
#sampleSize <- 2000
#plpData <- PatientLevelPrediction::simulatePlpData(plpDataSimulationProfile, n = sampleSize)
#population <- PatientLevelPrediction::createStudyPopulation(plpData, outcomeId=2,
# riskWindowEnd = 365)
#modelset <- PatientLevelPrediction::setLassoLogisticRegression()
#plpResult <- PatientLevelPrediction::runPlp(population, plpData, modelset, saveModel = F)
#doc <- PatientLevelPrediction::createPlpJournalDocument(plpResult=plpResult, plpData = plpData,
# targetName='target test',
# outcomeName='outcome test',
# includeTrain=T, includeTest=T,
# includePredictionPicture=T,
# includeAttritionPlot=T)
#expect_equal(doc, TRUE)
## clean up
#file.remove(file.path(getwd(), 'plp_journal_document.docx'))
})
data(plpDataSimulationProfile)
sampleSize <- 2000
plpData <- PatientLevelPrediction::simulatePlpData(plpDataSimulationProfile, n = sampleSize)
population <- PatientLevelPrediction::createStudyPopulation(plpData, outcomeId=2,
riskWindowEnd = 365)
modelset <- PatientLevelPrediction::setCoxModel()
plpResult <- PatientLevelPrediction::runPlp(population=population,
plpData=plpData, modelSettings = modelset,
savePlpData = F, saveEvaluation = F,
savePlpResult = F,
savePlpPlots = F, verbosity = 'NONE')
test_that("createPlpJournalDocument document works", {
doc <- PatientLevelPrediction::createPlpJournalDocument(plpResult=plpResult, plpData = plpData,
targetName='target test',
outcomeName='outcome test',
includeTrain=T, includeTest=T,
includePredictionPicture=T,
includeAttritionPlot=T, save=F)
expect_equal(class(doc), "rdocx")
})
test_that("createPlpReport document works", {
doc <- createPlpReport(plpResult=plpResult, plpValidation=NULL,
plpData = plpData,
targetName = '<target population>',
outcomeName = '<outcome>',
targetDefinition = NULL,
outcomeDefinition = NULL,
outputLocation=file.path(getwd(), 'plp_report.docx'),
save = F)
expect_equal(class(doc), "rdocx")
})
|
8f10c47690912b93cbee0400e103ca343a0fcb64
|
08ddb75a5a64c94ce2653d3bb453f2d0d367de8f
|
/BATCH_simulation-multComp-type1.R
|
79a31730511001a524fc97d63bc1038ad6cb5e26
|
[] |
no_license
|
bozenne/Article-lvm-multiple-comparisons
|
4bbf4f2ea9b8601bf600c9c35204f809c4761b70
|
7ca5a1a4b17336e275f0b673a7c4467a8c35b09a
|
refs/heads/master
| 2022-04-05T15:32:19.514160
| 2022-02-04T11:11:56
| 2022-02-04T11:11:56
| 224,150,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,421
|
r
|
BATCH_simulation-multComp-type1.R
|
## path <- "P:/Cluster/LVMproject/article-multipleComparisons"
## setwd(path)
## source("BATCH_simulation-multComp-type1.R")
## * seed
iter_sim <- as.numeric(Sys.getenv("SGE_TASK_ID"))
n.iter_sim <- as.numeric(Sys.getenv("SGE_TASK_LAST"))
if(is.na(iter_sim)){iter_sim <- 97}
if(is.na(n.iter_sim)){n.iter_sim <- 100}
cat("iteration ",iter_sim," over ",n.iter_sim,"\n", sep = "")
set.seed(1)
seqSeed <- sample(1:max(1e5,n.iter_sim),size=n.iter_sim,replace=FALSE)
iSeed <- seqSeed[iter_sim]
set.seed(iSeed)
cat("seed: ",iSeed,"\n")
## * path
path <- "."
path.res <- file.path(path,"Results","simulation-multComp-type1")
if(dir.exists(path.res)==FALSE){
dir.create(path.res)
}
path.output <- file.path(path,"output","simulation-multComp-type1")
if(dir.exists(path.output)==FALSE){
dir.create(path.output)
}
## * libraries
library(lava)
library(data.table)
library(multcomp)
library(lavaSearch2)
## * settings
seqN <- c(30,50,75,100,150,200,300,500)
seqCor <- c(0.1,0.2,0.35,0.65,1,5)##c(0,0.5,1,1.5,3,5)
n.Cor <- length(seqCor)
n.rep <- 100
## * model
## ** generative model
m.sim <- lvm(c(log.thalamus,
log.pallidostriatum,
log.neocortex,
log.midbrain,
log.pons,
log.cingulateGyrus,
log.hippocampus,
log.supramarginalGyrus,
log.corpusCallosum)~ a * eta + genotypeHAB + b * groupconcussion)
latent(m.sim) <- ~eta
## ** investigator model
m.test <- lvm(c(log.thalamus,
log.pallidostriatum,
log.neocortex,
log.midbrain,
log.pons,
log.cingulateGyrus,
log.hippocampus,
log.supramarginalGyrus,
log.corpusCallosum)~ eta + genotypeHAB + groupconcussion)
latent(m.test) <- ~eta
## * prepare
n.N <- length(seqN)
n.Cor <- length(seqCor)
## ** value for the simulation
sim.coef <- c("log.pallidostriatum" = 0.07255,
"log.neocortex" = -0.08699,
"log.midbrain" = 0.25676,
"log.pons" = 0.28991,
"log.cingulateGyrus" = 0.09924,
"log.hippocampus" = 0.09823,
"log.supramarginalGyrus" = -0.1254,
"log.corpusCallosum" = -0.00549,
"eta" = 1.43044,
"log.thalamus~genotypeHAB" = 0.60113,
"log.thalamus~groupconcussion" = 0.11999,
"log.pallidostriatum~eta" = 0.83452,
"log.pallidostriatum~genotypeHAB" = 0.57197,
"log.pallidostriatum~groupconcussion" = 0.11358,
"log.neocortex~eta" = 0.85006,
"log.neocortex~genotypeHAB" = 0.57948,
"log.neocortex~groupconcussion" = 0.04283,
"log.midbrain~eta" = 0.84739,
"log.midbrain~genotypeHAB" = 0.61591,
"log.midbrain~groupconcussion" = 0.09895,
"log.pons~eta" = 0.86516,
"log.pons~genotypeHAB" = 0.53958,
"log.pons~groupconcussion" = 0.01545,
"log.cingulateGyrus~eta" = 0.76682,
"log.cingulateGyrus~genotypeHAB" = 0.65551,
"log.cingulateGyrus~groupconcussion" = 0.15936,
"log.hippocampus~eta" = 0.76147,
"log.hippocampus~genotypeHAB" = 0.57525,
"log.hippocampus~groupconcussion" = 0.11901,
"log.supramarginalGyrus~eta" = 0.87999,
"log.supramarginalGyrus~genotypeHAB" = 0.57436,
"log.supramarginalGyrus~groupconcussion" = 0.05089,
"log.corpusCallosum~eta" = 0.67779,
"log.corpusCallosum~genotypeHAB" = 0.57192,
"log.corpusCallosum~groupconcussion" = 0.17416,
"log.thalamus~~log.thalamus" = 0.01308,
"log.pallidostriatum~~log.pallidostriatum" = 0.00987,
"log.neocortex~~log.neocortex" = 0.00603,
"log.midbrain~~log.midbrain" = 0.00402,
"log.pons~~log.pons" = 0.01053,
"log.cingulateGyrus~~log.cingulateGyrus" = 0.00451,
"log.hippocampus~~log.hippocampus" = 0.01247,
"log.supramarginalGyrus~~log.supramarginalGyrus" = 0.00612,
"log.corpusCallosum~~log.corpusCallosum" = 0.01602,
"eta~~eta" = 0.06319)
## ** give appropriate name
dfType <- coefType(lava::estimate(m.sim,lava::sim(m.sim,1e2)),
as.lava = FALSE)[,c("name","param","lava")]
## name2lava <- setNames(dfType[!is.na(dfType$lava),"lava"],dfType[!is.na(dfType$lava),"name"])
sim.coefLava <- sim.coef[setdiff(names(sim.coef), dfType[is.na(dfType$lava),"name"])]
## dfType[is.na(dfType$lava),"name"]
## ** null hypotheses
name.test <- paste0(c("log.thalamus",
"log.pallidostriatum",
"log.neocortex",
"log.midbrain",
"log.pons",
"log.cingulateGyrus",
"log.hippocampus",
"log.supramarginalGyrus",
"log.corpusCallosum"),"~groupconcussion")
n.test <- length(name.test)
## * loop
dt <- NULL
pb <- txtProgressBar(max = n.Cor)
dt.res <- NULL
for(iN in 1:n.N){ # iN <- 5
for(iCor in 1:n.Cor){ # iCor <- 1
cat("sample size=",seqN[iN],", correlation=",seqCor[iCor],": ", sep = "")
n.tempo <- seqN[iN]
a.tempo <- seqCor[iCor]
for(iRep in 1:n.rep){ # iRep <- 1
cat(iRep," ")
ls.max <- list()
## ** Simulate data
dt.data <- lava::sim(m.sim, n = n.tempo, p = c(a = a.tempo, sim.coefLava, b = 0), latent = FALSE)
## ** models
e.lvm <- estimate(m.test, data = dt.data)
## summary(e.lvm)
##setdiff(c(endogenous(m.test),exogenous(m.test)), names(dt.data))
## e.lvm <- estimate(m.test, data = dt.data, control = list(constrain = TRUE, starterfun = "startvalues"))
## e.lvm <- estimate(m.test, data = dt.data, control = list(constrain = TRUE, starterfun = "startvalues0"))
## e.lvm <- estimate(m.test, data = dt.data, control = list(constrain = TRUE, starterfun = "startvalues1"))
## e.lvm <- estimate(m.test, data = dt.data, control = list(starterfun = "startvalues"))
## e.lvm <- estimate(m.test, data = dt.data, control = list(starterfun = "startvalues0"))
## e.lvm <- estimate(m.test, data = dt.data, control = list(starterfun = "startvalues1"))
if (e.lvm$opt$convergence == 1) {
next
}
if (any(eigen(getVarCov2(e.lvm))$values <= 0)) {
next
}
name.coef <- names(coef(e.lvm))
n.coef <- length(name.coef)
## ** create contrast matrix
Ccontrast <- matrix(0, ncol = n.coef, nrow = n.test,
dimnames = list(name.test,name.coef))
diag(Ccontrast[name.test,name.test]) <- 1
## ** adjustment for multiple comparison
## *** lava
e.glht <- glht(e.lvm, linfct = Ccontrast)
cor.test <- cov2cor(e.glht$vcov[name.test,name.test])
medianCor.test <- median(abs(cor.test[lower.tri(cor.test)]))
e0.glht <- summary(e.glht, test = univariate())
eB.glht <- summary(e.glht, test = adjusted(type = "bonferroni"))
eHochberg.glht <- summary(e.glht, test = adjusted(type = "hochberg"))
eHommel.glht <- summary(e.glht, test = adjusted(type = "hommel"))
eS.glht <- summary(e.glht, test = adjusted(type = "single-step"))
name.X <- names(e0.glht$test$coef)
p.value.none <- as.double(e0.glht$test$pvalues)
p.value.bonf <- as.double(eB.glht$test$pvalues)
p.value.hoch <- as.double(eHochberg.glht$test$pvalues)
p.value.homm <- as.double(eHommel.glht$test$pvalues)
p.value.max <- as.double(eS.glht$test$pvalues)
### *** lavaSearch 2
e.glht2 <- try(glht2(e.lvm, linfct = Ccontrast, rhs = rep(0, n.test)),
silent = TRUE)
## e.glht2 <- try(glht2(e.lvm, linfct = Ccontrast), silent = TRUE)
if("try-error" %in% class(e.glht2) || is.na(e.glht2$df) || (e.glht2$df<0)){
p.value.none2 <- NA
p.value.bonf2 <- NA
p.value.max2 <- NA
medianCor.test2 <- NA
}else{
cor.test2 <- cov2cor(e.glht2$vcov[name.test,name.test])
medianCor.test2 <- median(abs(cor.test2[lower.tri(cor.test)]))
e0.glht2 <- summary(e.glht2, test = univariate())
eB.glht2 <- summary(e.glht2, test = adjusted(type = "bonferroni"))
eHochberg.glht2 <- summary(e.glht2, test = adjusted(type = "hochberg"))
eHommel.glht2 <- summary(e.glht2, test = adjusted(type = "hommel"))
eS.glht2 <- summary(e.glht2, test = adjusted(type = "single-step"))
p.value.none2 <- as.double(e0.glht2$test$pvalues)
p.value.bonf2 <- as.double(eB.glht2$test$pvalues)
p.value.hoch2 <- as.double(eHochberg.glht2$test$pvalues)
p.value.homm2 <- as.double(eHommel.glht2$test$pvalues)
p.value.max2 <- as.double(eS.glht2$test$pvalues)
}
## ** store results
dt.tempo <- rbind(data.table(method = "none", variable = name.X, p.value = p.value.none),
data.table(method = "Bonferroni", variable = name.X, p.value = p.value.bonf),
data.table(method = "Hochberg", variable = name.X, p.value = p.value.hoch),
data.table(method = "Hommel", variable = name.X, p.value = p.value.homm),
data.table(method = "Max", variable = name.X, p.value = p.value.max),
data.table(method = "none2", variable = name.X, p.value = p.value.none2),
data.table(method = "Bonferroni2", variable = name.X, p.value = p.value.bonf2),
data.table(method = "Hochberg2", variable = name.X, p.value = p.value.hoch2),
data.table(method = "Hommel2", variable = name.X, p.value = p.value.homm2),
data.table(method = "Max2", variable = name.X, p.value = p.value.max2)
)
dt.tempo[, n := n.tempo]
dt.tempo[, a := a.tempo]
dt.tempo[, rep := iRep]
dt.tempo[, seed := iSeed]
dt.tempo[, corTest := medianCor.test]
dt.tempo[, corTest2 := medianCor.test2]
dt.res <- rbind(dt.res, dt.tempo)
}
cat("\n")
}
filename <- paste0("type1error-S",iter_sim,"(tempo).rds")
saveRDS(dt.res, file = file.path(path.res,filename))
}
## * export
filename <- paste0("type1error-S",iter_sim,".rds")
saveRDS(dt.res, file = file.path(path.res,filename))
## * display
print(sessionInfo())
|
28676db266fe8376cbee09f3e1413d96ad8a4229
|
d370ba62ca4bf8f1e851225d18541cb150e2842e
|
/Utils/ArgoExtract.R
|
ae163f47e04c7e6e20e725308f2dd2afd86ecda7
|
[] |
no_license
|
MAST-ULiege/ArgoDiva
|
9bcc88a595a4d89714cc89119e0f98b25c19e5f1
|
d770e6590c98ada1abdb3be69ed77954495f12f3
|
refs/heads/master
| 2021-05-15T18:07:13.491428
| 2019-02-20T08:17:31
| 2019-02-20T08:17:31
| 107,600,790
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 574
|
r
|
ArgoExtract.R
|
ArgoExtract<- function(fulldf, flist){
sdf <- ddply(fulldf, ~ aprofile+Platform, function(profile){
flistoutputs <- ldply(flist,function(f){
f<- match.fun(f)
f(profile)
}
)
onelinedf <- data.frame(
qc = max(profile$qc), # TODO several variables , etc ...
lon = mean(profile$lon),
lat = mean(profile$lat) ,
day = profile$day[1],
month = profile$month[1],
year = profile$year[1],
juld = profile$juld[1])
return( cbind(flistoutputs, onelinedf) )
})
return(sdf)
}
|
743480c366e761fbea27d7897fbf1d7da64d7660
|
608cb9bb405cbe8104888d875497afa11a15eef4
|
/run_analysis.R
|
0866a1d446ab7f54660c2a34bd69deec6763c228
|
[] |
no_license
|
TDCARLSON/ProgrammingAssignmentWeek4
|
ea41ce87ec5bd95dde04a47ecf5b114ce11db300
|
f8a257122efe28ab729abc9a6a7851a110367000
|
refs/heads/master
| 2021-01-13T07:33:06.435815
| 2016-10-20T18:03:06
| 2016-10-20T18:03:06
| 71,490,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,776
|
r
|
run_analysis.R
|
## run_analysis.R — builds tidy datasets from the UCI HAR smartphone data:
##   1. merge the training and test sets,
##   2. keep only mean / std-dev measurements,
##   3. attach descriptive activity names,
##   4. write the combined tidy set (tidydata.txt),
##   5. write per-subject/activity averages (tidydatameans.txt).
##Working Directory
work_dir <- "C:/Coursera/Getting and Cleaning Data/ProgrammingAssignmentWeek4"
##data Directory
data_dir <- "Data"
##download URL
download_URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
##file Name
file_name <- "Datasets.zip"
#create working directory and data subdirectory if not there
if (!file.exists(work_dir)) dir.create(work_dir)
if (!file.exists(file.path(work_dir,data_dir)))
        dir.create(file.path(work_dir,data_dir))
setwd(work_dir)   ##set working directory
datafile <- file.path(work_dir,data_dir,file_name)
if (!file.exists(datafile)) {
  ## BUGFIX: mode = "wb" is required for binary files — without it the zip
  ## archive is corrupted when downloaded on Windows (text-mode translation).
  download.file(download_URL, datafile, mode = "wb")
  unzip(datafile, exdir = file.path(work_dir,data_dir))
}
udir <- "UCI HAR Dataset" ##Unzipped files dir
##Step 1 - Merge training and test sets to create a combined set
dataset_train <- read.table(file.path(work_dir,data_dir,udir,"train/X_train.txt"))
dataset_test <- read.table(file.path(work_dir,data_dir,udir,"test/X_test.txt"))
label_train <- read.table(file.path(work_dir,data_dir,udir,"train/y_train.txt"))
label_test <- read.table(file.path(work_dir,data_dir,udir,"test/y_test.txt"))
subject_train <- read.table(file.path(work_dir,data_dir,udir,"train/subject_train.txt"))
subject_test <- read.table(file.path(work_dir,data_dir,udir,"test/subject_test.txt"))
dataset_combined <- rbind(dataset_train,dataset_test)
label_combined <- rbind(label_train,label_test)
subject_combined <- rbind(subject_train,subject_test)
##Step 2 - Extract only measurements on mean and std dev
##read in the features (feature index -> feature name)
features_list <- read.table(file.path(work_dir,data_dir,udir,"features.txt"))
##keep only the columns whose feature name mentions mean or std
meanstd_cols <- grep(".*mean.*|.*std.*",features_list[,2])
##take only that subset of columns, and name them based on the description
dataset_meanstd <- dataset_combined[,meanstd_cols]
names(dataset_meanstd) <- features_list[meanstd_cols, 2]
##Step 3 use the descriptive activity names
##(activity id is used to index into the label table)
activity <- read.table(file.path(work_dir,data_dir,udir,"activity_labels.txt"))
label_combined[,1] <- activity[label_combined[,1],2]
##Step 4 create combined tidy dataset
names(label_combined) <- "activity"
names(subject_combined) <- "subject"
dataset_tidy <- cbind(subject_combined,label_combined,dataset_meanstd)
write.table(dataset_tidy,"tidydata.txt")
##Step 5 create second tidy data set for average of each variable for each activity and subject
dataset_tidy$subject <-factor(dataset_tidy$subject) #make factor
##"melt and recast data using reshape library"
library(reshape2)
dataset_melted <- melt(dataset_tidy, id=c("subject","activity"))
dataset_means <- dcast(dataset_melted, subject + activity ~ variable, mean)
write.table(dataset_means, "tidydatameans.txt")
|
ead8f338913cc69b0b8aad8ea0bfaa8b482003af
|
e639b8dc495cee285869e27c3714affee2d9d876
|
/tests/testthat.R
|
290996fb97b1ba9aeefeffa88ea5427b7663b260
|
[] |
no_license
|
jefferys/SamSeq
|
606d200bf2cc6c58cc2b931d8d2bcacf2ac8e57f
|
62523c0b3e4023d4b031f4e1973e682c467ba57d
|
refs/heads/master
| 2021-01-12T17:48:50.140263
| 2019-10-24T01:18:16
| 2019-10-24T01:18:16
| 69,393,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Test runner: loads testthat plus the package under test, then executes
# every test file under tests/testthat/ for the SamSeq package.
library(testthat)
library(SamSeq)
test_check("SamSeq")
|
21db64abd512613ddb520dfb13e38215945f3a2d
|
80856cb3f8b4d51641fc359ec4392358ee88d96a
|
/R/s3io_write.R
|
010fbf55482ebb3b784762dd48966715cd0dc271
|
[
"MIT"
] |
permissive
|
mmuurr/s3io
|
0afb9663415a43c7088cb8770e50b9c8722f03e6
|
7597b1881c9522380673313c6f265829dcf1bded
|
refs/heads/master
| 2023-01-13T22:54:37.380308
| 2022-12-22T21:29:10
| 2022-12-22T21:29:10
| 103,452,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,867
|
r
|
s3io_write.R
|
#' @title Write a file to AWS S3
#' @description Serialises \code{obj} to a local file with \code{writefun},
#'   then uploads that file to \code{s3://bucket/key} via the AWS CLI
#'   (\code{aws s3api put-object}).
#' @param obj <anything> The object to write.
#' @param bucket <chr(1)> AWS S3 bucket name.
#' @param key <chr(1)> AWS S3 object key.
#' @param writefun <function> A write function where the first argument is the object to write and the second argument is a filepath.
#' The function's signature should look like \code{writefun(obj, file, ...)}.
#' @param ... Additional arguments passed on to \code{writefun}.
#' @param .localfile <chr(1)> The local filepath for the initial write-to-disk.
#' @param .rm_localfile <lgl(1)> Remove \code{localfile} once the copy-to-S3 is complete?
#' @param .aws_config <dict> Configuration spliced into the \code{awscli2::awscli()}
#'   call as its \code{.config} argument.
#' @param .put_object_opts <dict> Additional --opts for the AWS CLI `aws s3api put-object` command.
#' A common option you may want to specify, e.g., is content-type: \code{.put_object_opts = list("content-type" = "application/json")}.
#' @param .opts Apparent legacy alias for \code{.aws_config}.
#'   NOTE(review): \code{.opts} is accepted but never referenced in the body,
#'   so a value passed here is silently ignored — confirm whether it should
#'   feed \code{.aws_config} or \code{.put_object_opts}.
#' @return The returned value from \code{writefun} (visibility preserved).
#' @examples
#' \dontrun{
#' s3io_write(iris, "mybucket", "my/object/key.csv", readr::write_csv, col_names = TRUE)
#' }
#' @export
## TODO: Possibly split .opts into .copy_opts and .aws_config to splice the values into the correct/expected positions in the final awscli command.
## Same with s3io_read.
s3io_write <- function(obj, bucket, key,
                       writefun, ...,
                       .localfile = fs::file_temp(), .rm_localfile = TRUE,
                       .aws_config = NULL, .put_object_opts = NULL, .opts = .aws_config) {
  # Schedule removal of the temp file even if the upload fails.
  if (isTRUE(.rm_localfile)) on.exit(try_file_remove(.localfile), add = TRUE)
  # Capture the value AND its visibility so the caller sees exactly what
  # writefun would have returned (visible vs invisible).
  retval <- withVisible(writefun(obj, .localfile, ...))
  retval <- if (isTRUE(retval$visible)) retval$value else invisible(retval$value)
  # Splice the optional put-object flags into the CLI call.
  rlang::inject(awscli2::awscli(
    c("s3api", "put-object"),
    "--bucket" = bucket,
    "--key" = key,
    "--body" = .localfile,
    !!!.put_object_opts,
    .config = .aws_config
  ))
  retval
}
|
07b878fa0cf162a4eef5d6517a21a5aeee3d5ea9
|
573ccf72400b611572d126d6bb180053d9fe652e
|
/watstats.r
|
373da945309bde17c9307891c26d42741cd4f028
|
[] |
no_license
|
DrOppenheimer/predict_depth
|
3dc5bb31e1c21d65fc61a854440985c036372640
|
0441fde42308bc4ad8bf1485f716aa7e0e3adbb2
|
refs/heads/master
| 2021-01-25T05:35:50.511929
| 2013-03-15T13:49:46
| 2013-03-15T13:49:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,539
|
r
|
watstats.r
|
## Lander-Waterman assembly statistics for a taxon that makes up
## `percent_data` percent of a metagenomic read set.
##
## Results are published as globals (alpha, theta, sigma, coverage_redundancy,
## num_contigs, seqs_per_contig, contig_length, percent_coverage, my_watstats,
## my_date) via `<<-` because downstream code reads them directly; that
## interface is preserved here.
watstats <- function (
  num_reads = 1000000,      # total number of reads sequenced
  percent_data = 20,        # percent of reads assigned to the taxon of interest
  genome_length = 4000000,  # genome size in bp
  read_length = 125,        # read length in bp
  min_overlap = 30,         # minimum detectable overlap between reads, in bp
  verbose = TRUE            # print a summary of inputs and outputs?
)
{
  taxa_num_reads <- (num_reads * (percent_data / 100))
  alpha <<- (taxa_num_reads / genome_length)              # $alpha=$N/$GM
  theta <<- (min_overlap / read_length)                   # $theta=$T/$L
  sigma <<- (1 - theta)                                   # $sigma=1-$theta
  coverage_redundancy <<- ((read_length * taxa_num_reads) / genome_length)  # $c=$L*$N/$GM
  # Expected number of contigs, clamped to [1, num_reads].
  num_contigs <<- taxa_num_reads * exp(-coverage_redundancy * sigma)
  if (num_contigs < 1) { num_contigs <<- 1 }
  if (num_contigs > num_reads) { num_contigs <<- num_reads }
  # Expected reads per contig, clamped to [1, num_reads].
  seqs_per_contig <<- exp(coverage_redundancy * sigma)
  if (seqs_per_contig > num_reads) { seqs_per_contig <<- num_reads }
  if (seqs_per_contig < 1) { seqs_per_contig <<- 1 }
  # Expected contig length, clamped to the genome length.
  contig_length <<- read_length * (((exp(coverage_redundancy * sigma) - 1) / coverage_redundancy) + (1 - sigma))
  if (contig_length > genome_length) { contig_length <<- genome_length }
  percent_coverage <<- 100 * num_contigs * contig_length / genome_length
  ## BUGFIX: the cap previously used a local `<-`, so only the printed value
  ## was capped while the global percent_coverage could stay above 100.
  if (percent_coverage > 100) { percent_coverage <<- 100 }
  if (verbose == TRUE) {
    print("INPUT")
    print(paste("num_reads :", round(taxa_num_reads, digits=0)))
    print(paste("percent_data :", round(percent_data, digits=1)))
    print(paste("genome_length :", round(genome_length, digits=0)))
    print(paste("read_length :", round(read_length,digits=0)))
    print(paste("min_overlap :", round(min_overlap, digits=0)))
    print("")
    print("OUTPUT")
    print(paste("coverage_redundancy :", round(coverage_redundancy, digits=1)))
    print(paste("num_contigs :", round(num_contigs, digits=1)))
    print(paste("seqs_per_contig :", round(seqs_per_contig, digits=1)))
    print(paste("contig_length :", round(contig_length, digits=1)))
    print(paste("percent_coverage :", round(percent_coverage, digits=1), "%"))
    print('------------------------------------------------------')
  }
  # Cross-check against the external watstats.pl implementation; the exit
  # status and timestamp are published for the caller.
  watstats_command <- paste("watstats.pl -g", genome_length, "-n", taxa_num_reads, "-l", read_length, "-t", min_overlap, "intern=TRUE")
  my_watstats <<- system(watstats_command)
  my_date <<- system("date", intern=TRUE)
}
# watstats.pl -g 5000 -n 1000 -l 480 -t 30
# watstats 1.1 ; Based on
# Lander, E. S. and Waterman, M. S., "Genomic mapping by fingerprinting
# random clones: a mathematical analysis", Genomics 2, 231-239 (1988).
|
629513e081bab2cac50e5e1c3acb9c77ecf85600
|
3fd29e830f8066f1fe49d0c1536578d9a5a55533
|
/run_twitch_script.R
|
81549526d4490a015141291456684ce89207ad2c
|
[] |
no_license
|
rcrondeau/twitch-stream-data
|
003f6364fa41f2075e873f233ea35589818822f3
|
4d8a31fb28eb565384ececffee059216f7fe019e
|
refs/heads/master
| 2020-03-09T20:43:24.304984
| 2018-04-10T21:38:13
| 2018-04-10T21:38:13
| 128,992,007
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 108
|
r
|
run_twitch_script.R
|
# Entry point: runs the Twitch data pipeline by sourcing each script in order.
# Each sourced file both defines helpers and executes its stage.
# NOTE(review): stage responsibilities inferred from file names — confirm.
source("twitch_libraries.R")
source("twitch_api.R")
source("twitch_boxart.R")
source("twitch_mergefiles.R")
|
fd482365fab3850f5bd239f2d0c8b507d4e160c7
|
0c6add397d446f5bd82f437c5d9ab77ff7acf654
|
/man/likJ.Rd
|
60dc65ed05f0c98251f4d5646bf15ae637a6f067
|
[] |
no_license
|
thoree/inbred
|
e72adebf4825efc2167eb81789b06eb72691669a
|
d9714720ed1fd3c56cc05b1e3b86e945da44d90f
|
refs/heads/master
| 2022-11-29T05:24:39.076623
| 2021-03-25T17:33:32
| 2021-03-25T17:33:32
| 194,614,223
| 1
| 0
| null | 2022-11-20T11:25:22
| 2019-07-01T06:38:08
|
R
|
UTF-8
|
R
| false
| true
| 1,194
|
rd
|
likJ.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likJ.R
\name{likJ}
\alias{likJ}
\title{Likelihood for two Possibly Inbred Individuals as a function of
the Condensed 'Jacquard' Coefficients}
\usage{
likJ(a, b, cc, d, pa, pb, pc, pd, Delta)
}
\arguments{
\item{a}{Vector of positive integers (allele 1 of individual 1)}
\item{b}{Vector of positive integers (allele 2 of individual 1)}
\item{cc}{Vector of positive integers (allele 1 of individual 2)}
\item{d}{Vector of positive integers (allele 2 of individual 2)}
\item{pa}{Double vector of allele frequencies}
\item{pb}{Double vector of allele frequencies}
\item{pc}{Double vector of allele frequencies}
\item{pd}{Double vector of allele frequencies}
\item{Delta}{Double vector of length 9 summing to unity}
}
\description{
Likelihood for two Possibly Inbred Individuals as a function of
the Condensed 'Jacquard' Coefficients
}
\examples{
p = c("1" = 0.1, "2" = 0.9)
a = c(rep(1,6), rep(2,3))
b = c(rep(1,3), rep(2,6))
cc = rep(c(1,1,2), 3)
d = rep(c(1,2,2), 3)
pa = p[a]
pb = p[b]
pc = p[cc]
pd = p[d]
Delta = c(0, 0, 0, 0, 0, 0, 1,0, 0)
l2 = likJ(a,b,cc,d, pa,pb,pc,pd, Delta = Delta)
sum(l2) == 1
}
|
0949de7a53e3ca4dbb72102ebb575c0ac8478f6f
|
189f4c7990ddb0c51d2f3fb9de7848b2b3273c4e
|
/tests/testthat/test_t_test_outlier.R
|
99db53bac9416c57e126230fb12bddf3c905c135
|
[] |
no_license
|
AnthonyTedde/utilitR
|
1ee26c1b442704e4571a9454a749614edd3e80df
|
da02668047591a218324add9384e10cfcd0bf46e
|
refs/heads/master
| 2022-01-24T07:15:08.201729
| 2022-01-14T13:14:24
| 2022-01-14T13:14:24
| 233,807,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,396
|
r
|
test_t_test_outlier.R
|
################################################################################
### dataset mtcars used to perform the following test. Please use the same
### if you want to create new tests. Otherwise update that comment and mention
### which set do you use.
################################################################################
testthat::test_that("recipes are accepted", {
  # Centred/scaled recipe for hp ~ mpg + cyl.
  rec <- recipes::recipe(hp ~ mpg + cyl, data = mtcars) %>%
    recipes::step_center(recipes::all_predictors()) %>%
    recipes::step_scale(recipes::all_predictors())
  method_list <- list(list(method = "lm"))
  # The function reports how many rows it removed on stdout.
  testthat::expect_output(
    keep_rows <- utilitR::t_outlier_test(rec, data = mtcars, method = method_list),
    regexp = "[[:digit:]]+/[[:digit:]]+ rows removed"
  )
  testthat::expect_vector(keep_rows, ptype = logical(), size = nrow(mtcars))
})
testthat::test_that("Formulae are accepted", {
  method_list <- list(list(method = "lm"))
  testthat::expect_output(
    keep_rows <- utilitR::t_outlier_test(hp ~ mpg + cyl,
                                         data = mtcars,
                                         method = method_list,
                                         preProcess = c("center", "scale", "nzv")),
    regexp = "[[:digit:]]+/[[:digit:]]+ rows removed"
  )
  testthat::expect_vector(keep_rows, ptype = logical(), size = nrow(mtcars))
})
|
34fddcf2b880befc169c25a63d2e1c178b4f2c9c
|
3ffc3317907fdb90a5805ce97c0331df1a72bf99
|
/server.R
|
5dd733e5a527f79f696bde1edbdb6777354a4c58
|
[] |
no_license
|
energyfirefox/Shiny_Titanic
|
f1486927fa30fb5111afd9ed6c0cef8a8056b277
|
9af756398d7b817143fbb08262acdb2dd85ea4c1
|
refs/heads/master
| 2021-01-23T18:59:06.712188
| 2014-06-20T13:42:12
| 2014-06-20T13:42:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(caret)
## Load the pre-cut Titanic data and recode survival as a Yes/No factor.
titanic <- read.csv("cutted_titanic.csv")
titanic$survived[titanic$survived == 0] <- 'No'
titanic$survived[titanic$survived == 1] <- 'Yes'
titanic$survived <- as.factor(titanic$survived)
## Fit the rpart classification tree ONCE at app start-up.
## BUGFIX: the original trained the model inside surviving_prob(), i.e. on
## every reactive update — slow and, because of caret's random resampling,
## able to give different answers for identical inputs.
tree_mod <- train(survived ~ ., data = titanic, method = "rpart")
## Predicted survival class for one passenger.
surviving_prob <- function(age, sex) {
  predict(tree_mod, newdata = data.frame(age = age, sex = sex))
}
shinyServer(function(input, output) {
  ## Text prediction for the currently selected passenger.
  output$prediction <- renderPrint({as.character(surviving_prob(input$age, input$sex))})
  ## Age-vs-sex scatter coloured by survival, with the user's position marked.
  output$distPlot <- renderPlot({
    titanic$user_age <- input$age
    titanic$user_sex <- input$sex
    p <- ggplot(titanic, aes(age, sex, color=survived))+
      geom_point(pch = 19) +
      geom_point(aes(user_age, user_sex), color="black", cex=10, pch=4) +
      geom_text(aes(user_age+3, user_sex), color="darkblue", label="You")
    p
  })
})
|
4425ae008ffdc108022b1e360761a7d7d74723b7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/telegram.bot/examples/bot_token.Rd.R
|
c729eea9365a2f454304a72a9d43366998c93c88
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 187
|
r
|
bot_token.Rd.R
|
# Auto-extracted example file for telegram.bot::bot_token.
# The example call is wrapped in "Not run" markers because it reads the bot
# token from an environment variable that is not set during checks.
library(telegram.bot)
### Name: bot_token
### Title: Get a token from environment
### Aliases: bot_token
### ** Examples
## Not run:
##D bot_token("RTelegramBot")
## End(Not run)
|
2a76cd1624fe9acc46fe9621eb17bd76c4c81ffe
|
1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5
|
/dataCamp/openCourses/dataAnalysisAndStatisticalInference/8_introductionToLinearRegression/3_question2.R
|
7f61880b5649228c3fecf3c5f124614faf85bbde
|
[
"MIT"
] |
permissive
|
sagarnikam123/learnNPractice
|
f0da3f8acf653e56c591353ab342765a6831698c
|
1b3b0cb2cff2f478006626a4c37a99102acbb628
|
refs/heads/master
| 2023-02-04T11:21:18.211654
| 2023-01-24T14:47:52
| 2023-01-24T14:47:52
| 61,184,927
| 2
| 1
|
MIT
| 2022-03-06T11:07:18
| 2016-06-15T06:57:19
|
Python
|
UTF-8
|
R
| false
| false
| 562
|
r
|
3_question2.R
|
# Question 2
#######################################################################################################################
#
# Using the correct type of plot of the previous exercises, plot the relationship between runs and at_bats,
# using at_bats as the explanatory variable.
#
# The relationship is...
#
#######################################################################################################################
# Answer choices (kept as comments — in the original file these were bare
# text lines, which are invalid R and prevented the file from being sourced):
# 1 linear
# 2 negative
# 3 horseshoe-shaped ( ∩∩ )
# 4 u-shaped ( ∪∪ )
# Scatterplot of runs against at_bats (requires the `mlb11` dataset in scope).
plot(mlb11$at_bats,mlb11$runs)
# Answer - 1 linear
|
a6a6ef31b25d8f1265a1c5f6799a4780b5d78cf6
|
8b36ad284a170740f3995a0c85020d56c13015a6
|
/DTC_genomics_convertor.r
|
9c3a2561e3210f72e1571b4775bfa8fab1563989
|
[] |
no_license
|
awreynolds/DTC_genomics_convertor
|
05905e11d08c05ae799b958aee771f401cbe5963
|
e2755a9b6b66a900b8c14b46c3d76bbb012d6f22
|
refs/heads/master
| 2021-04-18T18:37:33.309994
| 2019-08-13T17:18:17
| 2019-08-13T17:18:17
| 94,587,390
| 3
| 1
| null | 2019-08-13T17:18:18
| 2017-06-16T23:07:52
|
R
|
UTF-8
|
R
| false
| false
| 6,534
|
r
|
DTC_genomics_convertor.r
|
### DTC Genomics Convertor v0.1 ###
### A command line tool to go between 23andMe, AncestryDNA, and FTDNA raw data formats ###
### Austin Reynolds ###
### awreynolds@utexas.edu ###
### June 2017 ###
#note: make sure your file paths do not have spaces in them. for whatever reason the shell gets confused with spaces when you call from R
# Example of the `opt` list produced by optparse (kept for manual debugging):
# opt<-list()
# opt$file<-"~/Desktop/FTDNA.csv"
# opt$format<-"FTDNA"
# opt$convert_to<-"23andMe"
# opt$out<-"~/test.txt"
#welcome text — build the banner line by line, then print it
intro<-"\n"
intro<-paste(intro,"-------------------------------------------------------------------\n")
intro<-paste(intro,"-------------------------------------------------------------------\n")
intro<-paste(intro," DTC Genomics Convertor\n")
intro<-paste(intro," by Austin Reynolds\n")
intro<-paste(intro," v0.1 (June 2017)\n")
intro<-paste(intro,"-------------------------------------------------------------------\n")
intro<-paste(intro,"-------------------------------------------------------------------\n\n")
cat(intro)
#import required packages
library(optparse)
library(data.table)
library(stringr)
#define command-line options; all four are required for a successful run
option_list <- list(
make_option(c("-i", "--file"), action="store", default="",
            help="Path to input file."),
make_option(c("-f", "--format"), action="store", default="",
            help="The format of the input file. Accepted options: \"23andMe\",\"AncestryDNA\",\"FTDNA\" "),
make_option(c("-c", "--convert_to"), action="store", default="",
            help="The format that you want the output file to be. Accepted options: \"23andMe\",\"AncestryDNA\",\"FTDNA\" "),
make_option(c("-o", "--out"), action="store", default="",
            help="Path to output file")
)
# `opt` is read as a global by the convertor functions below.
opt<-parse_args(OptionParser(option_list=option_list))
#define functions
# Convert an AncestryDNA table `x` (rsid/chromosome/position/allele1/allele2)
# to 23andMe format and write the result to `opt$out` (global CLI options).
convertor_Ancto23<-function(x){
  #hold header of AncestryDNA file
  # `sed '/^rsid/q'` prints through the first line starting with "rsid";
  # the trailing `sed '$d'` drops that column-header line, leaving only the
  # file's comment header in temp_header.txt.
  recover_header_command<-paste("sed '/^rsid/q' ",opt$file," | sed '$d' > temp_header.txt",sep = "")
  system(recover_header_command)
  #convert to 23andMe format: collapse allele1+allele2 into one genotype column
  x$genotype<-paste(x$allele1,x$allele2,sep = "")
  # keep rsid, chromosome, position and the new genotype column (index 6)
  x<-x[,c(1,2,3,6)]
  colnames(x)[1]<-"# rsid"
  write.table(x,"temp_tailer.txt",sep = "\t",col.names = TRUE,row.names = FALSE,quote = FALSE)
  #combine header and tailer into the final output file
  combine_command<-paste("cat temp_header.txt temp_tailer.txt > ",opt$out,sep = "")
  system(combine_command)
  #cleanup temporary files
  cleanup_command<-paste("rm temp_header.txt temp_tailer.txt")
  system(cleanup_command)
}
# Convert an FTDNA table to 23andMe format.
# FTDNA's columns map one-to-one onto 23andMe's, so only the header names
# change before writing the table to `opt$out` (global CLI options).
convertor_FTto23 <- function(x) {
  new_names <- c("# rsid", "chromosome", "position", "genotype")
  colnames(x) <- new_names
  write.table(
    x, opt$out,
    sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE
  )
}
# Convert a 23andMe table `x` (rsid/chromosome/position/genotype) to
# AncestryDNA format and write the result to `opt$out` (global CLI options).
convertor_23toAnc<-function(x){
  #hold header of 23andMe file
  # `sed '/^# rsid/q'` prints through the first "# rsid" column-header line;
  # the trailing `sed '$d'` drops it, leaving only the comment header.
  recover_header_command<-paste("sed '/^# rsid/q' ",opt$file," | sed '$d' > temp_header.txt",sep = "")
  system(recover_header_command)
  # split the two-character genotype into allele1 / allele2 columns
  # (NOTE(review): comment in original said "convert to 23andMe format",
  # but this function produces AncestryDNA output)
  genotypes<-as.data.frame(str_split_fixed(x$genotype,"",2))
  x<-cbind(x[,c(1,2,3)],genotypes)
  colnames(x)<-c("rsid","chromosome","position","allele1","allele2")
  write.table(x,"temp_tailer.txt",sep = "\t",col.names = TRUE,row.names = FALSE,quote = FALSE)
  #combine header and tailer into the final output file
  combine_command<-paste("cat temp_header.txt temp_tailer.txt > ",opt$out,sep = "")
  system(combine_command)
  #cleanup temporary files
  cleanup_command<-paste("rm temp_header.txt temp_tailer.txt")
  system(cleanup_command)
}
# Convert an FTDNA table to AncestryDNA format: split the two-character
# RESULT genotype into allele1/allele2 columns and write to `opt$out`
# (global CLI options).
convertor_FTtoAnc <- function(x) {
  alleles <- as.data.frame(str_split_fixed(x$RESULT, "", 2))
  out_df <- cbind(x[, c(1, 2, 3)], alleles)
  colnames(out_df) <- c("rsid", "chromosome", "position", "allele1", "allele2")
  write.table(
    out_df, opt$out,
    sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE
  )
}
# Convert a 23andMe table `x` to FTDNA format (CSV with quoted values and an
# unquoted RSID/CHROMOSOME/POSITION/RESULT header) written to `opt$out`.
convertor_23toFT<-function(x){
  #convert to FTDNA format
  # write the unquoted header line separately, because the data rows below
  # must be quoted while the header must not be
  new_colnames<-list("RSID","CHROMOSOME","POSITION","RESULT")
  write.table(new_colnames,"temp_header.txt",sep=",",col.names = FALSE, row.names = FALSE,quote = FALSE)
  # coerce every column to character so all fields are quoted uniformly
  x<-data.frame(lapply(x, as.character), stringsAsFactors=FALSE)
  write.table(x,"temp_tailer.txt",sep = ",",col.names = FALSE, row.names = FALSE,quote = TRUE)
  #combine header and tailer into the final output file
  combine_command<-paste("cat temp_header.txt temp_tailer.txt > ",opt$out,sep = "")
  system(combine_command)
  #cleanup temporary files
  cleanup_command<-paste("rm temp_header.txt temp_tailer.txt")
  system(cleanup_command)
}
# Convert an AncestryDNA table `x` to FTDNA format: merge allele1+allele2
# into a single genotype column, then write a quoted CSV with an unquoted
# RSID/CHROMOSOME/POSITION/RESULT header to `opt$out` (global CLI options).
convertor_AnctoFT<-function(x){
  #convert to FTDNA format
  x$genotype<-paste(x$allele1,x$allele2,sep = "")
  # keep rsid, chromosome, position and the new genotype column (index 6)
  x<-x[,c(1,2,3,6)]
  # coerce to character so all data fields are quoted uniformly below
  x<-data.frame(lapply(x, as.character), stringsAsFactors=FALSE)
  new_colnames<-list("RSID","CHROMOSOME","POSITION","RESULT")
  write.table(new_colnames,"temp_header.txt",sep=",",col.names = FALSE, row.names = FALSE,quote = FALSE)
  write.table(x,"temp_tailer.txt",sep = ",",col.names = FALSE, row.names = FALSE,quote = TRUE)
  #combine header and tailer into the final output file
  combine_command<-paste("cat temp_header.txt temp_tailer.txt > ",opt$out,sep = "")
  system(combine_command)
  #cleanup temporary files
  cleanup_command<-paste("rm temp_header.txt temp_tailer.txt")
  system(cleanup_command)
}
#workflow ---------------------------------------------------------------------
# Validate the requested formats, read the input once, and dispatch to the
# matching convertor.  This replaces the original branch ladder, which read
# the input file three times (once per format branch) and duplicated the
# same-format and unknown-option checks in every branch.  User-visible
# messages are unchanged:
#   * unknown --format      -> message + quit()
#   * unknown --convert_to  -> message + quit()
#   * input == output       -> message + quit()
valid_formats <- c("23andMe", "AncestryDNA", "FTDNA")
if (!(opt$format %in% valid_formats)) {
  print("--format option not recognized. Type '--help' for accepted options.")
  quit()
}
if (!(opt$convert_to %in% valid_formats)) {
  print("--convert_to option not recognized. Type '--help' for accepted options.")
  quit()
}
if (opt$format == opt$convert_to) {
  print("Input and output format are the same.")
  quit()
}
# Read the input data exactly once.
input_df <- fread(opt$file)
# Dispatch table: convertors[[from]][[to]] is the conversion function.
convertors <- list(
  "23andMe"     = list("AncestryDNA" = convertor_23toAnc,
                       "FTDNA"       = convertor_23toFT),
  "AncestryDNA" = list("23andMe"     = convertor_Ancto23,
                       "FTDNA"       = convertor_AnctoFT),
  "FTDNA"       = list("23andMe"     = convertor_FTto23,
                       "AncestryDNA" = convertor_FTtoAnc)
)
convertors[[opt$format]][[opt$convert_to]](input_df)
print("Conversion complete!")
|
dccabbe309d45aea20a0408dcda7894473b36055
|
bf9aeaeb7a87d08f5b64cab35e5329bfe54620a2
|
/tests/testthat/test_extract_params.R
|
baf28addf892bc05902d5b8fc0e615ecd73f7c14
|
[] |
no_license
|
certara/survivalnma
|
eae3579c548617d2538bcea9753465c689b8f560
|
49ba1dcfddc6cd7b4c7ed30f0eb4f2d7712186c5
|
refs/heads/master
| 2021-07-02T12:31:16.906756
| 2020-10-13T15:20:17
| 2020-10-13T15:20:17
| 182,971,010
| 11
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
test_extract_params.R
|
context("Testing extraction of parameters")
# Shared fixture: a two-study network fitted with a Weibull model.
km_files <- c("Mota_OS_Suni_KM.txt",
              "Mota_OS_Ifn_KM.txt",
              "Mot_OS_Suni_KM.txt",
              "Mot_OS_Pazo_KM.txt")
df <- data.frame(
  stringsAsFactors = FALSE,
  "treatment" = c("Suni", "Ifn", "Suni", "Pazo"),
  "study" = c("Study 1", "Study 1", "Study 2", "Study 2"),
  "baseline" = c("Suni", "Suni", "Suni", "Suni"),
  "filepath" = sapply(km_files, function(f)
    system.file("extdata", "narrow", f, package="survnma", mustWork=TRUE))
)
nma <- survnma(df, "weibull", min_time_change = 0.05)
test_that("Rubbish study/ treatment causes an error", {
  expect_error(extract_mu(nma, "wrong study"))
  expect_error(extract_d(nma, "wrong treatment"))
})
test_that("Checking format", {
  expect_is(extract_mu(nma, "Study 1"), "matrix")
  expect_is(extract_d(nma, "Ifn"), "matrix")
})
test_that("Testing if baseline", {
  # The reference treatment (label 1) must have identically-zero effects.
  base_trt <- names(which(nma$trt_labels == 1))
  expect_equal(unique(c(extract_d(nma, base_trt))), 0)
  # Every other treatment must show variation in its effect draws.
  for (other_trt in nma$treatments[-1]) {
    expect_gt(length(unique(c(extract_d(nma, other_trt)))), 1)
  }
  expect_equal(unique(c(relative_d_in_study(nma, "Suni", "Study 2"))), 0)
  expect_gt(length(unique(c(relative_d_in_study(nma, "Pazo", "Study 2")))), 1)
})
|
c295732ef4e619c1bbf5eefcc3b579b4cc9b03b8
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.networking/man/networkmanager_get_transit_gateway_peering.Rd
|
d816254925b7a13ff4533a15e59cc373dfd40b8f
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 598
|
rd
|
networkmanager_get_transit_gateway_peering.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/networkmanager_operations.R
\name{networkmanager_get_transit_gateway_peering}
\alias{networkmanager_get_transit_gateway_peering}
\title{Returns information about a transit gateway peer}
\usage{
networkmanager_get_transit_gateway_peering(PeeringId)
}
\arguments{
\item{PeeringId}{[required] The ID of the peering request.}
}
\description{
Returns information about a transit gateway peer.
See \url{https://www.paws-r-sdk.com/docs/networkmanager_get_transit_gateway_peering/} for full documentation.
}
\keyword{internal}
|
1050dba6ad66130a22557e5012503895785f1ac6
|
475856e028f1ab7e8259c8d34f02bdcaefcf4db0
|
/R/gs3.R
|
142be2c295ac41aa3a0e42f34b6ace09b9c3e49e
|
[] |
no_license
|
cran/BNPdensity
|
44ee5d2ebc9aae83b243574090d211514014a9d1
|
314c943b35f83098fa87c7b15a16757c7334d346
|
refs/heads/master
| 2023-04-07T12:52:52.293810
| 2023-03-24T14:10:02
| 2023-03-24T14:10:02
| 17,677,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,957
|
r
|
gs3.R
|
#' Conditional posterior distribution of latent U
#'
#' This function simulates from the conditional posterior distribution of the
#' latent U.
#'
#' One Metropolis-Hastings step: a gamma proposal centred at the current
#' value \code{ut} is accepted with probability \code{min(1, ratio)};
#' otherwise \code{ut} is returned unchanged.
#'
#' For internal use.
#'
#' @param ut current value of U
#' @param n number of data points
#' @param r number of clusters
#' @param alpha total mass of the centering measure
#' @param kappa NRMI process parameter
#' @param gama NRMI process parameter in [0, 1]
#' @param delta shape of the gamma proposal (controls its spread)
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(ut, n = 200, r = 20, alpha = 1, kappa = 1, gama = 1 / 2,
#'          delta = 2) {
#'   w <- ut
#'   ratio <- NaN
#'   while (is.nan(ratio)) {
#'     v <- ustar <- rgamma(1, shape = delta, rate = delta / ut)
#'     vw <- v / w
#'     vb <- v + kappa
#'     wb <- w + kappa
#'     A <- vw^(n - 2 * delta)
#'     B <- (vb / wb)^(r * gama - n)
#'     D <- vb^gama - wb^gama
#'     E <- 1 / vw - vw
#'     ratio <- A * B * exp(-alpha / gama * D - delta * E)
#'   }
#'   p <- min(1, ratio)
#'   u <- ifelse(runif(1) <= p, ustar, ut)
#'   return(u)
#' }
gs3 <-
  function(ut, n, r, alpha, kappa, gama, delta) {
    w <- ut
    ratio <- NaN
    # Redraw the gamma proposal until the acceptance ratio is numerically
    # defined (extreme draws can overflow/underflow to NaN).
    while (is.nan(ratio)) {
      v <- ustar <- rgamma(1, shape = delta, rate = delta / ut)
      vw <- v / w
      vb <- v + kappa
      wb <- w + kappa
      # Factors of the MH ratio: target density ratio times the asymmetric
      # proposal correction, assembled on the raw (non-log) scale.
      A <- vw^(n - 2 * delta)
      B <- (vb / wb)^(r * gama - n)
      D <- vb^gama - wb^gama
      E <- 1 / vw - vw
      ratio <- A * B * exp(-alpha / gama * D - delta * E)
    }
    # Accept the proposal with probability min(1, ratio).
    p <- min(1, ratio)
    u <- ifelse(runif(1) <= p, ustar, ut)
    return(u)
  }
#' Target logdensity of U given the data
#'
#' Unnormalised log-density of the latent variable U evaluated at \code{u},
#' given \code{n} observations, \code{r} clusters and process parameters
#' \code{gamma}, \code{kappa} and total mass \code{a}.
#'
#' @keywords internal
#'
logf_u_cond_y <- function(u, n, r, gamma, kappa, a) {
  shifted <- u + kappa
  term_u <- (n - 1) * log(u)
  term_shifted <- (r * gamma - n) * log(shifted)
  penalty <- a / gamma * shifted^gamma
  term_u + term_shifted - penalty
}
#' Contribution of the target logdensity of logU to the Metropolis-Hastings ratio
#'
#' Log-density of log(U): the density of U evaluated at exp(logu) plus the
#' log-Jacobian of the change of variables U -> log U (which equals logu).
#'
#' @keywords internal
#'
logf_logu_cond_y <- function(logu, n, r, gamma, kappa, a) {
  logf_u_cond_y(u = exp(logu), n = n, r = r, gamma = gamma, kappa = kappa, a = a) + logu
}
#' Contribution of the proposal kernel logdensity to the Metropolis-Hastings ratio
#'
#' Log-density of the Gaussian random-walk proposal: a normal centred at the
#' current value \code{logu} with standard deviation \code{delta}, evaluated
#' at the proposed value \code{logu_prime}.
#'
#' @param logu_prime proposed value of log(U)
#' @param logu current value of log(U)
#' @param delta standard deviation of the proposal
#' @keywords internal
#'
logdprop_logu <- function(logu_prime, logu, delta) {
  # log = TRUE spelled out (was the shorthand `T`, which can be reassigned).
  dnorm(x = logu_prime, mean = logu, sd = delta, log = TRUE)
}
#' Proposal distribution for logU
#'
#' Draws one proposal for log(U): a Gaussian random-walk step of scale
#' \code{delta} around the current value \code{logu}.
#'
#' @inheritParams logacceptance_ratio_logu
#' @keywords internal
#'
rprop_logu <- function(logu, delta) {
  # mean + sd * z parameterisation; identical draw to rnorm(1, logu, delta).
  logu + rnorm(n = 1, mean = 0, sd = delta)
}
#' Metropolis-Hastings ratio for the conditional of logU
#'
#' This function computes the (log) Metropolis-Hastings acceptance ratio used
#' to decide whether to accept or reject a proposed value for logU.
#'
#' @param logu Real, log of the latent variable U at the current iteration.
#' @param logu_prime Real, log of the new proposed latent variable U.
#' @param a Positive real. Total mass of the centering measure.
#' @inheritParams gs3_log
#'
#' @keywords internal
#'
logacceptance_ratio_logu <- function(logu, logu_prime, n, r, gamma, kappa, a, delta) {
  # Target density difference (proposed minus current)...
  target_diff <- logf_logu_cond_y(logu_prime, n, r, gamma, kappa, a) -
    logf_logu_cond_y(logu, n, r, gamma, kappa, a)
  # ...plus the proposal asymmetry correction (reverse minus forward).
  proposal_diff <- logdprop_logu(logu, logu_prime, delta) -
    logdprop_logu(logu_prime, logu, delta)
  min(0, target_diff + proposal_diff)
}
#' Conditional posterior distribution of latent logU
#'
#' This function simulates from the conditional posterior distribution of a
#' log transformation of the latent U, using one Metropolis-Hastings step
#' with a Gaussian random-walk proposal.
#'
#' @param logut Real, log of the latent variable U at the current iteration.
#' @param n Integer, number of data points.
#' @param r Integer, number of clusters.
#' @param alpha Positive real. Total mass of the centering measure.
#' @param kappa Positive real. A parameter of the NRMI process.
#' @param gama Real. \eqn{0\leq \texttt{gama} \leq 1}{0 <= gama <=
#' 1}. See details.
#'
#' @param delta Scale of the Metropolis-Hastings proposal distribution
#'
#' @keywords internal
#'
gs3_log <-
  function(logut, n, r, alpha, kappa, gama, delta) {
    # Propose, then accept/reject (draw order preserved: proposal first,
    # uniform second).
    proposal <- rprop_logu(logu = logut, delta = delta)
    log_alpha <- logacceptance_ratio_logu(logu = logut, logu_prime = proposal, n = n, r = r, gamma = gama, kappa = kappa, a = alpha, delta = delta)
    accepted <- log(runif(n = 1)) < log_alpha
    if (accepted) proposal else logut
  }
#' Conditional posterior distribution of latent U
#'
#' This function simulates from the conditional posterior distribution of the
#' latent U, with an adaptive proposal: every \code{batch_size} iterations
#' (when \code{adapt = TRUE}) the proposal scale is tuned towards the target
#' acceptance rate, then one Metropolis-Hastings step is taken on the log
#' scale via \code{gs3_log}.
#'
#' @param ut current value of U
#' @param U vector of U draws from previous iterations (used to estimate the
#'   recent acceptance rate)
#' @param iter current iteration number
#' @param adapt logical; tune the proposal scale?
#' @inheritParams gs3_log
#' @return a list with \code{u_prime} (new U value) and \code{delta}
#'   (possibly updated proposal scale)
#' @keywords internal
#'
gs3_adaptive3 <- function(ut, n, r, alpha, kappa, gama, delta, U, iter, adapt = FALSE) {
  target_acc_rate <- 0.44 # standard target for 1-d random-walk Metropolis
  batch_size <- 100
  if (adapt && (iter %% batch_size == 0)) {
    # Acceptance rate over the last batch, estimated from the number of
    # distinct U values (a rejection repeats the previous value).
    acc_rate <- length(unique(U[(iter - batch_size + 1):iter])) / batch_size
    # Adaptation step size, vanishing as the chain progresses.
    logincrement <- 2 * min(0.25, 1 / sqrt(iter))
    # increment = min(0.5, 5 / sqrt(iter))
    # Shrink the proposal scale when accepting too rarely, grow it otherwise.
    # (Fix: compare against the named `target_acc_rate` constant, which was
    # previously defined but shadowed by a duplicated 0.44 literal.)
    if (acc_rate < target_acc_rate) {
      delta_i <- delta * exp(-logincrement)
    }
    else {
      delta_i <- delta * exp(+logincrement)
    }
  }
  else {
    delta_i <- delta
  }
  logu_prime <- gs3_log(logut = log(ut), n = n, r = r, alpha = alpha, kappa = kappa, gama = gama, delta = delta_i)
  return(list(u_prime = exp(logu_prime), delta = delta_i))
}
|
c006bd77b426fab85a79a8641b6123688d2789e2
|
babe7393d6be37f3d83318673e4f57364569e21a
|
/ES_Toolkit_R/man/getAOAFeature.Rd
|
2f190bb601f4c115bc591600088cdd8c8d22ac35
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
nationalparkservice/EnvironmentalSetting_Toolkit
|
544d088ab7c087b9e57a529b358e5577c94191c3
|
7ddc44c133a458f4d78a894a564ce014efc328a8
|
refs/heads/master
| 2021-03-27T20:45:21.298338
| 2020-08-25T23:01:16
| 2020-08-25T23:01:16
| 115,563,489
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 517
|
rd
|
getAOAFeature.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilityFunctions.R
\name{getAOAFeature}
\alias{getAOAFeature}
\title{getAOAFeature}
\usage{
getAOAFeature(unitCode, aoaExtent = "km30")
}
\arguments{
\item{unitCode}{One NPS unit code as a string}
\item{aoaExtent}{aoaExtent one of park, km3 or km30 as a string. Default is "km30"}
}
\description{
Function retrieves a GeoJSON-formatted area of analysis (AOA) polygon in the
NAD83 geographic coordinate reference system (CRS).
}
|
36492fbba7a899647d18964ed289a55e401ac51c
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed_and_cleaned/11639_0/rinput.R
|
dbcf03af49cd141dcc8b897a5c7f2faa3e6a9828
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read a Newick tree, remove its root, and write the unrooted tree.
# NOTE(review): file names suggest this feeds a codeml pipeline — confirm.
library(ape)
testtree <- read.tree("11639_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11639_0_unrooted.txt")
|
37b9e8c88ae82721fc0bb9470236baf9ebe602e5
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkWidgetSetCanDefault.Rd
|
cd215af5778badcacb52263230a3b118e80c7c76
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 533
|
rd
|
gtkWidgetSetCanDefault.Rd
|
\alias{gtkWidgetSetCanDefault}
\name{gtkWidgetSetCanDefault}
\title{gtkWidgetSetCanDefault}
\description{Specifies whether \code{widget} can be a default widget. See
\code{\link{gtkWidgetGrabDefault}} for details about the meaning of
"default".}
\usage{gtkWidgetSetCanDefault(object, can.default)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkWidget}}}
\item{\verb{can.default}}{whether or not \code{widget} can be a default widget.}
}
\details{Since 2.18}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
94159a031d90439eddfb2340becbb69a54f2f400
|
8e5881985817e48d70b0dad6a6f00e0d85102f9f
|
/man/calc_roc.rfsrc.Rd
|
c9b462fc0675c5b00be338b4f20c9042a8bcfc50
|
[] |
no_license
|
timelyportfolio/ggRandomForests
|
914480189908058386c891546ba64e3673f2120e
|
32be731a01fa75d56702b6a2236b3f81ed62fc2c
|
refs/heads/master
| 2020-04-06T04:32:52.892350
| 2014-12-10T02:48:24
| 2014-12-10T02:48:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
rd
|
calc_roc.rfsrc.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{calc_roc.rfsrc}
\alias{calc_roc}
\alias{calc_roc.randomForest}
\alias{calc_roc.rfsrc}
\title{Internal Receiver Operator Characteristic calculator for randomForest objects}
\usage{
calc_roc.rfsrc(rf, dta, which.outcome = "all", oob = TRUE)
}
\arguments{
\item{rf}{\code{randomForestSRC::rfsrc} or \code{randomForestSRC::predict} object
containing predicted response}
\item{dta}{True response variable}
\item{which.outcome}{If defined, only show ROC for this response.}
\item{oob}{Use OOB estimates, the normal validation method (TRUE)}
}
\description{
Internal Receiver Operator Characteristic calculator for randomForest objects
}
\details{
Given the randomForest or randomForestSRC prediction and the actual
response value, calculate the specificity (1-False Positive Rate) and sensitivity
(True Positive Rate) of a predictor.
This is a helper function for the \code{\link{gg_roc}} functions, and not intended
for use by the end user.
}
\examples{
\dontrun{
##
## Taken from the gg_roc example
iris.obj <- rfsrc(Species ~ ., data = iris)
gg_dta <- calc_roc.rfsrc(iris.obj, iris.obj$yvar, which.outcome=1, oob=TRUE)
}
}
\seealso{
\code{\link{calc_auc}} \code{\link{gg_roc}} \code{\link{plot.gg_roc}}
}
|
33a90935147e1f5278400708410b7a693b0102bc
|
eae1c50702de5d990646885cf95d7a0bbfdf91a2
|
/R/utils-p_values.R
|
0cf9c97f8493b7eee9d45561c6bffe4deb8291e7
|
[
"MIT"
] |
permissive
|
MethodsConsultants/tibbletest
|
d0cda7ca21624291a8aa42830aaacae27d00d159
|
e23332b0087b7370860f67ffc6609a78f630fd51
|
refs/heads/master
| 2021-12-24T17:16:57.496749
| 2020-09-04T20:45:42
| 2020-09-04T20:49:18
| 191,244,403
| 0
| 0
|
NOASSERTION
| 2021-03-03T21:59:06
| 2019-06-10T20:55:50
|
R
|
UTF-8
|
R
| false
| false
| 3,561
|
r
|
utils-p_values.R
|
#' Compute a chi-square / Fisher p-value for a categorical variable vs. treatment
#'
#' Weighted data (any weight != 1) are tested with `survey::svychisq`.
#' Unweighted data use `stats::chisq.test`; when the chi-square test raises a
#' warning (e.g. low expected cell counts), `stats::fisher.test` with a
#' simulated p-value is used instead.
#'
#' @param df <`tbl_df`> Dataframe that has variable and treatment columns of interest
#' @param var <`character(1)`> Name of variable column
#' @param treatment <`character(1)`> Name of treatment column
#' @param weight_var <`character(1)`> Name of variable with observation weights
#'
#' @return <`numeric(1)`> p-value
#'
#' @importFrom survey svydesign svychisq
#' @import purrr
#' @importFrom rlang !!
#'
#' @noRd
p_chi_fisher <- function(df, var, treatment, weight_var) {
  if (any(df[[weight_var]] != 1)) {
    # Survey-weighted chi-square on complete cases only.
    df <- tidyr::drop_na(df, !!var, !!treatment)
    design <- svydesign(~1, data = df, weights = df[[weight_var]])
    test <- svychisq(stats::as.formula(paste0("~", var, " + ", treatment)),
                     design = design)
    return(as.numeric(pluck(test, "p.value")))
  }
  run_chisq <- function(v, d, tr) {
    stats::chisq.test(
      x = as.factor(d[[v]]),
      y = as.factor(d[[tr]])
    ) %>%
      pluck("p.value") %>%
      as.numeric()
  }
  run_fisher <- function(v, d, tr) {
    stats::fisher.test(
      x = as.factor(d[[v]]),
      y = as.factor(d[[tr]]),
      simulate.p.value = TRUE
    ) %>%
      pluck("p.value")
  }
  # Capture warnings from the chi-square test; any warning at all triggers
  # the Fisher fallback (mirrors the low-expected-count situation).
  quiet_chisq <- purrr::quietly(run_chisq)
  chisq_out <- quiet_chisq(var, df, treatment)
  if (length(chisq_out$warnings) > 0) {
    run_fisher(var, df, treatment)
  } else {
    chisq_out$result
  }
}
#' Compute an ANOVA-style p-value for a numeric variable vs. treatment
#'
#' Weighted data (any weight != 1) are fit with `survey::svyglm` followed by a
#' Wald test via `survey::regTermTest`; unweighted data use `stats::lm` plus
#' `stats::anova`.
#'
#' @inheritParams p_chi_fisher
#'
#' @return <`numeric(1)`> p-value
#'
#' @import dplyr
#' @importFrom survey svydesign svyglm regTermTest
#' @importFrom rlang !!
#'
#' @noRd
p_anova <- function(df, var, treatment, weight_var) {
  # Same "var ~ treatment" formula text serves both branches.
  fml_txt <- paste0(var, " ~ ", treatment)
  if (any(df[[weight_var]] != 1)) {
    df <- tidyr::drop_na(df, !!var, !!treatment)
    design <- svydesign(~1, data = df, weights = df[[weight_var]])
    fit <- svyglm(stats::as.formula(fml_txt), design = design)
    wald <- regTermTest(fit, test.terms = treatment, method = "Wald")
    return(as.numeric(purrr::pluck(wald, "p")))
  }
  fit <- stats::lm(fml_txt, data = df)
  purrr::pluck(pull(stats::anova(fit), `Pr(>F)`), 1)
}
#' Compute a Kruskal-Wallis-style p-value for a variable vs. treatment
#'
#' Weighted data (any weight != 1) use `survey::svyranktest`; unweighted data
#' use `stats::kruskal.test`.
#'
#' @inheritParams p_chi_fisher
#'
#' @return <`numeric(1)`> p-value
#'
#' @importFrom survey svydesign svyranktest
#' @importFrom rlang !!
#'
#' @noRd
p_kruskal <- function(df, var, treatment, weight_var) {
  # Shared formula object for both the weighted and unweighted paths.
  fml <- stats::as.formula(paste0(var, " ~ ", treatment))
  if (any(df[[weight_var]] != 1)) {
    df <- tidyr::drop_na(df, !!var, !!treatment)
    design <- svydesign(~1, data = df, weights = df[[weight_var]])
    rank_test <- svyranktest(fml, design = design)
    return(as.numeric(purrr::pluck(rank_test, "p.value")))
  }
  purrr::pluck(stats::kruskal.test(fml, data = df), "p.value")
}
|
2612e7e9660db9bd31c2f49541138dd0490ec34f
|
05678f03a83ce73472b1473f2d0743c9f015f2b8
|
/tests/testthat/test_polygon_geometry.R
|
6a721631e5058b880d590d9536ae05338eb1c4dc
|
[] |
no_license
|
Breeding-Insight/brapi-r-v2
|
3a7b4168c6d8516eb1128445a2f281d1199668a3
|
5cfa7453947121496780b410661117639f09c7ff
|
refs/heads/main
| 2023-03-14T22:20:29.331935
| 2021-03-17T01:31:11
| 2021-03-17T01:31:11
| 348,535,689
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
test_polygon_geometry.R
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# testthat skeleton for the generated PolygonGeometry model class; property
# expectations are left commented out until real fixture data exists.
context("Test PolygonGeometry")
# Instantiate an empty model once; both tests reuse it.
model.instance <- PolygonGeometry$new()
test_that("coordinates", {
  # tests for the property `coordinates` (array[Array])
  # An array of linear rings
  # uncomment below to test the property
  #expect_equal(model.instance$`coordinates`, "EXPECTED_RESULT")
})
test_that("type", {
  # tests for the property `type` (character)
  # The literal string \"Polygon\"
  # uncomment below to test the property
  #expect_equal(model.instance$`type`, "EXPECTED_RESULT")
})
|
ed084905f5e012d4f4e8ab96c755bf5f7e7fb8c1
|
96bb1dca9df61f0cddd526921f071e403169b53c
|
/R/dograph.R
|
f41fd06d33d731ac78cfdc1cc6769614426dab76
|
[] |
no_license
|
byadu/libcubolap
|
c3f8d582ca2ee1d52147022c8e2c5c640b1beafc
|
aab240405b1868dfdbf23c4f618b7fcd8e84db9f
|
refs/heads/master
| 2022-11-19T16:07:50.921510
| 2020-07-17T13:09:06
| 2020-07-17T13:09:06
| 278,988,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,153
|
r
|
dograph.R
|
#' @title dograph
#' @description Create an aggregate data frame (and graph parameters) from the
#'   selected measures, dimensions and filters.
#' @param M database connection for meta data
#' @param D database connection for data
#' @param measures measures selected
#' @param dims dimensions selected
#' @param filters filters selected
#' @param gtype graph type
#' @return a list with `dxy` (aggregate data frame merged over all measures)
#'   and `gp` (graph parameters from `setgp`)
#'
#' @import dplyr
#' @import DBI
#' @import RMySQL
#' @import libcubmeta
#' @export
#'
dograph <- function(M, D, measures, dims, filters, gtype) {
  mddf <- getmddf(M, D, measures, dims)
  mdd <- mddf$mdd
  mdf <- mddf$mdf
  measures <- mddf$measures
  series <- c()
  timings <- c()
  dxy <- NULL
  # seq_along() (not 1:length) so an empty measure list yields no iterations.
  for (i in seq_along(measures)) {
    # Columns containing "]" are computed-column expressions -> calcol();
    # plain columns run as an aggregate SQL series -> doseries().
    # (timings keeps elapsed seconds per measure; element 3 of system.time.)
    if (grepl("]", mdf[[i]]$md_column, fixed = TRUE)) {
      timings[i] <- system.time(s <- calcol(M, D, mdf[[i]], mdd, filters))[3]
    } else {
      timings[i] <- system.time(s <- doseries(M, D$mydata, mdf[[i]], mdd, filters))[3]
    }
    thisdxy <- s$dxy
    series[i] <- s$series
    # Merge successive measures on their shared dimension columns.
    if (is.null(dxy)) {
      dxy <- thisdxy
    } else {
      dxy <- merge(dxy, thisdxy, all = TRUE)  # was all=T; TRUE is reassign-safe
    }
  }
  gp <- setgp(mdd, mdf, series, timings, measures, dims, filters, gtype)
  #gp$gfid<- addgraph(gp)
  return(list(dxy = dxy, gp = gp))
}
# Run one aggregate series query and post-process the result.
# Builds the SQL via makeaggsel(), executes it against `my_data`, registers
# the series in the config store, then relabels the result columns with the
# human-readable dimension/measure names.
doseries <- function(M, my_data, mdf, mdd, filters) {
  # g<- isolate(rg$g)
  sel <- makeaggsel(M, mdf, mdd, filters)
  dxy <- dbGetQuery(my_data, sel)
  my_cfg <- M$mycfg
  # NOTE(review): `series` was previously only assigned for narrow results
  # (< 8 columns); a wider result hit "object 'series' not found" below.
  # Initialising to NULL preserves the narrow-result behaviour while making
  # the wide case fail in data rather than on a missing symbol — confirm the
  # intended handling of wide results.
  series <- NULL
  if (ncol(dxy) < 8)
    series <- addxy(my_cfg, dxy)
  addseries(my_cfg, series, M$uid, mdf, mdd, filters)
  # Bug fix: `dimnames`/`measnames` were left undefined when mdd/mdf was
  # NULL, so c(dimnames, measnames) captured base::dimnames (a function) and
  # the colnames<- assignment failed. NULLs drop out of c() harmlessly.
  dim_names <- NULL
  meas_names <- NULL
  if (!is.null(mdd))
    dim_names <- c(sapply(mdd, '[[', 'md_name'))
  if (!is.null(mdf))
    meas_names <- mdf$md_name
  colnames(dxy) <- c(dim_names, meas_names)
  return(list(dxy = dxy, series = series))
}
# Build an SQL aggregate expression such as "sum(tab.col)". The table
# qualifier is prepended only when `col` is a real column per isacol()
# (a project helper defined elsewhere).
aggcol <- function(tab, col, aggfun) {
  prefix <- ifelse(isacol(col), paste0(tab, "."), "")
  paste0(aggfun, '(', prefix, col, ')')
}
# Choose the aggregate for a query: a plain row count when no dimensions are
# selected, otherwise the measure's own aggregate expression via aggcol().
makeagg <- function(mdf, mdd) {
  if (is.null(mdd)) {
    "count(*)"
  } else {
    aggcol(mdf$md_table, mdf$md_column, mdf$md_sumcnt)
  }
}
# Assemble the aggregate SELECT statement (dimensions + aggregate + optional
# WHERE + GROUP BY) and echo it to the console before returning it.
makeaggsel <- function(M, mdf, mdd, filters) {
  parts <- olapdims(M, mdf, mdd, filters)
  agg_expr <- makeagg(mdf, mdd)
  where_clause <- ifelse(!is.null(parts$w), paste("where", parts$w), '')
  query <- paste("select", parts$dimcols, ",", agg_expr, "from", parts$frm,
                 where_clause, "group by", parts$dimcols)
  # if(!is.null(f$having))
  #   sel<- paste(sel, f$having)
  print(query)
  # qry$sql<- sel
  query
}
|
b2c422975ab23b0609fafdfa3d577f62c5092964
|
4abfb1910c9503593f9fb4c366edc4241371e6b6
|
/R/explore_mobloc_functions.R
|
0a828b9e771514938b7e95425b11c68cb4e6631b
|
[] |
no_license
|
mtennekes/mobloc
|
2661783dadc55f7d10622b95250486810e07ab4f
|
f4284d84437d55b7b8c048b13fe4173865c8b5d9
|
refs/heads/master
| 2020-06-20T18:04:23.754274
| 2019-05-21T10:09:37
| 2019-05-21T10:09:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,270
|
r
|
explore_mobloc_functions.R
|
# Shift each cell point `distance` metres along its antenna direction.
# Small cells and cells without a direction stay in place. SIN/COS are
# project helpers (presumably degree-based trig — confirm against their
# definitions). Returns an sf object built from the shifted x2/y2 columns.
move_cp_to_direction <- function(cp, distance = 100) {
  keep_in_place <- cp$small | is.na(cp$direction)
  cp$x2 <- cp$x + ifelse(keep_in_place, 0, (SIN(cp$direction) * distance))
  cp$y2 <- cp$y + ifelse(keep_in_place, 0, (COS(cp$direction) * distance))
  attrs <- st_set_geometry(cp, NULL)
  st_as_sf(attrs, coords = c("x2", "y2"), crs = st_crs(cp))
}
# Build one straight LINESTRING per row connecting paired point geometries
# (original position -> shifted position), carrying over cp1's `cell` ids.
create_connection_lines <- function(cp1, cp2) {
  from_xy <- st_coordinates(cp1)
  to_xy <- st_coordinates(cp2)
  # seq_len() (not 1:nrow) so an empty input produces zero segments instead
  # of indexing rows 1 and 0.
  segs <- lapply(seq_len(nrow(from_xy)), function(i) {
    st_linestring(rbind(from_xy[i, ], to_xy[i, ]))
  })
  st_sf(geometry = do.call(st_sfc, segs), cell = cp1$cell, crs = st_crs(cp1))
}
# Pick the leaflet CRS for the map: a custom LAEA (ETRS89 / EPSG:3035)
# projection definition, or the standard web-mercator CRS otherwise.
get_leafletCRS <- function(epsg) {
  if (epsg != 3035) {
    return(leafletCRS(crsClass = "L.CRS.EPSG3857"))
  }
  leafletCRS(
    crsClass = "L.Proj.CRS",
    code = 'EPSG:3035',
    proj4def = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs",
    resolutions = 2^(7:0)
  )
}
# Add the base tile layer matching the chosen CRS: an EEA WMS DEM layer for
# EPSG:3035, the default OSM tiles otherwise.
get_epsg_tiles <- function(map, epsg) {
  if (epsg != 3035) {
    return(addTiles(map))
  }
  addWMSTiles(map,
              "https://image.discomap.eea.europa.eu/arcgis/services/GioLandPublic/DEM/MapServer/WmsServer",
              layers = "Image")
}
# Build the base leaflet map: background tiles for the requested CRS plus the
# cell-to-offset connection lines.
# Fix: the pipeline result was bound to an unused local `lf`, which made the
# function return its value invisibly by accident; return the map directly.
base_map <- function(cp, offset, epsg) {
  cp2 <- move_cp_to_direction(cp, offset)
  cp_lines <- create_connection_lines(cp, cp2)
  leaflet(options = leafletOptions(crs = get_leafletCRS(epsg))) %>%
    addPolylines(data = cp_lines %>% st_transform(crs = 4326), color = "#777777", opacity = 1, weight = 3, group = "Cell locations") %>%
    get_epsg_tiles(epsg)
}
# Redraw one visualisation layer on the shiny leaflet proxy map ("map"):
# cell markers (with optional directional offset lines), a raster overlay for
# the selected quantity `var`, a matching legend/layer control, and the
# analysis-area outline `rect`.
#
# cp     : sf points of cell locations; uses columns sel (2 = user-selected)
#          and small
# rst    : raster of values to display; if entirely NA only markers are drawn
# var    : quantity held by rst ("dBm", "s", "bsm", "pag", "pg", "pga", or a
#          prior id looked up in pnames)
# trans  : overlay/legend opacity
# pnames : named lookup of prior ids to display names
# offset : metres to shift directional cells (0 = markers at the cell site)
# rect   : polygon drawn as a plain black outline
viz_p <- function(cp, rst, var, trans, pnames, offset, rect) {
  # Marker category for the palette: selected / small / normal.
  cp$sel <- factor(ifelse(cp$sel == 2, "Selected", ifelse(cp$small, "Small cell", "Normal cell")), levels = c("Selected", "Small cell", "Normal cell"))
  cp2 <- move_cp_to_direction(cp, offset)
  cp_lines <- create_connection_lines(cp, cp2)
  pal <- colorFactor(c("red", "gray70", "gold"), levels = c("Selected", "Small cell", "Normal cell"))
  # An all-NA raster means there is nothing to overlay.
  if (all(is.na(rst[]))) var <- "empty"
  # Legend/layer-control title per quantity; fall back to a prior name.
  title <- switch(var,
                  dBm = "Signal strength in dBm",
                  s = "Signal dominance - s",
                  bsm = "Best server map",
                  #lu = "Land use prior (in %)",
                  pag = "Connection likelihood - P(a|g)<br>(in 1 / 1,000)",
                  pg = "Composite prior - P(g)<br>(in 1/1,000,000)",
                  pga = "Location posterior - P(g|a)<br>(in 1/1,000,000)",
                  paste("Prior", pnames[var], " - P(g)<br>(in 1/1,000,000)"))
  # dBm_classes / qty_classes are module-level class definitions (breaks,
  # colors, labels) defined elsewhere in the package.
  cls <- if (var == "dBm") {
    dBm_classes
  } else {
    qty_classes
  }
  numpal <- ifelse(var %in% c("dBm", "s"), "Blues",
                   ifelse(var == "pga", "viridis",
                          ifelse(var == "pag", "Greens", "Blues")))
  # Prepare the projected raster (rst2) and its palette (pal2) per quantity.
  if (var %in% c("dBm", "s")) {
    # Binned palette over the fixed class breaks; raster used unprojected.
    pal2 <- colorBin(cls$colors, bins = cls$breaks, na.color = "#00000000")#, dBm_classes$labels)
    #rst2 <- raster::projectRaster(rst, crs = st_crs(4326)$proj4string)
    rst2 <- rst
  } else if (var == "bsm") {
    # Categorical best-server raster: nearest-neighbour reprojection keeps
    # the factor levels intact; colors cycle through a Dark2 palette.
    rst2 <- raster::projectRaster(rst, crs = st_crs(3857)$proj4string, method = "ngb")
    lvls <- raster::levels(rst)[[1]]
    cols <- rep(RColorBrewer::brewer.pal(8, "Dark2"), length.out = nrow(lvls))
    pal2 <- colorFactor(palette = cols, domain = lvls$ID, na.color = "transparent")
  } else if (var != "empty") {
    # Continuous probability rasters: bilinear reprojection, falling back to
    # nearest-neighbour when bilinear produces NaNs.
    rst2 <- raster::projectRaster(rst, crs = st_crs(4326)$proj4string, method = "bilinear")
    if (any(is.nan(rst2[]))) {
      rst2 <- raster::projectRaster(rst, crs = st_crs(4326)$proj4string, method = "ngb")
    }
    # Rescale probabilities into per-mille (pag) or per-million (others),
    # clamped to the displayable range.
    if (var == "pag") {
      values <- pmin(pmax(rst2[] * 1000, 0), 1000)
    } else {
      values <- pmin(pmax(rst2[] * 1000000, 0), 1000000)
    }
    rst2[] <- values
    rng <- range(values, na.rm = TRUE)
    pal2 <- colorNumeric(palette = numpal, rng, reverse = (numpal != "viridis"),
                         na.color = "transparent")
  }
  # Wipe the previous layer set on the proxy before redrawing.
  lf <- leafletProxy("map") %>%
    clearMarkers() %>%
    clearImages() %>%
    clearControls() %>%
    clearShapes()
  # Markers: at the shifted positions (with connector lines) when an offset
  # is requested, otherwise at the true cell positions.
  if (offset > 0) {
    lf <- lf %>%
      addPolylines(data = cp_lines %>% st_transform(crs = 4326), color = "#777777", opacity = 1, weight = 3, group = "Cell locations") %>%
      addCircleMarkers(data = cp2 %>% st_transform(crs = 4326), fillColor = ~pal(sel), color = "black", fillOpacity = 1, radius = 5, weight = 1, group = "Cell locations", layerId = ~cell)
  } else {
    lf <- lf %>%
      addCircleMarkers(data = cp %>% st_transform(crs = 4326), fillColor = ~pal(sel), color = "black", fillOpacity = 1, radius = 5, weight = 1, group = "Cell locations", layerId = ~cell)
  }
  # Overlay + legend, matching the palette family chosen above.
  if (var %in% c("dBm", "s")) {
    lf <- lf %>% addRasterImage(x = rst2, opacity = trans, group = title, colors = pal2) %>%
      leaflet::addLayersControl(overlayGroups = c("Cell locations", title), position = "topleft", options = layersControlOptions(collapsed = FALSE)) %>%
      addLegend(colors = cls$colors, labels = cls$labels, opacity = trans, title = title)
  } else if (var == "bsm") {
    lf <- lf %>% addRasterImage(x = rst2, opacity = trans, group = title, colors = cols) %>%
      leaflet::addLayersControl(overlayGroups = c("Cell locations", title), position = "topleft", options = layersControlOptions(collapsed = FALSE)) %>%
      addLegend(colors = cols, labels = as.character(lvls$cell), opacity = trans, title = title)
  } else if (var == "empty") {
    lf <- lf %>% leaflet::addLayersControl(overlayGroups = c("Cell locations"), position = "topleft")
  } else {
    lf <- lf %>% addRasterImage(x = rst2, opacity = trans, group = title, colors = pal2) %>%
      leaflet::addLayersControl(overlayGroups = c("Cell locations", title), position = "topleft", options = layersControlOptions(collapsed = FALSE)) %>%
      addLegend(pal = pal2, values = rng, opacity = trans, title = title)
  }
  # Finally, the analysis-area outline.
  lf %>% addPolygons(data = rect, color = "#000000", weight = 1, fill = FALSE)
}
|
9065fa097c656ca0a6181c405b91cbc76b43ec51
|
77b40750438f66bfd1eb6b965875c75d18c7498b
|
/R/fx_modelResample.R
|
bcf527d26f2cef67bead3bc032a78efdd1d1f744
|
[] |
no_license
|
fishpm/nruPredict
|
8bd737ca31b8d6c3fbbe7a8cd1b9d5970f4305a3
|
3fd93518b2131f5dc5a77b440e4574ce7efeaeef
|
refs/heads/master
| 2022-07-16T10:53:37.590941
| 2022-07-11T15:07:42
| 2022-07-11T15:07:42
| 228,579,565
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,893
|
r
|
fx_modelResample.R
|
## * fx_modelResample (documentation)
##' @title Apply Machine Learning Framework
##' @description Apply machine learning framework to specified dataset
##'
##' @param df0 data frame including all observations (data frame)
##' @param cv.type cross-validation type ('loocv', 'ltocv', 'n-fold', 'numeric') (string)
##' @param covar list of df0 column names for "covariate" (not of specific interest) features (string/list)
##' @param voi list of df0 column names for variables/features of interest (string/list)
##' @param outcome df0 column name for outcome measure to be predicted (string)
##' @param model.type machine learning model ('rf', 'logistic', 'regression', 'rf.regression', 'svm') (string)
##' @param nresample number of resamples (numeric)
##' @param dthresh decision threshold (numeric)
##' @param z.pred standardize predictive features (boolean)
##' @param n.cores number of cores (parallel processes) (numeric/integer)
##' @param balance.col df0 column name used for ensuring balanced columns
##' @param partitions pre-defined train/test partitions
##'
##' @return A list of length five, containing the following elements:
##' \itemize{
##' \item "perfMetrics" Model performance metrics for each individual fold and "across" and "within".
##' \cr "across": sum or mean of metric across folds
##' \cr "within": mean of metric across folds
##' \item "cmat.covar": confusion matrix of covariate model (at "dthresh" decision threshold)
##'
##' \item "cmat.full": confusion matrix of full model (at "dthresh" decision threshold)
##'
##' \item "df.allfolds": data frame for test-related model predictions
##'
##' \item "parameters": list of relevant specified parameters
##' }
##'
##' @return A list of length five, containing the following elements:
##' \itemize{
##' \item "perfMetrics" Model performance metrics for each individual fold and "across" and "within".
##' \cr "across": sum or mean of metric across folds
##' \cr "within": mean of metric across folds
##' \itemize{
##' \item TP: true positive
##' \item FP: false positive
##' \item TN: true negative
##' \item FN: false negative
##' \item sens: sensitivity
##' \item spec: specificity
##' \item ppv: positive predictive value
##' \item npv: negative predictive value
##' \item acc: accuracy
##' \item auc.ROC: area under the curve of ROC curve
##' \item optThresh: optimal decision threshold determined from training data
##' }
##'
##' \item "cmat.covar": confusion matrix of covariate model (at "dthresh" decision threshold)
##'
##' \item "cmat.full": confusion matrix of full model (at "dthresh" decision threshold)
##'
##' \item "df.allfolds": data frame for test-related model predictions
##' \itemize{
##' \item orig.df.row: row in original data frame for specific observation,
##' \item fold: fold assignment
##' \item pred.prob.covar: predicted probability of class membership from covariate model
##' \item pred.prob.full: predicted probability of class membership from full model
##' \item pred.class.covar: predicted class from covariate model
##' \item pred.class.full: predicted class from full model
##' \item actual.class: actual class membership
##' }
##'
##' \item "parameters": list of relevant specified parameters
##' \itemize{
##' \item "sample.type": cross-validation sampling procedure
##' \item "class.levels": class levels
##' \item "model.type": machine learning model framework
##' \item "covar": specified covariates
##' \item "voi": specified variables of interest
##' \item "outcome": name of class being predicted
##' \item "formula.covar": formula object for covariate model
##' \item "formula.full": formula object for full model
##' \item "data.frame": data frame specified (CURRENTLY NOT CORRECTLY SPECIFIED)
##' \item "cmat.descrip": key for how to understand confusion matrices ()
##' \item "negative.class": class assigned to probability = 0
##' \item "positive.class": class assigned to probability = 1
##' \item "dthresh": decision threshold
##' \item "z.pred": whether z-scoring of features is specified
##' \item "nresample": number of resamples
##' }
##' }
## * fx_modelResample (example)
##' @examples
##' #### Generate data ####
##' n <- 100
##'
##' set.seed(1)
##' group <- factor(sample(c('MDD','HC'),n,replace=T))
##' age <- rnorm(n,25,5)
##' sex <- factor(sample(c('male','female'),n,replace=T))
##' rand.vals1 <- rnorm(n,0,0.75)
##' set.seed(2)
##' rand.vals2 <- rnorm(n,0,0.75)
##' dd <- data.frame(group = group,
##' age = age,
##' sex = sex,
##' f1 = rand.vals1 + as.numeric(group),
##' f2 = rand.vals2)
##'
##' #### MODEL EXAMPLE 1 #####
##' ## covariates
##' covar <- c('age','sex')
##' ## variables of interest
##' voi <- c('f1','f2')
##' ## class outcome
##' y <- 'group'
##'
##' ## resamples and permutations
##' nresample <- 10
##' nperm <- 10
##' n.cores <- 1 ## 10
##'
##' ## fit classification model
##' modelObj <- fx_modelResample(df0 = dd,
##' cv.type = '5-fold',
##' covar = covar,
##' voi = voi,
##' outcome = y,
##' model.type = 'rf',
##' nresample = nresample,
##' dthresh = 0.5,
##' z.pred = F,
##' balance.col = y,
##' n.cores = n.cores)
##'
##' ## determine overall model performance
##' modelPerfObj <- fx_modelResamplePerf(modelResampleObj = modelObj)
##' ## permutation testing
##' permObj <- fx_perm(df0 = dd, modelObj = modelObj, nperm = nperm, n.cores = n.cores)
##' ## determine permutation test performance
##' permPerfObj <- fx_permPerf(permObj = permObj, modelResamplePerf = modelPerfObj)
##'
##' ## Summary of performance measures based on observed data
##' modelPerfObj$df.summary
##' ## Outcome metrics for each resample
##' modelPerfObj$df.iter
##' ## Summary of permutation test outcomes
##' permPerfObj$df.summary
##' ## Outcome metrics for each permutation
##' permPerfObj$df.iter
##' ## create roc curve plot
##' fx_rocPlot(modelObj = modelObj, modelPerfObj = modelPerfObj, permPerfObj = permPerfObj, title.text = 'My Title')
## * fx_modelResample (code)
##' @export
fx_modelResample <- function(df0, cv.type = NULL, covar = NULL, voi = NULL, outcome = NULL, model.type = NULL, nresample = 1, dthresh = 0.5, z.pred = FALSE, n.cores = 20, balance.col = NULL, partitions = NULL){
  # Fail fast: the NULL default would otherwise crash inside
  # `cv.type == 'loocv'` with an opaque "argument is of length zero" error.
  if (is.null(cv.type)) {
    stop("'cv.type' must be specified (e.g. 'loocv', 'ltocv', '5-fold')",
         call. = FALSE)
  }
  # Progress messages are emitted at (roughly) every 10% of the resamples.
  updateMarks <- seq(from = 0, to = nresample, length.out = 11)
  # LOOCV partitions are deterministic, so repeated resampling adds nothing.
  if (cv.type == 'loocv') {
    nresample <- 1
    writeLines('LOOCV - resetting nresamples to 1...')
  } else {
    writeLines('Generating resample results...')
  }
  # Fit the full cross-validation pipeline once per resample.
  modelResamplePerfObj <- lapply(seq_len(nresample), function(j){
    # Visual update on progress.
    if (j %in% updateMarks) {
      writeLines(paste0('\tResample: ', j, ' (', (j/nresample)*100, '% complete)'))
    }
    # Partition data into folds (or reuse caller-supplied partitions).
    if (is.null(partitions)) {
      partition.list <- fx_partition(df0, type = cv.type, balance.col = balance.col)
    } else {
      partition.list <- partitions[[j]]
    }
    # Apply the machine learning framework: one model per fold, in parallel.
    modelObj <- parallel::mclapply(seq_along(partition.list), function(i){
      fx_model(fx_sample(df0, partition.list[[i]]),
               covar = covar,
               voi = voi,
               outcome = outcome,
               model.type = model.type,
               z.pred = z.pred)},
      mc.cores = n.cores)
    # Summarize model performance across folds.
    modelPerfObj <- fx_modelPerf(modelObj, dthresh = dthresh)
    # Parameters are saved only once, as their own list element (below).
    modelPerfObj$parameters <- NULL
    return(modelPerfObj)
  })
  # Run the pipeline once more purely to harvest the parameter metadata.
  partition.list <- fx_partition(df0, type = cv.type, balance.col = balance.col)
  modelObj <- parallel::mclapply(seq_along(partition.list), function(i){
    fx_model(fx_sample(df0, partition.list[[i]]),
             covar = covar,
             voi = voi,
             outcome = outcome,
             model.type = model.type)},
    mc.cores = n.cores)
  modelPerfObj <- fx_modelPerf(modelObj, dthresh = dthresh)
  # Record the user-facing settings alongside the fitted objects.
  parameters <- modelPerfObj$parameters
  parameters$z.pred <- z.pred
  parameters$nresample <- nresample
  writeLines('Model fitting completed!')
  return(list(modelResamplePerfObj = modelResamplePerfObj,
              parameters = parameters))
}
|
fad794a830d399d4ea0d46eb507c41a458475622
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/geotopbricks/R/getProjection.R
|
87491a165196b9a35398ed11a0be598f97b1944b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
r
|
getProjection.R
|
NULL
#'
#' It reads the CRS metadata utilized in a GEOtop Simulation
#'
#' @param x name and full path of the file containing CRS information
#' @param cond logical value. If \code{FALSE} the function returns \code{NA}. Default is \code{TRUE}.
#' @param ... further arguments
#'
#' @export
#' @return A string corresponding the projection and CRS if the argument \code{cond} is \code{TRUE}.
#' @examples
#' library(geotopbricks)
#' wpath <- "http://www.rendena100.eu/public/geotopbricks/simulations/idroclim_test1"
#' x <- paste(wpath,"geotop.proj",sep="/")
#'
#'
#' crs <- getProjection(x)
#'
# Read the CRS metadata used in a GEOtop simulation.
# The first line of the file at `x` is taken to be the full projection/CRS
# string. When `cond` is FALSE the file is not read and NA is returned.
# Fix: removed the unused local `open <- FALSE`.
getProjection <- function(x, cond = TRUE, ...) {
  out <- NA
  if (cond) {
    out <- as.character(scan(x, what = "list", sep = "\n", n = 1))
  }
  return(out)
}
|
47f9ff8101549a1192ad19ba732cc5fe8fe51f2f
|
47f66f15615e8e4ad20d650cf6d06036dcd6bd6c
|
/ancestral_populations_on_map.R
|
16ecf77d9aa33b6bceca6659f84be7f4085584b2
|
[] |
no_license
|
marclagu/ddRADseq_brown_trout_Iceland
|
8201c740800cd66b93858c95e9ab490a9d8ec532
|
00721d7e1afcf286731669386f95acddfe4a6f2f
|
refs/heads/main
| 2023-04-15T21:35:46.455476
| 2023-01-24T19:12:08
| 2023-01-24T19:12:08
| 589,250,415
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,555
|
r
|
ancestral_populations_on_map.R
|
# calculating the number of ancestral populations
# Workflow: PCA + Tracy-Widom on the genotypes, then sNMF over a range of K,
# pick the best run by cross-entropy, plot/export the K=8 ancestry matrix,
# and finally draw per-population ancestry pies on a map of the study area.
library(LEA)
# Convert the filtered VCF to the .geno format required by LEA.
vcf2geno("olfusa.filtered.vcf", "olfusa.geno")
pc = pca("olfusa.geno", scale = TRUE)
# Tracy-Widom test on the PCA eigenvalues (significance of components).
tw = tracy.widom(pc)
tw$pvalues[1:5]
plot(tw$percentage)
project = NULL
# sNMF ancestry estimation for K = 6..11 ancestral populations,
# 2500 repetitions each; entropy = TRUE enables cross-entropy model choice.
project = snmf("olfusa.geno",
               K = 6:11,
               entropy = TRUE,
               repetitions = 2500,
               seed = 1236,
               CPU = 4,
               alpha = 100,
               project = "new")
par(family="Times")
# Cross-entropy per K/run; the elbow/minimum guides the choice of K.
plot(project, col = "#d73027", pch = 19, cex = 1.2)
library(dplyr)
# selecting the best run for K = 8
best = which.min(cross.entropy(project, K = 8))
my.colors <- c("#ffffbf", "#abd9e9", "#313695", "#f46d43",
               "#d73027", "#a50026", "#fee090", "#74add1")
barchart(project, K = 8, run = best,
         border = NA, space = 0, sort.by.Q = TRUE,
         col = my.colors,
         xlab = "Individuals",
         ylab = "Ancestry proportions",
         main = "Ancestry matrix (K=8)")
# exporting matrix
matrix8 <- Q(project, K=8, run=best)
write.csv(matrix8, "matrix8")
#######################################################################################
# plotting ancestral populations on map
par(mfrow=c(1,1))
# spatial packages
library(raster)
library(rgeos)
library(rgdal)
library(ggmap)
library(sp)
library(terra)
# colors
library(colorspace)
# Read in the shapefile (obtained from the Cartographic Service of Iceland at https://www.lmi.is/)
IS1<-readOGR(dsn="./Maps/", layer="IS1")
IS2<-readOGR(dsn="./Maps/", layer="IS2")
IS3<-readOGR(dsn="./Maps/", layer="IS3")
# Crop all three map layers to the study-area bounding box (projected units).
zones_clipped_1 <- raster::crop(IS1, extent(1569500,1650000,170000,230000))
zones_clipped_2 <- raster::crop(IS2, extent(1569500,1650000,170000,230000))
zones_clipped_3 <- raster::crop(IS3, extent(1569500,1650000,170000,230000))
# Ancestry matrix of the best K=8 run, joined to the population map.
qmatrix = Q(project, K=8, run=best)
popmap <- read.table("popmap.olfusa.clean.QGIS.tsv", header=TRUE)
k<- data.frame(qmatrix)
k$ID <- popmap$POP
# Per-population mean ancestry proportions (printed for inspection first,
# then stored in k_summary).
k %>%
  group_by(k$ID) %>%
  summarize(mean_V1 = mean(V1, na.rm=TRUE),
            mean_V2 = mean(V2, na.rm=TRUE),
            mean_V3 = mean(V3, na.rm=TRUE),
            mean_V4 = mean(V4, na.rm=TRUE),
            mean_V5 = mean(V5, na.rm=TRUE),
            mean_V6 = mean(V6, na.rm=TRUE),
            mean_V7 = mean(V7, na.rm=TRUE),
            mean_V8 = mean(V8, na.rm=TRUE))
k_summary <- k %>%
  group_by(k$ID) %>%
  summarize(mean_V1 = mean(V1, na.rm=TRUE),
            mean_V2 = mean(V2, na.rm=TRUE),
            mean_V3 = mean(V3, na.rm=TRUE),
            mean_V4 = mean(V4, na.rm=TRUE),
            mean_V5 = mean(V5, na.rm=TRUE),
            mean_V6 = mean(V6, na.rm=TRUE),
            mean_V7 = mean(V7, na.rm=TRUE),
            mean_V8 = mean(V8, na.rm=TRUE))
# Mean sampling coordinates per population.
coord <- data.frame(popmap$POP, popmap$LAT, popmap$LONG)
coord_summary <- coord %>%
  group_by(popmap.POP) %>%
  summarize(mean_LAT = mean(popmap.LAT, na.rm=TRUE),
            mean_LONG = mean(popmap.LONG, na.rm=TRUE))
# Sample size per population (drives the marker radius below).
k_number <- k %>%
  group_by(k$ID) %>%
  tally()
k_summary$number <- k_number$n
data <- data.frame(k_summary)
data$LAT <- coord_summary$mean_LAT
data$LONG <- coord_summary$mean_LONG
y<- data$LAT
x<- data$LONG
data$lon=x
data$lat=y
# Promote to a SpatialPointsDataFrame in WGS84, then reproject to the local
# Lambert conformal conic CRS used by the basemap.
coordinates(data) <- c("lon", "lat")
proj4string(data) <- CRS("+init=epsg:4326") # WGS 84
CRS.new <- CRS("+proj=lcc +lat_1=64.25 +lat_2=65.75 +lat_0=65 +lon_0=-19 +x_0=1700000 +y_0=300000 +ellps=GRS80 +units=m +no_defs")
d <- spTransform(data, CRS.new)
d1 <- data.frame(d)
# Draw the clipped basemap layers, then (invisibly) the points.
par(mai=c(0.1,0.1,0.1,0.1))
plot(zones_clipped_1, yaxs = 'i', xaxs = 'i', lwd=2)
plot(zones_clipped_2, add=T, col="light gray",border="dark gray", yaxs = 'i', xaxs = 'i')
plot(zones_clipped_3, add=T, col="light gray", yaxs = 'i', xaxs = 'i')
plot(d, add=TRUE, pch=1, cex=0.01, alpha=0)
library(maps)
library(plotrix)
library(scales)
library(seqinr)
# White background discs sized by per-population sample size.
points(d1$lon, d1$lat, cex = d1$number/1800, col='white', pch=19)
my.colors <- c("#ffffbf", "#abd9e9", "#313695", "#f46d43",
               "#d73027", "#a50026", "#fee090", "#74add1")
# NOTE(review): only the first two palette entries get the alpha treatment
# (1:(2)); with alpha = 1 this is a no-op anyway — confirm intent.
for (i in 1:(2)) {my.colors[i]<-col2alpha(color=my.colors[i], alpha=1)}
#for same size of pies
for (x in 1:nrow(d)){floating.pie(d1$lon[x],d1$lat[x], c(d1$mean_V1[x],d1$mean_V2[x],d1$mean_V3[x],d1$mean_V4[x],
                                                         d1$mean_V5[x], d1$mean_V6[x],d1$mean_V7[x],d1$mean_V8[x]),
                                  radius=1200, col =my.colors)}
|
32790b8002f2646d573185b2779dd57c4185ec68
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/genoPlotR/examples/plot_gene_map.Rd.R
|
314e7c38bad91c6476a6121ed8456db7d16b7069
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,191
|
r
|
plot_gene_map.Rd.R
|
# Auto-generated example dump (genthat extraction of the genoPlotR
# plot_gene_map.Rd examples). Runs every documented example in sequence;
# each commented section exercises one plotting option. If this script
# needs fixing, edit the upstream Rd examples instead.
library(genoPlotR)
### Name: plot_gene_map
### Title: Plot gene and genome maps
### Aliases: plot_gene_map
### Keywords: hplot
### ** Examples
old.par <- par(no.readonly=TRUE)
data("three_genes")
## Segments only
plot_gene_map(dna_segs=dna_segs)
## With comparisons
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons)
## Tree
names <- c("A_aaa", "B_bbb", "C_ccc")
names(dna_segs) <- names
tree <- newick2phylog("(((A_aaa:4.2,B_bbb:3.9):3.1,C_ccc:7.3):1);")
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
tree=tree)
## Increasing tree width
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
tree=tree, tree_width=3)
## Annotations on the tree
tree2 <- newick2phylog("(((A_aaa:4.2,B_bbb:3.9)97:3.1,C_ccc:7.3)78:1);")
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
tree=tree2, tree_width=3)
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
tree=tree2, tree_width=3, tree_branch_labels_cex=0.5)
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
tree=tree2, tree_width=3, tree_branch_labels_cex=0)
## Annotation
## Calculating middle positions
mid_pos <- middle(dna_segs[[1]])
# Create first annotation
annot1 <- annotation(x1=mid_pos, text=dna_segs[[1]]$name)
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons, annotations=annot1)
## Exploring options
annot2 <- annotation(x1=c(mid_pos[1], dna_segs[[1]]$end[2]),
x2=c(NA, dna_segs[[1]]$end[3]),
text=c(dna_segs[[1]]$name[1], "region1"),
rot=c(30, 0), col=c("grey", "black"))
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
annotations=annot2, annotation_height=1.3)
## xlims
## Just returning a segment
plot_gene_map(dna_segs, comparisons,
xlims=list(NULL, NULL, c(Inf,-Inf)),
dna_seg_scale=TRUE)
## Removing one gene
plot_gene_map(dna_segs, comparisons,
xlims=list(NULL, NULL, c(-Inf,2800)),
dna_seg_scale=TRUE)
## offsets
offsets <- c(0, 0, 0)
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons, offsets=offsets)
offsets <- c(200, 400, 0)
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons, offsets=offsets)
## main
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
main="Comparison of A, B and C")
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
main="Comparison of A, B and C", main_pos="left")
## dna_seg_labels
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
dna_seg_labels=c("Huey", "Dewey", "Louie"))
## dna_seg_labels size
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
dna_seg_labels=c("Huey", "Dewey", "Louie"),
dna_seg_label_cex=2)
## dna_seg_line
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
dna_seg_line=c("FALSE", "red", grey(0.6)))
## gene_type
plot_gene_map(dna_segs=dna_segs, comparisons=comparisons,
gene_type="side_blocks")
##
## From here on, using a bigger dataset from a 4-genome comparison
##
data("barto")
## Adding a tree
tree <- newick2phylog("(BB:2.5,(BG:1.8,(BH:1,BQ:0.8):1.9):3);")
## Showing only subsegments
xlims1 <- list(c(1380000, 1445000),
c(10000, 83000),
c(15000, 98000),
c(5000, 82000))
## Reducing dataset size for speed purpose
for (i in 1:length(barto$dna_segs)){
barto$dna_segs[[i]] <- trim(barto$dna_segs[[i]], xlim=xlims1[[i]])
if (i < length(barto$dna_segs))
# NOTE(review): the second limit is passed unnamed; likely intended
# xlim2=xlims1[[i+1]] -- confirm against the trim() signature.
barto$comparisons[[i]] <- trim(barto$comparisons[[i]],
xlim1=xlims1[[i]], xlims1[[i+1]])
}
plot_gene_map(barto$dna_segs, barto$comparisons, tree=tree,
xlims=xlims1,
dna_seg_scale=TRUE)
## Showing several subsegments per genome
xlims2 <- list(c(1445000, 1415000, 1380000, 1412000),
c( 10000, 45000, 50000, 83000, 90000, 120000),
c( 15000, 36000, 90000, 120000, 74000, 98000),
c( 5000, 82000))
## dna_seg_scale, global_color_scheme, size, number, color of dna_seg_scale,
## size of dna_seg_scale labels
plot_gene_map(barto$dna_segs, barto$comparisons, tree=tree, xlims=xlims2,
dna_seg_scale=c(TRUE, FALSE, FALSE, TRUE), scale=FALSE,
dna_seg_label_cex=1.7,
dna_seg_label_col=c("black", "grey", "blue", "red"),
global_color_scheme=c("e_value", "auto", "grey", "0.7"),
n_scale_ticks=3, scale_cex=1)
## Hand-made offsets: size of all gaps
offsets2 <- list(c(10000, 10000),
c(2000, 2000, 2000),
c(10000, 5000, 2000),
c(10000))
plot_gene_map(barto$dna_segs, barto$comparisons, tree=tree,
#annotations=annots,
xlims=xlims2,
offsets=offsets2,
dna_seg_scale=TRUE)
##
## Exploring and modifying a previously plotted gene map plot
##
## View viewports
current.vpTree()
## Go down to one of the viewports, add an xaxis, go back up to root viewport
downViewport("dna_seg_scale.3.2")
grid.rect()
upViewport(0)
## Get all the names of the objects
grobNames <- getNames()
grobNames
## Change the color ot the scale line
grid.edit("scale.lines", gp=gpar(col="grey"))
## Remove first dna_seg_lines
grid.remove("dna_seg_line.1.1")
##
## Plot genoPlotR logo
##
col <- c("#B2182B", "#D6604D", "#F4A582", "#FDDBC7",
"#D1E5F0", "#92C5DE", "#4393C3", "#2166AC")
cex <- 2.3
## First segment
start1 <- c(150, 390, 570)
end1 <- c( 1, 490, 690)
genoR <- c(270, 530)
## Second segment
start2 <- c(100, 520, 550)
end2 <- c(240, 420, 650)
Plot <- c(330)
## dna_segs
ds1 <- as.dna_seg(data.frame(name=c("", "", ""),
start=start1, end=end1, strand=rep(1, 3),
col=col[c(2, 6, 1)], stringsAsFactor=FALSE))
ds_genoR <- as.dna_seg(data.frame(name=c("geno", "R"),
start=genoR, end=genoR, strand=rep(1, 2),
col=c(col[8], "black"),
stringsAsFactor=FALSE), cex=cex, gene_type="text")
ds2 <- as.dna_seg(data.frame(name=c("", "", ""),
start=start2, end=end2, strand=rep(1, 3),
col=col[c(5, 3, 7)],
stringsAsFactor=FALSE))
ds_Plot <- as.dna_seg(data.frame(name="Plot",
start=Plot, end=Plot, strand=1,
col=col[c(1)],
stringsAsFactor=FALSE), cex=cex, gene_type="text")
## comparison
c1 <- as.comparison(data.frame(start1=start1, end1=end1,
start2=start2, end2=end2,
col=grey(c(0.6, 0.8, 0.5))))
## Generate genoPlotR logo
## Not run:
##D cairo_pdf("logo.pdf", h=0.7, w=3)
## End(Not run)
par(fin=c(0.7, 3))
plot_gene_map(dna_segs=list(c(ds1, ds_genoR), c(ds2, ds_Plot)),
comparisons=list(c1), scale=FALSE, dna_seg_scale=FALSE,
dna_seg_line=grey(0.7), offsets=c(-20,160))
## Not run:
##D dev.off()
## End(Not run)
par(old.par)
|
f570e5a7b12219166a7806e4c2c98a41c37b3fa1
|
ca31854a104eeed212c105488dde400f92d056fc
|
/tests/testthat/test-term_stats.R
|
d088dcc50798059d6352a3b7cdb2a0a68d2c13b8
|
[
"Apache-2.0"
] |
permissive
|
cran/corpus
|
ceb57516441e57a2a593cc2e112ee3029da97e33
|
59d4da68aa5275821b48ecfcc21636f398459df5
|
refs/heads/master
| 2021-07-12T10:57:59.928640
| 2021-05-02T03:30:04
| 2021-05-02T03:30:04
| 88,405,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,773
|
r
|
test-term_stats.R
|
# testthat suite for corpus::term_stats(): term/ngram counting, filters,
# min/max count and support thresholds, type columns, subsetting, and
# argument validation. Expected values pin the exact data.frame contract
# (a corpus_frame with term/count/support columns), so the literals below
# must not be altered.
context("term_stats")
test_that("'term_stats' works", {
    expect_equal(term_stats("A rose is a rose is a rose."),
                 structure(data.frame(term = c("a", "rose", "is", "."),
                                      count = c(3, 3, 2, 1),
                                      support = c(1, 1, 1, 1),
                                      stringsAsFactors = FALSE),
                           class = c("corpus_frame", "data.frame")))
})
test_that("'term_stats' can use a filter", {
    f <- text_filter(drop_punct = TRUE, drop = stopwords_en)
    expect_equal(term_stats("A rose is a rose is a rose.", f),
                 structure(data.frame(term = c("rose"),
                                      count = c(3),
                                      support = c(1),
                                      stringsAsFactors = FALSE),
                           class = c("corpus_frame", "data.frame")))
})
test_that("'term_stats' can count ngrams", {
    expect_equal(term_stats("A rose is a rose is a rose.", ngrams = 2),
                 structure(data.frame(term = c("a rose", "is a", "rose is",
                                               "rose ."),
                                      count = c(3, 2, 2, 1),
                                      support = c(1, 1, 1, 1),
                                      stringsAsFactors = FALSE),
                           class = c("corpus_frame", "data.frame")))
})
# NOTE(review): description says "count_min" but the argument is min_count.
test_that("'term_stats' can count ngrams above count_min", {
    expect_equal(term_stats("A rose is a rose is a rose.", ngrams = 2,
                            min_count = 2),
                 structure(data.frame(term = c("a rose", "is a", "rose is"),
                                      count = c(3, 2, 2),
                                      support = c(1, 1, 1),
                                      stringsAsFactors = FALSE),
                           class = c("corpus_frame", "data.frame")))
})
test_that("'term_stats' can count ngrams above support_min", {
    expect_equal(term_stats(c("A rose is a rose is a rose.", "Rose Red"),
                            ngrams = 1,
                            min_support = 2),
                 structure(data.frame(term = c("rose"),
                                      count = c(4),
                                      support = c(2),
                                      stringsAsFactors = FALSE),
                           class = c("corpus_frame", "data.frame")))
})
test_that("'term_stats' can output types", {
    expect_equal(term_stats("A rose is a rose is a rose.", ngrams = 2,
                            min_count = 2, types = TRUE),
                 structure(data.frame(term = c("a rose", "is a", "rose is"),
                                      type1 = c("a", "is", "rose"),
                                      type2 = c("rose", "a", "is"),
                                      count = c(3, 2, 2),
                                      support = c(1, 1, 1),
                                      stringsAsFactors = FALSE),
                           class = c("corpus_frame", "data.frame")))
})
test_that("'term_stats' can select terms", {
    expect_equal(term_stats("A rose is a rose is a rose.",
                            subset = term %in% c("rose", "a")),
                 structure(data.frame(term = c("a", "rose"),
                                      count = c(3, 3),
                                      support = c(1, 1),
                                      stringsAsFactors = FALSE),
                           class = c("corpus_frame", "data.frame")))
})
test_that("'term_stats' errors for invalid 'subset' argument", {
    expect_error(term_stats("A rose is a rose is a rose.", subset = "rose"),
                 "'subset' must be logical")
})
test_that("'term_stats' errors for invalid 'count', 'support' arguments", {
    expect_error(term_stats("hello", min_count = c(1, 2)),
                 "'min_count' must have length 1")
    expect_error(term_stats("hello", max_count = NA),
                 "'max_count' must be a numeric value (or NULL)", fixed = TRUE)
})
test_that("'term_stats' errors for invalid 'ngrams' argument", {
    expect_error(term_stats("hello", ngrams = "1"),
                 "'ngrams' must be NULL or an integer vector")
    expect_error(term_stats("hello", ngrams = c(NA, 1)),
                 "'ngrams' entries must be positive integer values")
    expect_error(term_stats("hello", ngrams = c(1, 0)),
                 "'ngrams' entries must be positive integer values")
    expect_error(term_stats("hello", ngrams = 128),
                 "'ngrams' entries must be below 128")
    expect_error(term_stats("hello", ngrams = integer()),
                 "'ngrams' argument cannot have length 0")
})
|
c3477003da05e9c07b7f9d3f79a2eb026905ed22
|
b9db037ee7bc2ebf9c228ad1f66fecabccfa70be
|
/man/add_default_decisions.Rd
|
090e30c9739c14029ad12667275d6598d78c0739
|
[] |
no_license
|
IsaakBM/prioritizr
|
924a6d8dcc7c8ff68cd7f5a2077de2fa1f300fe7
|
1488f8062d03e8736de74c9e7803ade57d6fcc29
|
refs/heads/master
| 2020-12-10T06:23:19.437647
| 2019-12-22T00:04:20
| 2019-12-22T00:04:20
| 233,524,401
| 1
| 0
| null | 2020-01-13T06:13:19
| 2020-01-13T06:13:18
| null |
UTF-8
|
R
| false
| true
| 545
|
rd
|
add_default_decisions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_default_decisions.R
\name{add_default_decisions}
\alias{add_default_decisions}
\title{Add default decisions}
\usage{
add_default_decisions(x)
}
\arguments{
\item{x}{\code{\link{ConservationProblem-class}} object.}
}
\description{
This function adds the default decision types to a conservation planning
\code{\link{problem}}. The default types are binary and are added using
the \code{\link{add_binary_decisions}} function.
}
\seealso{
\code{\link{decisions}}.
}
|
27401062227dedeeead31cb4ac17d64ce2a735cf
|
42c5613984794b9b9c08b792e6a1b91772613495
|
/R/anova2x2_se.R
|
519473a6fb49d71c5991128717f1659c1ec257cc
|
[
"MIT"
] |
permissive
|
chrisaberson/pwr2ppl
|
87b5c8ca9af5081613d8a49c76a9fea9cdd5de12
|
06a0366cf87710cb79ef45bdc6535fd4d288da51
|
refs/heads/master
| 2022-09-28T13:57:48.675573
| 2022-09-05T23:35:22
| 2022-09-05T23:35:22
| 54,674,476
| 16
| 7
| null | 2019-03-29T16:55:16
| 2016-03-24T21:10:28
|
R
|
UTF-8
|
R
| false
| false
| 5,868
|
r
|
anova2x2_se.R
|
#'Compute power for Simple Effects in a Two by Two Between Subjects ANOVA with two levels for each factor.
#'Takes means, sds, and sample sizes for each group. Alpha is .05 by default, alternative values may be entered by user
#'@param m1.1 Cell mean for First level of Factor A, First level of Factor B
#'@param m1.2 Cell mean for First level of Factor A, Second level of Factor B
#'@param m2.1 Cell mean for Second level of Factor A, First level of Factor B
#'@param m2.2 Cell mean for Second level of Factor A, Second level of Factor B
#'@param s1.1 Cell standard deviation for First level of Factor A, First level of Factor B
#'@param s1.2 Cell standard deviation for First level of Factor A, Second level of Factor B
#'@param s2.1 Cell standard deviation for Second level of Factor A, First level of Factor B
#'@param s2.2 Cell standard deviation for Second level of Factor A, Second level of Factor B
#'@param n1.1 Cell sample size for First level of Factor A, First level of Factor B
#'@param n1.2 Cell sample size for First level of Factor A, Second level of Factor B
#'@param n2.1 Cell sample size for Second level of Factor A, First level of Factor B
#'@param n2.2 Cell sample size for Second level of Factor A, Second level of Factor B
#'@param alpha Type I error (default is .05)
#'@examples
#'anova2x2_se(m1.1=0.85, m1.2=0.85, m2.1=0.00, m2.2=0.60,
#'s1.1=1.7, s1.2=1.7, s2.1=1.7, s2.2=1.7,
#'n1.1=250, n1.2=250, n2.1=250, n2.2=250, alpha=.05)
#'@return Power for Simple Effects Tests in a Two By Two ANOVA
#'@export
#'
#'
anova2x2_se <- function(m1.1 = NULL, m1.2 = NULL, m2.1 = NULL, m2.2 = NULL,
                        s1.1 = NULL, s1.2 = NULL, s2.1 = NULL, s2.2 = NULL,
                        n1.1 = NULL, n1.2 = NULL, n2.1 = NULL, n2.2 = NULL,
                        alpha = .05) {
  # Save the user's contrast option and restore it on exit; type-III sums
  # of squares require sum-to-zero contrasts, set before each model fit.
  oldoption <- options(contrasts = c("contr.helmert", "contr.poly"))
  oldoption
  on.exit(options(oldoption))
  # Simulate one cell: draw from a normal, then rescale so the sample has
  # EXACTLY mean m and sd s. Every sum of squares below depends only on
  # the cell means, SDs and ns, so results are deterministic despite the
  # rnorm() call. (Replaces four copy-pasted blocks in the original.)
  sim_cell <- function(n, m, s, a, b) {
    x <- stats::rnorm(n, m, s)
    z <- ((x - mean(x, na.rm = TRUE)) / stats::sd(x, na.rm = TRUE)) * s
    data.frame(y = m + z, A = rep(a, n), B = rep(b, n))
  }
  simdat <- rbind(sim_cell(n1.1, m1.1, s1.1, "A1", "B1"),
                  sim_cell(n1.2, m1.2, s1.2, "A1", "B2"),
                  sim_cell(n2.1, m2.1, s2.1, "A2", "B1"),
                  sim_cell(n2.2, m2.2, s2.2, "A2", "B2"))
  dataA1 <- subset(simdat, A == "A1")
  dataA2 <- subset(simdat, A == "A2")
  dataB1 <- subset(simdat, B == "B1")
  dataB2 <- subset(simdat, B == "B2")
  # Full factorial model: its type-III table supplies SS_total and the
  # pooled within-cell error term used for the simple-effects tests.
  options(contrasts = c("contr.sum", "contr.poly"))
  anova <- stats::aov(y ~ A * B, data = simdat)
  anova <- car::Anova(anova, type = "III")
  SSwin <- anova[5, 1] # residual SS (row 5, column 1 of the type-III table)
  dfwin <- anova[5, 2]
  SSA <- anova[2, 1]
  SSB <- anova[3, 1]
  SSAB <- anova[4, 1]
  SST <- SSA + SSB + SSAB + SSwin
  MSwin <- SSwin / dfwin # kept for reference; not used below
  # One-way models within each level of the other factor give the
  # simple-effect sums of squares. (An unused dfwinAat assignment from
  # the original was removed.)
  options(contrasts = c("contr.sum", "contr.poly"))
  anoAatB1 <- stats::aov(y ~ A, data = dataB1)
  anoAatB1 <- car::Anova(anoAatB1, type = "III")
  options(contrasts = c("contr.sum", "contr.poly"))
  anoAatB2 <- stats::aov(y ~ A, data = dataB2)
  anoAatB2 <- car::Anova(anoAatB2, type = "III")
  options(contrasts = c("contr.sum", "contr.poly"))
  anoBatA1 <- stats::aov(y ~ B, data = dataA1)
  anoBatA1 <- car::Anova(anoBatA1, type = "III")
  options(contrasts = c("contr.sum", "contr.poly"))
  anoBatA2 <- stats::aov(y ~ B, data = dataA2)
  anoBatA2 <- car::Anova(anoBatA2, type = "III")
  # Error df for simple effects: pooled within-cell df plus 2.
  dfwinSE <- dfwin + 2
  minusalpha <- 1 - alpha
  # For each simple effect: eta^2 against SS_total, Cohen's f^2,
  # noncentrality lambda = f^2 * df_error, then power at the critical F.
  SSBatA1 <- anoBatA1[2, 1]
  dfBatA1 <- anoBatA1[2, 2]
  eta2BatA1 <- SSBatA1 / SST
  f2BatA1 <- eta2BatA1 / (1 - eta2BatA1)
  lambdaBatA1 <- f2BatA1 * dfwinSE
  FtBatA1 <- stats::qf(minusalpha, dfBatA1, dfwinSE)
  power.BatA1 <- round(1 - stats::pf(FtBatA1, dfBatA1, dfwinSE, lambdaBatA1), 3)
  SSBatA2 <- anoBatA2[2, 1]
  dfBatA2 <- anoBatA2[2, 2]
  eta2BatA2 <- SSBatA2 / SST
  f2BatA2 <- eta2BatA2 / (1 - eta2BatA2)
  lambdaBatA2 <- f2BatA2 * dfwinSE
  FtBatA2 <- stats::qf(minusalpha, dfBatA2, dfwinSE)
  power.BatA2 <- round(1 - stats::pf(FtBatA2, dfBatA2, dfwinSE, lambdaBatA2), 3)
  SSAatB1 <- anoAatB1[2, 1]
  dfAatB1 <- anoAatB1[2, 2]
  eta2AatB1 <- SSAatB1 / SST
  f2AatB1 <- eta2AatB1 / (1 - eta2AatB1)
  lambdaAatB1 <- f2AatB1 * dfwinSE
  FtAatB1 <- stats::qf(minusalpha, dfAatB1, dfwinSE)
  power.AatB1 <- round(1 - stats::pf(FtAatB1, dfAatB1, dfwinSE, lambdaAatB1), 3)
  SSAatB2 <- anoAatB2[2, 1]
  dfAatB2 <- anoAatB2[2, 2]
  eta2AatB2 <- SSAatB2 / SST
  f2AatB2 <- eta2AatB2 / (1 - eta2AatB2)
  lambdaAatB2 <- f2AatB2 * dfwinSE
  FtAatB2 <- stats::qf(minusalpha, dfAatB2, dfwinSE)
  power.AatB2 <- round(1 - stats::pf(FtAatB2, dfAatB2, dfwinSE, lambdaAatB2), 3)
  message("Simple Effect Comparing M = ",m1.1, " and M = ", m2.1,". Power = ", power.AatB1)
  message("Simple Effect Comparing M= ",m1.2, " and M = ", m2.2,". Power = ", power.AatB2)
  message("Simple Effect Comparing M = ",m1.1, " and M = ", m1.2,". Power = ", power.BatA1)
  message("Simple Effect Comparing M = ",m2.1, " and M = ", m2.2,". Power = ", power.BatA2)
  # Assemble the (invisibly returned) one-row result table.
  result <- data.frame(matrix(ncol = 8))
  colnames(result) <- c("Eta-squared A at B1","Power A at B1","Eta-squared A at B2","Power A at B2","Eta-squared B at A1","Power B at A1","Eta-squared B at A2","Power B at A2")
  result[, 1] <- eta2AatB1
  result[, 2] <- power.AatB1
  result[, 3] <- eta2AatB2
  result[, 4] <- power.AatB2
  result[, 5] <- eta2BatA1
  result[, 6] <- power.BatA1
  result[, 7] <- eta2BatA2
  result[, 8] <- power.BatA2
  output <- na.omit(result)
  rownames(output) <- c()
  invisible(output)
}
|
c7848125643569965de5b8d23d27bd94769066fd
|
3db6b4a85d65321bebfa78741727dd441427d57f
|
/inst/doc/Connecting.R
|
61194c527a6fc2e6d5e1ebbe7d950bca84533c3f
|
[
"Apache-2.0"
] |
permissive
|
cran/DatabaseConnector
|
de2d3326fbd71248239b6b38ad2fa0e2eb339389
|
d4e73726e428abed496caeb470c061e2f8542fc6
|
refs/heads/master
| 2023-07-05T06:55:52.762538
| 2023-06-29T21:40:11
| 2023-06-29T21:40:11
| 134,714,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,711
|
r
|
Connecting.R
|
# Purled code from the DatabaseConnector "Connecting" vignette. The
# `## ----` lines are chunk markers; chunks marked eval=FALSE are kept as
# comments, and the writeLines() calls reproduce the output those chunks
# would print. Only the final SQLite chunk actually executes.
## ---- echo = FALSE, message = FALSE-------------------------------------------
library(DatabaseConnector)
## ----eval=FALSE---------------------------------------------------------------
#  Sys.setenv("DATABASECONNECTOR_JAR_FOLDER" = "c:/temp/jdbcDrivers")
## ----eval=FALSE---------------------------------------------------------------
#  install.packages("usethis")
#  usethis::edit_r_environ()
## ----eval=FALSE---------------------------------------------------------------
#  Sys.setenv("DATABASECONNECTOR_JAR_FOLDER" = "c:/temp/jdbcDrivers")
## ----eval=FALSE---------------------------------------------------------------
#  downloadJdbcDrivers("postgresql")
## ----echo=FALSE---------------------------------------------------------------
writeLines("DatabaseConnector JDBC drivers downloaded to 'c:/temp/jdbcDrivers'.")
## ----eval=FALSE---------------------------------------------------------------
#  install.packages("RSQLite")
## ----eval=FALSE---------------------------------------------------------------
#  conn <- connect(dbms = "postgresql",
#                  server = "localhost/postgres",
#                  user = "joe",
#                  password = "secret")
## ----echo=FALSE---------------------------------------------------------------
writeLines("Connecting using PostgreSQL driver")
## ----eval=FALSE---------------------------------------------------------------
#  disconnect(conn)
## ----eval=FALSE---------------------------------------------------------------
#  conn <- connect(dbms = "postgresql",
#                  connectionString = "jdbc:postgresql://localhost:5432/postgres",
#                  user = "joe",
#                  password = "secret")
## ----echo=FALSE---------------------------------------------------------------
writeLines("Connecting using PostgreSQL driver")
## ----eval=FALSE---------------------------------------------------------------
#  details <- createConnectionDetails(dbms = "postgresql",
#                                     server = "localhost/postgres",
#                                     user = "joe",
#                                     password = "secret")
#  conn <- connect(details)
## ----echo=FALSE---------------------------------------------------------------
writeLines("Connecting using PostgreSQL driver")
## -----------------------------------------------------------------------------
# Live demo: in-memory-style SQLite database in a temp file, upload the
# built-in `cars` dataset, count its rows, then disconnect.
conn <- connect(dbms = "sqlite", server = tempfile())
# Upload cars dataset as table:
insertTable(connection = conn,
            tableName = "cars",
            data = cars)
querySql(conn, "SELECT COUNT(*) FROM main.cars;")
disconnect(conn)
|
3d9644e866b360ac44cf90d70dbd8421dc9adf31
|
1146fc0ebe4c191ed8805915b9bb3d3404da8248
|
/R/intervention_proportion.R
|
2f857ae32eb08474474a71b14eb5f7508f02c2b9
|
[] |
no_license
|
softloud/metasim
|
3e56f527a0af8ccb6376ab8be43ded503480d33f
|
3380c351fad4bddb4c24f1a30cb3391a66b90121
|
refs/heads/master
| 2020-04-16T17:44:59.654202
| 2019-07-13T08:15:33
| 2019-07-13T08:15:33
| 165,786,914
| 8
| 9
| null | 2019-07-13T08:15:34
| 2019-01-15T04:46:12
|
HTML
|
UTF-8
|
R
| false
| false
| 242
|
r
|
intervention_proportion.R
|
#' Calculate proportion of intervention group
#'
#' Samples `n` intervention-group proportions from a Beta distribution
#' whose shape parameters are derived from the target proportion and its
#' error via [beta_par()].
#'
#' @inheritParams beta_par
#'
#' @export
intervention_proportion <- function(n, proportion, error) {
  # `shape` rather than `par` avoids shadowing graphics::par().
  shape <- beta_par(proportion, error)
  rbeta(n, shape1 = shape$alpha, shape2 = shape$beta)
}
|
90c323516f8e101c97cff99fb51bef4041a154fa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bPeaks/examples/bPeaks-package.Rd.R
|
b87d0200953491fd40ab2a5baba9b504bfae534f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,957
|
r
|
bPeaks-package.Rd.R
|
# Auto-generated example dump (genthat extraction of the bPeaks package
# examples): peak calling on yeast ChIP-seq data. Only a 10 kb slice of
# chrIV is actually run; the full-genome analysis is kept in the
# "Not run" block. Writes PDF/BED result files to the working directory.
library(bPeaks)
### Name: bPeaks-package
### Title: bPeaks: an intuitive peak-calling strategy to detect
###   transcription factor binding sites from ChIP-seq data in small
###   eukaryotic genomes
### Aliases: bPeaks-package bPeaks
### Keywords: peak calling ChIP-seq protein binding sites protein-DNA
###   interactions deep sequencing small eukaryotic genomes
### ** Examples
# get library
library(bPeaks)
# STEP 1: get PDR1 data (ChIP-seq experiments, IP and control samples,
# related to the transcription factor Pdr1 in yeast Saccharomyces
# cerevisiae)
data(dataPDR1)
# STEP 2 : bPeaks analysis (only 10 kb of chrIV are analyzed here,
# as an illustration)
bPeaksAnalysis(IPdata = dataPDR1$IPdata[40000:50000,],
               controlData = dataPDR1$controlData[40000:50000,],
               cdsPositions = dataPDR1$cdsPositions,
               windowSize = 150, windowOverlap = 50,
               IPcoeff = 4, controlCoeff = 2,
               log2FC = 1, averageQuantiles = 0.5,
               resultName = "bPeaks_example")
# --> Result files (PDF and BED) are written in the working directory.
## Not run:
##D # -> bPeaks analysis, all chromosome IV and default parameters (optimized for yeasts)
##D
##D # STEP 1: get PDR1 data (ChIP-seq experiments, IP and control samples,
##D # related to the transcription factor Pdr1 in yeast Saccharomyces
##D # cerevisiae)
##D data(dataPDR1)
##D
##D # STEP 2: bPeaks analysis
##D bPeaksAnalysis(IPdata = dataPDR1$IPdata,
##D                controlData = dataPDR1$controlData,
##D                cdsPositions = dataPDR1$cdsPositions,
##D                windowSize = 150, windowOverlap = 50,
##D                IPcoeff = 2, controlCoeff = 2,
##D                log2FC = 2, averageQuantiles = 0.9,
##D                resultName = "bPeaks_PDR1",
##D                peakDrawing = TRUE)
##D
##D # STEP 3 : procedure to locate peaks according to
##D # gene positions
##D peakLocation(bedFile = "bPeaks_PDR1_bPeaks_allGenome.bed",
##D             cdsPositions = yeastCDS$Saccharomyces.cerevisiae,
##D             outputName = "bPeakLocation_finalPDR1", promSize = 800)
##D
##D # -> Note that cds (genes) positions are stored in bPeaks package for several yeast
##D # species
##D data(yeastCDS)
##D
##D summary(yeastCDS)
##D #                         Length Class      Mode
##D #Debaryomyces.hansenii    31370  -none-     character
##D #Eremothecium.gossypii    23615  -none-     character
##D #Kluyveromyces.lactis     25380  -none-     character
##D #Pichia.sorbitophila      55875  -none-     character
##D #Saccharomyces.kluyveri   27790  -none-     character
##D #Yarrowia.lipolytica      32235  -none-     character
##D #Zygosaccharomyces.rouxii 24955  -none-     character
##D #Saccharomyces.cerevisiae     5 data.frame list
##D #Candida.albicans             5 data.frame list
##D #Candida.glabrata             5 data.frame list
## End(Not run)
|
20a3d89114b034ca8d2cde302dc802afa88df9a1
|
c6d39c415efc8db022b150eeca2ac189a31e427d
|
/HypoPub_LoadPackages.R
|
ffd844ef6c3e6043a440f897e5a87579db90a791
|
[] |
no_license
|
brianherb/DevHumanHypothalamus
|
eeb17c9bf6a919b6314f67dd1a063d4822dfefca
|
35d4c8979cc1bd7b2ae0728f080daa2601a98c09
|
refs/heads/main
| 2023-06-13T22:33:57.947533
| 2021-07-08T16:22:23
| 2021-07-08T16:22:23
| 384,183,943
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 964
|
r
|
HypoPub_LoadPackages.R
|
# Package bootstrap for the developing-human-hypothalamus analyses.
# Intended to run on the cluster, e.g.:
#qlogin -P "sament-lab" -l mem_free=100G -q interactive.q -pe thread 16
# Pinned user library path for R 3.6.
.libPaths('/local/projects-t3/idea/bherb/software/R_lib/R_3_6')
# Seurat is loaded from the system R 3.6 library to pin version 3.0.1.
library(Seurat,lib.loc='/usr/local/packages/r-3.6.0/lib64/R/library') ## 3.0.1
#library(Seurat)
library(BiocGenerics)
library(monocle3)
library(projectR)
library(plotrix) # updated
library(xlsx)
library(ggplot2)
library(impute)
library(preprocessCore)
library(AnnotationDbi)
library(GO.db)
library(dendextend)
library(matrixStats)
library(Matrix)
library(scrattch.hicat)
library(scDblFinder)
#library(future)
library(RColorBrewer)
library(GENIE3)
library(feather)
library(slingshot)
library(gam)
library(tradeSeq)
# (duplicate library(RColorBrewer) call removed -- attaching an already
# attached package is a no-op)
library(SingleCellExperiment)
library(WGCNA)
library(AUCell)
library(MetaNeighbor)
#library(topGO)
library('GOstats')
# (duplicate library('GO.db') call removed -- GO.db is attached above)
library('org.Hs.eg.db')
library("biomaRt")
#library(scran,lib.loc='/usr/local/packages/r-3.6.0/lib64/R/library')
library(EnhancedVolcano)
library( cicero )
|
648188300e76d9dff69e3fa68957bdedeb0d0c72
|
dd2fdafe579a221d340b6b712f39fa2d20034c18
|
/db_old/axes.R
|
677f4b55ffc38561792533cf8f9fe2dbc794a08d
|
[] |
no_license
|
laurentBesnainou/Rcode
|
1958b71ed59e7400e41e0199d4e4af88ee3d26f4
|
263c5b919e6e9b40b2ecd245bdb6e4d103f3b67c
|
refs/heads/master
| 2021-05-08T18:47:26.985622
| 2018-01-30T12:34:22
| 2018-01-30T12:34:22
| 119,534,108
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,891
|
r
|
axes.R
|
# Chord (circlize) diagram of clients versus offers.
# Exploratory script: builds a client x offer revenue matrix from the
# "pilotage" data and tries several chordDiagram/circos layouts.
# NOTE(review): several calls below look unfinished/broken -- see the
# inline notes; this file appears to be a scratch pad, not a pipeline.
# Build the "knowledge" matrix: emitted revenue (CA) by group and main offer.
library(circlize)
library(dplyr)
pilotage_2017 <- pilotage_data
tmp <- pilotage_2017 %>% filter( WEEK ==5, STEP %in% c( "3 - Emise")) %>% select(GROUPE,OFFRE_PRINCIPALE,CA_BT__N__KE)
matriceConnais <- xtabs(CA_BT__N__KE~ GROUPE + OFFRE_PRINCIPALE, na.omit(tmp))
to <- paste(unique(colnames(matriceConnais)),sep = ",")
from <- paste(rownames(matriceConnais),sep = ",")
mat <- matrix(0, nrow = length(unique(from)), ncol = length(unique(to)))
col <- matrix(0, nrow = length(unique(from)), ncol = length(unique(to)))
rownames(mat) = unique(from)
colnames(mat) = unique(to)
noms <- c(from,to)
# NOTE(review): gripCol is never defined in this file; this line errors
# unless gripCol exists in the calling environment -- confirm.
names(gripCol) <- noms
# Copy the xtabs table into a plain matrix (chordDiagram input).
for (i in 1:length(from)) {
  for (j in 1:length(to)) {
    mat[i,j] <- matriceConnais[i,j]
  }
}
# for(i in from ) {
#
#   if (i != input$consultant_id ) {
#
#     col[which(from == i), 1] = "#FFFFFF00"
#     col[which(from == i), 2] = "#FFFFFF00"
#     col[which(from == i), 3] = "#FFFFFF00"
#     col[which(from == i), 4] = "#FFFFFF00"
#     col[which(from == i), 5] = "#FFFFFF00"
#   }
# }
#= = = = =  initialize  = = = = = #
par(mar = c(1, 1, 1, 1))
circos.par(gap.degree = c(rep(2, nrow(mat)-1), 10, rep(2, ncol(mat)-1), 10))
# = = = = =  plot 'circlize'  = = = = = #
chordDiagram(mat, annotationTrack = "grid", transparency = 0.8,
             preAllocateTracks = list(track.height = 0.1),
             col = matrix(rainbow(nrow(mat)),nrow=nrow(mat),ncol=ncol(mat)),
             row.col = 1)
# = = = = =  add labels = = = = = #
circos.trackPlotRegion(track.index = 1,
                       panel.fun = function(x, y) {
                         xrange = get.cell.meta.data("xlim")
                         labels = get.cell.meta.data("sector.index")
                         circos.text(mean(xrange), 0,
                                     labels = labels, niceFacing = TRUE)
                       },
                       bg.border = NA)
circos.clear()
# NOTE(review): malformed call -- sector.index and track.index are not
# objects here, and circos.axis is being called after circos.clear();
# this line cannot work as written.
circos.axis(mat, sector.index, track.index)
chordDiagram(mat,
             directional = TRUE,
             niceFacing=TRUE,
             transparency = 0.2)
circos.clear()
# NOTE(review): circos.trackText is called with no prior
# circos.initialize() for these factors -- likely errors; looks like a
# leftover experiment.
circos.trackText(from,to,labels =union(from,to),
                 factors = union(from,to),
                 col = "#EEEEEE", font = 2, facing = "downward")
# = = = = =  add labels = = = = = #
# Clock-face style demo layout with 20 fixed sectors.
factors = 1:20# just indicate there are 20 sectors
circos.par(gap.degree = 0, cell.padding =c(0, 0, 0, 0),start.degree = 360/20/2,
           track.margin =c(0, 0), clock.wise = FALSE)
circos.initialize(factors = factors, xlim =c(0, 1))
circos.trackPlotRegion(ylim =c(0, 1), factors = factors,
                       bg.col = "black",track.height = 0.15)
circos.trackText(rep(0.5, 20),rep(0.5, 20),
                 labels =c(13, 4, 18, 1, 20, 5, 12, 9, 14, 11, 8, 16, 7, 19, 3, 17, 2, 15, 10, 6),
                 factors = factors, col = "#EEEEEE", font = 2, facing = "bending.outside")
circos.trackPlotRegion(ylim =c(0, 1), factors = factors,bg.col =rep(c("#E41A1C", "#4DAF4A"), 10),
                       bg.border = "#EEEEEE",
                       track.height = 0.05)
circos.trackPlotRegion(ylim =c(0, 1), factors = factors,bg.col =rep(c("black", "white"), 10),
                       bg.border = "#EEEEEE", track.height = 0.275)
circos.trackPlotRegion(ylim =c(0, 1), factors = factors,bg.col =rep(c("#E41A1C", "#4DAF4A"), 10),
                       bg.border = "#EEEEEE", track.height = 0.05)
circos.trackPlotRegion(ylim =c(0, 1), factors = factors,bg.col =rep(c("black", "white"), 10),
                       bg.border = "#EEEEEE", track.height = 0.375)
draw.sector(center =c(0, 0), start.degree = 0, end.degree = 360,rou1 = 0.1,
            col = "#4DAF4A", border = "#EEEEEE")
draw.sector(center =c(0, 0), start.degree = 0, end.degree = 360,rou1 = 0.05, col = "#E41A1C", border = "#EEEEEE")
c3b4be3c26c27cf93e14ffc6c6ea0a0af61ab9f6
|
9e8615839d1361b9690b32c59085ffeeccc91b07
|
/RScripts/00_apresenta_curso.R
|
d2f760e0c999b7aaa5423d697ee3871293d457c1
|
[] |
no_license
|
rdosreis/MAT02018
|
7dbb658308b75997b14fe010fdc3c029d22819ca
|
4e8e0ba81bd19de67a6f68ef8866d5389b6f42b8
|
refs/heads/master
| 2022-10-04T12:55:53.010897
| 2022-09-22T13:26:15
| 2022-09-22T13:26:15
| 244,178,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,172
|
r
|
00_apresenta_curso.R
|
## Slide figures for the course-introduction deck (purled from R Markdown;
## each chunk embeds an image via knitr::include_graphics()).
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='80%', out.height='80%', paged.print=FALSE----
knitr::include_graphics(here::here('images', 'hi_my_name_is.png'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='100%', paged.print=FALSE----
knitr::include_graphics(here::here('images','covid-recomendacoes.jpg'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='10%', paged.print=FALSE----
knitr::include_graphics(here::here('images','Rlogo.png'))
## ---- echo=FALSE, eval=TRUE-------------------------------------------------------------------
# Simulated sample used by the histogram example below.
x <- rnorm(n = 100, mean = 10, sd = 1)
## ---- echo=TRUE, eval=TRUE, fig.align='center', out.width='50%'-------------------------------
hist(x, col = 'black', border = 'white')
## ----echo=FALSE, fig.align='right', message=FALSE, warning=FALSE, out.width='15%', paged.print=FALSE----
# Fixed: this call used a bare here(), which fails because the `here`
# package is never attached in this file -- every other chunk uses
# here::here().
knitr::include_graphics(here::here('images','ctanlion.png'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='100%', out.height='80%', paged.print=FALSE----
knitr::include_graphics(here::here('images', 'estat1.jpg'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='100%', out.height='80%', paged.print=FALSE----
knitr::include_graphics(here::here('images', 'estat2.jpg'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='100%', out.height='80%', paged.print=FALSE----
knitr::include_graphics(here::here('images', 'estat3.png'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='100%', out.height='80%', paged.print=FALSE----
knitr::include_graphics(here::here('images', 'estat4.jpg'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='100%', out.height='80%', paged.print=FALSE----
knitr::include_graphics(here::here('images', 'Descritiva_Inferencia.png'))
## ----echo=FALSE, fig.align='center', message=FALSE, warning=FALSE, out.width='50%', out.height='50%', paged.print=FALSE----
knitr::include_graphics(here::here('images', 'Statistically-Insignificant-8.jpg'))
|
ec2d34edc08bf248bcd434e17d333a00563c51bf
|
9904cbbc06ae2ded9bd2235a4420ee52d80f934b
|
/Scripts/compareNetworks.R
|
ebe9162e14f2f0137626bb137503a82b0dcb4e81
|
[] |
no_license
|
gazwb/compositionalCor_v3
|
2b27a159a8463dcda7159c381f145da114b884b3
|
ac1be3c95e9670cf7d7c0c22640657724769d5e2
|
refs/heads/master
| 2020-06-04T21:34:42.752376
| 2015-04-30T16:52:16
| 2015-04-30T16:52:16
| 33,330,934
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,498
|
r
|
compareNetworks.R
|
# compareNetworks.R -----------------------------------------------------------
# Compares microbial co-occurrence networks between atopic-dermatitis lesional
# (AD L), healthy control (CTRL) and psoriasis lesional (PSO L) samples:
# module realignment, network statistics, differential connectivity with
# permutation p-values, and a series of summary plots.
# NOTE(review): all paths are hard-coded to one machine; parameterize if reused.
setwd("~/MAARS_p2/MB")
source("/home/gaz/MAARS_p2/Scripts/compositionalCor_v3/Scripts/inference_functions.R")
source("/home/gaz/MAARS_p2/Scripts/compositionalCor_v3/Scripts/analysis_functions.R")
source("/home/gaz/rand/multiplot/multiplot.R")

# Edge lists for the three networks (helpers come from the sourced scripts).
nets <- readNetworks()
net.ADL <- nets[[1]]
# net.ADNL <- read.table(file="/home/gaz/MAARS_p2/Scripts/compositionalCor/mainNetworks/edgeData_AD_NON_LES", header = TRUE)
net.CTRL <- nets[[2]]
# net.PSONL <- read.table(file="/home/gaz/MAARS_p2/Scripts/compositionalCor/mainNetworks/edgeData_PSO_NON_LES", header = TRUE)
net.PSOL <- nets[[3]]

# Node attribute tables (module membership etc.), same order as `nets`.
node <- readAttributes()
node.ADL <- node[[1]]
node.CTRL <- node[[2]]
node.PSOL <- node[[3]]

mbdat <- load_MB_data()
getHubNodes(net.ADL)

# Realign module labels so that module IDs are comparable across networks:
# overlap matrix -> Jaccard matrix -> relabel against the CTRL network.
olm.CTRL_PSOL <- defOverlapMatrix(node.CTRL, node.PSOL)
olm.CTRL_ADL <- defOverlapMatrix(node.CTRL, node.ADL)
jac.CTRL_PSOL <- defJaccardMatrix(node.CTRL, node.PSOL, olm.CTRL_PSOL)
jac.CTRL_ADL <- defJaccardMatrix(node.CTRL, node.ADL, olm.CTRL_ADL)
node.PSOL.fx <- reAlign.2(node.PSOL, jac.CTRL_PSOL)
node.ADL.fx <- reAlign.2(node.ADL, jac.CTRL_ADL)
node.CTRL.fx <- reAlign.ctrl(node.CTRL)
# list of realigned attribute tables, same order as `nets`
node.fx <- list(node.ADL.fx, node.CTRL.fx, node.PSOL.fx)
# write out to file
#write_realignment(node.ADL.fx,node.CTRL.fx,node.PSOL.fx)

# Per-module statistics.
# BUG FIX: in the original script these were computed *after* `netSizes` below
# first referenced them, so modStats.* were undefined at the point of use.
modStats.ADL <- calcModuleStats(node.ADL.fx)
modStats.CTRL <- calcModuleStats(node.CTRL.fx)
modStats.PSOL <- calcModuleStats(node.PSOL.fx)

# define the order of the lists
netOrder <- c("AD L","CTRL","PSO L")
netStats <- calcStats(node, nets, netOrder)

# Observed differential connectivity plus its permutation null distributions.
diffConect <- diffConnectivity(nets)
diffConectivityPerms <- readPermutations()
perm.ADL <- diffConectivityPerms[[1]]
perm.CTRL.ADL <- diffConectivityPerms[[2]]
perm.PSOL <- diffConectivityPerms[[3]]
perm.CTRL.PSOL <- diffConectivityPerms[[4]]
netSizes <- c(sum(modStats.ADL[[3]]), sum(modStats.CTRL[[3]]), sum(modStats.PSOL[[3]]))
diffPerm.ADL <- difConexPermutation(perm.ADL, perm.CTRL.ADL)
diffPerm.PSOL <- difConexPermutation(perm.PSOL, perm.CTRL.PSOL)
# permutation p-values for the observed differential connectivity
diffConnect.P <- calculatePermutationPval(diffConect, diffPerm.ADL, diffPerm.PSOL)

# Attach taxonomy; fill blank Family/Genus/Species fields with placeholders.
tax <- mbdat[[2]][match(rownames(diffConnect.P), mbdat[[2]]$OTU_id), ]
tax[tax$Family == "", 6] <- "F."
tax[tax$Genus == "", 7] <- "G."
tax[tax$Species == "", 8] <- "sp."
tax$joinName <- as.factor(paste0(tax$Genus, " ", tax$Species))

## what modules are these nodes in?
cms <- node.CTRL.fx[match(rownames(diffConnect.P), node.CTRL.fx$nodeName), 10]
pms <- node.PSOL.fx[match(rownames(diffConnect.P), node.PSOL.fx$nodeName), 10]
ams <- node.ADL.fx[match(rownames(diffConnect.P), node.ADL.fx$nodeName), 10]
diffConnect.tax.P <- cbind(tax[, c(6, 7, 8, 10)], ams, cms, pms, diffConnect.P)
rownames(diffConnect.tax.P) <- rownames(diffConnect.P)
# diffConnect.tax.P[diffConnect.tax.P$ad.p < 0.05,]
# diffConnect.tax.P[diffConnect.tax.P$pso.p < 0.05,]

# Fold changes (differential abundance) joined to the connectivity table.
fold.changes <- calcDifferentialAbundance(diffConnect.tax.P)
diffConnect.tax.P.fc <- cbind(diffConnect.tax.P, fold.changes)

# Per-disease tables with Benjamini-Hochberg adjusted p-values.
AD.diffconnect <- diffConnect.tax.P.fc[!is.na(diffConnect.tax.P.fc$ams), ]
PSO.diffconnect <- diffConnect.tax.P.fc[!is.na(diffConnect.tax.P.fc$pms), ]
AD.diffconnect$ad.BH <- p.adjust(AD.diffconnect$ad.p, method = "BH")
PSO.diffconnect$pso.BH <- p.adjust(PSO.diffconnect$pso.p, method = "BH")

# Colour scale keyed on PSO module membership (shared by both scatter plots).
PSO.diffconnect$pms <- as.factor(PSO.diffconnect$pms)
myColors <- brewer.pal(8, "Dark2")
names(myColors) <- levels(PSO.diffconnect$pms)
colScale <- scale_colour_manual(name = "pms", values = myColors)

# Differential connectivity vs fold-change scatter plots.
qplot(Diffcvpso, PSOvCTRL.FC, data=PSO.diffconnect, colour=as.factor(pms),xlab = "Differential Connectivity", ylab = "log10(Fold Change)", main = "PSO Differential connectivity vs Fold change") + geom_point(aes(size = 1)) + colScale
qplot(Diffcvad, ADvCTRL.FC, data=AD.diffconnect, colour=as.factor(ams),xlab = "Differential Connectivity", ylab = "log10(Fold Change)",main = "AD Differential connectivity vs Fold change")+ geom_point(aes(size = 1)) + colScale

# Significant differentially connected taxa (BH < 0.1), ordered for plotting.
sig.dc.PSO <- PSO.diffconnect[PSO.diffconnect$pso.BH < 0.1, ]
sig.dc.AD <- AD.diffconnect[AD.diffconnect$ad.BH < 0.1, ]
# factor for order
sig.dc.PSO <- sig.dc.PSO[order(sig.dc.PSO$Diffcvpso, decreasing = FALSE), ]
sig.dc.PSO$joinName <- as.character(sig.dc.PSO$joinName)
sig.dc.PSO$joinName <- factor(sig.dc.PSO$joinName, levels = unique(sig.dc.PSO$joinName))
# factor for order
sig.dc.AD <- sig.dc.AD[order(sig.dc.AD$Diffcvad, decreasing = FALSE), ]
sig.dc.AD$joinName <- as.character(sig.dc.AD$joinName)
sig.dc.AD$joinName <- factor(sig.dc.AD$joinName, levels = unique(sig.dc.AD$joinName))
# AD bars use OTU row names on the axis (species labels may repeat).
sig.dc.AD$rn <- rownames(sig.dc.AD)
sig.dc.AD$rn <- factor(sig.dc.AD$rn, levels = unique(sig.dc.AD$rn))
ggplot(data=sig.dc.PSO, aes(x=joinName, y=Diffcvpso, fill=joinName)) + geom_bar(colour="black", stat="identity",width=.8) + guides(fill=FALSE) + labs(y = "Differential Connectivity", x = "") + ggtitle("PSO v CTRL") + coord_flip() + theme_bw()
ggplot(data=sig.dc.AD, aes(x=rn, y=Diffcvad, fill=joinName)) + geom_bar(colour="black", stat="identity",width=.8) + guides(fill=FALSE) + scale_x_discrete(labels = sig.dc.AD$joinName)+ labs(y = "Differential Connectivity", x = "") + ggtitle("AD v CTRL") + coord_flip() + theme_bw()

# more plots
moduleDegreePlot()
plotBetweenessCentrality(nets)

# extract positive and negative subgraphs
# 1 is positive links, 2 is negative links, 3 is positive nodes, 4 is negative nodes
directionalSubgraphs <- extractDirectionalSubgraphs(nets, node.fx)

# Module-level plots (O2 proportions, overlap, order/taxonomy composition).
o <- o2PropNetworkPlot(node.fx)
# plot of o2 proportions per modules
p <- o2PropPlot(node.fx) # works now
# plot of module similarity
j <- moduleOverlapPlot(node.fx)
k <- orderNetworkPlot(node.fx)
l <- taxNetworkPlot(node.fx)
# NOTE(review): the original script had a bare `readPermutations` symbol here
# (a no-op that merely evaluates the function object); removed as dead code.

# Raw proportional counts for a SparCC comparison.
rawUnionDat <- readRawProportionalCountDat()
AD.raw <- t(rawUnionDat[[1]])
CTRL.raw <- t(rawUnionDat[[2]])
PSO.raw <- t(rawUnionDat[[3]])
# exec one SparCC
#sparCC.PSO <- sparcc(PSO.raw)
|
489825dd9bfbe6c3d1fd455cb84308201fb9e72f
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/dataanalysiscode/libfuzz.R
|
c7828041547b583c1dca51b216f65bd5e83c299b
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,604
|
r
|
libfuzz.R
|
# Create and run libFuzzer-instrumented DeepState test harnesses for every
# Rcpp function of the package at `path`.
#
# For each function reported by RcppDeepState::deepstate_get_function_body(),
# the previously generated DeepState harness and Makefile (under
# inst/testfiles/<fn>) are copied into a libFuzzer_<fn> subdirectory, rewritten
# for libFuzzer, compiled, and executed for up to 1800 seconds -- but compile
# and run only happen when the libFuzzer_<fn> directory did not already exist.
#
# Args:
#   path: path to an R package source directory (must exist).
# Side effects: may build deepstate libraries, installs the package via
#   `R CMD INSTALL`, creates files/directories under inst/testfiles, and
#   shells out to make and to the fuzzer binary.
deepstate_pkg_create_LibFuzzer<-function(path){
path <- normalizePath(path,mustWork=TRUE)
insts.path <- normalizePath("~",mustWork=TRUE)
deepstate <- file.path(insts.path,".RcppDeepState")
deepstate.path <- file.path(deepstate,"deepstate-master")
inst_path <- file.path(path, "inst")
test_path <- file.path(inst_path,"testfiles")
# Build the base DeepState static libraries if either one is missing.
if(!(file.exists(file.path(insts.path,".RcppDeepState/deepstate-master/build/libdeepstate32.a")) &&
file.exists(file.path(insts.path,"/.RcppDeepState/deepstate-master/build/libdeepstate.a")))){
RcppDeepState::deepstate_make_run()
}
# Build the libFuzzer flavour of the DeepState library if missing.
LF.a <- file.path(deepstate.path,"build_libfuzzer/libdeepstate_LF.a")
if(!file.exists(LF.a)){
deepstate_make_libFuzzer()
#print("lib not exists")
}
# exists_flag is set to 1 when at least one libFuzzer_<fn> directory is newly
# created; only then are the compile/run steps executed at the end of the loop.
# NOTE(review): the flag is shared across iterations, so one new directory
# triggers compile/run for every function processed after it -- confirm intent.
exists_flag = 0
# Install the package so the compiled shared object is available.
# NOTE(review): file.exists() does not expand the "*" glob, so this condition
# is effectively always TRUE and the package is reinstalled each call.
if(!file.exists(file.path(path,"src/*.so"))){
system(paste0("R CMD INSTALL ",path),intern = FALSE,ignore.stderr =TRUE,ignore.stdout = TRUE)
}
functions.list <- RcppDeepState::deepstate_get_function_body(path)
fun_names <- unique(functions.list$funName)
for(f in fun_names){
# Per-function working paths under inst/testfiles/<f>/libFuzzer_<f>.
libfuzzer.fun.path <- file.path(test_path,f,paste0("libFuzzer_",f))
libfuzzer.harness.path <- file.path(libfuzzer.fun.path,paste0(f,"_DeepState_TestHarness"))
input_dir <- file.path(libfuzzer.fun.path,"libfuzzer_inputs")
inputs.list <- Sys.glob(file.path(input_dir,"*"))
if(!dir.exists(libfuzzer.fun.path)){
exists_flag = 1
dir.create(libfuzzer.fun.path,showWarnings = FALSE)
}
# Existing (plain DeepState) harness and Makefile to transform.
function.path <- file.path(test_path,f)
harness.path <- file.path(function.path,paste0(f,"_DeepState_TestHarness.cpp"))
makefile.path <- file.path(function.path,"Makefile")
if(file.exists(harness.path) && file.exists(makefile.path) ){
executable <- gsub(".cpp$","",harness.path)
object <- gsub(".cpp$",".o",harness.path)
o.logfile <- file.path(libfuzzer.fun.path,paste0("/",f,"_log"))
logfile <- file.path(libfuzzer.fun.path,paste0("/libfuzzer_",f,"_log"))
output_dir <- file.path(libfuzzer.fun.path,paste0("/libfuzzer_",f,"_output"))
if(!dir.exists(output_dir)) {
dir.create(output_dir,showWarnings = FALSE)
}
if(!dir.exists(input_dir)) {
dir.create(input_dir,showWarnings = FALSE)
}
#writing harness file
# Rewrite the C++ harness text: guard RInside construction behind a flag so it
# runs only once per fuzzing process, and record a timestamp for input names.
harness_lines <- readLines(harness.path,warn=FALSE)
harness_lines <- gsub("RInside R;","static int rinside_flag = 0;\n if(rinside_flag == 0)\n {\n rinside_flag = 1;\n RInside R;\n } std::time_t current_timestamp = std::time(0);"
,harness_lines,fixed=TRUE)
# Capture each qs::c_qsave(<object>, "<file>.qs") call so the hard-coded
# output file can be replaced by a timestamped path in libfuzzer_inputs/.
k <- nc::capture_all_str(harness_lines,
"qs::c_qsave","\\(",
save=".*",",\"",l=".*","\"")
for(i in seq_along(k$l)){
harness_lines <- gsub(paste0("\"",k$l[i],"\""),paste0(gsub(".qs","",basename(k$l[i])),"_t"),harness_lines,fixed=TRUE)
harness_lines <- gsub(paste0("qs::c_qsave(",gsub(".qs","",basename(k$l[i]))),paste0("std::string ",gsub(".qs","",basename(k$l[i])),"_t = ","\"",dirname(dirname(k$l[i])),
          "/",basename(libfuzzer.fun.path),"/libfuzzer_inputs/\" + std::to_string(current_timestamp) +
          \"_",basename(k$l[i]),"\"",";\n qs::c_qsave(",gsub(".qs","",basename(k$l[i]))),harness_lines,fixed=TRUE)
}
# Write the rewritten harness next to the libFuzzer working directory.
harness.libFuzz <- file.path(libfuzzer.fun.path,basename(harness.path))
file.create(harness.libFuzz,recursive=TRUE)
cat(harness_lines, file=harness.libFuzz, sep="\n")
##makefileupdate
# Rewrite the Makefile: point paths at the libFuzzer directory, enable the
# address+fuzzer sanitizers, and link against the libFuzzer DeepState build.
makefile_lines <- readLines(makefile.path,warn=FALSE)
makefile_lines <- gsub(function.path,libfuzzer.fun.path,makefile_lines,fixed=TRUE)
makefile_lines <- gsub("clang++ -g","clang++ -g -fsanitize=address,fuzzer",makefile_lines,fixed=TRUE)
makefile_lines <- gsub("-ldeepstate","-ldeepstate -ldeepstate_LF",makefile_lines,fixed=TRUE)
makefile_lines <- gsub("deepstate-master/build","deepstate-master/build_libfuzzer",makefile_lines,fixed=TRUE)
makefile.libFuzz <- file.path(libfuzzer.fun.path,"Makefile")
file.create(makefile.libFuzz,recursive=TRUE)
cat(makefile_lines, file=makefile.libFuzz, sep="\n")
# Compile and run the fuzzer (30-minute budget) only for fresh directories.
compile_line <-paste0("rm -f *.o && make -f ",makefile.libFuzz)
execution_line <- paste0("cd ",libfuzzer.fun.path," && ./",basename(executable)," -max_total_time=1800")
if(exists_flag == 1){
print(compile_line)
system(compile_line)
print(execution_line)
system(execution_line)
}
}
}
}
|
b1855867ac9233f8519cb914392f24b4abfad0c3
|
7ff90fda996b9b1da1048936749ad06c0e13da9a
|
/xgboost.R
|
af0a536a58066c03d5607dc431dd4d478886ccc8
|
[] |
no_license
|
katieji737/Adtracking-XGBoost
|
08583a5a88d26c57b59557d2072be7b1f520a53e
|
8d795b50553190e3728bc4fcd01da3d7cb90a490
|
refs/heads/master
| 2020-04-03T18:29:48.251871
| 2018-10-31T02:32:08
| 2018-10-31T02:32:08
| 155,486,519
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,317
|
r
|
xgboost.R
|
library(data.table)
library(dplyr)
library(xgboost)
library(ggplot2)

# Load the ad-tracking click data (click-fraud detection task).
# NOTE(review): paths are machine-specific; parameterize before reuse.
train <- fread("/Users/~/adtracking_dataset.csv",
               select = c("ip", "app", "device", "os", "channel", "click_time", "is_attributed"),
               showProgress = FALSE,
               colClasses = c("ip"="numeric","app"="numeric","device"="numeric","os"="numeric","channel"="numeric","click_time"="character","is_attributed"="numeric"))
#test <- fread("/Users/~/test.csv",
#select =c("ip", "app", "device", "os", "channel", "click_time"),
#showProgress=F,
#colClasses=c("ip"="numeric","app"="numeric","device"="numeric","os"="numeric","channel"="numeric","click_time"="character"))

# Downsample the majority (negative) class: keep all positives plus 9773
# random negatives, for a roughly balanced training set.
set.seed(1234)
train <- train[c(which(train$is_attributed == 1),
                 sample(which(train$is_attributed == 0), 9773, replace = FALSE)), ]
str(train)

y <- train$is_attributed  # binary target vector
#write.csv(y,'/Users/katie/Desktop/train_y.csv')
n_train <- nrow(train)
#dat_combined <- rbind(train,test[,-1],fill = T)
#rm(train,test)
#invisible(gc())

# Feature engineering via data.table reference semantics (`:=` updates in
# place): click hour plus single- and multi-column frequency counts (.N per
# group). BUG FIX: the original computed app_count twice; duplicate removed.
train[, ':='(hour = hour(click_time))
      ][, ip_count := .N, by = "ip"
      ][, app_count := .N, by = "app"
      ][, channel_count := .N, by = "channel"
      ][, device_count := .N, by = "device"
      ][, os_count := .N, by = "os"
      ][, ip_app := .N, by = "ip,app"
      ][, ip_dev := .N, by = "ip,device"
      ][, ip_os := .N, by = "ip,os"
      ][, ip_channel := .N, by = "ip,channel"
      ][, ip_hour := .N, by = "ip,hour"
      ][, app_device := .N, by = "app,device"
      ][, app_channel := .N, by = "app,channel"
      ][, channel_hour := .N, by = "channel,hour"
      ][, ip_app_channel := .N, by = "ip,app,channel"
      ][, app_channel_hour := .N, by = "app,channel,hour"
      ][, ip_app_hour := .N, by = "ip,app,hour"
      ][, c("ip","click_time", "is_attributed") := NULL]  # drop raw id/target columns
#write.csv(train, '/~/train_interaction.csv')
invisible(gc())

# Bar chart of the number of unique values per engineered feature.
train[, lapply(.SD, uniqueN), .SDcols = colnames(train)] %>%
  melt(variable.name = "features", value.name = "unique_values") %>%
  ggplot(aes(reorder(features, -unique_values), unique_values)) +
  geom_bar(stat = "identity", fill = "lightblue") +
  scale_y_log10(breaks = c(50,100,250, 500, 10000, 50000)) +
  geom_text(aes(label = unique_values), vjust = 1.6, color = "black", size=2) +
  theme_minimal() +
  labs(x = "features", y = "Number of unique values")

# 70/30 train/validation split of the (downsampled) training data.
within_train_index <- sample(seq_len(n_train), 0.7*n_train, replace = FALSE)
processed_train_train <- train[seq_len(n_train), ][within_train_index]
y1 <- y[seq_len(n_train)][within_train_index]
processed_train_val <- train[seq_len(n_train), ][-within_train_index]
y2 <- y[seq_len(n_train)][-within_train_index]
# NOTE(review): `train` holds exactly n_train rows (the test-set rbind above is
# commented out), so this is an EMPTY table; xgb_test below is a placeholder.
processed_test <- train[-seq_len(n_train), ]
rm(train)
rm(y)
invisible(gc())

# Build xgboost DMatrix objects, freeing the intermediate frames as we go.
model_train <- xgb.DMatrix(data = data.matrix(processed_train_train), label = y1)
rm(processed_train_train)
invisible(gc())
model_val <- xgb.DMatrix(data = data.matrix(processed_train_val), label = y2)
rm(processed_train_val)
invisible(gc())
xgb_test <- xgb.DMatrix(data = data.matrix(processed_test))
rm(processed_test)
invisible(gc())

# Gradient-boosted tree classifier, evaluated by AUC; scale_pos_weight
# compensates for class imbalance.
params <- list(objective = "binary:logistic",
               booster = "gbtree",
               eval_metric = "auc",
               nthread = 7,
               eta = 0.05,
               max_depth = 10,
               gamma = 0.9,
               subsample = 0.8,
               colsample_bytree = 0.8,
               scale_pos_weight = 50,
               nrounds = 100)
myxgb_model <- xgb.train(params, model_train, params$nrounds, list(val = model_val), print_every_n = 20, early_stopping_rounds = 50)

# Feature importance and 10-fold cross-validated AUC.
imp <- xgb.importance(colnames(model_train), model=myxgb_model)
xgb.plot.importance(imp, top_n = 15)
cv <- xgb.cv(data = model_train, nrounds = 100, nthread = 7, nfold = 10, metrics = "auc",
             max_depth = 10, eta = 0.05, objective = "binary:logistic")
|
025eba5d29acfb62a8be6ef651eca112e9830341
|
6db7a411ae81ea77831a323a7053d0b112a383df
|
/R/getENSEMBLGENOMES.Seq.R
|
8f434bade0e630a1111ffe5438f306c74fa4f097
|
[] |
no_license
|
cran/biomartr
|
803c46a689f26cad60e675bf689e52c95eff975c
|
c5607e69575bd4d47c1003e308a719f8da78bccf
|
refs/heads/master
| 2023-07-10T20:27:51.927945
| 2023-06-20T12:50:07
| 2023-06-20T12:50:07
| 39,511,686
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,147
|
r
|
getENSEMBLGENOMES.Seq.R
|
#' @title Helper function for retrieving biological sequence files from
#' ENSEMBLGENOMES
#' @description This function downloads gff files of query organisms from
#' ENSEMBLGENOMES
#' @param organism scientific name of the organism of interest.
#' @param release the ENSEMBLGENOMES release. Default is \code{release = NULL} meaning that the current (most recent) version is used.
#' @param type biological sequence type.
#' @param id.type id type.
#' @param path location where file shall be stored.
#' @author Hajk-Georg Drost
#' @noRd
getENSEMBLGENOMES.Seq <-
    function(organism,
             release = NULL,
             type = "dna",
             id.type = "toplevel",
             path) {
    # Validate the requested sequence type up front.
    if (!is.element(type, c("dna", "cds", "pep", "ncrna")))
        stop("Please a 'type' argument supported by this function:
'dna', 'cds', 'pep', 'ncrna'.")
    # Placeholder binding for the NSE column name used in dplyr::filter below.
    name <- NULL
    # test if REST API is responding
    is.ensemblgenomes.alive()
    if (is.taxid(organism))
        stop("Unfortunately, taxid retrieval is not yet implemented for ENSEMBLGENOMES...", call. = FALSE)
    # Bail out gracefully (return FALSE, not an error) when the organism is
    # unknown, so batch downloads can continue with the remaining species.
    if ( !suppressMessages(is.genome.available(organism = organism, db = "ensemblgenomes", details = FALSE)) ) {
        warning("Unfortunately organism '", organism, "' is not available at ENSEMBLGENOMES. ",
                "Please check whether or not the organism name is typed correctly or try db = 'ensembl'.",
                " Thus, download of this species has been omitted. ", call. = FALSE)
        return(FALSE)
    } else {
        taxon_id <- assembly <- accession <- NULL
        new.organism <- stringr::str_to_lower(stringr::str_replace_all(organism, " ", "_"))
        # Detailed availability record(s) for the organism.
        ensembl_summary <-
            suppressMessages(is.genome.available(
                organism = organism,
                db = "ensemblgenomes",
                details = TRUE
            ))
        if (nrow(ensembl_summary) == 0) {
            message("Unfortunately, organism '",organism,"' does not exist in this database. Could it be that the organism name is misspelled? Thus, download has been omitted.")
            return(FALSE)
        }
        # Several entries may match; narrow down by taxid, name, or accession,
        # then use only the first remaining entry.
        if (nrow(ensembl_summary) > 1) {
            if (is.taxid(organism)) {
                ensembl_summary <-
                    dplyr::filter(ensembl_summary, taxon_id == as.integer(organism), !is.na(assembly))
            } else {
                ensembl_summary <-
                    dplyr::filter(ensembl_summary,
                                  (name == stringr::str_to_lower(new.organism)) |
                                      (accession == organism),
                                  !is.na(assembly)) }
            message("Several entries were found for '", organism, "'.")
            # "... The first entry '", ensembl_summary$name[1],"' with accession id '",ensembl_summary$accession[1],"' was selected for download.")
            message("In case you wish to retrieve another genome version please consult is.genome.available(organism = '", organism,"', details = TRUE, db = 'ensemblgenomes') and specify another accession id as organism argument.")
            message("\n")
            # select only first entry
        }
        # Capitalize the first letter of the ENSEMBL species name (file-name
        # convention used by the FTP archive).
        new.organism <-
            paste0(
                stringr::str_to_upper(stringr::str_sub(ensembl_summary$name[1], 1, 1)),
                stringr::str_sub(ensembl_summary$name[1], 2, nchar(ensembl_summary$name[1]))
            )
        # retrieve detailed information for organism of interest
    }
    get.org.info <- ensembl_summary
    # Query the REST API for assembly metadata (gives the coord system version
    # embedded in the FASTA file names).
    rest_url <- paste0(
        "http://rest.ensembl.org/info/assembly/",
        new.organism,
        "?content-type=application/json"
    )
    rest_api_status <- test_url_status(url = rest_url, organism = organism)
    # test_url_status() returns a logical on failure; abort quietly in that case.
    if (is.logical(rest_api_status)) {
        return(FALSE)
    } else {
        # Bacteria are organized into numbered collections on the FTP server,
        # so they need a lookup in species_EnsemblBacteria.txt; all other
        # divisions use a flat per-species directory layout (else branch).
        if (get.org.info$division == "EnsemblBacteria") {
            # Download (and cache in tempdir) the bacteria collection index.
            if (!file.exists(file.path(tempdir(), "EnsemblBacteria.txt"))) {
                tryCatch({
                    custom_download(
                        "ftp://ftp.ensemblgenomes.org/pub/current/bacteria/species_EnsemblBacteria.txt",
                        destfile = file.path(tempdir(), "EnsemblBacteria.txt"),
                        mode = "wb"
                    )
                }, error = function(e) {
                    message(
                        "Something went wrong when accessing the API 'http://rest.ensemblgenomes.org'.",
                        " Are you connected to the internet? ",
                        "Is the homepage 'ftp://ftp.ensemblgenomes.org/pub/current/bacteria/species_EnsemblBacteria.txt' ",
                        "currently available? Could it be that the scientific name is mis-spelled or includes special characters such as '.' or '('?"
                    )
                })
            }
            suppressWarnings(
                bacteria.info <-
                    readr::read_delim(
                        file.path(tempdir(), "EnsemblBacteria.txt"),
                        delim = "\t",
                        quote = "\"",
                        escape_backslash = FALSE,
                        col_names = c(
                            "name",
                            "species",
                            "division",
                            "taxonomy_id",
                            "assembly",
                            "assembly_accession",
                            "genebuild",
                            "variation",
                            "pan_compara",
                            "peptide_compara",
                            "genome_alignments",
                            "other_alignments",
                            "core_db",
                            "species_id"
                        ),
                        col_types = readr::cols(
                            name = readr::col_character(),
                            species = readr::col_character(),
                            division = readr::col_character(),
                            taxonomy_id = readr::col_integer(),
                            assembly = readr::col_character(),
                            assembly_accession = readr::col_character(),
                            genebuild = readr::col_character(),
                            variation = readr::col_character(),
                            pan_compara = readr::col_character(),
                            peptide_compara = readr::col_character(),
                            genome_alignments = readr::col_character(),
                            other_alignments = readr::col_character(),
                            core_db = readr::col_character(),
                            species_id = readr::col_integer()
                        ),
                        comment = "#"
                    )
            )
            # parse for wrong name conventions and fix them...
            organism <-
                stringr::str_replace_all(organism, " sp ", " sp. ")
            organism <-
                stringr::str_replace_all(organism, " pv ", " pv. ")
            organism <-
                stringr::str_replace_all(organism, " str ", " str. ")
            organism <-
                stringr::str_replace_all(organism, " subsp ", " subsp. ")
            organism <-
                stringr::str_replace_all(organism, "\\(", "")
            organism <-
                stringr::str_replace_all(organism, "\\)", "")
            assembly <- NULL
            # Match the organism's assembly against the collection index to
            # discover which bacteria collection directory it lives in.
            bacteria.info <-
                dplyr::filter(bacteria.info,
                              assembly == get.org.info$assembly)
            if (nrow(bacteria.info) == 0) {
                message(
                    "Unfortunately organism '",
                    ensembl_summary$display_name,
                    "' could not be found. Have you tried another database yet? ",
                    "E.g. db = 'ensembl'? Thus, download for this species is omitted."
                )
                return(FALSE)
            }
            if (is.na(bacteria.info$core_db[1])) {
                message(
                    "Unfortunately organism '",
                    ensembl_summary$display_name,
                    "' was not assigned to a bacteria collection.
          Thus download for this species is omitted."
                )
                return(FALSE)
            }
            # Validate the requested release against the current API release.
            release_api <- jsonlite::fromJSON(
                "http://rest.ensembl.org/info/eg_version?content-type=application/json"
            )
            if (!is.null(release)){
                if (!is.element(release, seq_len(as.integer(release_api))))
                    stop("Please provide a release number that is supported by ENSEMBLGENOMES.", call. = FALSE)
            }
            # construct retrieval query
            if (is.null(release))
                core_path <- "ftp://ftp.ensemblgenomes.org/pub/current/bacteria/fasta/"
            if (!is.null(release))
                core_path <- paste0("ftp://ftp.ensemblgenomes.org/pub/release-", release ,"/bacteria/fasta/")
            # FTP URL: <core>/<collection>/<species>/<type>/<Species>.<assembly>.<type>[.<id.type>].fa.gz
            # The collection name is the first three "_"-separated tokens of core_db.
            ensembl.qry <-
                paste0(core_path,
                       paste0(unlist(
                           stringr::str_split(bacteria.info$core_db[1], "_")
                       )[1:3], collapse = "_"),
                       "/",
                       stringr::str_to_lower(new.organism),
                       "/",
                       type,
                       "/",
                       paste0(
                           new.organism,
                           ".",
                           rest_api_status$default_coord_system_version,
                           ".",
                           type,
                           ifelse(id.type == "none", "", "."),
                           ifelse(id.type == "none", "", id.type),
                           ".fa.gz"
                       )
                )
        } else {
            # Non-bacterial divisions: flat per-species FTP layout.
            release_api <- jsonlite::fromJSON(
                "http://rest.ensembl.org/info/eg_version?content-type=application/json"
            )
            if (!is.null(release)){
                if (!is.element(release, seq_len(as.integer(release_api))))
                    stop("Please provide a release number that is supported by ENSEMBLGENOMES.", call. = FALSE)
            }
            # construct retrieval query
            if (is.null(release))
                core_path <- "ftp://ftp.ensemblgenomes.org/pub/current/"
            if (!is.null(release))
                core_path <- paste0("ftp://ftp.ensemblgenomes.org/pub/release-", release ,"/")
            # construct retrieval query
            # Division directory is the division name with the "Ensembl" prefix
            # stripped and lower-cased (e.g. "EnsemblPlants" -> "plants").
            ensembl.qry <-
                paste0( core_path,
                        stringr::str_to_lower(
                            stringr::str_replace(get.org.info$division[1],
                                                 "Ensembl", "")
                        ),
                        "/fasta/",
                        stringr::str_to_lower(new.organism),
                        "/",
                        type,
                        "/",
                        paste0(
                            new.organism,
                            ".",
                            rest_api_status$default_coord_system_version,
                            ".",
                            type,
                            ifelse(id.type == "none", "", "."),
                            ifelse(id.type == "none", "", id.type),
                            ".fa.gz"
                        )
                )
        }
        # if (!exists.ftp.file(url = ensembl.qry, file.path = ensembl.qry)) {
        #     message(
        #         "Unfortunately no ",
        #         type,
        #         " file could be found for organism '",
        #         organism,
        #         "'. Thus, the download of this organism has been omitted."
        #     )
        #     return(FALSE)
        # }
        # Skip the download when the target file already exists locally.
        if (file.exists(file.path(
            path,
            paste0(
                new.organism,
                ".",
                rest_api_status$default_coord_system_version,
                ".",
                type,
                ifelse(id.type == "none", "", "."),
                ifelse(id.type == "none", "", id.type),
                ".fa.gz"
            )
        ))) {
            message(
                "File ",
                file.path(
                    path,
                    paste0(
                        new.organism,
                        ".",
                        rest_api_status$default_coord_system_version,
                        ".",
                        type,
                        ifelse(id.type == "none", "", "."),
                        ifelse(id.type == "none", "", id.type),
                        ".fa.gz"
                    )
                ),
                " exists already. Thus, download has been skipped."
            )
        } else {
            custom_download(url = ensembl.qry,
                            destfile = file.path(
                                path,
                                paste0(
                                    new.organism,
                                    ".",
                                    rest_api_status$default_coord_system_version,
                                    ".",
                                    type,
                                    ifelse(id.type == "none", "", "."),
                                    ifelse(id.type == "none", "", id.type),
                                    ".fa.gz"
                                )
                            ))
        }
        # Return both the local file path and the source FTP URL.
        return(c(file.path(
            path,
            paste0(
                new.organism,
                ".",
                rest_api_status$default_coord_system_version,
                ".",
                type,
                ifelse(id.type == "none", "", "."),
                ifelse(id.type == "none", "", id.type),
                ".fa.gz"
            )
        ), ensembl.qry))
    }
    }
|
8319ae30d902a64031de0775ddddcd94766ffe77
|
fbe57536cc2d84e69a5bf799c88fcb784e853558
|
/R/spc.capability.cpm.simple.R
|
7d695daeb22bc02cdfd1b098d0535aaa300414fe
|
[
"MIT"
] |
permissive
|
burrm/lolcat
|
78edf19886fffc02e922b061ce346fdf0ee2c80f
|
abd3915791d7e63f3827ccb10b1b0895aafd1e38
|
refs/heads/master
| 2023-04-02T11:27:58.636616
| 2023-03-24T02:33:34
| 2023-03-24T02:33:34
| 49,685,593
| 5
| 2
| null | 2016-10-21T05:14:49
| 2016-01-15T00:56:55
|
R
|
UTF-8
|
R
| false
| false
| 1,418
|
r
|
spc.capability.cpm.simple.R
|
#' Calculate Capability Measures - Cpm
#'
#' Computes Cpm, the Taguchi capability index that combines process
#' variability with deviation from the nominal target. Higher is better.
#'
#' @param lower.specification Lower specification limit (NA if not applicable)
#' @param upper.specification Upper specification limit (NA if not applicable)
#' @param process.variability Estimate of process variability, expressed as variance
#' @param process.center Estimate of process center
#' @param nominal.center Nominal target for the process
#' @param n.sigma Number of standard deviations used in the denominator. 6 is
#'   recommended; 5.15 is the historical AIAG convention.
#'
#' @return A scalar with computed Cpm.
spc.capability.cpm.simple <- function(
   lower.specification
  ,upper.specification
  ,process.variability #Usually Expressed as Variance
  ,process.center
  ,nominal.center
  ,n.sigma = 6) {

  # Taguchi denominator: spread inflated by the squared off-target deviation.
  taguchi.spread <- n.sigma *
    sqrt(process.variability + (process.center - nominal.center)^2)

  if (!is.na(lower.specification) && !is.na(upper.specification)) {
    # Both limits known: classic two-sided Cpm.
    (upper.specification - lower.specification) / taguchi.spread
  } else {
    # One-sided: twice the distance from the center to whichever limit is
    # present. (With both limits NA this yields numeric(0), matching the
    # original implementation.)
    spec.limit <- na.omit(c(upper.specification, lower.specification))
    (2 * abs(process.center - spec.limit)) / taguchi.spread
  }
}
|
b2ebaffb9c31efd68b180e0c3c178620cacd27f4
|
cf7ba0b28c506172906f6fa9a6edc32a2922da58
|
/R/bf.R
|
18a69c6b40e86da9c028deb28762b69bbdd04524
|
[] |
no_license
|
cran/geoBayes
|
9edcc7113ab69f548f73d7b34dc0dd89f3ac8825
|
43f974c7b0062cd3b1310a1a5462b1a27c0e6a3b
|
refs/heads/master
| 2023-07-21T21:59:49.150797
| 2023-07-06T21:30:03
| 2023-07-06T21:30:03
| 23,163,607
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,566
|
r
|
bf.R
|
##' Function to compute the Bayes factors from MCMC samples.
##'
##' Computes the Bayes factors using \code{method} with respect to
##' \code{reference}.
##' @title Computation of Bayes factors at the skeleton points
##' @param runs A list with outputs from the function
##' \code{\link{mcsglmm}} or \code{\link{mcstrga}}.
##' @param bfsize1 A scalar or vector of the same length as
##' \code{runs} with all integer values or all values in (0, 1]. How
##' many samples (or what proportion of the sample) to use for
##' estimating the Bayes factors at the first stage. The remaining
##' sample will be used for estimating the Bayes factors in the
##' second stage. Setting it to 1 will perform only the first stage.
##' @param method Which method to use to calculate the Bayes factors:
##' Reverse logistic or Meng-Wong.
##' @param reference Which model goes in the denominator.
##' @param transf Whether to use a transformed sample for the
##' computations. If \code{"no"} or \code{FALSE}, it doesn't. If
##' \code{"mu"} or \code{TRUE}, it uses the samples for the mean. If
##' \code{"wo"} it uses an alternative transformation. The latter
##' can be used only for the families indicated by
##' \code{.geoBayes_models$haswo}.
##' @return A list with components
##' \itemize{
##' \item \code{logbf} A vector containing logarithm of the Bayes factors.
##' \item \code{logLik1} \code{logLik2} Matrices with the values of
##' the log-likelihood computed from the samples for each model at the
##' first and second stages.
##' \item \code{isweights} A vector with the importance sampling
##' weights for computing the Bayes factors at new points that will be
##' used at the second stage. Used internally in
##' \code{\link{bf2new}} and \code{\link{bf2optim}}.
##' \item \code{controlvar} A matrix with the control variates
##' computed at the samples that will be used in the second stage.
##' \item \code{sample2} The MCMC sample for mu or z that will be
##' used in the second stage. Used internally in
##' \code{\link{bf2new}} and \code{\link{bf2optim}}.
##' \item \code{N1}, \code{N2} Vectors containing the sample sizes
##' used in the first and second stages.
##' \item \code{distmat} Matrix of distances between locations.
##' \item \code{betm0}, \code{betQ0}, \code{ssqdf}, \code{ssqsc},
##' \code{tsqdf}, \code{tsqsc}, \code{dispersion}, \code{response},
##' \code{weights}, \code{modelmatrix}, \code{locations},
##' \code{family}, \code{corrfcn}, \code{transf} Model parameters used
##' internally in.
##' \code{\link{bf2new}} and \code{\link{bf2optim}}.
##' \item \code{pnts} A list containing the skeleton points. Used
##' internally in \code{\link{bf2new}} and \code{\link{bf2optim}}.
##' }
##' @references Geyer, C. J. (1994). Estimating normalizing constants
##' and reweighting mixtures. Technical report, University of
##' Minnesota.
##'
##' Meng, X. L., & Wong, W. H. (1996). Simulating ratios of
##' normalizing constants via a simple identity: A theoretical
##' exploration. \emph{Statistica Sinica}, 6, 831-860.
##'
##' Roy, V., Evangelou, E., and Zhu, Z. (2015). Efficient estimation
##' and prediction for the Bayesian spatial generalized linear mixed
##' model with flexible link functions. \emph{Biometrics}, 72(1), 289-298.
##' @examples \dontrun{
##' data(rhizoctonia)
##' ### Define the model
##' corrf <- "spherical"
##' kappa <- 0
##' ssqdf <- 1
##' ssqsc <- 1
##' betm0 <- 0
##' betQ0 <- .01
##' family <- "binomial.probit"
##' ### Skeleton points
##' philist <- c(100, 140, 180)
##' omglist <- c(.5, 1)
##' parlist <- expand.grid(linkp=0, phi=philist, omg=omglist, kappa = kappa)
##' ### MCMC sizes
##' Nout <- 100
##' Nthin <- 1
##' Nbi <- 0
##' ### Take MCMC samples
##' runs <- list()
##' for (i in 1:NROW(parlist)) {
##' runs[[i]] <- mcsglmm(Infected ~ 1, family, rhizoctonia, weights = Total,
##' atsample = ~ Xcoord + Ycoord,
##' Nout = Nout, Nthin = Nthin, Nbi = Nbi,
##' betm0 = betm0, betQ0 = betQ0,
##' ssqdf = ssqdf, ssqsc = ssqsc,
##' phi = parlist$phi[i], omg = parlist$omg[i],
##' linkp = parlist$linkp[i], kappa = parlist$kappa[i],
##' corrfcn = corrf,
##' corrtuning=list(phi = 0, omg = 0, kappa = 0))
##' }
##' bf <- bf1skel(runs)
##' bf$logbf
##' }
##' @importFrom sp spDists
##' @useDynLib geoBayes bfsp_no bfsp_mu bfsp_wo bfsp_tr
##' @export
bf1skel <- function(runs, bfsize1 = 0.80, method = c("RL", "MW"),
reference = 1, transf = c("no", "mu", "wo"))
{
method <- match.arg(method)
imeth <- match(method, eval(formals()$method))
if (!all(sapply(runs, inherits, what = "geomcmc"))) {
stop ("Input runs is not a list with elements of class geomcmc.")
}
nruns <- length(runs)
if (nruns == 0) stop ("No runs specified")
reference <- as.integer(reference)
if (isTRUE(reference < 1L | reference > nruns)) {
stop("Argument reference does not correspond to a run in runs.")
}
Nout <- sapply(runs, function(x) x$MCMC$Nout)
Nout1 <- getsize(bfsize1, Nout, "*")
Ntot1 <- sum(Nout1)
Nout2 <- Nout - Nout1
Ntot2 <- sum(Nout2)
## Check if fixed phi and omg
if (!all(sapply(runs, function(x) length(x$FIXED$phi) == 1))) {
stop("Each input runs must have a fixed value phi.")
}
if (!all(sapply(runs, function(x) length(x$FIXED$omg) == 1))) {
stop("Each input runs must have a fixed value omg.")
}
## Extract data and model
nm_DATA <- c("response", "weights", "modelmatrix", "offset", "locations",
"longlat")
nm_MODEL <- c("family", "corrfcn", "betm0", "betQ0", "ssqdf", "ssqsc",
"tsqdf", "tsqsc", "dispersion")
DATA <- runs[[1]]$DATA[nm_DATA]
MODEL <- runs[[1]]$MODEL[nm_MODEL]
if (nruns > 1) {
for (i in 2:nruns) {
if (!identical(runs[[i]]$DATA[nm_DATA], DATA)) {
stop("MCMC chains don't all correspond to the same data.")
}
if (!identical(runs[[i]]$MODEL[nm_MODEL], MODEL)) {
stop("MCMC chains don't all correspond to the same model.")
}
}
}
y <- DATA$response
n <- as.integer(length(y))
l <- DATA$weights
F <- DATA$modelmatrix
offset <- DATA$offset
p <- NCOL(F)
loc <- DATA$locations
dm <- sp::spDists(loc, longlat = DATA$longlat)
family <- MODEL$family
## ifam <- .geoBayes_family(family)
corrfcn <- MODEL$corrfcn
icf <- .geoBayes_correlation(corrfcn)
betm0 <- MODEL$betm0
betQ0 <- MODEL$betQ0
ssqdf <- MODEL$ssqdf
ssqsc <- MODEL$ssqsc
tsqdf <- MODEL$tsqdf
tsqsc <- MODEL$tsqsc
dispersion <- MODEL$dispersion
## Choose sample
getsample <-
transfsample(runs,
list(response = y, family = family), transf)
sample <- matrix(unlist(getsample$sample), n)
itr <- getsample$itr
transf <- getsample$transf
real_transf <- getsample$real_transf
ifam <- getsample$ifam
## Skeleton points
phi_pnts <- as.double(sapply(runs, function(r) r$FIXED$phi))
omg_pnts <- as.double(sapply(runs, function(r) r$FIXED$omg))
nu_pnts <- as.double(sapply(runs, function(r) r$FIXED$linkp_num))
if (.geoBayes_corrfcn$needkappa[icf]) {
kappa_pnts <- sapply(runs, function(r) r$FIXED$kappa)
kappa_pnts <- .geoBayes_getkappa(kappa_pnts, icf)
} else {
kappa_pnts <- rep(0, nruns)
}
bfroutine <- paste0("bfsp_", real_transf)
if (nruns == 1) {
MCMC <- runs[[1]]$MCMC
out <- list(logbf = 1, logLik1 = MCMC$logLik[1:Ntot1],
logLik2 = MCMC$logLik[-(1:Ntot1)],
isweights = rep.int(0, Ntot2),
controlvar = matrix(1, Ntot2, 1),
z = sample[[1]][, -(1:Ntot1), drop = FALSE],
N1 = Nout1, N2 = Nout2,
betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf,
ssqsc = ssqsc, tsqdf = tsqdf, tsqsc = tsqsc,
dispersion = dispersion, response = y,
weights = l, modelmatrix = F, offset = offset,
locations = loc, longlat = DATA$longlat,
distmat = dm,
family = family,
referencebf = 0, corrfcn = corrfcn, transf = transf,
real_transf = real_transf, itr = itr,
pnts = list(nu = nu_pnts, phi = phi_pnts, omg = omg_pnts,
kappa = kappa_pnts))
return(out)
}
## Split the sample
sel1 <- rep(rep(c(TRUE, FALSE), nruns), rbind(Nout1, Nout2))
z1 <- sample[, sel1, drop = FALSE]
z2 <- sample[, !sel1, drop = FALSE]
logbf <- numeric(nruns)
lglk1 <- matrix(0., Ntot1, nruns)
lglk2 <- matrix(0., Ntot2, nruns)
zcv <- matrix(0., Ntot2, nruns)
weights <- numeric(Ntot2)
if (ifam == 0) {
tsq <- tsqsc
} else {
tsq <- dispersion
}
RUN <- .Fortran(bfroutine,
weights = weights,
zcv = zcv,
logbf = logbf,
lglk1 = lglk1,
lglk2 = lglk2,
as.double(phi_pnts), as.double(omg_pnts),
as.double(nu_pnts), as.double(z1),
as.integer(Nout1), as.integer(Ntot1),
as.double(z2), as.integer(Nout2), as.integer(Ntot2),
as.double(y), as.double(l), as.double(F), as.double(offset),
as.double(dm), as.double(betm0), as.double(betQ0),
as.double(ssqdf), as.double(ssqsc), max(tsqdf, 0),
as.double(tsq), as.double(kappa_pnts), as.integer(icf),
as.integer(n), as.integer(p), as.integer(nruns),
as.integer(ifam), as.integer(imeth), as.integer(itr),
PACKAGE = "geoBayes")
refbf <- RUN$logbf[reference]
logbf <- RUN$logbf - refbf
if (Ntot2 > 0) {
weights <- RUN$weights
lglk2 <- RUN$lglk2
zcv <- RUN$zcv
} else {
weights <- lglk2 <- zcv <- NULL
}
out <- list(logbf = logbf, logLik1 = RUN$lglk1, logLik2 = lglk2,
isweights = weights, controlvar = zcv, sample2 = z2,
N1 = Nout1, N2 = Nout2,
betm0 = betm0,
betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc, tsqdf = tsqdf,
tsqsc = tsqsc, dispersion = dispersion, response = y, weights = l,
modelmatrix = F, offset = offset,
locations = loc, distmat = dm, family = family,
corrfcn = corrfcn, transf = transf,
real_transf = real_transf, itr = itr,
pnts = list(nu = nu_pnts, phi = phi_pnts, omg = omg_pnts,
kappa = kappa_pnts))
out
}
|
f5e361091596a72942cea2a64560cafc096886ff
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.compute/man/ec2_register_transit_gateway_multicast_group_sources.Rd
|
cbe9e28000a7b464b382febcffd2ae3af451aaae
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,343
|
rd
|
ec2_register_transit_gateway_multicast_group_sources.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_register_transit_gateway_multicast_group_sources}
\alias{ec2_register_transit_gateway_multicast_group_sources}
\title{Registers sources (network interfaces) with the specified transit
gateway multicast group}
\usage{
ec2_register_transit_gateway_multicast_group_sources(
TransitGatewayMulticastDomainId,
GroupIpAddress = NULL,
NetworkInterfaceIds,
DryRun = NULL
)
}
\arguments{
\item{TransitGatewayMulticastDomainId}{[required] The ID of the transit gateway multicast domain.}
\item{GroupIpAddress}{The IP address assigned to the transit gateway multicast group.}
\item{NetworkInterfaceIds}{[required] The group sources' network interface IDs to register with the transit
gateway multicast group.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Registers sources (network interfaces) with the specified transit gateway multicast group.
See \url{https://www.paws-r-sdk.com/docs/ec2_register_transit_gateway_multicast_group_sources/} for full documentation.
}
\keyword{internal}
|
bc040a220a429816b8bf3ec8d26b6d9841eda07c
|
a7b930d1da4595b48c4a15043350fd99f6e96923
|
/get_con_sig_DG.R
|
c0136633465120babf074d48f98b2bf3199e6358
|
[] |
no_license
|
lazywolf007/Deepm6A
|
f84e64854d392aadd394c585888c769d8af4789a
|
95d704d6042e1ebc5365c77b5885eeaa893fd7ef
|
refs/heads/master
| 2021-09-23T10:58:50.013295
| 2018-09-21T18:18:58
| 2018-09-21T18:18:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,692
|
r
|
get_con_sig_DG.R
|
## run rwr ----------------------------------------------------------------------------
## Rank diseases for a set of significant genes via random walk with restart (RWR)
## and keep only the top-n diseases that are rarely reached on 100 randomised
## networks (empirical p < 0.05).  Writes the surviving disease/gene table to
## "disease/top_<n>_disgene10.xls" under `filepath` and returns it.
##
## filepath: working directory for one network (contains ag0.RData, rwrmm.RData
##           and the disease/randomm/ permutation files).
## n:        number of top-ranked diseases to test.
##
## NOTE(review): depends on objects created by the load()/source() calls below --
## `id` and `sigdis` (presumably gene ids and a gene-disease table; confirm) and
## the functions `rwrh()` / `rwrhr()` from rwrhFun.R.
sigDG_test <- function(filepath, n) {
  setwd(filepath)   # side effect: changes the working directory and never restores it
  load("F:/MeRIPSinglebase/review1/result/Pro_Dis.RData")
  load("ag0.RData")
  source("F:/MeRIPSinglebase/metdb2/process/rwrhFun.R")
  mmpath <- "rwrmm.RData"
  x <- read.table("F:/MeRIPSinglebase/review1/result/gene_id1_new.txt")
  siggene <- as.character(x$V1)
  # Keep only the significant genes / disease rows present in this network.
  siggene <- siggene[is.element(siggene, id)]
  sigdis <- sigdis[is.element(sigdis$gene, id),]
  # RWR ranking on the real network.
  re <- rwrh(siggene, mmpath, sigdis$doid)
  # RWR ranking on each of the 100 randomised networks.
  re0 <- list()
  filepath <- "disease/randomm"   # NOTE(review): shadows the function argument from here on
  for(i in 1:100) {
    mmpath <- paste(filepath, "/rwrmm", i, ".RData", sep = "")
    re0[[i]] <- rwrhr(siggene, mmpath, sigdis$doid)
    print(i)
  }
  # Pool the top-n diseases of every random run (element 2 of each result --
  # presumably the ranked disease list; confirm against rwrhFun.R).
  randomdis <- lapply(re0, "[[", 2)
  randomdis <- lapply(randomdis, "[", 1:n)
  randomdis <- unlist(randomdis)
  topdis <- re$topdis[1:n]
  # Empirical p-value: fraction of the 100 random runs containing the disease.
  pval <- lapply(as.list(topdis), function(x, y){sum(is.element(y,x))/100}, randomdis)
  re$topdis[1:n][unlist(pval) < 0.05]   # NOTE(review): result discarded -- duplicated by the next line
  topdis <- re$topdis[1:n][unlist(pval) < 0.05]
  # Collapse the gene lists of the surviving diseases into comma-separated strings.
  topdisgene <- sigdis[is.element(sigdis$disname, topdis),]
  topdisgene <- tapply(as.character(topdisgene$gene), as.character(topdisgene$disname), c)
  topdisgene <- lapply(topdisgene, function(x){paste(x, collapse = ",")})
  topdisgene <- data.frame(dis = names(topdisgene), gene = unlist(topdisgene))
  # Re-order rows to match the significance ranking in `topdis`.
  topdisgene <- data.frame(dis = topdis, gene = topdisgene$gene[match(topdis, topdisgene$dis)])
  write.table(topdisgene, file = paste("disease/top_", n, "_disgene10.xls", sep = ""),
              quote = F, col.names = T, row.names = F, sep = "\t")
  return(topdisgene)
}
## get con sig dis ----------------------------------------------------------------------------------------------
## Run the significance test on four PPI networks and keep the diseases that are
## significant in all of them (the "consensus" set); write those to disk.
n <- 10
filepath <- "F:/MeRIPSinglebase/review1/data/random/biogrid"
biogrid <- sigDG_test(filepath, n)
filepath <- "F:/MeRIPSinglebase/review1/data/random/iRef"
iRef <- sigDG_test(filepath, n)
filepath <- "F:/MeRIPSinglebase/review1/data/random/hint"
hint <- sigDG_test(filepath, n)
filepath <- "F:/MeRIPSinglebase/review1/data/random/multinet"
multinet <- sigDG_test(filepath, n)
# Count how many networks reported each disease; keep diseases reported by all 4.
id <- c(as.character(biogrid$dis), as.character(iRef$dis),
        as.character(hint$dis), as.character(multinet$dis))
ind <- tapply(id, id, length)
condis <- names(ind)[ind == 4]
condis <- biogrid[is.element(biogrid$dis, condis),]   # gene lists taken from the biogrid result
write.table(condis, file = paste("F:/MeRIPSinglebase/review1/result/top_", n, "_disgene.xls", sep = ""),
            quote = F, col.names = T, row.names = F, sep = "\t")
|
4d4bd54e94352fde25d751ca1b76afadc5e6263e
|
2b854ae16112437bd5439bcce6ceebe84252e2d2
|
/man/sim.stpp.Rd
|
56f00b60d86cab8ec37729b4b2debf5e1e61f7cc
|
[] |
no_license
|
stpp-GitHub-community/stpp
|
c5c8b06bda8812e076bf99de8e015823f8d9d004
|
62921b83ccbd82c28bcdac05a6f39df578273254
|
refs/heads/master
| 2023-07-08T22:52:21.815171
| 2023-06-30T16:29:58
| 2023-06-30T16:29:58
| 98,971,189
| 12
| 3
| null | 2019-05-03T17:11:17
| 2017-08-01T07:10:56
|
R
|
UTF-8
|
R
| false
| false
| 2,014
|
rd
|
sim.stpp.Rd
|
\name{sim.stpp}
\alias{sim.stpp}
\title{Generate spatio-temporal point patterns}
\description{
Generate one (or several) realisation(s) of a spatio-temporal point process in a region \eqn{S\times T}{S x T}.
}
\usage{
sim.stpp(class="poisson", s.region, t.region, npoints=NULL,
nsim=1, ...)
}
\arguments{
\item{class}{Must be chosen among "poisson", "cluster", "cox", "infectious" and "inhibition".}
\item{s.region}{Two-column matrix specifying polygonal region containing
all data locations.
If \code{s.region} is missing, the unit square is considered.}
\item{t.region}{Vector containing the minimum and maximum values of
the time interval.
If \code{t.region} is missing, the interval \eqn{[0,1]}{[0,1]} is considered.}
\item{npoints}{Number of points to simulate.}
\item{nsim}{Number of simulations to generate. Default is 1.}
\item{...}{Additional parameters related to the \code{class} parameter.
See \code{\link{rpp}} for the Poisson process; \code{\link{rpcp}} for the Poisson
cluster process; \code{\link{rlgcp}} for the Log-Gaussian Cox process;
\code{\link{rinter}} for the interaction (inhibition or contagious)
process and \code{\link{rinfec}} for the infectious process.}
}
\value{
A list containing:
\item{xyt}{Matrix (or list of matrices if \code{nsim}>1)
containing the points \eqn{(x,y,t)}{(x,y,t)} of the simulated point pattern.
\code{xyt} (or any element of the list if \code{nsim}>1) is an object
of the class \code{stpp}.}
\item{s.region, t.region}{Parameters passed in argument.}
}
\author{
Edith Gabriel <edith.gabriel@inrae.fr>
}
\seealso{
\code{\link{rpp}}, \code{\link{rinfec}}, \code{\link{rinter}},
\code{\link{rpcp}} and \code{\link{rlgcp}} for the simulation of
Poisson, infectious, interaction, Poisson cluster and log-gaussian
Cox processes respectively; and \code{\link{plot.stpp}},
\code{\link{animation}} and \code{\link{stan}} for plotting space-time
point patterns. }
|
32e6e5782ce917e82704c1e03b154ffaada1dc14
|
2b3826d7b31c343d1512f062532d6d251e2d9b75
|
/Frequentist-two-stage.R
|
e29f31e2cf4d3bed659c7597daa288d7fba6a0b0
|
[] |
no_license
|
wwyoo/Bayes-two-stage
|
00915d5cd16993a97478d871aaa4da8de8e5580a
|
4a3cc936484d3f5a4f0f1974439f4981afdf4c24
|
refs/heads/master
| 2021-04-26T23:05:29.038464
| 2018-03-05T21:55:00
| 2018-03-05T21:55:00
| 123,930,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,925
|
r
|
Frequentist-two-stage.R
|
#This code implements the frequentist two-stage procedure by Belitser et al. (2012) Optimal
#two-stage procedures for estimating location and size of the
#maximum of a multivariate regression function, in the Annals of Statistics 44, number 6, pages 2850-2876
#The original code was supplied by one of the authors of the paper, and I have modified it for the purpose
#of comparing their procedure with our proposed Bayesian two-stage method
#################################################################################################################
#par(mai=c(0,0.1,0,0.1))
#choose optimal smoothning (bandwidth) of loess by leave-one-out cross validation
## Leave-one-out cross-validation error of a local-linear loess surface.
##
## W:         data frame with columns x, y (predictors) and z (response).
## bandwidth: span passed to loess().
##
## Returns the mean squared leave-one-out prediction error.  NOTE(review):
## predict.loess() returns NA for held-out points outside the range of the
## remaining data, which makes the mean NA as well -- unchanged from the
## original behaviour; confirm whether na.rm is wanted upstream.
optimalband <- function(W, bandwidth){
  n_obs <- nrow(W)
  mse <- numeric(n_obs)          # preallocated; seq_len() is safe even when n_obs == 0
  for (i in seq_len(n_obs)) {
    # Hold out observation i and fit a local-linear surface to the rest.
    trainz <- W$z[-i]
    trainy <- W$y[-i]
    trainx <- W$x[-i]
    m <- loess(trainz ~ trainx + trainy, degree = 1, span = bandwidth)
    # Predict at the held-out point; newdata names must match the formula terms.
    testobs <- data.frame(trainx = W$x[i], trainy = W$y[i])
    fit <- predict(m, newdata = testobs)
    mse[i] <- (fit - W$z[i]) ^ 2
  }
  mean(mse)
}
##################################################################################################################
## Monte Carlo study of the frequentist two-stage maximum-location estimator
## (Belitser et al., 2016).  Stage 1: loess fit on a uniform grid to locate a
## rough argmax.  Stage 2: quadratic regression on a 3x3 design around it.
# error standard deviation:
sigma <- 0.1
# true regression function (evaluated on [0,1]^2, internally rescaled to [-1,1]^2):
f <- function(x, y){
  x <- 2*x - 1
  y <- 2*y - 1
  (1+exp(-5*(x)^2-2*(y)^4))*(cos(4*x)+cos(5*y))
}
# true maximiser mu = (0.5, 0.5) and the true maximum value M0:
xfmax <- 0.5
yfmax <- 0.5
M0 <- f(xfmax, yfmax)
# initial, uniformly spaced data go into the data frame obsframe:
# total n1*n1 observations
nrep <- 1000
mseF2mu <- rep(0,nrep)
mseF2M <- rep(0, nrep)
n1 <- 30
x <- seq(0,1, length=n1)
y <- seq(0,1, length=n1)
obsframe <- expand.grid(x=x, y=y)
# candidate bandwidths for the (currently disabled) loess CV search:
band <- seq(0.01,0.5,by=0.01)
mseband <- rep(0, length(band))
#set.seed(100)
# obsframe$z <- f(obsframe$x, obsframe$y) + sigma*rnorm(n1*n1)
#for(i in 1:length(band)){
#  bandwidth <- band[i]
#  mseband[i] <- optimalband(W = obsframe, bandwidth = bandwidth)
#  print(i)
#}
# candidate stage-two localisation offsets (delta grid); the search loop over
# them is commented out and fixed values are used instead:
deltacan1 <- seq(0, 0.2, by = 0.01)[-1]
deltacan2 <- seq(0, 0.2, by = 0.01)[-1]
#deltacan <- expand.grid(deltacan1, deltacan2)
deltacan <- cbind(deltacan1, deltacan2)
ndelta <- nrow(deltacan)
msedeltaF2 <- rep(0, ndelta)
n3 <- 96    # replications per design point in stage two
#for(j in 1:ndelta){ # to choose optimal localization
delta1 <- 0.06
delta2 <- 0.06
set.seed(100)
for(i in 1:nrep){
  # fresh noisy observations on the stage-one grid:
  obsframe$z <- f(obsframe$x, obsframe$y) + sigma*rnorm(n1*n1)
  # local linear regression to find the stage-one estimator (grid argmax of the fit):
  m <- loess(obsframe$z ~obsframe$x + obsframe$y, degree =1, span=0.02)
  fit <- predict(m)
  #persp(x,y, matrix(fit, n1, n1), theta = 50, phi = 20, expand = 0.5, zlab="", xlab="x", ylab="y", main="") -> res
  maxindex <- which(fit==max(fit))
  xmutilde <- obsframe$x[maxindex]
  ymutilde <- obsframe$y[maxindex]
  #points(trans3d(xmutilde,ymutilde, f(xfmax, yfmax), pmat=res), col=2, type = "b", pch = 19)
  # stage two: 3x3 design of new points centred at the stage-one estimate
  x0 <- xmutilde
  y0 <- ymutilde
  x1 <- x0+delta1
  y1 <- y0
  x2 <- x0
  y2 <- y0+delta2
  x3 <- x0-delta1
  y3 <- y0
  x4 <- x0
  y4 <- y0-delta2
  x5 <- x0+delta1
  y5 <- y0-delta2
  x6 <- x0+delta1
  y6 <- y0+delta2
  x7 <- x0-delta1
  y7 <- y0+delta2
  x8 <- x0-delta1
  y8 <- y0-delta2
  # regressors of the full quadratic surface (each design point repeated n3 times):
  p <- rep(c(x0, x1, x2, x3, x4, x5, x6, x7, x8), n3)
  q <- rep(c(y0, y1, y2, y3, y4, y5, y6, y7, y8), n3)
  r <- rep(c((x0)^2, (x1)^2, (x2)^2, (x3)^2, (x4)^2, (x5)^2, (x6)^2, (x7)^2, (x8)^2), n3)
  s <- rep(c((y0)^2, (y1)^2, (y2)^2, (y3)^2, (y4)^2, (y5)^2, (y6)^2, (y7)^2, (y8)^2), n3)
  t <- rep(c(x0*y0, x1*y1, x2*y2, x3*y3, x4*y4, x5*y5, x6*y6, x7*y7, x8*y8), n3)
  # new noisy observations at the stage-two design points:
  newz <- f(p,q) + sigma*rnorm(9*n3)
  #points(trans3d(c(x0, x1, x2, x3, x4, x5, x6, x7, x8), c(y0, y1, y2, y3, y4, y5, y6, y7, y8), newz, pmat=res), col=8)
  a <- lm(newz ~ p+q+r+s+t)$coefficients
  # find muhat, the argmax of the fitted quadratic surface (stationary point of
  # the quadratic form: solve the 2x2 linear system from the gradient):
  A <- matrix(data=c(2*a[4], a[6], a[6], 2*a[5]), nrow=2, ncol=2, byrow=TRUE)
  b <- -c(a[2], a[3])
  u <- solve(A,b)
  xmuhat <- as.numeric(u[1])
  ymuhat <- as.numeric(u[2])
  # plot of quadratic surface:
  #g <- function(k,l) a[1] + a[2]*k + a[3]*l + a[4]*k^2+a[5]*l^2+a[6]*k*l
  #gframe <- expand.grid(v=v, w=w)
  #gframe$g <- g(gframe$v, gframe$w)
  #persp(v,w, matrix(gframe$g, 51, 51), theta = 30, phi = 30, expand = 0.5, zlab="", xlab="x", ylab="y", main="") -> res
  # plot the final estimator:
  #points(trans3d(xmuhat,ymuhat, g(xmuhat, ymuhat), pmat=res), col="green", pch=19)
  # Euclidean distance of the estimate from the true maximiser:
  mseF2mu[i] <- sqrt((xmuhat - xfmax)^2 + (ymuhat - yfmax)^2) # produces third box-plot in Figure 2 of the paper
  #mseF2M[i] <- abs(a[1] + a[2]*xmuhat + a[3]*ymuhat + a[4]*xmuhat^2 + a[5]*ymuhat^2 + a[6]*xmuhat*ymuhat - M0)
  print(i)
}
#msedeltaF2[j] <- mean(mseF2)
#print(j)
#}
|
4d4f438884b895226e74fe1cc6afd9b1c4fc1ecf
|
909041f1adf263aebb16c007915c24ba00a32865
|
/matrices.r
|
1a14023c1dcd76948400457db367954ed3655882
|
[] |
no_license
|
Lyndon0199/Lyndon0199
|
1473965e21674e77096d6939490e72da6a56e9d8
|
d7e8d8f6c22d53f7db4856250dbe223dd925bf06
|
refs/heads/master
| 2020-04-29T18:21:51.259133
| 2019-03-20T19:41:18
| 2019-03-20T19:41:18
| 176,321,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 421
|
r
|
matrices.r
|
c(4,2,-8) # Creating a vector without assigning it to a variable
## [1] 4 2 -8
## ----------------
## Different ways of assigning a vector to a variable
u <- c(4,2,-8) # Using the <- operator
c(4, 2, -8) -> v # Using the -> operator
# Using the assign function:
assign("w", c(4, 2, -8))
p = c(4, 2, -8) # Using the = operator
print(u); print(v); print(w); print(p)
## [1] 4 2 -8
## [1] 4 2 -8
## [1] 4 2 -8
## [1] 4 2 -8
|
b0dfa037a8bdd0e491fa6539c25ef6cd1d397526
|
bbb29a9b577c51d93bff33cd37a9bbd97ad1bcfc
|
/man/is.Date.Rd
|
950edc39e97b5d4f11b19eb50f61143857e0bb3d
|
[] |
no_license
|
cran/popEpi
|
8b656a4b7c90a2dea7af6116ac84dee0406f07ce
|
355adafe4d6b9f351d7a17d1e0a839095f7fe417
|
refs/heads/master
| 2023-08-31T11:05:34.587550
| 2023-08-23T14:30:02
| 2023-08-23T15:31:02
| 48,085,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 653
|
rd
|
is.Date.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{is.Date}
\alias{is.Date}
\title{Test if object is a \code{Date} object}
\usage{
is.Date(obj)
}
\arguments{
\item{obj}{object to test on}
}
\value{
`TRUE` if `obj` is of class `"Date"` or `"IDate"`.
}
\description{
Tests if an object is a \code{Date} object and returns
a logical vector of length 1. \code{IDate} objects are also
\code{Date} objects, but \code{date} objects from package \pkg{date}
are not.
}
\seealso{
\code{\link{get.yrs}}, \code{\link{is_leap_year}}, \code{\link{as.Date}}
}
\author{
Joonas Miettinen
}
|
fd59f96dfcb8e55552ec4343e9daaf0df67d9f89
|
3a1e801b771f286e691e5acde34e0e73841b5756
|
/mpg-envinfosys-teams-2018-rs_18_mcfest-master/src/017_plots.R
|
0e0716f6d2cb61d9fb27ff56485a60e78592b897
|
[] |
no_license
|
Muesgen/Mcfest_Remote_Sensing_GIS
|
6cf26150e3441834271644eb215b6f732fa714ad
|
c8f0c291d765eda140f9faea342f2a0e8fbaaf6e
|
refs/heads/master
| 2021-01-16T07:46:17.005598
| 2020-02-25T15:10:38
| 2020-02-25T15:10:38
| 243,029,197
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,527
|
r
|
017_plots.R
|
# Resolve the project root, falling back to the Windows lab path when the
# COMPUTERNAME environment variable equals "PCRZP".
root_folder <- envimaR::alternativeEnvi(root_folder = "~/edu/mpg-envinsys-plygrnd", alt_env_id = "COMPUTERNAME",
                                        alt_env_value = "PCRZP", alt_env_root_folder = "F:\\edu\\mpg-envinsys-plygrnd")
# Project-wide setup: defines the `envrmt` path list used throughout this script.
source(paste0(root_folder, "/mpg-envinfosys-teams-2018-rs_18_mcfest/src/000_setup.R"))
#### Confusion Matrix ####
library(scales)
library(ggplot2)
# Render a caret confusion matrix as a ggplot tile map.
#
# m:   a caret::confusionMatrix object (external validation result).
# mod: the fitted caret model; internal CV accuracy/kappa are read from
#      mod$results.
# Returns a ggplot object whose title compares external vs internal
# accuracy and kappa, formatted as percentages.
ggplotConfusionMatrix <- function(m, mod) {
  pct <- percent_format()
  title_text <- paste(
    "External Accuracy", pct(m$overall[1]),
    "Internal Accuracy", pct(max(mod$results$Accuracy)), "\n",
    "External Kappa", pct(m$overall[2]),
    "Internal Kappa", pct(max(mod$results$Kappa))
  )
  # log(Freq) drives the fill so that large diagonal counts do not wash out
  # the off-diagonal cells; the raw count is printed inside each cell.
  freq_df <- as.data.frame(m$table)
  ggplot(freq_df, aes(x = Reference, y = Prediction)) +
    geom_tile(aes(fill = log(Freq)), colour = "white") +
    scale_fill_gradient(low = "white", high = "steelblue") +
    geom_text(aes(x = Reference, y = Prediction, label = Freq)) +
    theme(
      legend.position = "none",
      plot.title = element_text(hjust = 0.5, face = "bold", size = 20),
      axis.text = element_text(size = 18),
      axis.title.x = element_text(size = 20, margin = margin(t = 10, r = 0, b = 0, l = 0)),
      axis.title.y = element_text(size = 20, margin = margin(t = 0, r = 20, b = 0, l = 0)),
      title = element_text(size = 24),
      panel.background = element_rect(fill = "#d8d8d8", colour = "#d8d8d8", size = 0.5, linetype = "solid"),
      plot.background = element_rect(fill = "#d8d8d8")
    ) +
    ggtitle(title_text)
}
# Tree-species class labels used to relabel the confusion-matrix dimensions.
names <- c("Beech", "Douglas fir", "Spruce", "Larch", "Oak")
# Model 8: load the fitted caret model and its confusion matrix, relabel, plot.
mod8 <- readRDS(paste0(envrmt$path_data_training, "mod8.rds"))
conf8 <- readRDS(paste0(envrmt$path_data_training, "confmod8.rds"))
colnames(conf8$table) <- names
rownames(conf8$table) <- names
ggplotConfusionMatrix(conf8, mod8)
# Model 9: same procedure.
mod9 <- readRDS(paste0(envrmt$path_data_training, "mod9.rds"))
conf9 <- readRDS(paste0(envrmt$path_data_training, "confmod9.rds"))
colnames(conf9$table) <- names
rownames(conf9$table) <- names
x <- ggplotConfusionMatrix(conf9, mod9)
# Write both confusion-matrix plots to PNG files.
png(paste0(envrmt$path_data_plots, "conf8.png"), res=200, width=10, height = 8, units = "in")
print(ggplotConfusionMatrix(conf8, mod8))
dev.off()
png(paste0(envrmt$path_data_plots, "conf9.png"), res=200, width=10, height = 8, units = "in")
print(ggplotConfusionMatrix(conf9, mod9))
dev.off()
#### Species Acc ####
# Per-species accuracy columns from the crown-segment shapefiles.
# NOTE(review): the columns are selected by position (c(14, 16) / c(16, 17)) --
# fragile against schema changes; confirm against the shapefile attributes.
csegs8 <- raster::shapefile(paste0(envrmt$path_data_mof, "cseg_stats_mod8.shp"))
csegs9 <- raster::shapefile(paste0(envrmt$path_data_mof, "cseg_stats_mod9.shp"))
unique(csegs8@data[which(csegs8@data$spec %in% unique(csegs8@data$spec)[1:5]), c(14, 16)])
unique(csegs9@data[which(csegs9@data$spec %in% unique(csegs9@data$spec)[1:5]), c(16, 17)])
#### Var Importance ####
# Variable importance per resample; a "mean" column over the first 5 columns is
# appended row by row, then the table is written as a semicolon-separated CSV.
mod8 <- readRDS(paste0(envrmt$path_data_training, "mod8.rds"))
conf8 <- readRDS(paste0(envrmt$path_data_training, "confmod8.rds"))
x <- caret::varImp(mod8)
for (i in seq(nrow(x$importance))){
  x$importance$mean[i] <- rowMeans(x$importance[i,1:5])
}
write.table(x = x$importance, file = paste0(envrmt$path_data_training , "conf8imp.csv"), sep = ";", dec = ".")
mod9 <- readRDS(paste0(envrmt$path_data_training, "mod9.rds"))
conf9 <- readRDS(paste0(envrmt$path_data_training, "confmod9.rds"))
x <- caret::varImp(mod9)
for (i in seq(nrow(x$importance))){
  x$importance$mean[i] <- rowMeans(x$importance[i,1:5])
}
write.table(x = x$importance, file = paste0(envrmt$path_data_training , "conf9imp.csv"), sep = ";", dec = ".")
|
90ddb9b105e28ee9b457e901f9d07141802eecf6
|
b0a4381263dc3eb2e5b30035d74ab2169e29ea5e
|
/3_montecarlo/taskPath/BIN/R/analysis.R
|
9fdf678f95fc9c13f0e4e0dbf066d431d8b1d2c8
|
[] |
no_license
|
ynagae1tryeting/lab
|
534eec361180287b68d26447286d1f0014e98a09
|
a66c8750bf5d3dc91866113ecea6b7b3e8f48c36
|
refs/heads/master
| 2021-01-12T03:47:28.073605
| 2019-05-09T07:14:09
| 2019-05-09T07:14:09
| 78,257,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,025
|
r
|
analysis.R
|
source("./function.R")
library(lattice)
library(tidyverse)
# Target Sn fraction of the simulated alloy lattice.
percent <- 0.2
# Grow a square window around lattice site x = (i, j) until the cluster is
# surrounded by Si, then report the cluster's atom counts and Sn fraction.
#
# x:    integer vector (row, col) of the cluster centre.
# d:    the cell matrix (used only for its size, nrow(d)); the lattice has
#       periodic boundaries.
# percent: NOTE(review): this parameter is never used inside the function.
# thr:  growth stops when enlarging the window by one ring adds at most `thr`
#       Sn atoms.
#
# NOTE(review): the site lookups below read the GLOBAL melted table `D`
# (columns: row, col, value with Si*2 = -1, SiSn = 0, Sn*2 = 1), not the `d`
# argument -- the caller must have defined D <- melt(d) beforehand; confirm.
#
# Returns a one-row data.frame: N_Si, N_Sn, total atoms, and Sn percentage.
centers2percent <- function(x, d, percent, thr){
  for(N in 0:(nrow(d)/2)){
    #message(paste(N, " / ", nrow(d)/2, sep=""))
    # Extract the symbols stored at the window's coordinates.
    i <- x[1]
    j <- x[2]
    # Inner window of half-width N and outer window of half-width N+1.
    i_1 <- seq(i-N, i+N)
    j_1 <- seq(j-N, j+N)
    i_2 <- seq(i-N-1, i+N+1)
    j_2 <- seq(j-N-1, j+N+1)
    # Wrap indices that cross the periodic boundary back into 1..nrow(d).
    if(sum(i_1[i_1 > nrow(d)]) != 0){
      i_1[i_1 > nrow(d)] <- i_1[i_1 > nrow(d)] - nrow(d)
    }
    if(sum(j_1[j_1 > nrow(d)]) != 0){
      j_1[j_1 > nrow(d)] <- j_1[j_1 > nrow(d)] - nrow(d)
    }
    if(sum(i_2[i_2 > nrow(d)]) != 0){
      i_2[i_2 > nrow(d)] <- i_2[i_2 > nrow(d)] - nrow(d)
    }
    if(sum(j_2[j_2 > nrow(d)]) != 0){
      j_2[j_2 > nrow(d)] <- j_2[j_2 > nrow(d)] - nrow(d)
    }
    # Count atoms in the inner window: each site value encodes a dimer
    # (-1 = two Si, 0 = one Si + one Sn, 1 = two Sn).
    index <- 0
    for(I in i_1){
      for(J in j_1){
        index <- c(index, D[((D[,1]==c(I)) * (D[,2]==J))==1 ,3])
      }
    }
    index <- index[-1]
    N_Sn_1 <- sum(index==0) * 1 + sum(index==1) * 2
    N_Si_1 <- sum(index==-1) * 2 + sum(index==0) * 1
    # Same counts for the outer window.
    index <- 0
    for(I in i_2){
      for(J in j_2){
        index <- c(index, D[((D[,1]==c(I)) * (D[,2]==J))==1 ,3])
      }
    }
    index <- index[-1]
    N_Sn_2 <- sum(index==0) * 1 + sum(index==1) * 2
    N_Si_2 <- sum(index==-1) * 2 + sum(index==0) * 1
    # When the outer ring adds (almost) no Sn atoms, the cluster is judged to
    # be surrounded by Si and the growth loop ends.
    if( (N_Sn_2 - N_Sn_1) <= thr ){
      break
    }
  }
  # Return the atom counts of the cluster.
  res <- data.frame(
    "N_Si"=N_Si_1,
    "N_Sn"=N_Sn_1,
    "atoms"=N_Si_1 + N_Sn_1,
    "percent"=(N_Sn_1 / (N_Sn_1 + N_Si_1))
  )
  return(res)
}
# For every saved Monte Carlo snapshot (every 10th step up to 200): locate Sn
# clusters with k-means and write their size/composition statistics to ./ana/.
for(N in seq(10, 200, 10)){
  message(paste("STEP: ", N, sep=""))
  # d: the cell matrix after N Monte Carlo steps.
  d <- as.matrix(read.csv(paste("./cell/",percent*100,"_percent/step",N,".csv", sep="")))
  colnames(d) <- NULL
  # D: the matrix in long form (row, col, value); centers2percent reads this global.
  D <- melt(d)
  # Si*2: -1
  # SiSn: 0
  # Sn*2: 1
  # D[((D[,1]==c(1,2,3)) * (D[,2]==j))==1 ,3]
  D_ <- D[D[,3]!=-1,] # keep only sites that contain at least one Sn atom
  # Search for cluster centres; the expected cluster count is derived from the
  # overall Sn fraction of the lattice.
  k <- kmeans(
    D_[,1:2],
    centers=round(nrow(d)*ncol(d)*percent,0)/2,
    iter.max = 10000,
    nstart = 1)
  # Helper: look up the site value of matrix d at coordinates x = (i, j).
  f <- function(x, d){
    i <- x[1]
    j <- x[2]
    return(d[i, j])
  }
  # Round centres to lattice coordinates and drop those sitting on pure-Si sites.
  centers <- unique(round(k$centers,0)[,1:2])
  centers <- data.frame(
    centers,
    apply(centers, 1, f, d)
  )
  centers <- centers[centers[,3]!=-1,1:2]
  # Grow a window around every centre and collect the per-cluster statistics.
  r <- apply(centers, 1, centers2percent, d, percent, 3)
  label <- matrix(unlist(r),
                  ncol=4, byrow=T)
  write.csv(label, paste("./ana/",percent*100,"_percent/step", N, ".csv", sep=""), quote=F, row.names=F)
}
|
e0f84804abcf3a9bc5650bca84179a9bd47ab0ea
|
41f4d1678e0ffdcab68f16d67a2518f6e0bc9615
|
/Exploratory Data Analysis/Proyect 2/Load_Data.R
|
6afcfbd433e52d9105677885af8d1a417bf69756
|
[] |
no_license
|
cjferba/-datasciencecoursera-repo
|
659a2dd30072655413ccc6d3af30f2f431e55303
|
1e628074c7af985002233155ffef1cbc836f784a
|
refs/heads/master
| 2021-01-22T14:02:07.923763
| 2015-05-26T19:19:43
| 2015-05-26T19:19:43
| 29,011,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 428
|
r
|
Load_Data.R
|
library(ggplot2)
library(plyr)

# Load the NEI / SCC datasets only when they are not already in the workspace
# (the files are large, so skip re-reading on repeated source()).
if (!"neiData" %in% ls()) {
  neiData <- readRDS("./data/summarySCC_PM25.rds")
}
if (!"sccData" %in% ls()) {
  sccData <- readRDS("./data/Source_Classification_Code.rds")
}

# Show the first rows of each dataset.  head() must be wrapped in print():
# a bare top-level head() auto-prints only at the interactive prompt, not
# when this file is run via source()/Rscript.
print(head(neiData))
print(head(sccData))

# Report the dimensions of both tables.
print(paste("Dimension of NEI Data: ", dim(neiData)[1], dim(neiData)[2]))
print(paste("Dimension of SCC Data: ", dim(sccData)[1], dim(sccData)[2]))
|
1d9b192c72bd061fb0870d7de7cbf7c2ed29096c
|
e8bbc985eccb3f11e01cceabc9672c8e5dc3a6ef
|
/man/RAEN.Rd
|
341e541e860f259842f5825558bc34aa713bf9b9
|
[] |
no_license
|
cran/RAEN
|
702ff8201edf0525230bebc5dc0332d452069107
|
41e8ff7ed82ede5cd70de1bf1e081687de104d9f
|
refs/heads/master
| 2023-03-06T09:57:59.866709
| 2021-02-21T05:00:16
| 2021-02-21T05:00:16
| 334,182,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,525
|
rd
|
RAEN.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RAEN.R
\name{RAEN}
\alias{RAEN}
\alias{predict.RAEN}
\title{Random Ensemble Variable Selection for High Dimensional Data}
\usage{
RAEN(
x,
y,
B,
ngrp = floor(15 * ncol(x)/nrow(x)),
parallel = TRUE,
family = "competing",
ncore = 2
)
\method{predict}{RAEN}(object, newdata, ...)
}
\arguments{
\item{x}{the predictor matrix}
\item{y}{the time and status object for survival}
\item{B}{times of bootstrap}
\item{ngrp}{the number of blocks to separate variables into. Default is 15*p/N, where p is the number of predictors and N is the sample size.}
\item{parallel}{Logical TRUE or FALSE. Whether to use multi-threaded computing, which can save a considerable amount of time for high-dimensional data. Default is TRUE.}
\item{family}{what family of data types. Default is 'competing'. Quantile regression for competing risks will be available through the developmental version on github}
\item{ncore}{Number of cores used for parallel computing, if parallel=TRUE}
\item{object}{the RAEN object containing the variable selection results}
\item{newdata}{the predictor matrix for prediction}
\item{...}{other parameters to pass}
}
\value{
a dataframe with the variable names and the regression coefficients
the linear predictor of the outcome risk
}
\description{
Perform variable selection for high dimensional data
}
\examples{
\donttest{
library(RAEN)
data(toydata)
x=toydata[,-c(1:2)]
y=toydata[,1:2]
fgrp<-deCorr(x, ngrp=20)
}
}
|
5b0dffb7cc5e9cd9a33a09b67d0734a01161d837
|
09c4fac6304698cd65c9687c40434aec2801cd83
|
/app/rScripts/removeUnknownUsStates.R
|
41f8c989c32b73db22c381d469bbf44db130d817
|
[
"Apache-2.0"
] |
permissive
|
umbertoDifa/VizProject1
|
8bc97b85d95dc808fbd073cdbe4e1f71b0878d62
|
1144359f4d5329650fd51145b924323360b59468
|
refs/heads/master
| 2021-01-10T02:02:46.696003
| 2015-12-24T11:54:39
| 2015-12-24T11:54:39
| 48,537,879
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
removeUnknownUsStates.R
|
# Drop unmatched US state names from the lookup table and rewrite the TSV.
# NOTE(review): `usSbagliati` ("wrong"/full list), `usGiusti` ("correct" list)
# and `us.state.names` must already exist in the workspace; they are created
# elsewhere.
toRemove<-setdiff(usSbagliati,usGiusti) # the correct names are few, the wrong ones are many
resto<-setdiff(usSbagliati,toRemove)    # names found in both lists, i.e. the ones to keep
newUs<-us.state.names[us.state.names[,"name"] %in% resto,]
#write.csv(newUs, file = "us-state-names.tsv",row.names=FALSE)   # CSV variant kept for reference
write.table(newUs, file='us-state-names.tsv', quote=FALSE, sep='\t',row.names=FALSE)
|
345e2beb3d5747760ecc1cb98ac913cee35a08ba
|
84108ae54987955233447c20e1dc4ddba272a42f
|
/R/Rexam/day10/lab12.R
|
9e37f5b44484d3379b9a4535207db7a48afb1880
|
[] |
no_license
|
MyChoYS/K_TIL
|
bb82554a6b68f9d3fcd39c9031331ea4df0d8716
|
8417a6f22addcfe08bcb708d61c91091f702b2cb
|
refs/heads/master
| 2023-04-24T05:58:29.253920
| 2021-05-07T06:13:34
| 2021-05-07T06:13:34
| 325,244,373
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,218
|
r
|
lab12.R
|
# Problem 1: element count plus total character count of the vector.
v1 <- c("Happy","Birthday","to","you")
length(v1)+sum(nchar(v1))
str_count()   # NOTE(review): called with no arguments -- this errors (stringr is also only loaded further down); looks like leftover scratch code
# Problem 2: the same words collapsed into a single string.
v2 <- paste(v1[1],v1[2],v1[3],v1[4])
length(v2) + nchar(v2)
# Problem 3: pair letters with numbers, with and without a separator.
paste(LETTERS,1:10)
paste(LETTERS,1:10,sep = "")
# Problem 4: split a string and take the first 4 characters of each part.
v4 <- "Good Morning"
v4 <- list(strsplit(v4," ")[[1]][1],strsplit(v4," ")[[1]][2])
library (stringr) ###
str_sub(v4,1,4)
# Problem 5: strip punctuation and split the sentences into single words.
v5 <- c("Yesterday is histroy, tommrrow is a mystery, today is a gift!",
"That's why we call it the present - from kung fu panda")
v5 <- gsub("[[:punct:]]","",v5)
v5 <- unlist(strsplit(v5," "))
v5
# Problem 6: remove Hangul characters and/or punctuation, substitute "100".
s1 <- "@^^@Have a nice day!! 좋은 하루!! 오늘도 100점 하루...."
r1 <- gsub("[가-힣]", "", s1)          # drop Hangul syllables
r2 <- gsub("[[:punct:]]","",s1)        # drop punctuation
r3 <- gsub("[[:punct:]가-힣]","",s1)   # drop both
r4 <- gsub("100","백",s1)              # replace "100" with the Korean word for hundred
# Problem 7: top-10 noun frequency table from a hotel review file (KoNLP).
library(KoNLP)
hotel1 <- scan("output/hotel.txt", what="")
# Filter(function(x) {nchar(x) >= 2}, hotel2)  -- restrict to words with at least two characters
hotel2<- Filter(function(x) {nchar(x) >= 2}, gsub("[[:cntrl:][:punct:]]","",unlist(extractNoun(hotel1))))
wcount <- sort(table(hotel2),decreasing = T)[1:10]
df <- data.frame(wcount )
colnames(df) <- c("wname","wcount")
View(df)
write.csv(df,file = "output/hotel_top_word.csv")
|
783a5ee90442fe1f0e88f814a091573a4228ad11
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.machine.learning/man/comprehend_delete_document_classifier.Rd
|
6bc7c26d285600d313eb466750fb826b43efa2b6
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,077
|
rd
|
comprehend_delete_document_classifier.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comprehend_operations.R
\name{comprehend_delete_document_classifier}
\alias{comprehend_delete_document_classifier}
\title{Deletes a previously created document classifier}
\usage{
comprehend_delete_document_classifier(DocumentClassifierArn)
}
\arguments{
\item{DocumentClassifierArn}{[required] The Amazon Resource Name (ARN) that identifies the document classifier.}
}
\value{
An empty list.
}
\description{
Deletes a previously created document classifier
Only those classifiers that are in terminated states (IN_ERROR,
TRAINED) will be deleted. If an active inference job is using the model,
a \code{ResourceInUseException} will be returned.
This is an asynchronous action that puts the classifier into a DELETING
state, and it is then removed by a background job. Once removed, the
classifier disappears from your account and is no longer available for
use.
}
\section{Request syntax}{
\preformatted{svc$delete_document_classifier(
DocumentClassifierArn = "string"
)
}
}
\keyword{internal}
|
3c5acdc3fdf4beaed6578cbe9d076c5e4353f58c
|
9e9034817ae36de0c220a18d8130f9f5099bd2a6
|
/Kmeans_FeatureEngineering_Deeplearning.R
|
3f286d1a6b00586ebbcefb7d406e11c30cf8fd33
|
[] |
no_license
|
karthikmohan23/Machine-Learning
|
3c5888a101154912d5587dce6c3fe7a3a9f022eb
|
c2c4f99b2c19cdec4090fcf5fe280e575cbc24b7
|
refs/heads/master
| 2021-04-15T13:06:16.188029
| 2019-07-17T16:20:30
| 2019-07-17T16:20:30
| 126,626,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,985
|
r
|
Kmeans_FeatureEngineering_Deeplearning.R
|
library(onehot)
library(caret)
library(rpart)
library(rpart.plot)
library(fastICA)
library(h2o)
library(plyr)
# Load the HR attrition data and drop the target column ("left") before clustering.
HR <- read.csv('D:/UTD MSBA/Fall 2017 (Sem 3)/Machine Learning/Project 4/HR_comma_sep.csv',header = TRUE)
HR.f <- HR
HR.f$left <- NULL
# One-hot encode the categorical columns (up to 20 levels each).
data_ <- onehot(HR.f,stringsAsFactors = FALSE,addNA = FALSE,max_levels = 20)
data <- as.data.frame(predict(data_,HR.f))
head(data)
# Min-max scale every column to [0, 1] so no feature dominates the distances.
maxs <- apply(data, 2, max)
mins <- apply(data, 2, min)
scaled <- as.data.frame(scale(data, center = mins, scale = maxs - mins))
dim(scaled)
summary(scaled)
set.seed(123)
# Compute and plot within-cluster sum of squares for k = 2 to k = 15 (elbow method).
k <- 15
wss <- sapply(2:k,function(k){kmeans(scaled, k, nstart=10,iter.max = 15 )$tot.withinss})
# tot.withinss (total within-cluster sum of squares) should be as small as possible
plot(2:k, wss,type="b", pch = 19, frame = FALSE,xlab="Number of clusters K",ylab="Total within-clusters sum of squares")
# Final clustering with k = 9; cross-tabulate clusters against the true label.
res = kmeans(scaled,9)
table(HR$left,res$cluster)
# (between_SS / total_SS) should be high - it explains the total variation
#--------------------------Car----------------
# Load the car evaluation data and drop the target column ("car") before clustering.
set.seed(123)
cardata <- read.csv("D:/UTD MSBA/Fall 2017 (Sem 3)/Machine Learning/Project 4/car_data.csv", header=TRUE)
cardata.f <- cardata
cardata.f$car <- NULL
# One-hot encode all factor columns of a data frame via the `onehot` package.
#
# dat: a data frame. Returns a data.frame in which every factor column is
# expanded into 0/1 indicator columns (up to 20 levels per factor); NA is not
# treated as its own level.
one_hot_encoding <- function(dat) {
  encoder <- onehot(dat, stringsAsFactors = FALSE, addNA = FALSE, max_levels = 20)
  as.data.frame(predict(encoder, dat))
}
# Min-max normalise every column of a data frame to the [0, 1] range.
#
# dat: a data frame (assumed all-numeric -- TODO confirm with callers).
# Returns a data.frame of the same shape where each column x becomes
# (x - min(x)) / (max(x) - min(x)).  Note: a constant column yields NaN
# (division by zero), matching the original behaviour.
scaling_data <- function(dat) {
  col_min <- apply(dat, 2, min)
  col_range <- apply(dat, 2, max) - col_min
  as.data.frame(scale(dat, center = col_min, scale = col_range))
}
#Elbow Graph
# Elbow-plot helper: run k-means for k = 1..k_max clusters on `dat` and plot
# the total within-cluster sum of squares against k, so a suitable cluster
# count can be read off the "elbow" of the curve.
#
# dat   - numeric data (rows = observations), passed straight to kmeans()
# k_max - largest number of clusters to try (default 15, matching the old
#         hard-coded value)
#
# Invisibly returns the vector of within-cluster sums of squares.
# (The original body contained a bare `wss_car` expression, which is a no-op
# inside a function, and used sapply; both are cleaned up here.)
get_elbow_graph <- function(dat, k_max = 15){
  wss <- vapply(seq_len(k_max),
                function(k) kmeans(dat, k)$tot.withinss,
                numeric(1))
  plot(seq_len(k_max), wss, type = "b", pch = 19, frame = FALSE,
       xlab = "Number of clusters K",
       ylab = "Total within-clusters sum of squares")
  invisible(wss)
}
# encode + scale the car data, then cluster on the original features
data = one_hot_encoding(cardata.f)
scaled_car = scaling_data(data)
set.seed(123)
sprintf("Initial K-Means")
get_elbow_graph(scaled_car)
k_means_org_features = kmeans(scaled_car,3)
k_means_org_features
# Neural network trained on one-hot-encoded cluster assignments as features
clust_features = one_hot_encoding(as.data.frame(as.factor(k_means_org_features$cluster)))
colnames(clust_features) = c("cluster 1","cluster 2","cluster 3","cluster 4", "cluster 5", "cluster 6", "cluster 7","cluster 8")
h2o.init(ip = "localhost", port = 54321, max_mem_size = "4000m")
clust_features$car <- as.factor(cardata$car)
# 60/19/20 train/validation/test split
splits <- h2o.splitFrame(as.h2o(clust_features), c(0.6,0.19,0.2), seed=1234)
train <- h2o.assign(splits[[1]], "train.hex") # 60%
valid <- h2o.assign(splits[[2]], "valid.hex") # 19%
test <- h2o.assign(splits[[3]], "test.hex") # 20%
response <- "car"
predictors <- setdiff(names(clust_features), response)
predictors
# 3 hidden layers of 100 ReLU units, 5-fold CV, L2 regularisation
m1 <- h2o.deeplearning(
training_frame=train,
validation_frame=valid, ## validation dataset: used for scoring and early stopping
x=predictors,
y=response,
activation="Rectifier",
hidden=c(100,100,100),
epochs=10,
nfolds = 5,
seed = 123,
variable_importances=T,
l2 = 6e-4,
loss = "CrossEntropy",
distribution = "bernoulli",
stopping_metric = "misclassification"
)
# misclassification rate on the training split
pred = h2o.predict(m1,train)
accuracy = pred$predict == train$car
err_rates = 1 - mean(accuracy)
sprintf("Train Error: %f",err_rates)
# misclassification rate on the held-out test split
pred = h2o.predict(m1,test)
accuracy = pred$predict == test$car
test_err_rates = 1 - mean(accuracy)
sprintf("Test Error: %f",test_err_rates)
# PCA: cluster on the first 14 principal component scores
set.seed(123)
pr = princomp(scaled_car,scores = TRUE)
pr_cardata = pr$scores[,1:14]
sprintf("After PCA")
get_elbow_graph(pr_cardata)
k_means_pca_features = kmeans(pr_cardata,3)
k_means_pca_features
# ICA: cluster on 10 independent components
set.seed(123)
ic = fastICA(scaled_car, n.comp = 10, alg.typ = "parallel", fun = "logcosh", alpha = 1,
method = "R", row.norm = FALSE, maxit = 200,
tol = 0.0001, verbose = FALSE)
ica_data = ic$S
sprintf("After ICA")
get_elbow_graph(ica_data)
k_means_ica_features = kmeans(ica_data,12)
k_means_ica_features
#RCA
# Randomly choose `d` distinct feature indices out of `d.original`
# (used by random_projection to decide which components to keep).
#
# d          - number of indices to draw (default 2)
# d.original - size of the original feature space (default 10)
#
# Returns a vector of length d with distinct values in 1..d.original.
#
# The original implementation drew indices one at a time with runif() and a
# rejection loop (and would spin forever when d > d.original). sample()
# performs the same sampling-without-replacement directly.
random.component.selection <- function(d=2, d.original=10) {
  if (d > d.original)
    stop("random.component.selection: cannot draw more indices than features", call.=FALSE)
  sample(d.original, d)
}
# Project the feature-by-sample matrix `m` onto `d` randomly selected rows
# (features). With scaling=TRUE the kept rows are multiplied by
# sqrt(d.original/d), the usual random-projection norm correction.
# Returns the reduced matrix.
random_projection <- function(d, m, scaling=FALSE){
  d.original <- nrow(m)
  if (d >= d.original)
    stop("random.subspace: subspace dimension must be lower than space dimension", call.=FALSE)
  # indices of the rows (features) to keep
  keep <- random.component.selection(d, d.original)
  reduced.m <- m[keep, ]
  if (scaling == TRUE)
    reduced.m <- sqrt(d.original/d) * reduced.m
  reduced.m
}
# RCA: project onto 20 randomly chosen features (rows of the transpose), cluster
m = as.matrix(t(scaled_car))
rp_features = as.data.frame(t(random_projection(20, m)))
sprintf("After RCA")
get_elbow_graph(rp_features)
k_means_rp_features = kmeans(rp_features,8)
k_means_rp_features
# Feature Selection (Decision Tree): keep only variables the tree found important
projecttree <- rpart(car~.,data=cardata,method="class",parms = list(split = "information"))
sig_vars = names(projecttree$variable.importance)
data = one_hot_encoding(cardata[,sig_vars])
scaled_car = scaling_data(data)
get_elbow_graph(scaled_car)
k_means_f_select = kmeans(scaled_car,6)
k_means_f_select
# output column is appended again as it was dropped from original dataset
#scaled_car$car <- car$car
#head(scaled_car)
#levels(scaled_car$car)[levels(scaled_car$car)%in%c("acc","good","vgood")] <- 0
#levels(scaled_car$car)[levels(scaled_car$car)%in%c("unacc")] <- 1
# Compute and plot wss for k = 1 to k = 15.
|
52fcdb771bb92772c0f88455a73ef5275d666974
|
6c1986518bcdd685e8c2c57699259f5c3af5cf7f
|
/scr/task.R
|
fa074822650662259a05f9f58596170d383978ed
|
[
"MIT"
] |
permissive
|
dacero21/clase-9
|
171ce8899271cb16ea588a24ac201a0381cc1461
|
f7eee2c621aa4ab7f1691042f7ccced188c856ab
|
refs/heads/master
| 2023-04-07T00:00:06.157844
| 2021-04-15T01:26:01
| 2021-04-15T01:26:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
task.R
|
#==============================================================================#
# Autor(es): Eduard Martinez
# Colaboradores:
# Fecha creacion: 17/03/2019
# Fecha modificacion: 17/03/2021
# Version de R: 4.0.3.
#==============================================================================#
# intial configuration
# NOTE(review): clearing the workspace at the top of a script is kept from the
# original, but is generally discouraged in shared code.
rm(list = ls())
pacman::p_load(tidyverse,data.table,readxl) # load and/or install required packages
#----------------------------------#
# Import files using a loop        #
#----------------------------------#
# paths of the Excel files to load
files = list.files(path = "data/input" , full.names = TRUE)
# pre-allocated list to hold the imported data sets
lista_data = vector("list", length(files))
# Loop: read every Excel file into the list (index-based, so no manual counter)
for (conteo in seq_along(files)){
  lista_data[[conteo]] = read_excel(path = files[conteo])
}
# export the list of raw sheets
saveRDS(lista_data,"data/output/lista siedco.rds")
#----------------------------------#
# Re-import and clean the files    #
#----------------------------------#
# Import "lista siedco.rds" from data/output
ldata = readRDS("data/output/lista siedco.rds")
# Visually inspect a couple of elements
df1 = ldata[[1]]
df10 = ldata[[10]]
# Clean one data set by hand (illustrates what f_clean automates below)
df_i = ldata[[4]]
df_i = subset(df_i,!is.na(...2)) # drop header/footer rows with no data in column 2
colnames(df_i) = df_i[1,] %>% as.character() # first remaining row holds the names
df_i = df_i[-1,]
# Generalise the cleaning step into a function
# i - index of the element of `ldata` to clean
# Returns the cleaned data frame. NOTE: reads `ldata` from the calling env.
f_clean = function(i){
  df_i = ldata[[i]]
  df_i = subset(df_i,!is.na(...2)) # drop non-data rows
  colnames(df_i) = df_i[1,] %>% as.character() # promote first row to column names
  df_i = df_i[-1,]
  return(df_i)
}
# apply the cleaning function to every element (was hard-coded to 1:14)
data = lapply(seq_along(ldata), function(z) f_clean(i = z))
# inspect elements of the cleaned list
dc1 = data[[1]]
dc10 = data[[10]]
# stack into a single data frame
# (the original call passed an empty `use.names =` argument; the default is kept)
dataframe = rbindlist(l = data, fill = TRUE)
|
c35d67917d6d7646f8aa7d4ef40a72c5fd3853e2
|
43efc50cd5007dd709bcb0bd8efc08b0520bdb00
|
/obj.R
|
596a3ed7feff7e37d80dc1a8cc80a735e20eaae7
|
[] |
no_license
|
Dmohit/dive
|
910f39edd2488ddf6c037e4ea138c7e9650ae476
|
bde2558aa98fc532c67dcdc109236de1b285e069
|
refs/heads/master
| 2021-01-21T12:47:24.900186
| 2016-04-03T07:11:34
| 2016-04-03T07:11:34
| 5,536,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
obj.R
|
# Construct an empty 'proj' object: a named list of ten NULL slots
# (status, project data, IDs, fixed/target sets, weights, history) that the
# rest of the code fills in later.
proj <- function() {
  slots <- c('status','proj','ID','fixed','target','wtf','wtt','wtr','olds','current')
  v <- setNames(vector('list', length(slots)), slots)
  class(v) <- 'proj'
  v
}
init.proj <- function(v) {
|
08d448a4dc73263e5a09ccfecbbfc239248b8c5e
|
52c6d03d92444b773d3f57bf1e0dd09bd8cf0057
|
/data/journey-planner/import-restaurants.R
|
bb16aca4c1c6054869d5b5a4f808aff1fc3f80cd
|
[] |
no_license
|
katossky/katossky.github.io
|
fa59fd6b86ff70d584abd4c0543b9b87ee40f39d
|
b8dfb6ad19f94c69b1050df67ed8386d2c344729
|
refs/heads/master
| 2020-05-22T01:28:03.119849
| 2016-10-10T07:38:59
| 2016-10-10T07:38:59
| 55,267,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,012
|
r
|
import-restaurants.R
|
# NOTE(review): setwd() ties the script to one machine's layout; consider
# relative paths or here::here() if this is ever shared.
setwd('~/Projets/katossky.github.io/data/journey-planner')
library(rvest)
library(plyr)
library(dplyr)
library(tidyr)
library(stringr)
library(jsonlite)
library(data.table)
# SCRAPING ---------------------------------------------------------------------
# Scrape ViaMichelin restaurant search results inside the given lat/lon
# bounding box, following every pagination page (with a polite random delay
# between requests).
#
# top/bottom/left/right - bounding-box coordinates in degrees
#
# Returns a list with one element per result page, each element being the
# set of 'li.poi-item' nodes found on that page.
get_michelin_restaurants <- function(top, bottom, left, right){
  url <- paste0(
    'http://www.viamichelin.com/web/Recherche_Restaurants?',
    'geoboundaries=', bottom, ',', left,':',top,',', right
  )
  michelin <- read_html(url)
  page_tabs <- michelin %>% html_nodes('.pagination-container a')
  page_nums <- page_tabs %>% html_text() %>% as.integer
  # With a single page of results there are no pagination links; max() of an
  # empty vector would return -Inf (with a warning), so fall back to 1.
  nb_pages <- if (length(page_nums) == 0) 1L else max(page_nums, na.rm = TRUE)
  Restaurants <- vector(mode='list', length=nb_pages)
  Restaurants[[1]] <- michelin %>% html_nodes('li.poi-item')
  # seq_len()[-1] instead of 2:nb_pages: the latter ran backwards (2, 1) and
  # re-fetched page 1 whenever nb_pages == 1.
  for(page in seq_len(nb_pages)[-1]){
    sleep <- runif(1, min = 0, max = 1) %>% round(2)
    Sys.sleep(sleep)
    cat('Slept', sleep, 'seconds. Treating page', page, 'of', nb_pages, fill=TRUE)
    michelin <- read_html(paste0(url, '&page=', page))
    Restaurants[[page]] <- michelin %>% html_nodes('li.poi-item')
  }
  return(Restaurants)
}
# fetch two bounding boxes of search results and merge the per-page lists
Restaurants <- c(
get_michelin_restaurants(70, 50, 0, 20), # Scandinavia + Germany + North France
get_michelin_restaurants(50, 40,-10, 10) # France + North Spain
)
Restaurants <- do.call(c, Restaurants) # flatten pages into one node collection
class(Restaurants) <- 'xml_nodeset'
# FORMATTING -------------------------------------------------------------------
# each restaurant lists a "from" and a "to" price -> 2-column matrix
prices <- Restaurants %>% html_nodes(css='.poi-item-price em') %>% html_text %>%
unlist %>% str_extract('^[0-9]+') %>% as.integer %>%
matrix(ncol=2, byrow=TRUE)
# one row per restaurant: name, detail-page URL, price range and award flags
RESTAURANTS <- data.table(
name = Restaurants %>% html_nodes(css='.poi-item-name') %>% html_text,
url = Restaurants %>% html_nodes(css='.poi-item-name a') %>%
xml_attr('href') %>% paste0('http://www.viamichelin.com', .),
price_from = prices[,1],
price_to = prices[,2],
stars = Restaurants %>% html_nodes(css='.poi-item-stars') %>%
as.character %>% str_extract_all('="star"') %>% lengths,
bib_gourmand = Restaurants %>% html_nodes(css='.poi-item-stars') %>%
as.character %>% str_extract_all('="bib-gourmand"') %>%
lengths %>% as.logical,
assiette_michelin = Restaurants %>% html_nodes(css='.poi-item-stars') %>%
as.character %>% str_extract_all('="assiette"') %>%
lengths %>% as.logical
)
# guide year is only 2016...
# Restaurants %>% html_nodes(css='.poi-item-stars') %>% html_text %>%
# str_extract('[0123456789]{4}') %>% table
# SCRAPING MORE INFORMATION ----------------------------------------------------
# fetch each restaurant's own detail page; the repeated `r <-` assignments
# below are manual restart points left over from interrupted runs
n <- nrow(RESTAURANTS)
Restaurants <- vector(mode='list', length=n)
r <- 1
r <- 6510
r <- 9633
while(r <= nrow(RESTAURANTS)){
Sys.sleep(sleep <- runif(1, min = 0, max = 1)) # polite random delay per request
cat(
'Slept', round(sleep, 2), 'seconds.',
'Treating restaurant', r, 'of', n, ':', RESTAURANTS$name[r], '.', fill=TRUE
)
Restaurants[[r]] <- read_html(RESTAURANTS$url[r]) %>% html_nodes('body')
r <- r+1
}
save(Restaurants, file=paste0(Sys.Date(),'michelin-detailed-restaurants.RData'))
Restaurants2 <- do.call(c, Restaurants) # flatten into one node collection
class(Restaurants2) <- 'xml_nodeset'
# FORMATTING -------------------------------------------------------------------
# cuisine type
RESTAURANTS$cuisine <- Restaurants2 %>%
html_nodes(css='.datasheet-cooking-type') %>%
html_text(trim=TRUE)
# citation (guide blockquote, with the surrounding tags stripped via str_sub)
RESTAURANTS$citation <- Restaurants2 %>%
html_nodes(css='.datasheet') %>% as.character %>%
str_extract_all('<blockquote>[\\s\\S]*</blockquote>') %>%
laply(function(v) if(length(v)==0) NA else v) %>%
str_sub(17,-19)
# standard: two-digit "standing" code embedded in the quotation markup
RESTAURANTS$standard_code <- Restaurants2 %>%
html_nodes(css='.datasheet-quotation') %>% as.character %>%
str_extract('standing-[0123456789]{2}') %>% str_sub(10,-1) %>% as.factor
# codes 12-16 and 17-21 both map to the same five comfort levels
RESTAURANTS$standard <- RESTAURANTS$standard_code %>% revalue(replace=c(
`12`='simple',
`13`='good',
`14`='very good',
`15`='excellent',
`16`='exceptionnal',
`17`='simple',
`18`='good',
`19`='very good',
`20`='excellent',
`21`='exceptionnal'
))
# codes 17-21 mark the guide's "best addresses"
RESTAURANTS$best_addresses <- RESTAURANTS$standard_code %in% 17:21
# twenty_or_less: has a good-value menu badge
RESTAURANTS$twenty_or_less <- Restaurants2 %>%
html_nodes(css='.datasheet-quotation') %>% as.character %>%
str_detect('good-value-menu')
# address
RESTAURANTS$address <- Restaurants2 %>%
html_nodes(css=paste(
'.datasheet',
'.datasheet-item:not(.datasheet-name):not(.datasheet-description-container)'
)) %>% html_text
# phone: extracted from tel: links, NA when absent
RESTAURANTS$phone <- Restaurants2 %>%
html_nodes(css='.datasheet .datasheet-more-info:last-child') %>% as.character %>%
str_extract_all('href="tel:.*?"') %>% # lengths %>% table
laply(function(v) if(length(v)==0) NA else v) %>%
str_sub(11,-2)
# mail: extracted from mailto: links, NA when absent
RESTAURANTS$mail <- Restaurants2 %>%
html_nodes(css='.datasheet .datasheet-more-info:last-child') %>% as.character %>%
str_extract_all('href="mailto:.*?"') %>% # lengths %>% table
laply(function(v) if(length(v)==0) NA else v) %>%
str_sub(14,-2)
# website: first http:// link, NA when absent
RESTAURANTS$website <- Restaurants2 %>%
html_nodes(css='.datasheet .datasheet-more-info:last-child') %>% as.character %>%
str_extract_all('href="http://.*?"') %>%
laply(function(v) if(length(v)==0) NA else v) %>%
str_sub(7,-2)
# other information
# reading the "Good to know" and "Additional information" bullet lists
RESTAURANTS$good_to_know <- Restaurants %>%
lapply(html_nodes, xpath="//p[text()='Good to know']/../ul/li/text()") %>%
lapply(as.character) %>%
laply(function(v) if(length(v)==0) NA else v)
RESTAURANTS$additional_information <- Restaurants %>%
lapply(html_nodes, xpath="//p[normalize-space(text())='Additional information']/../ul/li/text()") %>%
lapply(as.character)
# extracting variables 1: spread each "additional information" bullet into its
# own logical column; rows without bullets are re-appended afterwards
RESTAURANTS <- rbind(
RESTAURANTS %>%
unnest(additional_information) %>%
mutate(place_holder=TRUE) %>%
spread(additional_information, place_holder, fill=FALSE),
RESTAURANTS %>%
filter(lengths(additional_information)==0) %>%
select(-additional_information),
fill=TRUE
)
# extracting variables 2
# Replace NA entries of a logical vector with FALSE ("not detected").
NA_to_F <- function(vect) replace(vect, is.na(vect), FALSE)
# derive flags from the free-text "good to know" notes (NA -> FALSE)
RESTAURANTS$dinner_only <- NA_to_F(str_detect(RESTAURANTS$good_to_know, 'dinner only'))
RESTAURANTS$booking <- ifelse(
NA_to_F(str_detect(RESTAURANTS$good_to_know, 'booking advisable')),
yes='advisable',
no=ifelse(NA_to_F(str_detect(RESTAURANTS$good_to_know, 'booking essential')),
yes='essential',
no ='not required'
)
)
# coordinates: parsed from the "lat|lon" id inside the map-view JSON attribute
RESTAURANTS <- Restaurants2 %>%
html_nodes('div.poi_view') %>%
xml_attr('data-fetch_summary') %>%
lapply(function(item) fromJSON(item)$restaurants$id %>% str_match(
'^(-?[0-9]+\\.[0-9]+)\\|(-?[0-9]+\\.[0-9]+)'
) %>% `colnames<-`(c('match','lat','lon')) %>% as.data.table) %>%
rbind_all %>%
select(-match) %>%
mutate_each('as.numeric') %>%
cbind(RESTAURANTS)
# persist final results (RData + CSV)
save(RESTAURANTS, file=paste0(Sys.Date(),'-michelin-restaurants.RData'))
write.csv(RESTAURANTS, file=paste0(Sys.Date(),'-michelin-restaurants.csv'))
save(RESTAURANTS, file=paste0(Sys.Date(),'-michelin-restaurants2.RData'))
|
9e21a908db8122278e97883e863f77a9c7f3a43a
|
a8ed2942767e1eb379a58ca6be1ece8578fab007
|
/rankhospital.R
|
87b0cb2d81d370dbbcf49fbb430f2aeb238a3cc3
|
[] |
no_license
|
nsdfxela/datasciencecoursera-ex3
|
7a9837fd4e88d1d667397e7e8ff75d34c47ed637
|
62117a9d50507b3a1955eebb53c39b2147b59903
|
refs/heads/master
| 2021-01-02T08:19:23.526119
| 2014-05-04T19:19:41
| 2014-05-04T19:19:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,731
|
r
|
rankhospital.R
|
##rankhospital("MD", "heart failure", 5)
##setwd("C:/COMMUNISM/coursera/Data Science/2/work/week4/rprog-data-ProgAssignment3-data")
## rankhospital("MD", "heart failure", 5)
## Return the name of the hospital in `state` with rank `num` (1 = lowest
## 30-day mortality) for the given outcome; ties broken alphabetically by
## hospital name. `num` may be an integer rank, "best" or "worst". Returns
## NA when the rank exceeds the number of hospitals with data.
rankhospital <- function (state, outcome, num){
  outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # all columns holding 30-day death (mortality) rates, one per outcome
  neededColumns <- grep("^Hospital.30.Day.Death..Mortality..Rates.from.", names(outcomeData))
  figureOutOutcomesColumns <- function (outComeName)
  {
    neededColumns[grep(outComeName, names(outcomeData[neededColumns]))]
  }
  ## Check that state and outcome are valid
  ## outcome check: narrow neededColumns to the single requested outcome
  if(outcome == "heart attack") {
    neededColumns <- figureOutOutcomesColumns("Heart.Attack")
  }
  else if (outcome == "heart failure") {
    neededColumns <- figureOutOutcomesColumns("Heart.Failure")
  }
  else if (outcome == "pneumonia") {
    neededColumns <- figureOutOutcomesColumns("Pneumonia")
  }
  else {stop("invalid outcome")}
  ## state check
  if(!any(outcomeData[,"State"] == state))
    stop("invalid state")
  # convert the rate column to numeric and drop hospitals with no data
  outcomeData[,neededColumns] <- sapply(outcomeData[,neededColumns], function(x) as.numeric(x))
  outcomeDataClean <- outcomeData [!is.na(outcomeData [,neededColumns]),]
  outcomeState <- outcomeDataClean[outcomeDataClean[,"State"]==state, ]
  # order by mortality rate, then hospital name (alphabetical tie-break)
  index <- with(outcomeState, order(outcomeState[,neededColumns], outcomeState[,"Hospital.Name"]))
  res <- outcomeState[index, ] ## sorted by rate and then by Hospital.Name
  ## translate "best"/"worst" to numeric ranks
  if(num == "best") num <- 1
  if(num == "worst") num <- length(res[,1])
  name <- res[num, "Hospital.Name"]
  # out-of-range subsetting yields NA, never NULL, so the original
  # is.null(name) test could never fire; test for NA instead
  if(is.na(name)) NA
  else name
}
|
c794b6eaf339df1273e0d34d7b91b11a1ed51277
|
f354e85cb379bc5d858f80fbf9c2ba384ed68ef6
|
/man/cor_nba.Rd
|
bbcb1f952eaab4bf43ba349c84266667459aa270
|
[] |
no_license
|
CarolineYYU/CYavgTS1
|
22453fc49a5a7f332a087f430959fe4a5f8573a7
|
bead46a166e8c9f7a9fdb98d05e90d64b5acade5
|
refs/heads/master
| 2021-02-16T10:53:38.918100
| 2020-03-04T20:33:04
| 2020-03-04T20:33:04
| 244,952,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 473
|
rd
|
cor_nba.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor_nba.R
\name{cor_nba}
\alias{cor_nba}
\title{A Correlation Matrix Function}
\usage{
cor_nba(dataset, y)
}
\arguments{
\item{dataset}{is the dataset of NBA}
\item{y}{is the Year that we are going to figure it out}
}
\description{
This function allows you to compute the correlation matrix of numeric variables in the datasets
}
\examples{
cor_nba()
}
\keyword{correlation}
\keyword{matrix}
|
03c41abd640cd458168cc875d11df5b4c68eb880
|
efa6ffc0018e3351f92ae4b6edd41d098a82b414
|
/DT.R
|
a1365bc5848004f75213c3386747e6a118f62e1f
|
[] |
no_license
|
kanikatiwari/Data-Analytics-R-
|
cf32fa655e3d10d669462abfe8f736216eeee2f7
|
eabc8488d681462329555d19e31b6025f25e148f
|
refs/heads/master
| 2020-07-31T05:47:20.458938
| 2020-03-20T09:52:02
| 2020-03-20T09:52:02
| 210,505,097
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 745
|
r
|
DT.R
|
# Decision tree demo using conditional inference trees from the party package
# Loading the package for decision trees
library(party)
# using the bundled readingSkills dataset
head(readingSkills)
readingSkills
# creating the input data frame (first 105 rows)
data.input = readingSkills[c(1:105),]
data.input
# toy example: tree on a single numeric vector
# NOTE(review): `c` is assigned here, shadowing base::c for the rest of the
# session; left unchanged in this documentation-only pass.
a = c(1:50)
cb = data.frame(cbind(a))
c = ctree(a~., data=cb)
plot(c)
# Creating the tree with one predictor & plotting
output.tree1 = ctree(nativeSpeaker ~ age, data= data.input)
output.tree1
plot(output.tree1)
# Creating the tree with two predictors & plotting
output.tree2 = ctree(nativeSpeaker ~ age+ shoeSize, data= data.input)
output.tree2
plot(output.tree2)
# Creating the tree with all three predictors & plotting
output.tree = ctree(nativeSpeaker ~ age+ shoeSize+ score, data= data.input)
output.tree
plot(output.tree)
# NOTE(review): "decisiontree" is not an existing help topic
?decisiontree
?ctree
|
684d6a78674a9d18c4eaf588bfe256d7afa41fd3
|
0f4cc899c28af13547edfc82b7871d933a8cad19
|
/R/plot.R
|
37af9135be45e7b5f678e26194c859fc58de2a04
|
[] |
no_license
|
cran/wbs
|
73b2a4e71f5a422bb29dba81513eab005bae20d7
|
4e47fc33cbd5c445da6a49b6ee34a7ac84a2b85d
|
refs/heads/master
| 2021-01-23T16:27:09.644389
| 2019-05-14T20:40:03
| 2019-05-14T20:40:03
| 17,700,854
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,038
|
r
|
plot.R
|
#' @title Plot for an 'sbs' object
#' @description Plots the input vector used to generate 'sbs' object \code{x} with fitted piecewise constant function, equal to the mean
#' between change-points specified in \code{cpt}.
#' @details When \code{cpt} is omitted, the function automatically finds change-points
#' using \code{changepoints} function with a default value of the threshold.
#' @method plot sbs
#' @importFrom stats ts.plot
#' @importFrom graphics lines title
#' @export
#' @param x an object of class 'sbs', returned by \code{\link{sbs}}
#' @param cpt a vector of integers with localisations of change-points
#' @param ... other parameters which may be passed to \code{plot} and \code{changepoints}
#' @seealso \code{\link{sbs}} \code{\link{changepoints}}
# Plot method for 'sbs' objects: draws the raw series and overlays the fitted
# piecewise-constant function (means between change-points) in red.
# When `cpt` is omitted, change-points are detected with changepoints() at
# its default threshold.
# (A stray bare `print` statement — a leftover no-op — has been removed.)
plot.sbs <- function(x,cpt,...){
  ts.plot(x$x,ylab="x",...)
  if(missing(cpt)){
    # no change-points supplied: detect them with the default threshold
    w.cpt <- changepoints(x,...)
    means <- means.between.cpt(x$x,w.cpt$cpt.th[[1]])
  }else{
    means <- means.between.cpt(x$x,cpt)
  }
  lines(x=means,type="l",col="red")
  title("Fitted piecewise constant function")
}
#' @title Plot for a 'wbs' object
#' @description Plots the input vector used to generate 'wbs' object \code{x} with fitted piecewise constant function, equal to the mean
#' between change-points specified in \code{cpt}.
#' @details When \code{cpt} is omitted, the function automatically finds change-points
#' using \code{changepoints} function with strengthened Schwarz Information Criterion as a stopping criterion for the WBS algorithm.
#' @method plot wbs
#' @export
#' @param x an object of class 'wbs', returned by \code{\link{wbs}}
#' @param cpt a vector of integers with localisations of change-points
#' @param ... other parameters which may be passed to \code{plot} and \code{changepoints}
#' @seealso \code{\link{wbs}} \code{\link{changepoints}} \code{\link{ssic.penalty}}
# Plot method for 'wbs' objects. When no change-points are supplied, they are
# first estimated with the ssIC-penalised criterion; plotting is then
# delegated to the 'sbs' method.
plot.wbs <- function(x,cpt,...){
  if (missing(cpt)) {
    cpt <- changepoints(x, penalty = "ssic.penalty")$cpt.ic[["ssic.penalty"]]
  }
  plot.sbs(x, cpt = cpt, ...)
}
|
8f9c0c56ebd5f19c2dedc6f15a6e9942b3d2d878
|
c2ae49e76f885f9745ebf9530b275d80a7ca1a1f
|
/R/rviewgraph-package.R
|
cb1a7b85bba4cba6838488aeb7032a5f319b29c5
|
[] |
no_license
|
cran/rviewgraph
|
f9100f245e27a267985c99c6a581e4f2803cb228
|
67fa4edc7f2f546681dfc5484191ce1601b23d0e
|
refs/heads/master
| 2023-06-01T08:24:19.029111
| 2023-05-10T16:50:02
| 2023-05-10T16:50:02
| 145,905,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,309
|
r
|
rviewgraph-package.R
|
#' @name rviewgraph-package
#' @aliases rviewgraph-package rviewgraph
#' @docType package
#' @title Animated Graph Layout Viewer
#' @description
#' Provides 'Java' graphical user interfaces for graph viewing, manipulation and plotting.
#'
#' @details
#' \tabular{ll}{
#' Package: \tab rviewgraph\cr
#' Type: \tab Package\cr
#' Version: \tab 1.4.2\cr
#' Date: \tab 2021-10-25\cr
#' License: \tab GPL-2 \cr
#' LazyLoad: \tab yes\cr
#' SystemRequirements: \tab 'Java' >= 8\cr
#' }
#' Provides 'Java' graphical user interfaces (GUI) for viewing, manipulating
#' and plotting graphs. Graphs may be directed or undirected.
#'
#' The original program, \code{rViewGraph} takes
#' a graph specified as an incidence matrix, array of edges, or in \code{igraph} format
#' and runs a graphical user interface that shows an
#' animation of a force directed algorithm positioning the vertices in two dimensions.
#' If run from a non-interactive R session, \code{rViewGraph} prints an
#' error message and returns \code{NULL}.
#'
#' A new program, \code{vg}, is an alternative interface to the underlying
#' 'Java' program that provides a more coherent way of specifying the graph,
#' and more control over how the vertices appear in the GUI. Specifically,
#' \code{vg} allows for arbitrary integer indices to identify the vertices,
#' and allows changes to the graph's vertex and edge sets. The text labels,
#' colours, shapes and sizes of the vertices can also be specified, either before
#' or after vertices are added to the graph. These changes can be made while the
#' vertex positioning animation is running. \code{vg} also provides
#' functions for saving and restoring the state of the graph including
#' vertices and edges,
#' vertex positions, and vertex appearances.
#' \code{vg()} can be run non-interactively without a GUI which allows a
#' graph structure to be built and saved for a future interactive session.
#'
#' Both programs can also start a dialog box to print the current view of the graph.
#'
#' The underlying positioning methods works well for graphs of various structure
#' of up to a few thousand vertices. It's not fazed by graphs that comprise several
#' components.
#'
#'
#' @seealso vg rViewGraph
#'
#' There is a vignette on 'Building a simple graph sampler'.
#'
#' @keywords internal
"_PACKAGE"
#> [1] "_PACKAGE"
|
8756a2f1c2195cef08e0b01d0d6bef76c898dc2b
|
5e68edfa32c59c863e2be740bc4f0c64041ee762
|
/server.R
|
4045b2ebd11c35e0981551e06e5a5170f2a24c0b
|
[] |
no_license
|
Deleetdk/test_bias_omitted_variable_bias
|
e6e2a7a02e1943e056be0c338cec98f21c330715
|
b01f149e3dda1d6990f600f4c8aa385f47450ff5
|
refs/heads/master
| 2021-06-10T11:30:50.741214
| 2020-06-12T23:44:37
| 2020-06-12T23:44:37
| 37,875,889
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,598
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(ggplot2)
library(stringr)
library(psych)
library(plyr)
library(DT)
library(grid)
theme_set(theme_bw()) # use the black-and-white ggplot theme throughout
n = 5000 # number of simulated subjects per group
# Shiny server: simulates two groups (A = "blue", B = "red") that may differ
# on traits S and T, fits the user-selected regression model, and renders
# (1) a scatter plot of criterion vs. predicted criterion and
# (2) a table of group means and the blue group's advantage on each measure.
shinyServer(function(input, output) {

  # Reactive simulated data set. Group A's trait means are shifted by the
  # S_adv / T_adv inputs; group B is standard normal on both traits.
  reac_d = reactive({
    set.seed(74) # for reproducible results

    # encapsulate the data generation in {} to make it reproducible
    {
      A = data.frame(S_score = rnorm(n, input$S_adv),
                     T_score = rnorm(n, input$T_adv),
                     group = rep("A", n))
      B = data.frame(S_score = rnorm(n),
                     T_score = rnorm(n),
                     group = rep("B", n))
      d = as.data.frame(rbind(A, B))

      # residual SD chosen so the latent Y has approximately unit variance
      error_size = sqrt(1 - (input$cor_S^2 + input$cor_T^2))
      d$Y = d$S_score * input$cor_S + d$T_score * input$cor_T + error_size * rnorm(n * 2)
    }

    # rescale Y to a mean-500 / SD-100 score scale
    d$Y = d$Y * 100 + 500

    # fit the user-chosen model and attach its predictions
    fit = lm(as.formula(input$model), d)
    d$Y_hat = predict(fit)

    return(d)
  })

  # Scatter plot: criterion vs. prediction, with per-group and pooled
  # regression lines and an annotation listing the validity coefficients.
  output$plot <- renderPlot({
    # get reactive data
    d = reac_d()

    # correlations of prediction with criterion, pooled and per group
    r = cor(d$Y, d$Y_hat)
    r_group = ddply(d, .(group), summarize,
                    cor = cor(Y, Y_hat))
    # (removed a leftover debugging print of r_group)
    text = str_c("r of prediction with criteria:",
                 "\nboth groups together: ", round(r, 2),
                 "\nblue group: ", round(r_group[1, 2], 2),
                 "\nred group: ", round(r_group[2, 2], 2))
    text_object = grobTree(textGrob(text, x=.02, y=.98, hjust=0, vjust = 1),
                           gp = gpar(fontsize=11)) # text size

    # plot
    ggplot(d, aes(Y_hat, Y, color = group)) +
      geom_point(alpha = .5) +
      geom_smooth(method = "lm", se = FALSE, linetype = "dashed", size = .7) +
      geom_smooth(aes(color = NULL), method = "lm", se = FALSE, linetype = "dashed", color = "black", size = .7) +
      xlab("Predicted criteria score") +
      ylab("Criteria score") +
      scale_color_manual(values = c("#4646ff", "#ff4646"), # change colors
                         name = "Group",                   # change legend title
                         labels = c("Blue", "Red")) +      # change labels
      annotation_custom(text_object)
  })

  # Table of group means for the traits, the criterion, and the predicted
  # criterion, plus the blue group's advantage on each.
  output$table = DT::renderDataTable({
    # fetch data
    d = reac_d()

    # descriptive stats per group (row 1 = group A, row 2 = group B)
    desc = ddply(d, .(group), summarize,
                 mean_S = mean(S_score),
                 mean_T = mean(T_score),
                 mean_Y = mean(Y),
                 mean_Y_hat = mean(Y_hat))

    # rows: S, T, Y, Y_hat; cols: blue group, red group, blue advantage
    d2 = matrix(nrow = 4, ncol = 3)
    # S
    d2[1, 1] = desc[1, "mean_S"]
    d2[1, 2] = desc[2, "mean_S"]
    d2[1, 3] = desc[1, "mean_S"] - desc[2, "mean_S"]
    # T (fixed: the gap previously subtracted group B's *S* mean, a
    # copy-paste bug)
    d2[2, 1] = desc[1, "mean_T"]
    d2[2, 2] = desc[2, "mean_T"]
    d2[2, 3] = desc[1, "mean_T"] - desc[2, "mean_T"]
    # Y
    d2[3, 1] = desc[1, "mean_Y"]
    d2[3, 2] = desc[2, "mean_Y"]
    d2[3, 3] = desc[1, "mean_Y"] - desc[2, "mean_Y"]
    # Y_hat
    d2[4, 1] = desc[1, "mean_Y_hat"]
    d2[4, 2] = desc[2, "mean_Y_hat"]
    d2[4, 3] = desc[1, "mean_Y_hat"] - desc[2, "mean_Y_hat"]

    d2 = round(d2, 2)
    rownames(d2) = c("Trait S", "Trait T", "Criteria score", "Predicted criteria score")
    colnames(d2) = c("Blue group", "Red group", "Blue group's advantage")

    # (fixed: a stray empty argument was being passed to datatable)
    DT::datatable(d2, options = list(searching = FALSE,
                                     ordering = FALSE,
                                     paging = FALSE,
                                     info = FALSE))
  })
})
|
26c8536ccd48207fe38f5284d36128b85182acee
|
bb826848c054b08a5d620b8e8b76dac816534ae2
|
/Clinical/clin.script/superceded/clinical_data_DW.R
|
f1af8817eece25f4d20198457b8758af97700cb9
|
[] |
no_license
|
nmm199/MB_RNAseq
|
6fde32bc89e9e560c4cc8c321fba840a6a465b09
|
4174079877faa58f24793dbac822f9af8507220c
|
refs/heads/master
| 2021-01-20T01:57:02.583285
| 2019-02-01T17:54:17
| 2019-02-01T17:54:17
| 89,350,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,605
|
r
|
clinical_data_DW.R
|
### File introduction
### File name: clinical_data_4.R
### Aim of file is to
# 1. Run basic descriptive statistics on a cohort of children treated for medulloblastoma, whose details are contained within the local clinical database
# 2. Analyse genes of interest in relation to univariate and multivariate risk prediction models for survival (overall, event-free and progression-free)
# 3. This script covers analysis up to and including univariate risk factor analysis
# 4. Multivariate analysis / AUC will be covered by a separate script
### Author: Dr Marion Mateos
### Date: July 3 2017
### R version 3.4.0 (2017-04-21)
### Platform: x86_64-pc-linux-gnu (64-bit)
### Running under: Ubuntu 16.04.2 LTS
# attached base packages:
# [1] parallel stats graphics grDevices utils datasets methods base
### Packages and version numbers
## [1] survival_2.41-3
## RColorBrewer_1.1-2
## car_2.1-4
## gplots_3.0.1
## NMF_0.20.6
## Biobase_2.36.2
## BiocGenerics_0.22.0
## cluster_2.0.6
## rngtools_1.2.4
## pkgmaker_0.22
## registry_0.3
### Libraries to be used
# install.packages('gplots')
# install.packages('survival')
library(NMF)
library(gplots)
library(car)
library(stats)
library(survival)
### Functions used
source(file = "/home/nmm199/R/MB_RNAseq/Clinical/clin.script/clinical_data_functions_DW.R")
### names of functions for info on function see source file
### "chi.sq"
### "cor.result"
### "lin.reg"
### "km.log.test"
### "km.log.test.OS"
### "cox.result.OS"
### "km.log.test.EFS"
### updatepData
### External files required
### clinical database "x.data"
### 7 molecular group data "meth.data"
### cytogenetic arm data "cytogen.data"
### RNA expression data "RNA.data"
###############################################################################
### deals with making a GOI.vsd vector this is the part that is going to change
### at the moment this is just any old expression data
### you will plug in your own goi - isoforms, novel genes, etc
### just for demonstration purposes at the moment
###############################################################################
# --- Load RNA-seq expression data and derive a putative biomarker vector ----
cat ("reading in expression data", sep ="\n")
# NOTE(review): absolute path to another user's home directory — fragile;
# consider parameterizing these input paths
RNA.data <- "/home/dan/mygit/rna_seq_mb/paper/MB.vsd.txt"
mb.vsd <- read.delim(RNA.data)
#### providing a putative biomarker
# ENSG00000136997 is the Ensembl gene id used as the gene of interest (goi)
goi <- "ENSG00000136997"
goi.vsd <- as.numeric(mb.vsd[goi,])
### the output would be a vector with a continuous variable, names equal NMB numbers
# strip the trailing "T" from sample column names to obtain NMB ids
names(goi.vsd) <- gsub("T","",names(mb.vsd))
#####################################################################################
### update your pData object
x.data <- "/home/nmm199/R/MB_RNAseq/Input data/database270617.csv"
cat ("reading in clinical database", sep ="\n")
### add in row names to original database file
pData <- read.csv(x.data, row.names = 1)
# 7-subgroup methylation calls, one row per sample
meth.data <- "/home/nmm199/R/MB_RNAseq/Input data/all7subgroupCalls.csv"
meth7 <- read.csv(meth.data, header=TRUE, sep=",", quote="\"", dec=".", row.names=1)
# chromosome-arm-level cytogenetic calls
cytogen.data <- "/home/nmm199/R/MB_RNAseq/Input data/arm_calls_clean280617.txt"
cytogen <- read.table (cytogen.data, header=T, sep="\t")
# merge clinical database with methylation and cytogenetic annotation
# (updatepData is defined in clinical_data_functions_DW.R, sourced above)
test.pData <- updatepData(pData, meth7, cytogen, pdf.file = "./temp.pdf", log.file = "./temp.log.txt")
save(test.pData, file = "/home/nmm199/R/MB_RNAseq/Clinical/test.pData")
log.file = "pDatalog.txt"
################################################################################
# run univariate clinico-pathological association analysis for the biomarker
pdf.file <- "marker.results.pdf"
results <- clinPathAssess(test.pData,goi.vsd,pdf.file = "marker.results.pdf",log.file = "marker.results.txt")
|
cc42d46614ca798f2f41d9939817b8a11f771ebc
|
a1e3f742d80a225e9a2a35e8e88b3054f5408037
|
/R/univregs.R
|
1485c4749e2e3813ff89d3e8cef88937f2b8bd76
|
[] |
no_license
|
cran/MXM
|
7590471ea7ed05944f39bf542c41a07dc831d34f
|
46a61706172ba81272b80abf25b862c38d580d76
|
refs/heads/master
| 2022-09-12T12:14:29.564720
| 2022-08-25T07:52:40
| 2022-08-25T07:52:40
| 19,706,881
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,789
|
r
|
univregs.R
|
#' Univariate regressions of a target on every column of a dataset.
#'
#' For each column of `dataset`, fits the regression model selected by `test`
#' (or `user_test`) of `target` on that single column and returns the test
#' statistic and log p-value of the univariate association.
#'
#' @param target    Response: numeric vector, factor, Surv object or matrix,
#'                  depending on the chosen test.
#' @param dataset   Matrix or data.frame of candidate predictors (one test per column).
#' @param targetID  If not -1, the target is taken to be column `targetID` of
#'                  `dataset`; that column is replaced by random noise so its
#'                  self-association is neutralized.
#' @param test      One of the MXM conditional-independence test functions
#'                  (e.g. testIndFisher, testIndPois, censIndCR, ...).
#' @param user_test Optional user-supplied test function; overrides `test`.
#' @param wei       Optional case weights, passed to the fitted models.
#' @param ncores    If > 1, the per-column fits run in a PSOCK cluster via foreach.
#' @return A list with components `stat` and `pvalue` (p-values on the log
#'         scale), one entry per column of `dataset`; NULL if `test` is not
#'         recognized.
univregs <- function(target, dataset, targetID = -1, test = NULL, user_test = NULL, wei = NULL, ncores = 1) {
  univariateModels <- list();
  dm <- dim(dataset)
  rows <- dm[1]
  cols <- dm[2]
  if (targetID != -1 ) {
    # target is a column of the dataset: extract it and neutralize that column
    target <- dataset[, targetID]
    dataset[, targetID] <- rbinom(rows, 1, 0.5)
  }
  id <- NULL
  if ( !identical(test, testIndFisher)  &  !identical(test, testIndSpearman) ) {
    ina <- NULL
    # replace constant / degenerate columns with noise; they are zeroed at the end
    id <- Rfast::check_data(dataset)
    if ( sum(id > 0) )  dataset[, id] <- rnorm(rows * length(id) )
  }
  la <- length( unique(target) )   # NOTE(review): computed but not used below
  if ( !is.null(user_test) ) {
    univariateModels <- univariateScore(target, dataset, test = user_test, wei, targetID)
  } else if ( identical(test, testIndFisher) ) {  ## Pearson's correlation
    a <- as.vector( cor(target, dataset) )
    dof <- rows - 3; #degrees of freedom
    wa <- 0.5 * log( (1 + a) / (1 - a) ) * sqrt(dof)   # Fisher z-transform
    id <- which( is.na(a) )
    if ( length(id) > 0)  wa[id] <- 0
    univariateModels$stat <- wa;
    univariateModels$pvalue <- log(2) + pt( abs(wa), dof, lower.tail = FALSE, log.p = TRUE) ;
  } else if ( identical(test, testIndSpearman) ) {  ## Spearman's correlation
    a <- as.vector( cor(target, dataset) )
    dof <- rows - 3; #degrees of freedom
    # 1.029563 corrects the variance of the z-transformed Spearman correlation
    wa <- 0.5 * log( (1 + a) / (1 - a) ) * sqrt(dof) / 1.029563
    id <- which( is.na(a) )
    if ( length(id) > 0)  wa[id] <- 0
    univariateModels$stat <- wa
    univariateModels$pvalue <- log(2) + pt( abs(wa), dof, lower.tail = FALSE, log.p = TRUE);
  } else if ( identical(test, gSquare) ) {  ## G^2 test of independence (categorical data)
    z <- cbind(dataset, target)
    if ( !is.matrix(z) )  z <- as.matrix(z)
    dc <- Rfast::colrange(z, cont = FALSE)
    a <- Rfast::g2tests(data = z, x = 1:cols, y = cols + 1, dc = dc)
    stat <- a$statistic
    univariateModels$stat <- stat
    univariateModels$pvalue <- pchisq(stat, a$df, lower.tail = FALSE, log.p = TRUE)
  } else if ( identical(test, testIndBeta) ) {  ## Beta regression
    mod <- beta.regs(target, dataset, wei, logged = TRUE, ncores = ncores)
    univariateModels$stat <- mod[, 1]
    univariateModels$pvalue <- mod[, 2]
  } else if ( identical(test, testIndMMReg) ) {  ## MM (robust) linear regression
    fit1 <- MASS::rlm(target ~ 1, maxit = 2000, method = "MM")
    lik1 <- as.numeric( logLik(fit1) )
    lik2 <- numeric(cols)
    dof <- numeric(cols)
    ina <- 1:cols
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 <- MASS::rlm(target ~ dataset[, i], maxit = 2000, method = "MM" )
        lik2[i] <- as.numeric( logLik(fit2) )
        dof[i] <- length( coef(fit2) ) - 1
      }
      stat <- 2 * (lik2 - lik1)   # likelihood-ratio statistic
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "MASS") %dopar% {
        fit2 <- MASS::rlm(target ~ dataset[, i], maxit = 2000, method = "MM" )
        lik2 <- as.numeric( logLik(fit2) )
        return( c(lik2, length( coef(fit2) ) ) )
      }
      parallel::stopCluster(cl)
      stat <- as.vector( 2 * abs(lik1 - mod[, 1]) )
      dof <- as.vector( mod[, 2] ) - 1
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndReg)  &  !is.null(wei) ) {  ## weighted linear regression (F test)
    univariateModels <- list();
    stat <- pval <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 <- lm( target ~ dataset[, i], weights = wei, y = FALSE, model = FALSE )
        tab <- anova(fit2)
        stat[i] <- tab[1, 4]
        df1 <- tab[1, 1]   ;   df2 = tab[2, 1]
        pval[i] <- pf( stat[i], df1, df2, lower.tail = FALSE, log.p = TRUE )
      }
      univariateModels$stat <- stat
      univariateModels$pvalue <- pval
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind) %dopar% {
        ww <- lm( target ~ dataset[, i], weights = wei, y = FALSE, model = FALSE )
        tab <- anova( ww )
        stat <- tab[1, 4]
        df1 <- tab[1, 1]   ;   df2 = tab[2, 1]
        pval <- pf( stat, df1, df2, lower.tail = FALSE, log.p = TRUE )
        return( c(stat, pval) )
      }
      parallel::stopCluster(cl)
      univariateModels$stat <- mod[, 1]
      univariateModels$pvalue <- mod[, 2]
    }
  } else if ( identical(test, testIndReg)  &  is.null(wei) ) {  ## unweighted linear regression (fast path)
    mod <- Rfast::regression(dataset, target, logged = TRUE)
    univariateModels$stat <- mod[, 1]
    univariateModels$pvalue <- mod[, 2]
  } else if ( identical(test, testIndMVreg) ) {  ## multivariate linear regression
    univariateModels = list();
    stat = pval = numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = lm( target ~ dataset[, i], weights = wei, y = FALSE, model = FALSE )
        tab = anova(fit2)
        stat[i] = tab[2, 3]
        df1 = tab[2, 4]   ;   df2 = tab[2, 5]
        pval[i] = pf( stat[i], df1, df2, lower.tail = FALSE, log.p = TRUE )
      }
      univariateModels$stat <- stat
      univariateModels$pvalue <- pval
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind) %dopar% {
        ww <- lm( target ~ dataset[, i], weights = wei, y = FALSE, model = FALSE )
        tab <- anova( ww )
        stat <- tab[2, 3]
        df1 <- tab[2, 4]   ;   df2 = tab[2, 5]
        pval <- pf( stat, df1, df2, lower.tail = FALSE, log.p = TRUE )
        return( c(stat, pval) )
      }
      parallel::stopCluster(cl)
      univariateModels$stat <- mod[, 1]
      univariateModels$pvalue <- mod[, 2]
    }
  } else if ( identical(test, testIndOrdinal) ) {  ## ordinal (cumulative link) regression
    lik2 <- numeric(cols)
    dof <- numeric(cols)
    fit1 <- ordinal::clm(target ~ 1, weights = wei)
    lik1 <- as.numeric( logLik(fit1) )
    df1 <- length( coef(fit1) )
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        mat <- model.matrix(target ~ dataset[, i] )
        fit2 <- ordinal::clm.fit(target, mat, weights = wei)
        lik2[i] <- as.numeric( fit2$logLik )
        dof[i] <- length( coef(fit2) ) - df1
      }
      stat = 2 * (lik2 - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "ordinal") %dopar% {
        mat <- model.matrix(target ~ dataset[, i] )
        fit2 <- ordinal::clm.fit(target, mat, weights = wei)
        lik2 <- as.numeric( fit2$logLik )
        return( c(lik2, length( coef(fit2) ) ) )
      }
      parallel::stopCluster(cl)
      stat = 2 * (mod[, 1] - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2] - df1, lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndMultinom) ) {  ## multinomial logistic regression
    target = as.factor( as.numeric( target ) )
    lik2 = numeric(cols)
    dof = numeric(cols)
    fit1 = nnet::multinom(target ~ 1, trace = FALSE, weights = wei)
    lik1 = as.numeric( logLik(fit1) )
    df1 = length( coef(fit1) )
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = nnet::multinom(target ~ dataset[, i], trace = FALSE, weights = wei )
        lik2[i] = as.numeric( logLik(fit2) )
        dof[i] = length( coef(fit2) ) - df1
      }
      stat = 2 * (lik2 - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "nnet") %dopar% {
        fit2 = nnet::multinom(target ~ dataset[, i], weights = wei)
        lik2 = as.numeric( logLik(fit2 ) )
        return( c(lik2, length( coef(fit2) ) ) )
      }
      parallel::stopCluster(cl)
      stat = 2 * (mod[, 1] - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2] - df1, lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndLogistic)  &  is.matrix(dataset)  &  is.null(wei) ) {  ## logistic regression (fast path)
    if ( is.factor(target) )   target <- as.numeric(target) - 1
    mod <- Rfast::univglms( target, dataset, oiko = "binomial", logged = TRUE )
    univariateModels$stat <- mod[, 1]
    univariateModels$pvalue <- mod[, 2]
  } else if ( identical(test, testIndLogistic)  &  is.data.frame(dataset)  &  is.null(wei) ) {  ## logistic regression (data.frame fast path)
    if ( is.factor(target) )   target <- as.numeric(target) - 1
    mod <- Rfast::univglms2( target, dataset, oiko = "binomial", logged = TRUE )
    univariateModels$stat <- mod[, 1]
    univariateModels$pvalue <- mod[, 2]
  } else if ( identical(test, testIndLogistic)  &  !is.null(wei) ) {  ## weighted logistic regression
    fit1 = glm(target ~ 1, binomial, weights = wei)
    lik1 = fit1$deviance
    lik2 = numeric(cols)
    dof = numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = glm( target ~ dataset[, i], binomial, weights = wei )
        lik2[i] = fit2$deviance
        dof[i] = length( coef(fit2) ) - 1
      }
      stat = lik1 - lik2   # deviance difference = LR statistic
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind) %dopar% {
        fit2 = glm( target ~ dataset[, i], binomial, weights = wei )
        lik2 = fit2$deviance
        return( c(lik2, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat = lik1 - mod[, 1]
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndBinom) ) {  ## binomial regression (successes / trials)
    wei <- target[, 2]
    y <- target[, 1] / wei
    fit1 = glm(y ~ 1, binomial, weights = wei)
    lik1 = fit1$deviance
    lik2 = numeric(cols)
    dof = numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = glm( y ~ dataset[, i], binomial, weights = wei )
        lik2[i] = fit2$deviance
        dof[i] = length( coef(fit2) ) - 1
      }
      stat = lik1 - lik2
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      wei <- target[, 2]
      y <- target[, 1] / wei
      # FIX: iterate over 1:cols — `ina` is NULL in this branch, so the
      # original foreach never ran; also return the deviance (not logLik)
      # so `stat = lik1 - mod[, 1]` matches the serial branch.
      mod <- foreach(i = 1:cols, .combine = rbind) %dopar% {
        fit2 = glm( y ~ dataset[, i], binomial, weights = wei )
        lik2 = fit2$deviance
        return( c(lik2, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat = as.vector( lik1 - mod[, 1] )
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndPois)  &  is.matrix(dataset)  &  is.null(wei) ) {  ## Poisson regression (fast path)
    mod <- Rfast::univglms( target, dataset, oiko = "poisson", logged = TRUE )
    univariateModels$stat <- mod[, 1]
    univariateModels$pvalue <- mod[, 2]
  } else if ( identical(test, testIndPois)  &  is.data.frame(dataset)  &  is.null(wei) ) {  ## Poisson regression (data.frame fast path)
    mod <- Rfast::univglms2( target, dataset, oiko = "poisson", logged = TRUE )
    univariateModels$stat <- mod[, 1]
    univariateModels$pvalue <- mod[, 2]
  } else if ( identical(test, testIndPois)  &  !is.null(wei) ) {  ## weighted Poisson regression
    fit1 = glm(target ~ 1, poisson, weights = wei)
    lik1 = fit1$deviance
    lik2 = numeric(cols)
    dof = numeric(cols)
    ina <- 1:cols
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in ina ) {
        fit2 = glm( target ~ dataset[, i], poisson, weights = wei )
        lik2[i] = fit2$deviance
        dof[i] = length( coef(fit2) ) - 1
      }
      stat = lik1 - lik2
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = ina, .combine = rbind) %dopar% {
        fit2 = glm( target ~ dataset[, i], poisson, weights = wei )
        return( c(fit2$deviance, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat = as.vector( lik1 - mod[, 1] )
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndNB) ) {  ## negative binomial regression
    lik1 <- MASS::glm.nb( target ~ 1, weights = wei )$twologlik
    if ( ncores <= 1 | is.null(ncores) ) {
      lik2 <- dof <- numeric(cols)
      for ( i in 1:cols ) {
        fit2 = MASS::glm.nb( target ~ dataset[, i], weights = wei )
        lik2[i] = fit2$twologlik
        dof[i] = length( coef(fit2) ) - 1
      }
      stat = lik2 - lik1   # twologlik is already 2 * logLik
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      # FIX: iterate over 1:cols — `ina` is NULL in this branch in the original
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "MASS") %dopar% {
        fit2 = MASS::glm.nb( target ~ dataset[, i], weights = wei )
        return( c(fit2$twologlik, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat <- as.vector(mod[, 1]) - lik1
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndNormLog) ) {  ## Gaussian regression with log link (F test)
    fit1 = glm(target ~ 1, family = gaussian(link = log), weights = wei)
    lik1 = fit1$deviance
    lik2 = numeric(cols)
    dof = numeric(cols)
    ina <- 1:cols
    phi <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in ina ) {
        fit2 = glm( target ~ dataset[, i], family = gaussian(link = log), weights = wei )
        lik2[i] = fit2$deviance
        phi[i] <- summary(fit2)[[14]]   # dispersion estimate
        dof[i] = length( fit2$coefficients )
      }
      stat = (lik1 - lik2 ) / (dof - 1) / phi
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, dof - 1, rows - dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = ina, .combine = rbind) %dopar% {
        fit2 = glm( target ~ dataset[, i], family = gaussian(link = log), weights = wei )
        return( c(fit2$deviance, length( fit2$coefficients ), summary(fit2)[[14]] ) )
      }
      parallel::stopCluster(cl)
      stat = as.vector( lik1 - mod[, 1] ) / (mod[, 2] - 1) / mod[, 3]
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, mod[, 2] - 1, rows - mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndGamma) ) {  ## Gamma regression with log link (F test)
    fit1 = glm(target ~ 1, family = Gamma(link = log), weights = wei)
    lik1 = fit1$deviance
    lik2 = numeric(cols)
    dof = numeric(cols)
    ina <- 1:cols
    phi <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in ina ) {
        fit2 = glm( target ~ dataset[, i], family = Gamma(link = log), weights = wei )
        lik2[i] = fit2$deviance
        phi[i] = summary(fit2)[[ 14 ]]
        dof[i] = length( fit2$coefficients)
      }
      stat = (lik1 - lik2) / (dof - 1) / phi
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, dof - 1, rows - dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = ina, .combine = rbind) %dopar% {
        fit2 = glm( target ~ dataset[, i], family = Gamma(link = log), weights = wei )
        return( c(fit2$deviance, length( fit2$coefficients ), summary(fit2)[[14]] ) )
      }
      parallel::stopCluster(cl)
      stat = as.vector( lik1 - mod[, 1] ) / (mod[, 2] - 1) / mod[, 3]
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, mod[, 2] - 1, rows - mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndZIP) ) {  ## zero-inflated Poisson regression
    moda <- zip.regs(target, dataset, wei, logged = TRUE, ncores = ncores)
    univariateModels$stat <- moda[, 1]
    univariateModels$pvalue <- moda[, 2]
  } else if ( identical(test, testIndRQ) ) {  ## median (quantile) regression
    fit1 = quantreg::rq(target ~ 1, weights = wei)
    stat = pval = numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = quantreg::rq(target ~ dataset[, i], weights = wei )
        ww = anova(fit1, fit2, test = "rank")
        df1 = as.numeric( ww[[1]][1] )
        df2 = as.numeric( ww[[1]][2] )
        stat[i] = as.numeric( ww[[1]][3] )
        pval[i] = pf(stat[i], df1, df2, lower.tail = FALSE, log.p = TRUE)
      }
      univariateModels$stat <- stat
      univariateModels$pvalue <- pval
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      # FIX: iterate over 1:cols — `ina` is NULL in this branch in the original
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "quantreg") %dopar% {
        fit2 = quantreg::rq(target ~ dataset[, i], weights = wei )
        ww = anova(fit1, fit2, test = "rank")
        df1 = as.numeric( ww[[1]][1] )
        df2 = as.numeric( ww[[1]][2] )
        stat = as.numeric( ww[[1]][3] )
        pval = pf(stat, df1, df2, lower.tail = FALSE, log.p = TRUE)
        return( c(stat, pval ) )
      }
      parallel::stopCluster(cl)
      univariateModels$stat <- as.vector( mod[, 1] )
      univariateModels$pvalue <- as.vector( mod[, 2] )
    }
  } else if ( identical(test, testIndIGreg) ) {  ## inverse Gaussian regression
    fit1 = glm(target ~ 1, family = inverse.gaussian(link = log), weights = wei)
    lik1 = as.numeric( logLik(fit1) )
    lik2 = numeric(cols)
    dof = numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = glm( target ~ dataset[, i], family = inverse.gaussian(link = log), weights = wei )
        lik2[i] = as.numeric( logLik(fit2) )
        dof[i] = length( coef(fit2) ) - 1
      }
      stat = 2 * (lik2 - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind) %dopar% {
        fit2 = glm( target ~ dataset[, i], family = inverse.gaussian(link = log), weights = wei )
        lik2 = as.numeric( logLik(fit2) )
        return( c(lik2, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat = as.vector( 2 * (mod[, 1] - lik1) )
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, censIndCR) ) {  ## Cox proportional hazards regression
    stat = numeric(cols)
    dof = numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = survival::coxph( target ~ dataset[, i], weights = wei)
        res <- anova(fit2)
        dof[i] <- res[2, 3]
        stat[i] <- res[2, 2]
      }
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "survival") %dopar% {
        fit2 = survival::coxph( target ~ dataset[, i], weights = wei )
        res <- anova(fit2)
        return( c(res[2, 2], res[2, 3] ) )
      }
      parallel::stopCluster(cl)
      univariateModels$stat <- mod[, 1]
      univariateModels$pvalue <- pchisq(mod[, 1], mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, censIndWR) ) {  ## Weibull regression
    fit1 <- survival::survreg(target ~ 1, weights = wei)
    lik1 <- as.numeric( logLik(fit1) )
    lik2 <- numeric(cols)
    dof <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 <- survival::survreg( target ~ dataset[, i], weights = wei, control=list(iter.max = 5000) )
        lik2[i] <- as.numeric( logLik(fit2) )
        dof[i] <- length( coef(fit2) ) - 1
      }
      stat <- 2 * (lik2 - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "survival") %dopar% {
        fit2 <- survival::survreg( target ~ dataset[, i], weights = wei, control=list(iter.max = 5000) )
        lik2 <- as.numeric( logLik(fit2) )
        return( c(lik2, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat <- as.vector( 2 * (mod[, 1] - lik1) )
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, censIndLLR) ) {  ## log-logistic regression (comment fixed; was mislabelled Weibull)
    fit1 <- survival::survreg(target ~ 1, weights = wei, dist = "loglogistic")
    lik1 <- as.numeric( logLik(fit1) )
    lik2 <- numeric(cols)
    dof <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 <- survival::survreg( target ~ dataset[, i], weights = wei, control = list(iter.max = 5000), dist = "loglogistic" )
        lik2[i] <- as.numeric( logLik(fit2) )
        dof[i] <- length( coef(fit2) ) - 1
      }
      stat <- 2 * (lik2 - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "survival") %dopar% {
        fit2 <- survival::survreg( target ~ dataset[, i], weights = wei, control = list(iter.max = 5000), dist = "loglogistic" )
        lik2 <- as.numeric( logLik(fit2) )
        return( c(lik2, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat <- as.vector( 2 * (mod[, 1] - lik1) )
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndTobit) ) {  ## Tobit (censored Gaussian) regression
    fit1 = survival::survreg(target ~ 1, weights = wei, dist = "gaussian")
    lik1 = as.numeric( logLik(fit1) )
    lik2 = numeric(cols)
    dof = numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 = survival::survreg( target ~ dataset[, i], weights = wei, dist = "gaussian" )
        lik2[i] = as.numeric( logLik(fit2) )
        dof[i] = length( coef(fit2) ) - 1
      }
      stat = 2 * (lik2 - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "survival") %dopar% {
        fit2 = survival::survreg( target ~ dataset[, i], weights = wei, dist = "gaussian" )
        lik2 = as.numeric( logLik(fit2) )
        return( c(lik2, length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat = as.vector( 2 * (mod[, 1] - lik1) )
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndClogit) ) {  ## conditional logistic regression
    case <- as.logical(target[, 1]);  ## case indicator
    subject <- target[, 2]  #the patient id (stratum)
    stat <- numeric(cols)
    dof <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 <- survival::clogit( case ~ dataset[, i] + strata(subject) )
        dof[i] <- length( fit2$coefficients )
        stat[i] <- diff( fit2$loglik )
      }
      univariateModels$stat <- 2 * stat
      # FIX: p-value must use the same 2 * loglik-difference statistic that is
      # stored (the original used the unscaled difference)
      univariateModels$pvalue <- pchisq(2 * stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "survival") %dopar% {
        fit2 <- survival::clogit(case ~ dataset[, i] + strata(subject) )
        return( c( diff(fit2$loglik) , length( fit2$coefficients ) ) )
      }
      parallel::stopCluster(cl)
      univariateModels$stat <- 2 * as.vector( mod[, 1] )
      # FIX: same scaling correction as the serial branch
      univariateModels$pvalue <- pchisq(2 * mod[, 1], mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, censIndER) ) {  ## exponential regression
    fit1 <- survival::survreg(target ~ 1, dist = "exponential", weights = wei)
    lik1 <- as.numeric( logLik(fit1) )
    lik2 <- numeric(cols)
    dof <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in 1:cols ) {
        fit2 <- survival::survreg( target ~ dataset[, i], dist = "exponential", weights = wei )
        lik2[i] <- as.numeric( logLik(fit2) )
        dof[i] <- length( coef(fit2) ) - 1
      }
      stat <- 2 * (lik2 - lik1)
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = 1:cols, .combine = rbind, .packages = "survival") %dopar% {
        fit2 <- survival::survreg( target ~ dataset[, i], dist = "exponential", weights = wei )
        return( c(as.numeric( logLik(fit2) ), length( coef(fit2) ) - 1 ) )
      }
      parallel::stopCluster(cl)
      stat = as.vector( 2 * (mod[, 1] - lik1) )
      univariateModels$stat <- stat
      univariateModels$pvalue <- pchisq(stat, mod[ ,2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndQBinom) ) {  ## quasi-binomial regression (F test)
    fit1 <- glm(target ~ 1, family = quasibinomial(link = logit), weights = wei)
    lik1 <- fit1$deviance
    lik2 <- numeric(cols)
    dof <- numeric(cols)
    ina <- 1:cols
    phi <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in ina ) {
        fit2 <- glm( target ~ dataset[, i], family = quasibinomial(link = logit), weights = wei )
        lik2[i] <- fit2$deviance
        phi[i] <- summary(fit2)[[ 14 ]]
        dof[i] <- length( fit2$coefficients)
      }
      stat <- (lik1 - lik2) / (dof - 1) / phi
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, dof - 1, rows - dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = ina, .combine = rbind) %dopar% {
        fit2 <- glm( target ~ dataset[, i], family = quasibinomial(link = logit), weights = wei )
        return( c(fit2$deviance, length( fit2$coefficients ), summary(fit2)[[14]] ) )
      }
      parallel::stopCluster(cl)
      stat <- as.vector( lik1 - mod[, 1] ) / (mod[, 2] - 1) / mod[, 3]
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, mod[, 2] - 1, rows - mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndQPois) ) {  ## quasi-Poisson regression (F test)
    fit1 <- glm(target ~ 1, family = quasipoisson(link = log), weights = wei)
    lik1 <- fit1$deviance
    lik2 <- numeric(cols)
    dof <- numeric(cols)
    ina <- 1:cols
    phi <- numeric(cols)
    if ( ncores <= 1 | is.null(ncores) ) {
      for ( i in ina ) {
        fit2 <- glm( target ~ dataset[, i], family = quasipoisson(link = log), weights = wei )
        lik2[i] <- fit2$deviance
        phi[i] <- summary(fit2)[[ 14 ]]
        dof[i] <- length( fit2$coefficients)
      }
      stat <- (lik1 - lik2) / (dof - 1) / phi
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, dof - 1, rows - dof, lower.tail = FALSE, log.p = TRUE)
    } else {
      cl <- parallel::makePSOCKcluster(ncores)
      doParallel::registerDoParallel(cl)
      mod <- foreach(i = ina, .combine = rbind) %dopar% {
        fit2 <- glm( target ~ dataset[, i], family = quasipoisson(link = log), weights = wei )
        return( c(fit2$deviance, length( fit2$coefficients ), summary(fit2)[[14]] ) )
      }
      parallel::stopCluster(cl)
      stat <- as.vector( lik1 - mod[, 1] ) / (mod[, 2] - 1) / mod[, 3]
      univariateModels$stat <- stat
      univariateModels$pvalue <- pf(stat, mod[, 2] - 1, rows - mod[, 2], lower.tail = FALSE, log.p = TRUE)
    }
  } else if ( identical(test, testIndSPML) ) {  ## circular (angular) regression
    if ( !is.matrix(dataset) )  dataset <- as.matrix(dataset)
    mod <- Rfast::spml.regs(target, dataset, logged = TRUE, parallel = (ncores > 1) )
    univariateModels$stat <- mod[, 1]
    univariateModels$pvalue <- mod[, 2]
  } else  univariateModels <- NULL
  if ( !is.null(univariateModels) )  {
    # neutralize the target's own column and any degenerate columns detected above
    if (targetID != - 1) {
      univariateModels$stat[targetID] <- 0
      univariateModels$pvalue[targetID] <- log(1)
    }
    if ( sum( id > 0 ) > 0 ) {
      univariateModels$stat[id] <- 0
      univariateModels$pvalue[id] <- log(1)
    }
  }
  univariateModels
}
|
18e2787d745174d3427dbf7061f5f845c5beb900
|
50cb903e9365b7461c2d50c208bb0e272f666278
|
/R/Nancy rF.R
|
c9511e3af969c82d1f50667c048ac82daa646fa7
|
[] |
no_license
|
GillisLabAtMoffitt/myelosuppression_CHIP
|
96024396ed7374429e847ab650c05e2f21a80349
|
c96d7b7783bf98579e5c945e33bd2c0d45c2db5c
|
refs/heads/master
| 2022-11-28T09:00:44.392125
| 2020-08-07T14:08:21
| 2020-08-07T14:08:21
| 274,204,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,941
|
r
|
Nancy rF.R
|
# --- Load CHIP/CRASH data and fit a random forest for variable importance ----
rf.data = read.csv('CHIP_CRASH_data_for_stats_v07_randomforest_2.csv',stringsAsFactors = F,header=T)
# calculate random forest to get variable importance
rf.data <- data.frame(rf.data)
head(rf.data)
# keep only the outcome plus candidate clinical predictors
rf.data <- rf.data %>% select("Case_Control", "CHIP", "Age", "Gender", "Race", "Ethnicity",
"Smoking", "Mets",
"Prior_chemo", "Prior_rad",
"BaseANC", "BaseHGB", "BasePLT", "BaseWBC"# ,
# "MAX2heme"
)
# recode empty-string ethnicity as an explicit 'unknown' level
rf.data$Ethnicity[rf.data$Ethnicity==''] = 'unknown'
rf.data <- rf.data %>%
mutate(Case_Control = factor(Case_Control, labels = c("Cases", "Controls"), levels= c(1, 0)))
rf.data = rf.data %>% mutate_if(is.character, as.factor)
# median-impute missing baseline blood counts
# NOTE(review): columns 11:14 are hard-coded (BaseANC..BaseWBC) — this breaks
# silently if the column selection above changes; consider selecting by name
for(i in 11:14){
rf.data[is.na(rf.data[,i]),i] = median(rf.data[,i],na.rm=T)
}
set.seed(1485)
# rf <- randomForest(rf.data, as.factor(rf.data$Case_Control),ntree=2000,importance=TRUE)#,sampsize=c(samp.size,samp.size))
rf <- randomForest(Case_Control ~ ., data=rf.data,ntree=2000,importance=TRUE)
print(rf)
rf.feat <- rf$importance
######################################################################
# Plotting Classification Trees with the plot.rpart and rattle pckages
cat("Getting Features...")
# rank features by mean decrease in Gini impurity and keep those above 0.2
feats <- rf.feat[order(rf.feat[,"MeanDecreaseGini"],decreasing=TRUE),"MeanDecreaseGini"]
write.csv(rf.feat,"output/RF_features_gini_scores.csv")
png("output/RF_features_gini_plot.png",width=600,height=600,units='px')
plot(feats,type="l",ylab="Mean Decrease Gini Score",xlab="Genes")
abline(h=.2,col="red") #cutoff
feats <- feats[feats>.2]
abline(v=length(feats),col="red")
graphics.off()
# build the decision-tree modelling frame: response + selected features
probe.feats <- names(feats)
tree.data <- rf.data[,probe.feats]
tree.data <- cbind(as.character(rf.data$Case_Control),tree.data)
colnames(tree.data)[1] <- "Response"
tree.data <- data.frame(tree.data)
head(tree.data)
# ---- Leave-one-out cross-validation (LOOCV) of an rpart decision tree ----
# Make big tree
form <- as.formula(Response ~ .)
# Leave one out cross-validation LOOCV
library(rpart)
set.seed(1485)
# Random visiting order over all rows (a permutation, not a bootstrap).
leave.out <- sample(1:nrow(tree.data),replace=FALSE)
loocv.results <- NULL
for(i in 1:nrow(tree.data)){
# Train on all rows except one held-out sample, then predict that sample.
train <- tree.data[-leave.out[i],]
test <- tree.data[leave.out[i],]
sample.id <- rownames(tree.data)[leave.out[i]]
# minsplit=2/minbucket=1 lets the tree grow to full depth (no pruning).
all.tree <- rpart(form,train,control=rpart.control(minsplit=2,minbucket=1))
# cat("Pruning Tree...")
# prune.val <- all.tree$cptable[which.min(all.tree$cptable[,"xerror"])[1],"CP"]
# train.pfit <- prune(all.tree,cp=prune.val)
# cat("Done.\n")
#don't prune
train.pfit <- all.tree
#cat("Plotting Tree...")
#fancyRpartPlot(all.tree,type=1,extra=2)
#pdf(paste0("Decision_Tree_CVfold-",fold,"_trainData_",format(Sys.Date(),"%d%b%Y"),".pdf"),width=18,height=12)
#fancyRpartPlot_DR(all.tree)
#graphics.off()
#cat("Done.\n")
cat("Predicting using test data for calls...LOOCV:",i,"/",length(leave.out),"\n")
pred.tree <- predict(train.pfit,test)
# Predicted class = column with the highest class probability.
calls <- colnames(pred.tree)[apply(pred.tree,1,which.max)]
# Variables actually used as split nodes in this fold's tree
# (rpart$frame$var, with "<leaf>" rows removed), joined by ";".
features <- unlist(unique(train.pfit$frame[1]))
features <- paste(features[grep("<leaf>",features,invert=TRUE)],collapse=";")
temp <- c(i,sample.id,calls,as.character(test[["Response"]]),features)
#calls <- cbind(rownames(pred.tree),calls)
#actual <- phenotype
#calls <- cbind(calls,actual)
loocv.results <- rbind(loocv.results,temp)
}
colnames(loocv.results) <- c("iteration","sample_id_left_out","calls","actual","features")
head(loocv.results)
#Calculate percent accuracy
# score: 1 when the call (col 3) matches the actual label (col 4), else 0.
score <- apply(loocv.results,1,function(x) if(x[3]==x[4]){return(1)}else{return(0)})
loocv.results <- cbind(loocv.results,score)
perc.correct <- sum(score)/length(score)
# Per-predicted-class accuracy (i.e. precision for each call level).
for(i in unique(loocv.results[,"calls"])){
print(i)
print(sum(as.numeric(loocv.results[which(loocv.results[,"calls"]==i),"score"]))/length(which(loocv.results[,"calls"]==i)))
}
# Build the LOOCV confusion matrix: rows = predicted, cols = actual.
actual <- loocv.results[,"actual"]
cv.call.mat <- matrix(0,nrow=length(unique(actual)),ncol=length(unique(actual)))
colnames(cv.call.mat) <- unique(actual)
rownames(cv.call.mat) <- unique(actual)
cat("Calculing Model Performance...")
for(i in 1:nrow(loocv.results)){
act <- loocv.results[i,"actual"]
pred <- loocv.results[i,"calls"]
cv.call.mat[pred,act] <- cv.call.mat[pred,act]+1
}
### create final tree based on loocv features
# Union of every feature used in any LOOCV fold.
final.feats <- unique(unlist(strsplit(loocv.results[,"features"],split=";")))
final.tree.data <- tree.data[c("Response",final.feats)]
# Refit a single tree on ALL data, restricted to those features.
final.tree <- rpart(form,final.tree.data,control=rpart.control(minsplit=2,minbucket=1))
# cat("Pruning Tree...")
# prune.val <- final.tree$cptable[which.min(final.tree$cptable[,"xerror"])[1],"CP"]
# final.pfit <- prune(final.tree,cp=prune.val)
# cat("Done.\n")
# don't prune
final.pfit <- final.tree
#cat("Plotting Tree...")
#fancyRpartPlot(all.tree,type=1,extra=2)
pdf(paste0("output/Decision_Tree_ALLData.pdf"),width=18,height=12)
fancyRpartPlot(final.pfit)
graphics.off()
#cat("Done.\n")
# Resubstitution predictions: predicting back on the training data, so the
# accuracy computed below is optimistic relative to the LOOCV estimate.
cat("Predicting back on data for calls...")
pred.tree <- predict(final.pfit,final.tree.data)
calls <- colnames(pred.tree)[apply(pred.tree,1,which.max)]
calls <- cbind(rownames(pred.tree),calls)
actual <- as.character(final.tree.data[,"Response"])
calls <- cbind(calls,actual)
score <- apply(calls,1,function(x) if(x[2]==x[3]){return(1)}else{return(0)})
calls <- cbind(calls,score)
perc.correct <- sum(score)/length(score)
cat("Done.\n")
# Split variables actually used by the final tree, ";"-joined.
final.features <- unlist(unique(final.pfit$frame[1]))
final.features <- paste(final.features[grep("<leaf>",final.features,invert=TRUE)],collapse=";")
# All-data confusion matrix: rows = predicted, cols = actual.
final.call.mat <- matrix(0,nrow=length(unique(actual)),ncol=length(unique(actual)))
colnames(final.call.mat) <- unique(actual)
rownames(final.call.mat) <- unique(actual)
cat("Calculing Model Performance...")
for(i in 1:nrow(calls)){
act <- calls[i,"actual"]
pred <- calls[i,"calls"]
final.call.mat[pred,act] <- final.call.mat[pred,act]+1
}
summary.file <- NULL
# ---- Summary statistics from the LOOCV and all-data confusion matrices ----
# For each response level, derive TP/FP/TN/FN from the confusion matrix
# (rows = predicted, cols = actual) and compute standard performance metrics.
# NOTE(review): the OR standard-error term indexes cells [1,1]..[2,2], so it
# assumes a binary outcome (2x2 confusion matrix).
for(resp in unique(colnames(cv.call.mat))){
j <- which(rownames(cv.call.mat)==resp)
tp <- cv.call.mat[j,j]
fp <- sum(cv.call.mat[j,-j])
tn <- sum(cv.call.mat[-j,-j])
fn <- sum(cv.call.mat[-j,j])
sens <- round(tp/(tp+fn),digits = 3)
spec <- round(tn/(tn+fp), digits = 3)
ba <- mean(c(sens,spec))  # balanced accuracy
OR <- (tp/fp)/(fn/tn)     # odds ratio = (tp*tn)/(fp*fn)
OR.se <- sqrt((1/cv.call.mat[1,1])+(1/cv.call.mat[1,2])+(1/cv.call.mat[2,1])+(1/cv.call.mat[2,2]))
OR.ci.l <- exp(log(OR)-(1.96*OR.se))
OR.ci.h <- exp(log(OR)+(1.96*OR.se))
RR <- (tp/(tp+fp)) / (fn/(tn+fn))
TPR <- sens
FPR <- 1-spec
PPV <- tp/(tp+fp)
NPV <- tn/(tn+fn)
# BUG FIX: a valid 2x2 contingency table pairs tp with fn in one margin and
# fp with tn in the other: matrix(c(tp,fp,fn,tn),nrow=2) = [tp fn; fp tn]
# (rows = actual, cols = predicted). The original c(tp,fp,tn,fn) built
# [tp tn; fp fn], whose margins are meaningless and whose Fisher odds ratio
# contradicts the OR computed above. Also use $p.value instead of [[1]].
p.val <- fisher.test(matrix(c(tp,fp,fn,tn),nrow=2))$p.value
# BUG FIX: `features` was a stale leftover from the LAST LOOCV iteration;
# report the union of features used across all LOOCV folds instead.
temp.out <- c("LOOCV",resp,sens,spec,ba,OR,OR.ci.l,OR.ci.h,RR,TPR,FPR,PPV,NPV,p.val,paste(final.feats,collapse=";"))
summary.file <- rbind(summary.file,temp.out)
}
# Same metrics for the all-data (resubstitution) confusion matrix.
# (Unused locals test.n / train.n / n.cancer.* from the original were removed;
# they were never written to the output.)
for(resp in unique(colnames(final.call.mat))){
j <- which(rownames(final.call.mat)==resp)
tp <- final.call.mat[j,j]
fp <- sum(final.call.mat[j,-j])
tn <- sum(final.call.mat[-j,-j])
fn <- sum(final.call.mat[-j,j])
sens <- round(tp/(tp+fn),digits = 3)
spec <- round(tn/(tn+fp), digits = 3)
ba <- mean(c(sens,spec))
OR <- (tp/fp)/(fn/tn)
OR.se <- sqrt((1/final.call.mat[1,1])+(1/final.call.mat[1,2])+(1/final.call.mat[2,1])+(1/final.call.mat[2,2]))
OR.ci.l <- exp(log(OR)-(1.96*OR.se))
OR.ci.h <- exp(log(OR)+(1.96*OR.se))
RR <- (tp/(tp+fp)) / (fn/(tn+fn))
TPR <- sens
FPR <- 1-spec
PPV <- tp/(tp+fp)
NPV <- tn/(tn+fn)
# BUG FIX: same contingency-table correction as in the LOOCV loop above.
p.val <- fisher.test(matrix(c(tp,fp,fn,tn),nrow=2))$p.value
temp.out <- c("ALL",resp,sens,spec,ba,OR,OR.ci.l,OR.ci.h,RR,TPR,FPR,PPV,NPV,p.val,final.features)
summary.file <- rbind(summary.file,temp.out)
}
# ---- Write out calls, confusion-matrix counts and summary statistics ----
#out.path <- "Data/Predictive_Model/DecisionTree/ALL_AML/noCNV/"
write.csv(calls,"output/Decision_Tree_Calls_ALL_Data.csv",row.names=FALSE)
write.csv(loocv.results,"output/Decision_Tree_Calls_LOOCV_Data.csv",row.names=FALSE)
## Counts for LOOCV and ALL
# The first write() creates/truncates the file; everything else appends.
write("#rows=predicted;cols=actual","output/Decision_Tree_CallCounts.csv",sep="")
write("LOOCV","output/Decision_Tree_CallCounts.csv",sep="",append = TRUE)
cv.call.mat.out <- cbind(rownames(cv.call.mat),cv.call.mat)
write.table(cv.call.mat.out,"output/Decision_Tree_CallCounts.csv",sep=",",append=TRUE,row.names=FALSE)
write("\nAll Data","output/Decision_Tree_CallCounts.csv",sep="",append = TRUE)
final.call.mat.out <- cbind(rownames(final.call.mat),final.call.mat)
write.table(final.call.mat.out,"output/Decision_Tree_CallCounts.csv",sep=",",append=TRUE,row.names=FALSE)
##write out summary file
colnames(summary.file) <-c("Model","Response","sens","spec","ba","OR","OR_95ci_l","OR_95ci_h","RR","TPR","FPR","PPV","NPV","Fisher_p","features")
write.csv(summary.file,"output/Decision_Tree_SummaryResults.csv",row.names=FALSE)
# For Baseline prediction ----
# Second analysis: same pipeline, but the outcome modelled below is the
# baseline-ANC group rather than case/control status.
rf.data = read.csv('CHIP_CRASH_data_for_stats_v07_randomforest_2.csv',stringsAsFactors = F,header=T)
# calculate random forest to get variable importance
rf.data <- data.frame(rf.data)
head(rf.data)
rf.data$BaseANC
median(rf.data$BaseANC, na.rm = TRUE)
# NOTE(review): this selection keeps 12 columns (BaseANC is column 11,
# VAF is column 12) -- relevant to the imputation loop just below.
rf.data <- rf.data %>% select("Case_Control", "CHIP", "Age", "Gender", "Race", "Ethnicity",
"Smoking", "Mets",
"Prior_chemo", "Prior_rad",
"BaseANC", "VAF"# ,
# "MAX2heme"
)
# Median-impute missing values in the numeric baseline columns.
# BUG FIX: this loop was copied from the first analysis and ran over columns
# 11:14, but after the select() above this data frame has only 12 columns
# (11 = BaseANC, 12 = VAF); indexing column 13 fails with
# "undefined columns selected".
for(i in 11:12){
rf.data[is.na(rf.data[,i]),i] = median(rf.data[,i],na.rm=T)
}
# ---- ANC-group model: data cleaning, random forest, feature selection ----
# Blank Ethnicity strings become an explicit "unknown" category.
rf.data$Ethnicity[rf.data$Ethnicity==''] = 'unknown'
rf.data <- rf.data %>%
mutate(Case_Control = factor(Case_Control, labels = c("Cases", "Controls"), levels= c(1, 0))) %>%
# New outcome: split baseline ANC at its median into high/low groups.
# (BaseANC was median-imputed above, so na.rm here is a no-op safeguard.)
mutate(Base_ANC_grp = case_when(
BaseANC >= median(rf.data$BaseANC, na.rm = TRUE) ~ "ANC_high",
BaseANC < median(rf.data$BaseANC, na.rm = TRUE) ~ "ANC_low"
)) %>%
mutate_if(is.character, as.factor) %>%
# Drop the raw value so the model cannot trivially recover its own outcome.
select(-BaseANC)
set.seed(1485)
# rf <- randomForest(rf.data, as.factor(rf.data$Case_Control),ntree=2000,importance=TRUE)#,sampsize=c(samp.size,samp.size))
rf <- randomForest(Base_ANC_grp ~ ., data=rf.data,ntree=2000,importance=TRUE)
print(rf)
rf.feat <- rf$importance
######################################################################
# Plotting Classification Trees with the plot.rpart and rattle packages
# NOTE(review): the output file names below are identical to those of the
# first (case/control) analysis, so running this section overwrites them.
cat("Getting Features...")
feats <- rf.feat[order(rf.feat[,"MeanDecreaseGini"],decreasing=TRUE),"MeanDecreaseGini"]
write.csv(rf.feat,"output/RF_features_gini_scores.csv")
png("output/RF_features_gini_plot.png",width=600,height=600,units='px')
plot(feats,type="l",ylab="Mean Decrease Gini Score",xlab="Genes")
abline(h=.2,col="red") #cutoff
# Keep only features whose Gini score exceeds the 0.2 cutoff.
feats <- feats[feats>.2]
abline(v=length(feats),col="red")
graphics.off()
probe.feats <- names(feats)
# Tree input: selected features plus the ANC-group outcome as "Response".
tree.data <- rf.data[,probe.feats]
tree.data <- cbind(as.character(rf.data$Base_ANC_grp),tree.data)
colnames(tree.data)[1] <- "Response"
tree.data <- data.frame(tree.data)
head(tree.data)
# ---- LOOCV of an rpart tree for the ANC-group outcome (mirrors above) ----
# Make big tree
form <- as.formula(Response ~ .)
# Leave one out cross-validation LOOCV
library(rpart)
set.seed(1485)
# Random visiting order over all rows (a permutation, not a bootstrap).
leave.out <- sample(1:nrow(tree.data),replace=FALSE)
loocv.results <- NULL
for(i in 1:nrow(tree.data)){
# Train on all rows except one held-out sample, then predict that sample.
train <- tree.data[-leave.out[i],]
test <- tree.data[leave.out[i],]
sample.id <- rownames(tree.data)[leave.out[i]]
# minsplit=2/minbucket=1 lets the tree grow to full depth (no pruning).
all.tree <- rpart(form,train,control=rpart.control(minsplit=2,minbucket=1))
# cat("Pruning Tree...")
# prune.val <- all.tree$cptable[which.min(all.tree$cptable[,"xerror"])[1],"CP"]
# train.pfit <- prune(all.tree,cp=prune.val)
# cat("Done.\n")
#don't prune
train.pfit <- all.tree
#cat("Plotting Tree...")
#fancyRpartPlot(all.tree,type=1,extra=2)
#pdf(paste0("Decision_Tree_CVfold-",fold,"_trainData_",format(Sys.Date(),"%d%b%Y"),".pdf"),width=18,height=12)
#fancyRpartPlot_DR(all.tree)
#graphics.off()
#cat("Done.\n")
cat("Predicting using test data for calls...LOOCV:",i,"/",length(leave.out),"\n")
pred.tree <- predict(train.pfit,test)
# Predicted class = column with the highest class probability.
calls <- colnames(pred.tree)[apply(pred.tree,1,which.max)]
# Split variables used by this fold's tree, ";"-joined.
features <- unlist(unique(train.pfit$frame[1]))
features <- paste(features[grep("<leaf>",features,invert=TRUE)],collapse=";")
temp <- c(i,sample.id,calls,as.character(test[["Response"]]),features)
#calls <- cbind(rownames(pred.tree),calls)
#actual <- phenotype
#calls <- cbind(calls,actual)
loocv.results <- rbind(loocv.results,temp)
}
colnames(loocv.results) <- c("iteration","sample_id_left_out","calls","actual","features")
head(loocv.results)
#Calculate percent accuracy
score <- apply(loocv.results,1,function(x) if(x[3]==x[4]){return(1)}else{return(0)})
loocv.results <- cbind(loocv.results,score)
perc.correct <- sum(score)/length(score)
# Per-predicted-class accuracy (precision for each call level).
for(i in unique(loocv.results[,"calls"])){
print(i)
print(sum(as.numeric(loocv.results[which(loocv.results[,"calls"]==i),"score"]))/length(which(loocv.results[,"calls"]==i)))
}
# LOOCV confusion matrix: rows = predicted, cols = actual.
actual <- loocv.results[,"actual"]
cv.call.mat <- matrix(0,nrow=length(unique(actual)),ncol=length(unique(actual)))
colnames(cv.call.mat) <- unique(actual)
rownames(cv.call.mat) <- unique(actual)
cat("Calculing Model Performance...")
for(i in 1:nrow(loocv.results)){
act <- loocv.results[i,"actual"]
pred <- loocv.results[i,"calls"]
cv.call.mat[pred,act] <- cv.call.mat[pred,act]+1
}
### create final tree based on loocv features
# Union of every feature used in any LOOCV fold.
final.feats <- unique(unlist(strsplit(loocv.results[,"features"],split=";")))
final.tree.data <- tree.data[c("Response",final.feats)]
# Refit a single tree on ALL data, restricted to those features.
final.tree <- rpart(form,final.tree.data,control=rpart.control(minsplit=2,minbucket=1))
# cat("Pruning Tree...")
# prune.val <- final.tree$cptable[which.min(final.tree$cptable[,"xerror"])[1],"CP"]
# final.pfit <- prune(final.tree,cp=prune.val)
# cat("Done.\n")
# don't prune
final.pfit <- final.tree
#cat("Plotting Tree...")
#fancyRpartPlot(all.tree,type=1,extra=2)
# NOTE(review): this overwrites the PDF from the case/control analysis.
pdf(paste0("output/Decision_Tree_ALLData.pdf"),width=18,height=12)
fancyRpartPlot(final.pfit)
graphics.off()
#cat("Done.\n")
# Resubstitution predictions (predicting back on the training data), so the
# accuracy below is optimistic relative to the LOOCV estimate.
cat("Predicting back on data for calls...")
pred.tree <- predict(final.pfit,final.tree.data)
calls <- colnames(pred.tree)[apply(pred.tree,1,which.max)]
calls <- cbind(rownames(pred.tree),calls)
actual <- as.character(final.tree.data[,"Response"])
calls <- cbind(calls,actual)
score <- apply(calls,1,function(x) if(x[2]==x[3]){return(1)}else{return(0)})
calls <- cbind(calls,score)
perc.correct <- sum(score)/length(score)
cat("Done.\n")
# Split variables actually used by the final tree, ";"-joined.
final.features <- unlist(unique(final.pfit$frame[1]))
final.features <- paste(final.features[grep("<leaf>",final.features,invert=TRUE)],collapse=";")
# All-data confusion matrix: rows = predicted, cols = actual.
final.call.mat <- matrix(0,nrow=length(unique(actual)),ncol=length(unique(actual)))
colnames(final.call.mat) <- unique(actual)
rownames(final.call.mat) <- unique(actual)
cat("Calculing Model Performance...")
for(i in 1:nrow(calls)){
act <- calls[i,"actual"]
pred <- calls[i,"calls"]
final.call.mat[pred,act] <- final.call.mat[pred,act]+1
}
summary.file <- NULL
# ---- Summary statistics from the LOOCV and all-data confusion matrices ----
# For each response level, derive TP/FP/TN/FN from the confusion matrix
# (rows = predicted, cols = actual) and compute standard performance metrics.
# NOTE(review): the OR standard-error term indexes cells [1,1]..[2,2], so it
# assumes a binary outcome (2x2 confusion matrix).
for(resp in unique(colnames(cv.call.mat))){
j <- which(rownames(cv.call.mat)==resp)
tp <- cv.call.mat[j,j]
fp <- sum(cv.call.mat[j,-j])
tn <- sum(cv.call.mat[-j,-j])
fn <- sum(cv.call.mat[-j,j])
sens <- round(tp/(tp+fn),digits = 3)
spec <- round(tn/(tn+fp), digits = 3)
ba <- mean(c(sens,spec))  # balanced accuracy
OR <- (tp/fp)/(fn/tn)     # odds ratio = (tp*tn)/(fp*fn)
OR.se <- sqrt((1/cv.call.mat[1,1])+(1/cv.call.mat[1,2])+(1/cv.call.mat[2,1])+(1/cv.call.mat[2,2]))
OR.ci.l <- exp(log(OR)-(1.96*OR.se))
OR.ci.h <- exp(log(OR)+(1.96*OR.se))
RR <- (tp/(tp+fp)) / (fn/(tn+fn))
TPR <- sens
FPR <- 1-spec
PPV <- tp/(tp+fp)
NPV <- tn/(tn+fn)
# BUG FIX: a valid 2x2 contingency table pairs tp with fn in one margin and
# fp with tn in the other: matrix(c(tp,fp,fn,tn),nrow=2) = [tp fn; fp tn]
# (rows = actual, cols = predicted). The original c(tp,fp,tn,fn) built
# [tp tn; fp fn], whose margins are meaningless and whose Fisher odds ratio
# contradicts the OR computed above. Also use $p.value instead of [[1]].
p.val <- fisher.test(matrix(c(tp,fp,fn,tn),nrow=2))$p.value
# BUG FIX: `features` was a stale leftover from the LAST LOOCV iteration;
# report the union of features used across all LOOCV folds instead.
temp.out <- c("LOOCV",resp,sens,spec,ba,OR,OR.ci.l,OR.ci.h,RR,TPR,FPR,PPV,NPV,p.val,paste(final.feats,collapse=";"))
summary.file <- rbind(summary.file,temp.out)
}
# Same metrics for the all-data (resubstitution) confusion matrix.
# (Unused locals test.n / train.n / n.cancer.* from the original were removed;
# they were never written to the output.)
for(resp in unique(colnames(final.call.mat))){
j <- which(rownames(final.call.mat)==resp)
tp <- final.call.mat[j,j]
fp <- sum(final.call.mat[j,-j])
tn <- sum(final.call.mat[-j,-j])
fn <- sum(final.call.mat[-j,j])
sens <- round(tp/(tp+fn),digits = 3)
spec <- round(tn/(tn+fp), digits = 3)
ba <- mean(c(sens,spec))
OR <- (tp/fp)/(fn/tn)
OR.se <- sqrt((1/final.call.mat[1,1])+(1/final.call.mat[1,2])+(1/final.call.mat[2,1])+(1/final.call.mat[2,2]))
OR.ci.l <- exp(log(OR)-(1.96*OR.se))
OR.ci.h <- exp(log(OR)+(1.96*OR.se))
RR <- (tp/(tp+fp)) / (fn/(tn+fn))
TPR <- sens
FPR <- 1-spec
PPV <- tp/(tp+fp)
NPV <- tn/(tn+fn)
# BUG FIX: same contingency-table correction as in the LOOCV loop above.
p.val <- fisher.test(matrix(c(tp,fp,fn,tn),nrow=2))$p.value
temp.out <- c("ALL",resp,sens,spec,ba,OR,OR.ci.l,OR.ci.h,RR,TPR,FPR,PPV,NPV,p.val,final.features)
summary.file <- rbind(summary.file,temp.out)
}
# ---- Write out calls, confusion-matrix counts and summary statistics ----
# NOTE(review): these paths are identical to the first analysis's outputs,
# so this section overwrites the case/control result files.
#out.path <- "Data/Predictive_Model/DecisionTree/ALL_AML/noCNV/"
write.csv(calls,"output/Decision_Tree_Calls_ALL_Data.csv",row.names=FALSE)
write.csv(loocv.results,"output/Decision_Tree_Calls_LOOCV_Data.csv",row.names=FALSE)
## Counts for LOOCV and ALL
# The first write() creates/truncates the file; everything else appends.
write("#rows=predicted;cols=actual","output/Decision_Tree_CallCounts.csv",sep="")
write("LOOCV","output/Decision_Tree_CallCounts.csv",sep="",append = TRUE)
cv.call.mat.out <- cbind(rownames(cv.call.mat),cv.call.mat)
write.table(cv.call.mat.out,"output/Decision_Tree_CallCounts.csv",sep=",",append=TRUE,row.names=FALSE)
write("\nAll Data","output/Decision_Tree_CallCounts.csv",sep="",append = TRUE)
final.call.mat.out <- cbind(rownames(final.call.mat),final.call.mat)
write.table(final.call.mat.out,"output/Decision_Tree_CallCounts.csv",sep=",",append=TRUE,row.names=FALSE)
##write out summary file
colnames(summary.file) <-c("Model","Response","sens","spec","ba","OR","OR_95ci_l","OR_95ci_h","RR","TPR","FPR","PPV","NPV","Fisher_p","features")
write.csv(summary.file,"output/Decision_Tree_SummaryResults.csv",row.names=FALSE)
|
006bf30df306957e06c25410d9f62bb6ffb30dc5
|
d0a6b7960ff3acdb6dff56e19dc782963fb0ee1d
|
/man/covid_data.Rd
|
98e77ca05293f112b17a15f6be1659ee64fe5a91
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hamgamb/aitidata
|
30b813827e58cca22340aa7df222f70339030c37
|
84191c07b9e89204520b5d7f507f3fcd4c9da1e1
|
refs/heads/master
| 2023-04-22T21:37:18.528490
| 2021-04-27T02:22:07
| 2021-04-27T02:22:07
| 357,006,003
| 0
| 0
|
CC0-1.0
| 2021-04-12T06:09:22
| 2021-04-11T23:50:51
|
R
|
UTF-8
|
R
| false
| true
| 934
|
rd
|
covid_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{covid_data}
\alias{covid_data}
\title{COVID-19 Geographic data}
\format{
A dataframe with 6 variables:
\describe{
\item{date}{Date}
\item{state_name_2016}{State name}
\item{indicator}{COVID-19 indicators}
\item{value}{Value}
\item{statistical_area}{Number of digits in statistical area}
\item{statistical_area_code}{Statistical area code}
}
}
\usage{
covid_data
}
\description{
A dataset containing the jobkeeper applications, jobseeker payments, and derived impact from COVID-19 at
a geographical level. Jobkeeper and Jobseeker data are based on the SA2 classification, and employment
impact is based on the SA4 classification. This dataset combines data from jobseeker_sa2, jobkeeper_sa2,
payroll_sa4, and small_area_labour_market datasets and is intended for use on the AITI Economic
Indicators dashboard.
}
\keyword{datasets}
|
1ab1fe38f35dc94df0734c0a68e59882ab193949
|
57144c2d9a8c77faa7766d3003efe553bea2a9eb
|
/R/loonGrob_l_layer_graph.R
|
b30c60131398017fe1f069192a3332e23d5f9d8e
|
[] |
no_license
|
Bhanditz/loon
|
63565f059d3c0ccb0756ede8c6536690649e44df
|
540eecd6dc5efa9c3369a69cae78b94ee5c54857
|
refs/heads/master
| 2020-04-19T08:46:48.989059
| 2018-12-10T08:10:03
| 2018-12-10T08:10:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,735
|
r
|
loonGrob_l_layer_graph.R
|
#' @rdname loonGrob
#'
#' @examples
#'
#' \dontrun{
#' ## graph examples
#'
#' G <- completegraph(names(iris[,-5]))
#' LG <- linegraph(G)
#' g <- l_graph(LG)
#'
#' nav0 <- l_navigator_add(g)
#' l_configure(nav0, label = 0)
#' con0 <- l_context_add_geodesic2d(navigator=nav0, data=iris[,-5])
#'
#' nav1 <- l_navigator_add(g, from = "Sepal.Length:Petal.Width",
#' to = "Petal.Length:Petal.Width", proportion = 0.6)
#' l_configure(nav1, label = 1)
#' con1 <- l_context_add_geodesic2d(navigator=nav1, data=iris[,-5])
#'
#' nav2 <- l_navigator_add(g, from = "Sepal.Length:Petal.Length",
#' to = "Sepal.Width:Petal.Length", proportion = 0.5)
#' l_configure(nav2, label = 2)
#' con2 <- l_context_add_geodesic2d(navigator=nav2, data=iris[,-5])
#'
#' # To print directly use either
#' plot(g)
#' # or
#' grid.loon(g)
#' # or to save structure
#' library(grid)
#' lgrob <- loonGrob(g)
#' grid.newpage(); grid.draw(lgrob)
#' }
#'
#' @export
# loonGrob method for a loon graph layer: assembles the grid grob tree for
# the graph -- edges, node glyphs and orbit labels -- plus, when navigators
# are present on the widget, their path and point grobs.
#
# `target`: an l_layer_graph object carrying its widget in attr(., "widget").
# `name`, `gp`, `vp`: standard grid grob arguments, passed through.
# Returns an empty grob when no node is active, otherwise a gTree.
loonGrob.l_layer_graph <- function(target, name = NULL, gp = NULL, vp = NULL) {
widget <- l_create_handle(attr(target, "widget"))
states <- get_layer_states(widget)
active <- states$active
if (!any(active)) {
# Nothing active to draw: return an empty placeholder grob.
grob(name = name, gp = gp, vp = vp)
} else {
# Note: these locals shadow the helper functions of the same names;
# each helper is called exactly once here, before the shadowing binding
# is used, so this is safe (if confusing).
edgesGrob <- edgesGrob(states)
nodeGlyphGrob <- nodeGlyphGrob(states)
labelGrob <- labelGrob(states)
# add navigators
nav_ids <- l_navigator_ids(widget)
if(length(nav_ids) == 0){
# No navigator, just return the graph
gTree(children =
gList(
edgesGrob,
nodeGlyphGrob,
labelGrob),
name = name, gp = gp, vp = vp
)
} else {
# have navigator, need path and navigator as well
activeNavigator <- widget["activeNavigator"]
# Draw order: edges, then navigation paths, then nodes/labels, then
# the navigator points on top.
gTree(
children = gList(
edgesGrob,
do.call(gList,
lapply(nav_ids,
function(nav_id){
navPathGrob(states,
navigator = l_create_handle(c(widget, nav_id)),
name = paste0("navigation path edges", nav_id))
})
),
nodeGlyphGrob,
labelGrob,
do.call(gList,
lapply(nav_ids,
function(nav_id){
navPointsGrob(activeNavigator,
states,
navigator = l_create_handle(c(widget, nav_id)),
name = paste0("navigation points edges", nav_id))
})
)
),
name = if (is.null(name)) "graph" else name, gp = gp, vp = vp
)
}
}
}
# Edge grob for the graph layer: one polylineGrob per active node, drawing
# the edges from that node to each of its active target nodes.
#
# `states`: layer states as returned by get_layer_states(); uses $nodes,
#   $x, $y, $active, $activeEdge, $from, $to, $colorEdge.
# `name`: optional grob name; defaults to "graph edges".
# Returns a gTree; nodes with no outgoing active edges contribute an
# (invisible) condGrob placeholder so child names stay stable.
edgesGrob <- function(states = NULL, name = NULL){
active <- states$active
activeNode <- states$nodes[active]
activeX <- states$x[active]
activeY <- states$y[active]
isActiveEdge <- states$activeEdge
gTree(children =
do.call(
gList,
lapply(seq_along(activeNode),
function(i) {
nodeFrom <- activeNode[i]
# Indices (within the active edges) that start at this node.
nodeFrom_EdgeId <- which(states$from[isActiveEdge] == nodeFrom)
if (length(nodeFrom_EdgeId) != 0){
nodeTo <- states$to[isActiveEdge][nodeFrom_EdgeId]
# FIX: dropped the original's redundant "== TRUE".
nodeTo_CoordId <- which(activeNode %in% nodeTo)
numNodesTo <- length(nodeTo_CoordId)
cols <- states$colorEdge[isActiveEdge][nodeFrom_EdgeId]
# Coordinates alternate: all "from" copies first, then the
# target coordinates; `id` pairs them into separate lines.
x <- unit(c(rep(activeX[i], numNodesTo),
activeX[nodeTo_CoordId]),
"native")
y <- unit(c(rep(activeY[i], numNodesTo),
activeY[nodeTo_CoordId]),
"native")
# FIX: seq_len() instead of 1:numNodesTo, which produced
# c(1, 0) when all of this node's targets are inactive.
polylineGrob(x, y,
id=rep(seq_len(numNodesTo), 2),
gp=gpar(col= cols, lwd=1),
name = paste("edge", i))
} else {
condGrob(test = FALSE,
grobFun = polylineGrob,
name = paste("edge", i, "missing")
)
}
}
)
),
name = if (is.null(name)) "graph edges" else name
)
}
# Orbit-label grob: one textGrob per active node, offset from the node by
# `orbitDistance` (in mm) along the node's orbit angle. Labels are only
# rendered when states$showOrbit is TRUE (handled via condGrob).
labelGrob <- function(states = NULL, name = NULL){
keep <- states$active
node_names <- states$nodes[keep]
node_x <- states$x[keep]
node_y <- states$y[keep]
node_angle <- states$orbitAngle[keep]
offset <- states$orbitDistance
label_grobs <- lapply(seq_along(node_names),
function(k) {
dx <- unit(offset * cos(node_angle[k]), "mm")
dy <- unit(offset * sin(node_angle[k]), "mm")
condGrob(test = states$showOrbit,
grobFun = textGrob,
name = paste("label", k),
label = node_names[k],
x = unit(node_x[k], "native") + dx,
y = unit(node_y[k], "native") + dy,
gp = gpar(fontsize = 8, # TODO find this somewhere
col = l_getOption("foreground")))
})
gTree(children = do.call(gList, label_grobs),
name = if (is.null(name)) "graph labels" else name
)
}
# Node-glyph grob: one pointsGrob per active node. Glyphs mapping to pch
# 21:24 take a separate fill colour, so they get the display colour as fill
# and the foreground colour as outline; all other glyphs are drawn directly
# in the display colour.
nodeGlyphGrob <- function(states = NULL, name = NULL){
keep <- states$active
sizes <- as_r_point_size(states$size[keep])
is_sel <- states$selected[keep]
colours <- get_display_color(states$color[keep], is_sel)
symbols <- glyph_to_pch(states$glyph[keep])
# pch 21-24 are the fillable point symbols
has_fill <- symbols %in% 21:24
xs <- states$x[keep]
ys <- states$y[keep]
node_grobs <- lapply(seq_along(has_fill),
function(k){
graphic_pars <- if (has_fill[k]) {
gpar(fill = colours[k],
col = l_getOption("foreground"),
cex = sizes[k])
} else {
gpar(col = colours[k], cex = sizes[k])
}
pointsGrob(x = xs[k],
y = ys[k],
pch = symbols[k],
gp = graphic_pars,
name = paste("node", k)
)
})
gTree(
children = do.call(gList, node_grobs),
name = if (is.null(name)) "graph nodes" else name
)
}
# Navigation-path grob for one navigator: the visited part of the path is
# drawn with thick lines, the unvisited part with thin lines, and the edge
# currently being traversed is split at the navigator's `proportion`.
#
# `states`: graph layer states ($x, $y, $nodes).
# `navigator`: loon navigator handle; supplies 'color', 'from', 'to' and
#   'proportion'.
# Returns a gTree named "navigation path" (or `name`), or an empty grob
# when the navigator has no path yet.
navPathGrob <- function(states, navigator, name = NULL){
x <- as.numeric(states$x)
y <- as.numeric(states$y)
node <- states$nodes
color <- as_hex6color(navigator['color'])
from <- navigator['from']
to <- navigator['to']
prop <- navigator['proportion']
# Map path node names to coordinate indices.
# FIX: seq_along() instead of 1:length(...), which evaluated to c(1, 0)
# for an empty path; also dropped the redundant "== T" comparisons.
fromId <- sapply(seq_along(from), function(i){which(node %in% from[i])})
toId <- sapply(seq_along(to), function(i){which(node %in% to[i])})
if(length(from) == 0 || length(to) == 0) {
grob(name = name)
} else {
# Thick segments connecting the already-visited nodes (needs >= 2).
visitedLinesGrob <-
if(length(from) < 2) {
grob(name = name)
} else {
do.call(gList,
lapply(seq_len(length(from) - 1),  # FIX: seq_len over 1:(n-1)
function(i){
linesGrob(unit(c(x[fromId[i]], x[fromId[i+1]]),
"native"),
unit( c(y[fromId[i]], y[fromId[i+1]]),
"native"),
gp = gpar(col = color,
lwd = 9), #TODO find the line widths
name = paste("line", i, "(visited)")
)
}
)
)
}
# Thin segments connecting the still-unvisited nodes (needs >= 2).
unvisitedLinesGrob <-
if(length(to) < 2){
grob(name = name)
} else {
do.call(gList,
lapply(seq_len(length(to) - 1),  # FIX: seq_len over 1:(n-1)
function(i){
linesGrob(unit( c(x[toId[i]], x[toId[i+1]]), "native"),
unit( c(y[toId[i]], y[toId[i+1]]), "native"),
gp = gpar(col = color,
lwd = 3), #TODO find the line widths
name = paste("line", i, "(unvisited)")
)
}
)
)
}
# The edge being traversed: thick up to the interpolated navigator
# position (xn, yn), thin from there to the next target node.
xn <- (1 - prop) * x[fromId[length(fromId)]] + prop * x[toId[1]]
yn <- (1 - prop) * y[fromId[length(fromId)]] + prop * y[toId[1]]
betweenLinesGrob <- gList(linesGrob(unit(c(x[fromId[length(fromId)]], xn), "native"),
unit(c(y[fromId[length(fromId)]], yn), "native"),
gp = gpar(col = color, lwd = 9)), #TODO find the line widths
linesGrob(unit(c(xn, x[toId[1]]), "native"),
unit(c(yn, y[toId[1]]), "native"),
gp = gpar(col = color, lwd = 3)) #TODO find the line widths
)
gTree(children = gList(unvisitedLinesGrob,
visitedLinesGrob,
betweenLinesGrob),
name = if (is.null(name)) "navigation path" else name
)
}
}
# size of navigator is arbitrary, just as close as loon object.
# Navigator-point grob: draws the navigator circle (with its label) and a
# small dot on the final target node. Three layouts:
#   - no path yet: circle pinned at (0.1, 0.9) npc,
#   - a single start node and no targets: circle on that node,
#   - otherwise: circle interpolated along the current edge by `proportion`.
# The active navigator is highlighted with a select-colour outline.
navPointsGrob <- function(activeNavigator,
states,
navigator,
name){
x <- as.numeric(states$x)
y <- as.numeric(states$y)
node <- states$nodes
color <- as_hex6color(navigator['color'])
from <- navigator['from']
to <- navigator['to']
prop <- navigator['proportion']
label <- navigator['label']
# Map path node names to coordinate indices (the "== TRUE" is redundant;
# %in% binds tighter than ==, so this is (node %in% from[i]) == TRUE).
fromId <- sapply(1:length(from), function(i){which(node %in% from[i] == TRUE)})
toId <- sapply(1:length(to), function(i){which(node %in% to[i] == TRUE)})
# Tk colours may be 12-hex-digit "#rrrrggggbbbb"; reduce to 6 digits.
sel_color <- as.character(l_getOption("select-color"))
if (grepl("^#", sel_color) && nchar(sel_color) == 13) {
sel_color <- hex12tohex6(sel_color)
}
# Highlight the navigator only when it is the widget's active one.
circleGp <- if(length(activeNavigator) != 0) {
if(activeNavigator == navigator) {
gpar(fill = color, lwd = 4, col = sel_color) # TODO line width?
} else {
gpar(fill = color)
}
} else {gpar(fill = color)}
fromRadius <- unit(5.5, "mm")
if(length(from) == 0){
# No path yet: park the navigator in the top-left corner.
xx <- unit(0.1, "npc")
yy <- unit(0.9, "npc")
gTree(children = gList(circleGrob(xx, yy, r = fromRadius,
gp = circleGp,
name = "navigator circle"),
if(length(label) != 0) {
textGrob(paste(label, collapse = " "),
xx, yy,
gp = gpar(fill = "black", fontsize = 9),
name = "navigator label") # font size?
}
),
name = if (is.null(name)) "navigator" else name
)
} else if(length(from) == 1 & length(to) == 0) {
# Path has a start node but no target: sit on the start node.
xx <- unit(x[fromId], "native")
yy <- unit(y[fromId], "native")
gTree(children = gList(
circleGrob(x = xx,
y = yy,
r = fromRadius,
gp = circleGp,
name = "navigator circle"
),
if(length(label) != 0) {
textGrob(paste(label, collapse = " "), xx, yy,
gp = gpar(fill = "black", fontsize = 9),
name = "navigator label")
}
),
name = if (is.null(name)) "navigator" else name
)
} else {
# Mid-path: interpolate the navigator position between the last visited
# node and the next target by the traversal proportion.
xx <- unit( (1 - prop) * x[fromId[length(fromId)]] + prop * x[toId[1]], "native")
yy <- unit( (1 - prop) * y[fromId[length(fromId)]] + prop * y[toId[1]], "native")
toRadius <- unit(1, "mm")
gTree(children = gList(
# 'to' dot
circleGrob(unit(x[toId[length(toId)]], "native"),
unit(y[toId[length(toId)]], "native"),
r = toRadius,
gp = gpar(fill = color),
name = "to dot"
),
# 'from' navigator
circleGrob(xx, yy,
r = fromRadius,
gp = circleGp,
name = "navigator circle"
),
# 'text' on the navigator
condGrob(
test = length(label) != 0,
grobFun = textGrob,
name = "navigator label",
label = paste(label, collapse = " "),
x = xx, y = yy,
gp = gpar(fill = "black", fontsize = 9)
)
),
name = if (is.null(name)) "navigator" else name
)
}
}
|
bb8f20712b36a4163a49a285f2d99a49383a4618
|
0dcd2d146b5b0f2a7c889e53480e49c9eb32c79c
|
/rSPACE/R/UtilityFunctions.R
|
b85c4c41c183b0fcf200716739b25f8351ee3e07
|
[] |
no_license
|
Jordan-Heiman/Full_rSPACE
|
4e276ea86f239f5d9bf480cdc166bed2724b73ae
|
6ef641ac4f63dbd1a251c399150477a42091f24a
|
refs/heads/master
| 2023-03-17T08:27:34.808051
| 2018-01-30T21:01:16
| 2018-01-30T21:01:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
UtilityFunctions.R
|
# Little functions to make things work

# Return `val` when `x` is NULL, otherwise return `x` unchanged.
# BUG FIX: the original used ifelse(is.null(x), val, x); ifelse() shapes its
# result like its *test* argument (always length 1 here), so a vector `x`
# was silently truncated to its first element and attributes were dropped.
# A plain if/else returns `x` intact.
setDefault<-function(x,val)
  if (is.null(x)) val else x
9d89fe9081ee0dd90fff8eaef4ff8524df21c4d5
|
d582b1d42f2c548abf6b032f04b5aeefb48d848c
|
/man/bic.join.strings.Rd
|
28dbaef99f3e1e19d6d3282ca73da09cb18ca5dc
|
[] |
no_license
|
caitlinjones/bicrnaseq
|
82dc6dacb7ad44af1637c449525d6e13cbb3a0ba
|
3a74d39c329ab19aa73d2f26eb2d678d111f20fd
|
refs/heads/master
| 2021-05-23T05:46:24.178527
| 2018-04-28T00:00:27
| 2018-04-28T00:00:27
| 94,919,835
| 0
| 1
| null | 2017-06-20T17:54:02
| 2017-06-20T17:54:02
| null |
UTF-8
|
R
| false
| true
| 462
|
rd
|
bic.join.strings.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bic_util.R
\name{bic.join.strings}
\alias{bic.join.strings}
\title{Convert a character vector to a delimited string}
\usage{
bic.join.strings(x, delim)
}
\arguments{
\item{x}{character vector}
\item{delim}{delimiter to join vector elements}
}
\value{
a string
}
\description{
Given a character vector and a delimiter, join all
items in the vector, separated by the given delimiter
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.