blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5de30ef9cc39833600c2f9114327f0d57b36b17a
|
abaa167322f05f70b5cde591c438ffdb8e679dba
|
/180425_B3_pop_analysis.R
|
83a1edebfb3b671106a8a9e08f635a04955df637
|
[] |
no_license
|
KatrinaHarris23/PALTEanalysis
|
e305b981fb27b2b8dfdcfeba8990730400080823
|
d14084d32be5afa622d9e2e5bc4d732b168d80ef
|
refs/heads/master
| 2021-06-03T10:54:41.597406
| 2021-05-24T14:30:30
| 2021-05-24T14:30:30
| 141,172,636
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,550
|
r
|
180425_B3_pop_analysis.R
|
#this is the B3 population of the LTE that I am trying to automate the analysis of
#The input for this script is a csv file of a breseq output. Can use the breseq_cat script that nate/Chris Dietrick made to get an excel file. You do have to do a little bit of manipulation on this file in excel first, described in comments below.
#this script will take the list of mutations and subtract mutations:1. found in the ancestor, 2. that don't reach a cumulative frequency of 10%, 3. that only appear at one time point, 4. that are fixed from the first measured time point, 5. that are almost fixed from the first time point, and 6. that do not change in frequency by at least 10% over the course of the experiment.
#the output of this script is an excel sheet that is formatted to go directly into Katya's matlab scripts to define mutational cohorts, infer ancestry, and make muller plots. there are many places that data frames can be printed for other purposes.
library("vegan")
library("plyr")
library("RColorBrewer")
library("ggplot2")
library("data.table")
library("dplyr")
library("reshape2")
library("xlsx")
library("scales")
theme_set(theme_bw())
# NOTE(review): setwd() hard-codes a personal path, so the script only runs as-is on this machine.
setwd("/Users/katrina/Desktop/working/B3")
#first thing is downloading time course breseq data from beagle
#/home/kah231/scripts/BreseqCat.py -d /home/kah231/PA/analysis/LTEpitt_seq/B3
#and the ancester
#/home/kah231/scripts/SingleBreseqCat.py -f /home/kah231/PA/analysis/LTEpitt_seq/KBH5_WT/output/index.html
#
#
##get rid of spaces in all columns except description.
#convert arrows ← and → o < and > get rid of all commas, remove Â, ‑,–
#need to make sure days are just days not "B317" etc.
##saved the snp tab as a csv format
#FIle locations
#/Users/katrina/Desktop/working
#called:
# B3_Breseq_Output.csv
# KBH5_WT_Breseq_Output.csv
#ancestral/background SNPs
ancestor_snps <- read.csv("/Users/katrina/Desktop/working/KBH5_WT_Breseq_Output.csv", header=TRUE)
head(ancestor_snps)
View(ancestor_snps) #want SeqID column because that is where the positions are
#B3 population SNPs
B3_snps <- read.csv("B3_Breseq_Output.csv",header=TRUE)
View(B3_snps) #again SeqID is what we want to match
nrow(B3_snps) #6771
#get the SNPs that are found in both files according to the position numbers that are
#actually stored in the SeqID column
#filter1 = no mutations found in the ancestral clone
#keep only rows whose position (stored in SeqID) does NOT also appear in the ancestor
B3_filter1 <- B3_snps[ !(B3_snps$SeqID %in% ancestor_snps$SeqID), ]
#see how many rows are in the data frame now
nrow(B3_filter1) #5333
#create a data frame of the shared mutations (the ones taken out with filter 1)
B3_filter1_refmatch <- B3_snps[ (B3_snps$SeqID %in% ancestor_snps$SeqID), ]
nrow(B3_filter1_refmatch)#1438
write.csv(B3_filter1_refmatch, file = "B3_ancestral.csv")
#Alternative, stricter filter that removes by gene name rather than by position.
#I don't like using this because some genes really can get more than one mutation, especially if it is a big gene
B3_genelevelfilter <- B3_snps[ !(B3_snps$Gene %in% ancestor_snps$Gene), ]
nrow(B3_genelevelfilter)#4956
#now see how many it took out
B3_genelevelfilter_refmatch <- B3_snps[ (B3_snps$Gene %in% ancestor_snps$Gene), ]
nrow(B3_genelevelfilter_refmatch)#1815
#this is done to the original data set through filter1
#strip the '%' symbol so the frequency can be converted to a number.
#FIX: the original call was gsub("%", "", x, n) — the stray, undefined `n` was
#being passed positionally as gsub()'s ignore.case argument and errors at run time
#unless an object called n happens to exist in the workspace.
B3_filter1$Mutation <- gsub("%", "", as.character(B3_filter1$Mutation))
B3_filter1$Mutation <- as.numeric(as.character(B3_filter1$Mutation))
#combine annotation::gene::description in one column
B3_filter1$desc_gene_annot <- paste(B3_filter1$Annotation,B3_filter1$Gene,B3_filter1$Description, sep="::")
B3_filter1$desc_gene_annot <- as.factor(B3_filter1$desc_gene_annot)
#combine desc_gene_annot and position (SeqID) in one column
B3_filter1$details <- paste(B3_filter1$desc_gene_annot, B3_filter1$SeqID, sep=";;")
B3_filter1$details <- as.factor(B3_filter1$details)
#combine details with the actual mutation
B3_filter1$info <- paste(B3_filter1$details, B3_filter1$Position, sep=";;")
B3_filter1$info <- as.factor(B3_filter1$info)
View(B3_filter1)
#melt data frame for casting
m_B3_filter1 <- melt(B3_filter1, id=c("Sample","Evidence","SeqID","Position","Annotation","Gene","Description","desc_gene_annot", "details", "info"),measure.vars = c("Mutation"))
head(m_B3_filter1)
View(m_B3_filter1)
#cast data frame - organizing with each mutation as the rows and the frequency of that mutation on a given day as the columns
B3_cast1 <- t(dcast(m_B3_filter1,Sample~info,mean, value.var = "value",fill=0))
B3_cast1 <- as.data.frame(B3_cast1,header=TRUE)
#the first row currently holds the sample (day) labels; promote it to the column names
colnames(B3_cast1) <- as.character(unlist(B3_cast1[1,]))
colnames(B3_cast1)
View(B3_cast1)
#add an artificial day-0 column of all zeros (the ancestral starting frequency)
B3_cast1$"0" <- 0.0
View(B3_cast1)
#drop the label row now that it has been promoted to column names
B3_cast1 <- B3_cast1[-1,]
View(B3_cast1)
#need to reorder the columns in ascending order
B3_column_order <- c("0","17", "25", "44","66","75", "90")
colnames(B3_cast1)
setcolorder(B3_cast1, B3_column_order)
View(B3_cast1)
nrow(B3_cast1)#2349
#transpose the matrix
t_B3 <- as.data.frame(t(B3_cast1))
View(t_B3)
#figure out what class the frequency values are in the matrix - they need to be numeric
class(t_B3[2,2]) #factor
ncol(t_B3)#2349 <- for sanity this check should match up with what you found previously for a nrow count
#convert frequency values to numeric class - start as "character"
# NOTE(review): the column count 2349 is hard-coded here and on the next two lines;
# it must be updated whenever the input data changes.
t_B3[,2:2349] <-(apply(t_B3[,2:2349], 2, function(x) as.numeric(as.character(x))))
B3 <- transpose(t_B3[,2:2349])
colnames(B3) <- rownames(t_B3)
rownames(B3) <- colnames(t_B3[,2:2349])
View(B3)
class(B3[2,2]) #yay, it's numeric now!!
#adds a count number on each row (mutation) that tells how many columns (days) have a frequency above 0
B3$count <- rowSums(B3!=0.0)
#sums up the rows to tell you the total % that you get - flaw is that it also adds the count, so need to subtract that value
# NOTE(review): assumes count sits in column 8 (7 day columns + count); re-check if the day columns change.
B3$Sums <- rowSums(B3)-B3[,8]
nrow(B3)#2348
View(B3) #these last two are mostly just sanity checks
#filter 2!!!!! select only rows with greater than 10% total frequency
B3_filter2 <- (subset(B3, B3$Sums >= 10)) #greater than 10%
nrow(B3_filter2)#1268
B3_filter2_out <- (subset(B3, B3$Sums < 10)) #put all of the filtered out mutations in one place
#filter 3!! select only rows that appear in more than 1 day
B3_filter3 <- (subset(B3_filter2, B3_filter2$count > 1))
nrow(B3_filter3)#1011
B3_filter3_out <- (subset(B3_filter2, B3_filter2$count <= 1))
#filter 4 -- remove all mutations at 100% across all measured time points
#6 measured time points (the day-0 column is always 0) so a mutation fixed throughout sums to 600 --> problem with this is mutations can start at 100 and dip slightly
B3_filter4 <- (subset(B3_filter3, B3_filter3$Sums < 600))
nrow(B3_filter4) #998
B3_filter4_out <- (subset(B3_filter3, B3_filter3$Sums >= 600))
#filter 5: drop mutations already at (nearly) fixation at the first measured time point (day 17)
B3_filter5 <- (subset(B3_filter4, B3_filter4$"17" < 95))
nrow(B3_filter5) #997
B3_filter5_out <- (subset(B3_filter4, B3_filter4$"17" >= 95))
#filter out if the HIGHEST frequency isn't 10, not if the combined total frequency doesn't get to 10
#filter out if the change in frequency from the first time point to the last time point does not change by at least 10%, having the additive value be above 10 isn't stringent enough.
############need to change this because it does not do what I thought it did
#B3_filter6 <- (subset(B3_filter5, (B3_filter5$"17"+B3_filter5$"90")/2 >= 10))
#nrow(B3_filter6)#248
#filter6_removed <- (subset(B3_filter5, (B3_filter5$"17"+B3_filter5$"90")/2 < 10))
#View(filter6_removed)
#I really should be checking what mutations are being taken out with each filter...
#View(B3_filter6)
###############
#filter 6 (v2): keep only mutations whose absolute frequency change between day 17 and day 90 is at least 10%
B3_filter62 <- (subset(B3_filter5, abs(B3_filter5$"17"-B3_filter5$"90") >= 10))
nrow(B3_filter62) #141
B3_filter62_out <- (subset(B3_filter5, abs(B3_filter5$"17"-B3_filter5$"90") < 10))
View(B3_filter62)
#collect everything removed by filters 2-6 and save it for later inspection
not_real_mutations <- rbind(B3_filter2_out, B3_filter3_out, B3_filter4_out, B3_filter5_out, B3_filter62_out)
write.csv(not_real_mutations, file = "B3_Filtered_out.csv")
ncol(B3_filter62)#9
B3_filter7 <- B3_filter62[,-c(8,9)] #remove columns with count and sums
View(B3_filter7)
#split the row names into columns again
B3_split <- B3_filter7
B3_split$info <- rownames(B3_filter7)
View(B3_split)
#split the ";;"-joined info string back into desc_gene_annot / position / Mutation
B3_split4 = transform(B3_split, info =colsplit(B3_split$info,';;', names = c('desc_gene_annot','position', 'Mutation')))
#View(B3_split4)
B3_split4 <- as.data.frame(B3_split4)
colnames(B3_split4)#8
info <- (B3_split4$info)
head(info)
#keep only the 7 frequency columns; the split-out metadata is re-attached below
B3_split4 <- B3_split4[,c(1:7)]
View(B3_split4)
#B3_split4$desc_gene_annot <- rownames(B3_split4)
#B3_split4$desc_gene_annot <- gsub(";;.*","",B3_split4$desc_gene_annot)
#does_this_work <- merge(B3_split4, info, by="desc_gene_annot")
#this does work but I like my way better because I know what it is doing and I still have the rownames
#View(does_this_work )
View(B3_split4)
View(info)
B3_split5 <- B3_split4
B3_split5$Position <- info$position
B3_split5$Mutation <- info$Mutation
View(B3_split5)
#B3_split6 <- B3_split5[,-6]
#View(B3_split6)
#rename columns after splitting
#colnames(B3_split6)
colnames(B3_split5) <- c("0","17","25", "44","66","75","90","Position", "Mutation")
View(B3_split5)
#write this to a file that I can find
write.csv(B3_split5,file="B3_allfilters.csv")
#oh plotting....
#have to melt it first...
#melt - with whatever i want to keep
#transform
View(B3_split5)
nrow(B3_split5)#141
t_B3_plot <- t(B3_split5)
nrow(t_B3_plot)#9
#drop the Position and Mutation rows so only day-by-frequency data remains
t_B3_plot_2 <- t_B3_plot[-c(8,9),]
# NOTE(review): melt() on a matrix ignores the id/Value.name arguments here
# (and value.name is the correct spelling); the result has Var1/Var2/value
# columns, which are renamed to day/mutation/value below.
m_B3_plot <- melt(t_B3_plot_2, id=c("desc_gene_annot"),Value.name = "frequency")
head(m_B3_plot)
#plot
#plot(NA, xlim=c(0,100), ylim=c(0,100))
#lines(m_B3_plot$X1, m_B3_plot$value)
#lines(c(0,17,44,66,90), t_final_B3_filter3[,1], add=TRUE)
#lines(c(0,17,44,66,90), t_final_B3_filter3[,2], add=TRUE)
#this is how I could add things one at a time
#but I want to know how to use ggplot
colnames(m_B3_plot) <- c("day", "mutation","value")
m_B3_plot$value <- as.numeric(as.character(m_B3_plot$value)) # you have to change from a factor to a number this way. You have to co to a character first always. if I had just gone to a number it would have given me the level of factor that the previous data point was.
View(m_B3_plot)
ggplot(m_B3_plot,aes(x=day,y=value,color=mutation)) +theme(text = element_text(size=20),legend.text=element_text(size=10),legend.position="none") +geom_point(size=4) +geom_line()
#####now to make the file pretty
# NOTE(review): this re-reads B3_allfilters.csv and OVERWRITES the numeric B3
# data frame built above with a differently-shaped one.
B3 <- read.csv("/Users/katrina/Desktop/working/B3/B3_allfilters.csv")
#the csv's first column (X) holds the old ";;"-joined rownames; split it apart again
splitB3_1 <- colsplit(B3$X, ";;", names = c("desc_gene_annot","Position","Mutation"))
splitB3_2 <- colsplit(splitB3_1$desc_gene_annot, "::", names = c("Description", "Gene","Annotation"))
View(splitB3_1)
nrow(splitB3_1)
nrow(splitB3_2)
B3 <- cbind(splitB3_2,splitB3_1[,2:3],B3)
ncol(B3)
#keep the metadata plus the 7 frequency columns, dropping the raw X column (column 6)
B3 <- B3[,1:13]
B3 <- B3[-6]
colnames(B3)
B3names <- c("Description", "Gene", "Annotation", "Position", "Mutation", "0", "17", "25", "44", "66", "75", "90")
colnames(B3) <- B3names
#View(B1) #I want description and mutation columns
colnames(B3)
write.csv(B3,file="/Users/katrina/Desktop/working/B3/B3_pretty.csv")
#this data set is going to be formatted to go into katya's matlab scripts. The data frame needs specific columns with particular data. they will have to be in a specific order and named correctly, but first I just need to make them holding the correct information.
View(B3)
B3_Muller <- B3[,6:12] #the seven frequency columns (days 0-90)
#FIX: this is the B3 population; the original set Population to "B1", a
#copy-paste left-over from the B1 version of this script (see the stray
#"View(B1)" comment above).
B3_Muller$Population <- "B3"
B3_Muller$Population2 <- 1L #make sure this is a number; TODO(review): confirm 1 is the right population number for B3
B3_Muller$Chromosome <- 1L #make sure this is a number
B3_Muller$Position <- B3$Position
B3_Muller$Class <- "SNP"
B3_Muller$Mutation <- B3$Mutation
B3_Muller$Gene <- B3$Gene
B3_Muller$AminoAcid <- B3$Description
B3_Muller$Class2 <- ""
B3_Muller$Amino <- ""
B3_Muller$NearestDownstreamGene <- ""
B3_Muller$Distance <- ""
B3_Muller$Trajectory <- 1:nrow(B3_Muller)
colnames(B3_Muller)
#now put the columns in the correct order
Muller_col_order <- c("Population", "Population2","Trajectory","Chromosome","Position","Class","Mutation","Gene","AminoAcid","Class2","Amino","NearestDownstreamGene","Distance","0","17","25", "44","66","75", "90")
setcolorder(B3_Muller,Muller_col_order)
#now I need to name them what they are actually supposed to be named
colnames(B3_Muller)
colnames(B3_Muller) <-c("Population", "Population number","Trajectory","Chromosome","Position","Class","Mutation","Gene","Amino Acid","Class","Amino","Nearest Downstream Gene","Distance","0","17","25","44","66","75", "90")
View(B3_Muller)
#need to remove the rownames
#rownames(B3_Muller) <- c()
#View(B3_Muller)
#decided not to do this because I can just print without including the row names. this will also just print out the row names, they will just be the numbers instead of the descriptions
#latest problem is that the frequencies need to be percentages and the column names for the frequencies need to be numbers.
#first solve the frequencies to percentages problem - should be able to do with scales package
#would like to keep 2 decimal points if possible
B3_Muller_try <- B3_Muller
#convert each day column from factor to numeric, then divide by 100 so that
#excel can display the value as a "percent" type. Remember to keep 1 decimal
#when changing the type in excel or it will round everything.
#(replaces 14 copy-pasted per-column statements with one loop)
day_columns <- c("0", "17", "25", "44", "66", "75", "90")
for (day in day_columns) {
  B3_Muller_try[[day]] <- as.numeric(as.character(B3_Muller_try[[day]])) / 100
}
View(B3_Muller_try)
#now to write this file so that I can use it as an input to matlab. The matlab file requires it to be a .xlsx file so I can just write to that type of file. need to make sure that I don't print out the row names or they will be the first column. I do NEED the column names though.
write.csv(B3_Muller_try, file="B3_Muller.csv", row.names = FALSE)
#this file needs to be loaded into matlab for Katya's scripts.
#now to analyze the mutations...
#Summarize breseq mutation calls at the nucleotide and amino-acid level and
#write a two-column summary table to csv.
#
#Mutation_Data: breseq output as a data frame, one row per mutation.
#AminoAcid: name of the column holding breseq amino-acid annotations, which look
#  like "*342C(TGA>TGC)" or say coding, pseudogene, etc. Case sensitive!
#  (FIX: the default was "description", contradicting this function's own
#  documented, case-sensitive default of "Description".)
#Bases: name of the column holding the nucleotide change (e.g. "A>C"). Case sensitive!
#outfile: where to write the summary (new optional argument; defaults to the
#  previously hard-coded "Mutations_table.csv" so existing calls are unchanged).
#Returns the summary matrix invisibly.
Mutations_analysis <- function(Mutation_Data, AminoAcid = "Description", Bases = "Mutation",
                               outfile = "Mutations_table.csv") {
  ##############
  #nucleotide level: breseq separates the two bases with ">", so split the Bases
  #column into the original (ancestral) base and the mutated base.
  Nucleotides <- colsplit(Mutation_Data[, Bases], ">", names = c("original", "mutant"))
  #total number of mutations in the sample
  allmutations <- nrow(Nucleotides)
  #anything that is not a plain single-base substitution on the left side is an
  #indel, because that is how breseq represents them
  indel <- sum(grepl("[^ACGT]", Nucleotides$original))
  #count rows whose original base contains `from` and whose mutant base is `to`.
  #grep() matches `from` anywhere in the string, exactly as the original code did.
  count_change <- function(from, to) {
    idx <- grep(from, Nucleotides$original)
    sum(Nucleotides$mutant[idx] == to, na.rm = TRUE)
  }
  CT <- count_change("C", "T") # C to T transition
  CA <- count_change("C", "A") # C to A transversion
  CG <- count_change("C", "G") # C to G transversion
  TC <- count_change("T", "C") # T to C transition
  TG <- count_change("T", "G") # T to G transversion
  TA <- count_change("T", "A") # T to A transversion
  GA <- count_change("G", "A") # G to A transition
  GT <- count_change("G", "T") # G to T transversion
  GC <- count_change("G", "C") # G to C transversion
  AG <- count_change("A", "G") # A to G transition
  AC <- count_change("A", "C") # A to C transversion
  AT <- count_change("A", "T") # A to T transversion
  #transitions are the 4 purine<->purine / pyrimidine<->pyrimidine swaps
  transitions <- CT + TC + GA + AG
  #everything else is a transversion; together with indels these should
  #account for allmutations (a useful manual sanity check on the output)
  transversions <- AT + AC + GC + GT + CA + CG + TG + TA
  ###############
  ### amino acid level
  #breseq annotations look like "*342C(TGA>TGC)"; keep the part before "("
  Protein <- colsplit(Mutation_Data[, AminoAcid], "\\(", names = c("AA", "DNA"))
  coding <- sum(grepl("coding", Protein$AA)) #breseq's coding region designation
  intergenic <- sum(grepl("intergenic", Protein$AA)) #intergenic designation
  pseudogene <- sum(grepl("pseudogene", Protein$AA)) #pseudogene designation
  #letter-number-asterisk: a coding amino acid mutated into a stop codon
  prematurestop <- sum(lengths(regmatches(Protein$AA, gregexpr("[A-Z][0-9]*\\*", Protein$AA))))
  #asterisk-number-letter: a stop codon mutated into a coding amino acid
  elongating <- sum(lengths(regmatches(Protein$AA, gregexpr("\\*[0-9]*[A-Z]", Protein$AA))))
  #letter-number-letter: plain amino-acid substitutions
  aamutation <- sum(lengths(regmatches(Protein$AA, gregexpr("[A-Z][0-9]+[A-Z]", Protein$AA))))
  #classify the substitutions as synonymous / nonsynonymous by comparing the
  #letters on either side of the position number
  aas <- lengths(regmatches(Protein$AA, gregexpr("[A-Z][0-9]+[A-Z]", Protein$AA)))
  aminos <- as.matrix(Protein$AA[aas == 1])
  aminos2 <- colsplit(aminos, "[0-9]+", names = c("first", "last"))
  synonymous <- sum(aminos2$first == aminos2$last)
  nonsynonymous <- sum(aminos2$first != aminos2$last)
  dnds <- nonsynonymous / synonymous # NOTE: Inf when synonymous == 0
  #assemble the printable summary (renamed from `table`, which shadowed base::table)
  results <- matrix(c("Mutations: ", allmutations,
                      "Nucleotide level mutations", "",
                      "Indels: ", indel,
                      "Transitions: ", transitions,
                      "C>T: ", CT,
                      "T>C: ", TC,
                      "A>G: ", AG,
                      "G>A: ", GA,
                      "Transversions: ", transversions,
                      "A>T: ", AT,
                      "A>C: ", AC,
                      "G>C: ", GC,
                      "G>T: ", GT,
                      "C>A: ", CA,
                      "C>G: ", CG,
                      "T>G: ", TG,
                      "T>A: ", TA,
                      "Amino acid level mutations", "",
                      "Coding: ", coding,
                      "Intergenic: ", intergenic,
                      "Pseudogene: ", pseudogene,
                      "Premature stop: ", prematurestop,
                      "Elongating: ", elongating,
                      "Synonymous: ", synonymous,
                      "Non Synonymous: ", nonsynonymous,
                      "dN/dS: ", dnds), ncol = 2, byrow = TRUE)
  #FIX: write.csv() forbids a col.names argument (the old col.names = T only
  #produced a warning and was ignored); the header row is written regardless.
  write.csv(results, file = outfile)
  invisible(results)
}
setwd("/Users/katrina/Desktop/working/B3/")
Mutations_analysis(B3, "Description", "Mutation")
#for running Deitrick's scripts
#python muller.py -i /Users/katrina/Desktop/working/B2/B2_muller.xlsx -o /Users/katrina/Desktop/working/B2/genotypes
#see what the last filter took out to make sure the mutations are what I want them to be. This check needs to be done on all mutations that are taken out from step 1 on.
# NOTE(review): this recomputes exactly the same subset already stored in B3_filter62_out above.
filter62_removed <- (subset(B3_filter5, abs(B3_filter5$"17"-B3_filter5$"90") < 10))
View(filter62_removed)#yay it removed what I wanted it to!!
nrow(filter62_removed)#587
#I need to look at the pileups for these mutations to se why they are coming up in my populations.
#there are way more of these "nonsense" mutations than there dhould be if it were just normal sequencing errors. so the question is where is the source of the variation. I will look at the pileup at the areas of these mutations in IGV to try to determine the answer to this.
split_removed <- filter62_removed
#stash the ";;"-joined rownames in a column so they can be split apart
split_removed$info <- rownames(filter62_removed)
split_removed2 = transform(split_removed, info =colsplit(split_removed$info,';;', names = c('desc_gene_annot','position', 'Mutation')))
split_removed2 <- as.data.frame(split_removed2)
colnames(split_removed2)
View(split_removed2)
info2 <-(split_removed2$info)
View(info2)
ncol(split_removed2)
#drop column 8; re-attach the split-out position and Mutation columns below
split_removed2 <- split_removed2[,-c(8)]
View(split_removed2)
colnames(info2)
split_removed2$position <- info2$position
split_removed2$Mutation <- info2$Mutation
View(split_removed2)
write.csv(split_removed2, file = "B3_removed.csv")
#######################################
#the following is a section of MatLab code (so won't work to run in R)
#FIX: these lines were live R statements and errored when the file was sourced;
#they are now commented out. Copy them (minus the leading "# ") into matlab.
#problem I ran into is that there is apparently something wrong with the excel file... it won't read in
#manipulations done to excel file once exported
#change column names of frequencies to "number" type
#change the frequencies to "percentage" type
#
# names = ["B3_muller.xlsx"]; %take in the excel file you got from R
# sheets = ["Sheet1"]; %it is an excel workbook so you have to tell it which sheet its on
# tNum = 5; %the number of timepoints
# timepoints = [0,17,44,66,90]; %the names of the timepoints
# xUnits = ["Days"]; %units of the timepoints
# time_series_import_KMF %this reads in the data
# get_genotypes_KMF_KBH %this will determine the genotypes --> need to learn how it is defining a genotype
# %genotype_plots_KMF %this plots the genotypes. It will give 3 plots. the first is unclustered, there is an intermediate, and the third is the final fully clustered data set.
# order_clusters_KMF % this will determine the order in which the genotypes showed up
# %ordered_cluster_plots_KMF %visualize the final clusters with the ancestry incorporated --> should save this figure.
# frequencies = squeeze(genneststotal(1, any(squeeze(genneststotal(1, :, :)), 2), 2:trajSize)); % define the frequencies variable
# nests = squeeze(genneststotal(1, any(squeeze(genneststotal(1, :, :)), 2), nameSize:end)); % define the nests variable
# nests = nests(:, any(nests, 1)); %still defninng nests
# %muller_plots_KMF(frequencies, nests, timepoints) % problem with this is that it always outputs with the x axis saying "time in generations" I want to be able to change this to whatever I want to
# csvwrite("/Users/katrina/Desktop/working/B3/B3timepoints.csv", timepoints)
# csvwrite("/Users/katrina/Desktop/working/B3/B3frequencies.csv",frequencies)
# csvwrite("/Users/katrina/Desktop/working/B3/B3nests.csv",nests)
|
c220db1d3a207355c2833e5b1c39e48b0db09fe0
|
b7842eb8030790f4014f1980d1d924278567e721
|
/modelo_cdmx_20200317_helpers.R
|
12d20ff737436edf6734874e95698b0c7b230556
|
[] |
no_license
|
guillermodeandajauregui/ncov-wuhan-stochastic-model
|
9fc676ca06b1c5867608e2dc87e55399fb4b1d90
|
bcc0d279400b5c1653323ff752b5d00a94dce1a3
|
refs/heads/master
| 2021-03-31T06:02:15.110876
| 2020-03-18T02:33:21
| 2020-03-18T02:33:21
| 248,083,866
| 0
| 0
| null | 2020-03-17T22:16:38
| 2020-03-17T22:16:38
| null |
UTF-8
|
R
| false
| false
| 2,094
|
r
|
modelo_cdmx_20200317_helpers.R
|
# Read the national line list of confirmed cases (InDRE, 2020-03-16).
xxx <- vroom::vroom("data/Tabla_casos_positivos_resultado_InDRE_2020.03.16-Table-1-2.csv")
# National daily case counts keyed by symptom-onset date, written to disk.
xxx %>%
mutate(date = lubridate::dmy(`Fecha de Inicio de síntomas`)) %>%
group_by(date) %>%
tally(name = "cases") %>%
drop_na() %>%
vroom::vroom_write(path = "mx.datecases.2020.03.16.tsv")
# Mexico City subset; this pipe ends in pull()/sum, so it only prints the
# total case count instead of producing a table.
xxx %>%
mutate(date = lubridate::dmy(`Fecha de Inicio de síntomas`)) %>%
filter(Estado=="CIUDAD DE MÉXICO") %>%
group_by(date) %>%
tally(name = "cases") %>%
drop_na() %>% pull(cases) %>% sum
# NOTE(review): this vroom_write() call has no data argument ('x' is missing)
# and will error if executed; presumably the Mexico City table above was
# meant to be piped into it — confirm and fix.
vroom::vroom_write(path = "data/cdmx.datecases.2020.03.16.tsv")
# Exploratory / scratch code: depends on objects defined elsewhere in the
# project ('escenarios', 'scenarios', 'mis_modelos', 'plot.model',
# 'plot.ensemble', 'get.range').
# NOTE(review): both 'escenarios' and 'scenarios' are referenced below —
# confirm these are two distinct objects and not a typo.
wawawa <- escenarios[,-1] %>% as.data.frame
lapply(wawawa, sum) %>% length
i = 1
parms <- as.list(scenarios[i,2:10])
init <- as.list(scenarios[i,11:28])
# Three equivalent ways of indexing the first/fifth model for plotting.
plot.model(data = mis_modelos$`1`, log = "", title = "prueba1")
plot.model(data = mis_modelos[1], log = "", title = "prueba2")
plot.model(data = mis_modelos[[5]], log = "", title = "prueba3")
# Plot every scenario with its description as the title.
lapply(X = 1:5, FUN = function(i){
plot.model(data = mis_modelos[[i]], log = "", title = escenarios$Description[[i]])
})
plot.ensemble(mis_modelos)
get.range(mis_modelos)
mis_modelos$`1`
mis_modelos[[1]] %>% bind_rows() %>% select("C") %>% max()
# Min/max of the cumulative-cases column "C" per scenario.
mis_modelos %>%
lapply(FUN = function(i){
mini <- i %>% bind_rows() %>% select("C") %>% min()
maxi <- i %>% bind_rows() %>% select("C") %>% max()
return(data.frame(minimum = mini,
maximum = maxi))
}) %>% bind_rows(.id = "escenario")
mis_out <- lapply(mis_modelos, get.range)
names(mis_out) <- paste0("escenario_", 1:5)
plot.ensemble(x = mis_out, plausible = 1)
# Expected-cases range per scenario, coloured by whether control measures apply.
mis_out %>% bind_rows() %>% cbind(data.frame(parameter = c("min", "max"))) %>% pivot_longer(cols = -parameter) %>%
rename(escenario = name, casos_esperados = value) %>%
mutate(medidas = ifelse(escenario%in%c("escenario_1", "escenario_3"), "no", "si")) %>%
ggplot(aes(x = escenario, y = casos_esperados, colour = medidas)) +
geom_line() +
theme_minimal() +
scale_colour_manual(values = c("red", "blue")) +
scale_y_log10() +
ggtitle("casos esperados a treinta días de la primera infección")
|
d9126307bc5af8e6322dcbb1f560535841468cbc
|
cddbd03d7beaf8ccf20ee6554770679c2d8eaa34
|
/R/Rene/test_new_code.R
|
e0a2ff60cd5f99018d553e449f574109ff32b80c
|
[] |
no_license
|
RWardenaar/aneufinder
|
c920c72b6d4bedb16d7bbcc25fc8c627bc2c5182
|
9a23b77624be7eaec21398caf7d0a554958ba769
|
refs/heads/master
| 2020-04-11T08:35:16.049455
| 2019-03-07T16:42:03
| 2019-03-07T16:42:03
| 161,648,604
| 0
| 0
| null | 2018-12-13T14:12:19
| 2018-12-13T14:12:19
| null |
UTF-8
|
R
| false
| false
| 32,304
|
r
|
test_new_code.R
|
# =====================================================================================================================
# test new code
# =====================================================================================================================
# Organization: ERIBA (CRIPSR/iPSC facility)
# Programmer: René Wardenaar (Original code written by Aaron Taudt)
# Starting date: 25-09-18
# Last modified: 07-01-19
# Version: 1.0
# ---------------------------------------------------------------------------------------------------------------------
# =====================================================================================================================
# START R CODE
# =====================================================================================================================
# ---------------------------------------------------------------------------------------------------------------------
# WD AND LIBRARIES
# ---------------------------------------------------------------------------------------------------------------------
library(AneuFinder)
library(BSgenome.Hsapiens.NCBI.GRCh38)
library(genomation)
library(Rsamtools)
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\binReads.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\checkClass.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\chromosomeLengths.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\correctGC.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\dnbinom.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\findCNVs.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\fixedWidthBins.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\importReads.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\initializeStates.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\rwConfig.R")
source("D:\\Rene\\Projects\\AneuFinder\\Code\\Rewritten\\Functions\\variableWidthBins.R")
# ---------------------------------------------------------------------------------------------------------------------
# SETTINGS
# ---------------------------------------------------------------------------------------------------------------------
# Script-level defaults; PART 1.02 merges these with values from the config
# file (config values take precedence).
inputfolder <- "D:\\Rene\\Projects\\AneuFinder\\Data\\Data_test_new_code\\Bam"
outputfolder <- "D:\\Rene\\Projects\\AneuFinder\\Data\\Data_test_new_code\\Bam_out"
configfile <- "D:\\Rene\\Projects\\AneuFinder\\Data\\Data_test_new_code\\aneufinder_human_hg38.config"
numCPU <- 1 # [GENERAL]
reuse.existing.files <- TRUE
binsizes <- 1e6 # [BINNING]
stepsizes <- binsizes
variable.width.reference <- NULL
reads.per.bin <- NULL
pairedEndReads <- FALSE
assembly <- NULL
chromosomes <- NULL
remove.duplicate.reads <- TRUE
min.mapq <- 10
blacklist <- NULL
use.bamsignals <- FALSE
reads.store <- FALSE
correction.method <- NULL # [CORRECTION]
GC.BSgenome <- NULL
method <- c('edivisive') # [COPYNUMBERCALLING]
strandseq <- FALSE
# R = number of random permutations per iteration of the e.divisive test.
R <- 10
sig.lvl <- 0.1
eps <- 0.01
max.time <- 60
max.iter <- 5000
num.trials <- 15
# Hidden states: zero-inflation plus 0-somy .. 10-somy.
states <- c('zero-inflation',paste0(0:10,'-somy'))
most.frequent.state <- '2-somy' # N: New!
most.frequent.state.strandseq <- '1-somy' # N: New!
confint <- NULL
refine.breakpoints <- FALSE
hotspot.bandwidth <- NULL
hotspot.pval <- 5e-2
cluster.plots <- TRUE
# =====================================================================================================================
# WITHIN ANEUFINDER
# =====================================================================================================================
# =====================================================================================================================
# PART 1 OF 4 | PREPARATION
# =====================================================================================================================
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.01 | READ CONFIG FILE
# ---------------------------------------------------------------------------------------------------------------------
# Read the configuration file (if one was given) into 'conf'; abort with a
# message naming the offending file on any read error. This replaces the
# original sentinel-string construct, whose handler assignment to a local
# 'errstring' was dead code.
conf <- NULL
if (!is.null(configfile)) {
  conf <- tryCatch(
    RW_readConfig(configfile),
    error = function(err) {
      stop("Could not read configuration file ", configfile, call. = FALSE)
    }
  )
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.02 | COMBINE ARGUMENTS CONFIG FILE WITH SUPPLIED ARGUMENTS
# ---------------------------------------------------------------------------------------------------------------------
# Collect all script-level defaults in one list ...
params <- list(inputfolder=inputfolder, outputfolder=outputfolder, numCPU=numCPU,
reuse.existing.files=reuse.existing.files, binsizes=binsizes, stepsizes=stepsizes,
variable.width.reference=variable.width.reference, reads.per.bin=reads.per.bin,
pairedEndReads=pairedEndReads, assembly=assembly, chromosomes=chromosomes,
remove.duplicate.reads=remove.duplicate.reads, min.mapq=min.mapq, blacklist=blacklist,
reads.store=reads.store, use.bamsignals=use.bamsignals, correction.method=correction.method,
GC.BSgenome=GC.BSgenome, method=method, strandseq=strandseq, eps=eps, max.time=max.time,
max.iter=max.iter, num.trials=num.trials, states=states,
most.frequent.state=most.frequent.state,
most.frequent.state.strandseq=most.frequent.state.strandseq, R=R, sig.lvl=sig.lvl,
confint=confint, refine.breakpoints=refine.breakpoints, hotspot.bandwidth=hotspot.bandwidth,
hotspot.pval=hotspot.pval, cluster.plots=cluster.plots)
# ... then append only those defaults whose names are not already present in
# 'conf', so values from the config file win over the script defaults.
conf <- c(conf, params[setdiff(names(params),names(conf))])
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.03 | CHECK CLASS
# ---------------------------------------------------------------------------------------------------------------------
# Validate the types of all configuration values.
checkClass(conf=conf)
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.04 | GET INPUT FILES AND CHECK FORMAT
# ---------------------------------------------------------------------------------------------------------------------
# Accept only .bam, .bed and .bed.gz input files.
input.files <- list.files(inputfolder, full.names=TRUE, pattern='\\.bam$|\\.bed$|\\.bed\\.gz$')
if (length(input.files) == 0) {
  stop("None of the input files have the correct format. Expected formats are '.bam', '.bed' and '.bed.gz'")
}
# Strip a trailing '.gz' so 'bed.gz' and 'bed' count as the same file type.
files.clean <- sub('\\.gz$', '', input.files)
# Determine each file's type from its last extension; vapply guarantees a
# character result regardless of input (sapply does not).
input.type <- unique(vapply(strsplit(files.clean, '\\.'),
                            function(x) rev(x)[1], character(1)))
# Mixing input types (e.g. bam and bed) is not allowed. '> 1' is more robust
# than the original '== 2' should additional formats ever be accepted.
if (length(input.type) > 1) {
  stop("Both bam and bed files in input directory. Only one type allowed.")
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.05 | [GENERAL]
# ---------------------------------------------------------------------------------------------------------------------
# When the user chose not to reuse existing results, wipe any previous
# output directory before starting a fresh run.
if (!conf[['reuse.existing.files']] && file.exists(outputfolder)) {
  message("Deleting old directory: ", outputfolder)
  unlink(outputfolder, recursive = TRUE)
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.06 | [BINNING]
# ---------------------------------------------------------------------------------------------------------------------
# Default the step sizes to the bin sizes (non-overlapping windows).
if(is.null(conf[['stepsizes']])){
conf[['stepsizes']] <- conf[['binsizes']]
}
# Bin sizes and step sizes are paired element-wise.
if(length(conf[['binsizes']]) != length(conf[['stepsizes']])){
stop("Need one element in 'stepsizes' for each element in 'binsizes'.")
}
if(any(conf[['binsizes']] < conf[['stepsizes']])){
stop("'stepsizes' must be smaller/equal than 'binsizes'")
}
# Validate the optional variable-width reference file (format + existence).
if(!is.null(conf[['variable.width.reference']])){
file.clean <- sub('\\.gz$','',conf[['variable.width.reference']])
ref.type <- rev(strsplit(file.clean,'\\.')[[1]])[1]
if((ref.type != 'bam') & (ref.type != 'bed')){
stop("The variable width reference file does not have the correct format.
The expected formats are '.bam', '.bed' and '.bed.gz'")
}
if(!file.exists(conf[['variable.width.reference']])){
stop("variable.width.reference file '",conf[['variable.width.reference']],"' does not exist.")
}
}
# If reads-per-bin binning is requested it must be at least 1.
if(!is.null(conf[['reads.per.bin']])){
if(conf[['reads.per.bin']] < 1){
stop("The number of reads per bin is smaller than the minimum allowed (<1).")
}
}
# Sanity-check the minimum mapping quality.
if(conf[['min.mapq']] < 0){
stop("Unusual low 'min.mapq': ",conf[['min.mapq']])
}
# If the blacklist is given as a file path, validate it and read it into a
# GRanges object; blacklisted regions are excluded from the analysis.
if(is.character(conf[['blacklist']])){
  file.clean <- sub('\\.gz$','',conf[['blacklist']])
  black.type <- rev(strsplit(file.clean,'\\.')[[1]])[1]
  if(black.type != 'bed'){
    # BUGFIX: the message previously interpolated the undefined variable
    # 'file.format', which itself raised an error; use 'black.type'.
    stop("The blacklist has the wrong file format: ",black.type,". Allowed formats are: 'bed' (tab delimited)")
  }
  if(!file.exists(conf[['blacklist']])){
    stop("Blacklist file '",conf[['blacklist']],"' does not exist.")
  }
  conf[['blacklist']] <- suppressMessages(readBed(conf[['blacklist']], track.line="auto", remove.unusual=FALSE,
                                                  zero.based=TRUE))
  # inherits() is safer than comparing class(x)[1] against a string.
  if(!inherits(conf[['blacklist']], "GRanges")){
    stop("Something went wrong with reading the bed file. Please check whether the bed file is tab delimited.")
  }
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.07 [CORRECTION]
# ---------------------------------------------------------------------------------------------------------------------
# Only 'GC' is a recognised read-count correction method.
if(!all(conf[['correction.method']] %in% c('GC'))){
stop("Unknown correction method: ",paste(setdiff(conf[['correction.method']],c("GC")),collapse=', '),".
Allowed methods are: 'GC'.")
}
# GC correction requires a reference genome. '&&' (scalar, short-circuit)
# replaces the element-wise '&' inside this scalar 'if' condition.
if('GC' %in% conf[['correction.method']] && is.null(conf[['GC.BSgenome']])){
stop("Option 'GC.bsgenome' has to be given if correction.method='GC'.")
}
if(!is.null(conf[['GC.BSgenome']])){
  if(is.character(conf[['GC.BSgenome']])){
    # Resolve the genome object by name; get() replaces the fragile
    # eval(parse(text=...)) construct and behaves identically for a plain
    # object name such as "BSgenome.Hsapiens.NCBI.GRCh38".
    conf[['GC.BSgenome']] <- get(conf[['GC.BSgenome']])
  }else if(!inherits(conf[['GC.BSgenome']], 'BSgenome')){
    # BUGFIX: the message previously referenced the script variable
    # 'GC.BSgenome' (NULL by default) instead of the configured value.
    stop("Unknown class for 'GC.BSgenome': ",class(conf[['GC.BSgenome']]),".
'GC.BSgenome' should either be of class 'character' or 'BSgenome'")
  }
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.08 [COPYNUMBERCALLING]
# ---------------------------------------------------------------------------------------------------------------------
# Only the three implemented copy-number calling methods are accepted.
if(!all(conf[['method']] %in% c('HMM','dnacopy','edivisive'))){
stop("Unknown copynumber calling method ('method'): ",paste(setdiff(conf[['method']],c('HMM','dnacopy','edivisive')),
collapse=', '),". Allowed methods are: 'HMM', 'dnacopy' and 'edivisive'.")
}
# e.divisive permutation count must be positive.
if(conf[['R']] < 1){
stop("The maximum number of random permutations to use in each iteration of the permutation test should be 1 or
higher (method: edivisive). Current value ('R'): ",conf[['R']])
}
# Significance level is a probability in (0, 1].
if(conf[['sig.lvl']] <= 0 | conf[['sig.lvl']] > 1){
stop("The statistical significance level for a proposed change point should be between 0 and 1.
Current value ('sig.lvl'): ",conf[['sig.lvl']])
}
# Baum-Welch convergence threshold must be positive; warn if unusually large.
if(conf[['eps']] <= 0){
stop("The Convergence threshold for the Baum-Welch algorithm should be higher than 0.
Current value ('eps'): ",conf[['eps']])
}
if(conf[['eps']] > 0.1){
warning("Unusual high number for the Convergence threshold for the Baum-Welch algorithm (>0.1).
Current value ('eps'): ",conf[['eps']])
}
# max.time / max.iter: positive integers, or -1 meaning "no limit"; 0 is invalid.
conf[['max.time']] <- round(conf[['max.time']]) # Ensures that we get an integer.
if(conf[['max.time']] < -1 | conf[['max.time']] == 0){
stop("The maximum running time in seconds for the Baum-Welch algorithm can have a value higher than zero or -1
(no limit). Current value ('max.time'): ",conf[['max.time']])
}
conf[['max.iter']] <- round(conf[['max.iter']]) # Ensures that we get an integer.
if(conf[['max.iter']] < -1 | conf[['max.iter']] == 0){
stop("The maximum number of iterations for the Baum-Welch algorithm can have a value higher than zero or -1
(no limit). Current value ('max.iter'): ",conf[['max.iter']])
}
conf[['num.trials']] <- round(conf[['num.trials']]) # Ensures that we get an integer.
if(conf[['num.trials']] <= 0){
stop("The number of trials to find a fit where state 'most.frequent.state' is most frequent should be higher than 0.
Current value ('num.trials'): ",conf[['num.trials']])
}
# Validate the state names: each must be 'zero-inflation', '[n]-somy' or
# '+[n]-somy'. BUGFIX: the original pattern contained the branch
# '+[0-9]+-somy' — a bare leading '+' is at best a literal and at worst an
# invalid quantifier — and aborted with an empty stop("") message. The '+'
# is now anchored and escaped, and the offending states are reported.
if(any(!grepl('zero-inflation|^[0-9]+-somy|^\\+[0-9]+-somy',conf[['states']]))){
  stop("Unknown state(s): ",
       paste(grep('zero-inflation|^[0-9]+-somy|^\\+[0-9]+-somy',conf[['states']],value=TRUE,invert=TRUE),collapse=', '),
       ". States must be 'zero-inflation', '[number]-somy' or '+[number]-somy'.")
}
# Each state may occur only once.
if(any(table(conf[['states']]) != 1)){ # Check non-unique states.
  stop("States are not unique.")
}
# If present, 'zero-inflation' has to be the very first state.
if(any(grepl('zero-inflation',conf[['states']]))){
  if(grep('zero-inflation',conf[['states']]) != 1){
    stop("The zero-inflation state should be the first of all states.")
  }
}
# Extract the numeric part of every '-somy' state ("3-somy" -> "3").
state.somy <- grep('-somy',conf[['states']],value=TRUE)
state.num <- substr(state.somy,1,nchar(state.somy)-5)
# At most one '+[n]-somy' state is allowed; it is dropped from the ordering
# check when it is the last state.
state.plus <- grep('^\\+',state.num)
if(length(state.plus) > 0){
  if(length(state.plus) > 1){
    stop("There is more than one +[number]-somy state.")
  }
  if(state.plus == length(state.num)){
    state.num <- state.num[-state.plus]
  }
}
# BUGFIX: compare numerically on both sides; the original compared the
# character vector against a numeric sort, which misbehaves for
# zero-padded names such as "02-somy".
if(any(as.numeric(state.num) != sort(as.numeric(state.num)))){
  stop("States are not ordered.")
}
# BUGFIX: the messages below previously interpolated the script-level
# default 'states' instead of the effective configuration value.
if(!conf[['most.frequent.state']] %in% conf[['states']]){
  stop("argument 'most.frequent.state' must be one of c(",paste(conf[['states']], collapse=","),")")
}
if(!conf[['most.frequent.state.strandseq']] %in% conf[['states']]){
  stop("argument 'most.frequent.state.strandseq' must be one of c(",paste(conf[['states']], collapse=","),")")
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.09 | CHECK CHROMOSOME FORMATS AND OVERLAP WITH SPECIFIED CHROMOSOMES
# ---------------------------------------------------------------------------------------------------------------------
# Detect the chromosome naming convention ('chr1' vs '1') from the first
# input file; this becomes the reference format for the whole run.
if(input.type == 'bam'){
  seq.names <- GenomeInfoDb::seqlevels(Rsamtools::BamFile(input.files[1]))
}else if(input.type == 'bed' | input.type == 'bed.gz'){
  seq.names <- GenomeInfoDb::seqlevels(readBed(input.files[1]))
}
if(all(grepl('^chr',seq.names))){
  chrom.type <- 'chr'
}else if(all(!grepl('^chr',seq.names))){
  chrom.type <- 'num'
}else{
  stop("Inconsistency in chromosome names input files. Some start with 'chr' while others do not.")
}
# Compare the requested chromosomes against those present in the data.
# BUGFIX: with the default conf[['chromosomes']] == NULL both lengths were
# zero, so the '==' branch fired and aborted the run with an empty
# chromosome list; require at least one missing chromosome before stopping.
chrom.missing <- setdiff(conf[['chromosomes']],seq.names)
if(length(chrom.missing) > 0 && length(chrom.missing) == length(conf[['chromosomes']])){
  chr.string <- paste0(chrom.missing, collapse=', ')
  stop("The specified chromosomes ",chr.string, " are not found in the data (sam/bed files).
Pay attention to the naming convention in your data, e.g. 'chr1' or '1'.")
}else if(length(chrom.missing) > 0){
  chr.string <- paste0(chrom.missing, collapse=', ')
  warning(paste0('Not using chromosomes ',chr.string,' because they are not found in the data (sam/bed files).'))
}
# Repeat the chromosome-format and overlap checks for the optional
# variable-width reference file.
if(!is.null(conf[['variable.width.reference']])){
if(ref.type == 'bam'){ # N: Check 'variable.width.reference'
seq.names <- GenomeInfoDb::seqlevels(Rsamtools::BamFile(conf[['variable.width.reference']]))
}else if(ref.type == 'bed'){
seq.names <- GenomeInfoDb::seqlevels(readBed(conf[['variable.width.reference']]))
}
# The reference must use the same naming convention as the input files.
if(all(grepl('^chr',seq.names))){
if(chrom.type == 'num'){
stop("The specified chromosomes do not exist in the data.
Pay attention to the naming convention in your data, e.g. 'chr1' or '1'.")
}
}else if(all(!grepl('^chr',seq.names))){
if(chrom.type == 'chr'){
stop("The specified chromosomes do not exist in the data.
Pay attention to the naming convention in your data, e.g. 'chr1' or '1'.")
}
}else{ # Q: Check if there is a mix of chr and not chr?
stop("Inconsistency in chromosome names variable width reference. Some start with 'chr' while others do not.")
}
# NOTE(review): as in PART 1.09, conf[['chromosomes']] == NULL makes both
# lengths zero and wrongly triggers the stop() below — confirm and guard.
chrom.missing <- setdiff(conf[['chromosomes']],seq.names)
if(length(chrom.missing) == length(conf[['chromosomes']])){
chr.string <- paste0(chrom.missing, collapse=', ')
stop("The specified chromosomes ",chr.string, " are not found in the data (variable.width.reference).
Pay attention to the naming convention in your data, e.g. 'chr1' or '1'.")
}else if(length(chrom.missing) > 0){
chr.string <- paste0(chrom.missing, collapse=', ')
warning(paste0('Not using chromosomes ',chr.string,' because they are not found in the data
(variable.width.reference).'))
}
}
# Harmonise the blacklist's chromosome naming with the input files
# (add/strip the 'chr' prefix as needed), then check the overlap.
if(!is.null(conf[['blacklist']])){
seq.names <- GenomeInfoDb::seqlevels(conf[['blacklist']])
if(all(grepl('^chr',seq.names))){
if(chrom.type == 'num'){
seqlevels(conf[['blacklist']]) <- sub('chr','',seqlevels(conf[['blacklist']]))
}
}else if(all(!grepl('^chr',seq.names))){
if(chrom.type == 'chr'){
seqlevels(conf[['blacklist']]) <- paste0('chr',seqlevels(conf[['blacklist']]))
}
}else{ # Q: Check if there is a mix of chr and not chr?
stop("Inconsistency in chromosome names blacklist. Some start with 'chr' while others do not.")
}
# NOTE(review): as in PART 1.09, conf[['chromosomes']] == NULL makes both
# lengths zero and wrongly triggers the stop() below — confirm and guard.
chrom.missing <- setdiff(conf[['chromosomes']],seq.names)
if(length(chrom.missing) == length(conf[['chromosomes']])){
chr.string <- paste0(chrom.missing, collapse=', ')
stop("The specified chromosomes ",chr.string," are not found in the data (blacklist).
Pay attention to the naming convention in your data, e.g. 'chr1' or '1'.")
}else if(length(chrom.missing) > 0){
chr.string <- paste0(chrom.missing, collapse=', ')
warning(paste0("Not using chromosomes ",chr.string," because they are not found in the data (blacklist)."))
}
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.10 | GET CHROMOSOME LENGTHS
# ---------------------------------------------------------------------------------------------------------------------
# Look up chromosome lengths (from the assembly or the input files) and keep
# only the requested chromosomes.
# NOTE(review): if conf[['chromosomes']] is NULL (the default), the %in%
# filter below keeps nothing and the stop() then fires — confirm whether
# NULL is meant to select all chromosomes and guard accordingly.
chrom.lengths <- RW_chromosomeLengths(assembly=conf[['assembly']],chrom.type=chrom.type,input.files=input.files) # N: We can indicate what kind of source is used.
chrom.lengths <- chrom.lengths[which(names(chrom.lengths) %in% conf[['chromosomes']])]
chrom.missing <- setdiff(conf[['chromosomes']],names(chrom.lengths))
if(length(chrom.missing) == length(conf[['chromosomes']])){
chr.string <- paste0(chrom.missing, collapse=', ')
stop("The specified chromosomes ",chr.string," are not found within the object or file that is specifying the
chromosome lengths. Pay attention to the naming convention in your data, e.g. 'chr1' or '1'.")
}else if(length(chrom.missing) > 0){
chr.string <- paste0(chrom.missing, collapse=', ')
warning(paste0('Not using chromosomes ',chr.string,' because the object or file that is specifying the chromosome
lengths does not contain them.'))
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 1.11 | CREATE OUTPUT DIRECTORY
# ---------------------------------------------------------------------------------------------------------------------
if(!file.exists(conf[['outputfolder']])){
dir.create(conf[['outputfolder']])
}
# =====================================================================================================================
# PART 2 OF 4 | FILTERING, BINNING AND CORRECTING THE DATA
# =====================================================================================================================
# ---------------------------------------------------------------------------------------------------------------------
# PART 2.01 | MAKE BIN LIST
# ---------------------------------------------------------------------------------------------------------------------
# Build fixed-width bins; when a variable-width reference is supplied, the
# fixed bins are replaced by bins holding equal read counts of the reference.
bins.list <- RW_fixedWidthBins(chrom.lengths=chrom.lengths, binsizes=conf[['binsizes']],
stepsizes=conf[['stepsizes']])
if(!is.null(conf[['variable.width.reference']])){ # Q: This part takes a long time. Can we skip it if the binned files are already present? Check if all expected files are there based on names input files etc.
if(ref.type == 'bam'){
bam.index.ref <- paste0(conf[['variable.width.reference']],".bai")
if(!file.exists(bam.index.ref)){
bam.index.ref <- Rsamtools::indexBam(conf[['variable.width.reference']])
warning("Couldn't find BAM index-file. Creating our own file ", bam.index.ref," instead.")
}
# NOTE(review): 'blacklist=blacklist' passes the raw settings variable, not
# the processed conf[['blacklist']] GRanges — confirm which is intended.
reads.ref <- RW_bam2GRanges(bamfile=conf[['variable.width.reference']], bamindex=bam.index.ref,
chrom.lengths=chrom.lengths, pairedEndReads=conf[['pairedEndReads']],
remove.duplicate.reads=conf[['remove.duplicate.reads']], min.mapq=conf[['min.mapq']],
blacklist=blacklist)
}else if(ref.type == 'bed'){
reads.ref <- RW_bed2GRanges(bedfile=conf[['variable.width.reference']], chrom.lengths=chrom.lengths,
remove.duplicate.reads=conf[['remove.duplicate.reads']], min.mapq=conf[['min.mapq']],
blacklist=blacklist)
}
# Bin the reference reads, then derive variable-width bins from them.
binned.ref <- RW_binReads(reads=reads.ref, bins.list=bins.list)
bins.list <- NULL
bins.list <- RW_variableWidthBins(reads=reads.ref, binned.list=binned.ref)
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 2.02 | GET READ DATA AND FILTER
# ---------------------------------------------------------------------------------------------------------------------
# Import each input file, apply the read filters (mapq, duplicates,
# blacklist) and cache the resulting GRanges as '<name>.Rdata' under
# '<outputfolder>/filtered'. Already-cached files are skipped.
path.filtered.reads <- file.path(conf[['outputfolder']],'filtered')
if(!file.exists(path.filtered.reads)){dir.create(path.filtered.reads)}
# BUGFIX: cached files carry a '.Rdata' suffix, so comparing raw basenames
# against the directory listing never matched and every file was refiltered
# on every run; strip the suffix before the comparison.
files.done <- sub('\\.Rdata$','',list.files(path.filtered.reads,full.names=FALSE))
files.to.do <- setdiff(basename(input.files),files.done)
files.to.do <- file.path(conf[['inputfolder']],files.to.do)
for(file.cur in files.to.do){
  if(input.type == "bam"){
    bam.index <- paste0(file.cur,".bai")
    if(!file.exists(bam.index)){
      bam.index <- Rsamtools::indexBam(file.cur)
      warning("Couldn't find BAM index-file. Creating our own file ",bam.index," instead.")
    }
    # NOTE(review): 'blacklist=blacklist' passes the raw settings variable,
    # not the processed conf[['blacklist']] GRanges — confirm intent.
    reads <- RW_bam2GRanges(bamfile=file.cur, bamindex=bam.index, chrom.lengths=chrom.lengths,
                            pairedEndReads=conf[['pairedEndReads']],
                            remove.duplicate.reads=conf[['remove.duplicate.reads']],
                            min.mapq=conf[['min.mapq']], blacklist=blacklist)
  }else if(input.type == "bed"){
    reads <- RW_bed2GRanges(bedfile=file.cur, chrom.lengths=chrom.lengths,
                            remove.duplicate.reads=conf[['remove.duplicate.reads']],
                            min.mapq=conf[['min.mapq']], blacklist=blacklist)
  }
  save(reads,file=file.path(path.filtered.reads,paste0(basename(file.cur),'.Rdata')))
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 2.03 | BIN THE READS
# ---------------------------------------------------------------------------------------------------------------------
# Bin each filtered read set once per binsize/stepsize combination and save
# the result under '<outputfolder>/binned'. Existing bin files are reused.
path.uncorrected.bins <- file.path(conf[['outputfolder']],'binned')
if(!file.exists(path.uncorrected.bins)){dir.create(path.uncorrected.bins)}
files.to.do <- list.files(path.filtered.reads,full.names=TRUE)
for(file.cur in files.to.do){
  reads <- get(load(file.cur))
  # seq_along() is safe if 'binsizes' were ever empty; '1:length()' would
  # yield c(1, 0) and crash the loop body.
  for(ibss in seq_along(conf[['binsizes']])){ # ibss: index bin step size combinations
    binsize <- conf[['binsizes']][ibss]
    stepsize <- conf[['stepsizes']][ibss]
    combi <- paste0("binsize_",format(binsize,scientific=TRUE,trim=TRUE),"_stepsize_",
                    format(stepsize,scientific=TRUE,trim=TRUE))
    # Strip the '.Rdata' suffix by name rather than by position; the
    # original substr(..., nchar-6) silently mangled any file name that did
    # not end in a six-character suffix.
    inp_file <- sub('\\.Rdata$','',basename(file.cur))
    file.save <- file.path(path.uncorrected.bins,paste0(inp_file,"_",combi,".RData"))
    if(!file.exists(file.save)){
      binned <- RW_binReads(reads=reads,bins=bins.list[[combi]])
      save(binned,file=file.save)
    }
  }
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 2.04 | CORRECT READ COUNT
# ---------------------------------------------------------------------------------------------------------------------
# GC-correct each binned file (if a correction method is configured) and
# save the corrected bins in a sibling 'binned-GC' directory; otherwise the
# downstream steps read the uncorrected bins directly.
if(!is.null(conf[['correction.method']])){ # Q: Note: after correction the total bin count differs from the sum of positive and negative strand due to rounding. Should we somehow 'fix' this?
path.corrected.bins <- paste0(path.uncorrected.bins,'-',conf[['correction.method']])
if(!file.exists(path.corrected.bins)){dir.create(path.corrected.bins)}
if(conf[['correction.method']] == 'GC'){
bins.list.GC <- RW_getGCContentBins(bins.list=bins.list,GC.BSgenome=conf[['GC.BSgenome']])
}
# Only process files that do not already have a corrected counterpart.
files.to.do <- setdiff(list.files(path.uncorrected.bins,full.names=FALSE),
list.files(path.corrected.bins,full.names=FALSE))
files.to.do <- file.path(path.uncorrected.bins,files.to.do)
for(file.cur in files.to.do){
if(conf[['correction.method']] == 'GC'){
binned <- get(load(file.cur))
# Rebuild the 'binsize_..._stepsize_...' key from the file name to look up
# the matching GC-content bins.
split_res <- strsplit(basename(file.cur),"_binsize_") # N: Split on "_binsize_"
combi <- paste0("binsize_",substr(split_res[[1]][2],1,(nchar(split_res[[1]][2])-6))) # N: Remove ".RData" and add again "binsize_"
binned.GC <- merge(binned,bins.list.GC[[combi]])
binned.GC.cor <- RW_correctGC(binned.gc=binned.GC,method='loess')
save(binned.GC.cor,file=file.path(path.corrected.bins,basename(file.cur)))
}
}
}else{
path.corrected.bins <- path.uncorrected.bins
}
# =====================================================================================================================
# PART 3 OF 4 | RUN MODELS
# =====================================================================================================================
# ---------------------------------------------------------------------------------------------------------------------
# PART 3.01 | RUN MODELS
# ---------------------------------------------------------------------------------------------------------------------
# Fit a copy-number model per binned file and per requested method, saving
# each model under 'MODELS/method-<name>'. Existing model files are reused.
path.model <- file.path(conf[['outputfolder']],'MODELS')
if(!file.exists(path.model)){dir.create(path.model)}
for(method in conf[['method']]){
  path.method <- file.path(path.model,paste0('method-',method))
  if(!file.exists(path.method)){dir.create(path.method)}
  files.to.do <- setdiff(list.files(path.corrected.bins,full.names=FALSE),
                         list.files(path.method,full.names=FALSE))
  files.to.do <- file.path(path.corrected.bins,files.to.do)
  for(file.cur in files.to.do){
    binned <- get(load(file.cur))
    model <- RW_findCNVs(strandseq=conf[['strandseq']], binned=binned, method=method, R=conf[['R']],
                         sig.lvl=conf[['sig.lvl']], eps=conf[['eps']], max.time=conf[['max.time']],
                         max.iter=conf[['max.iter']], num.trials=conf[['num.trials']], states=conf[['states']],
                         most.frequent.state=conf[['most.frequent.state']],
                         most.frequent.state.strandseq=conf[['most.frequent.state.strandseq']])
    # TODO: breakpoint refinement is to be implemented here.
    # BUGFIX: a stray '.' on its own line evaluated the undefined symbol '.'
    # and aborted the loop at runtime; it has been removed.
    save(model,file=file.path(path.method,basename(file.cur)))
  }
}
# ---------------------------------------------------------------------------------------------------------------------
# PART 3.02 | REFINE BREAKPOINTS
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# PART 3.03 | FIND BREAKPOINT HOTSPOTS
# ---------------------------------------------------------------------------------------------------------------------
# =====================================================================================================================
# PART 4 OF 4 | CREATING BROWSER FILES, PLOTTING
# =====================================================================================================================
# ---------------------------------------------------------------------------------------------------------------------
# END END END END END END END END END END END END END END END END END END END END END END END END END END END END END
# ---------------------------------------------------------------------------------------------------------------------
|
f372300ae06de1b9bf641eef881fb19b426d603b
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609957749-test.R
|
f0ab5e10a3c0d790d82806c7aa21116247dae74a
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
1609957749-test.R
|
# Fuzzer-generated regression input: a 6x8 numeric matrix containing a few
# denormal doubles and zeros (valgrind run checks for memory errors).
testlist <- list(x = structure(c(4.55669484873935e-305, 6.41012488536059e-310, 7.21408662741556e-229, 5.9233691322999e-304, 3.5121263843364e-309, 4.77783808228084e-299, 4.51013743332069e-309, 7.29023199001299e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 8L)))
# Call the package-internal compiled routine under test with the fuzzed
# argument list and print the result structure.
result <- do.call(borrowr:::matchesToCor,testlist)
str(result)
|
3534fcd2325ba847db20ae61b75898656968bead
|
0992959415ff5a276476910a8419040f655b9247
|
/Sales.R
|
3b929cbfcad8131396cdec542ed45f3e3dff3d0a
|
[] |
no_license
|
matej-s/Sales
|
339a254e27f16cd40de56ac57c8f2418df32eb9b
|
839123640e4eeda1b27629ea78c4a23c42513b6e
|
refs/heads/main
| 2023-02-11T21:24:52.634903
| 2020-12-18T08:25:21
| 2020-12-18T08:25:21
| 312,060,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48,877
|
r
|
Sales.R
|
#' ## 1 Introduction
#'
#' The aim of the project is to develop a system that predicts
#' the success of sales by using existing company sales data.
#'
#' ## 2 Method and Analysis
#' ### 2.1 Download Data and Generate Data Sets
# Record the start time so the total processing time can be measured later.
startTime_ALL_2 <- Sys.time()
# The helper below installs any missing packages and attaches every required one.
# Install every package in 'need_pkg' that is missing locally, then attach
# all of them. Called for its side effects (installation and library
# attachment); returns nothing useful.
inst_pack_load_lib <- function(need_pkg) {
  missing_pkg <- setdiff(need_pkg, installed.packages()[, "Package"])
  if (length(missing_pkg) > 0) {
    install.packages(missing_pkg, repos = "http://cran.us.r-project.org")
  }
  for (pkg in need_pkg) {
    library(pkg, character.only = TRUE, quietly = TRUE)
  }
}
# required packages list
need_pkg <- c("tidyverse", "jjb", "caret" , "rpart", "e1071",
"kableExtra", "reshape2", "ggcorrplot", "knitr" , "gridExtra",
"ggridges", "ggplot2", "gtable", "grid", "egg" ,
"lemon", "ggpubr", "huxtable", "scales", "ggpubr",
"naivebayes" , "fastAdaboost", "ada", "precrec")
# install and load missing packages
inst_pack_load_lib(need_pkg);
# make directory dataset to save data set
mkdir("dataset")
# make directory figs to save figures
mkdir("figs")
# Download data
# WA_Fn-UseC_-Sales-Win-Loss dataset
# project location on GitHub
# https://github.com/matej-s/Sales
# BUGFIX: the URL string literal previously spanned two source lines, which
# embedded a newline character in the URL and broke download.file(); it is
# now a single line.
zip_url <- "https://raw.githubusercontent.com/matej-s/Sales/main/dataset/WA_Fn-UseC_-Sales-Win-Loss.csv.zip"
zip_file <- "WA_Fn-UseC_-Sales-Win-Loss.csv.zip"
# file.path() builds the destination path portably (replaces the paste()
# trick with sep = "/dataset/").
zip_dest <- file.path(getwd(), "dataset", zip_file)
download.file(zip_url, destfile = zip_dest)
#unzip in working directory
unzip(zip_dest)
# move unzipped data from the working directory to the project's /dataset directory
path_to_original_file <- file.path(getwd(), "WA_Fn-UseC_-Sales-Win-Loss.csv")
path_to_move_to <- file.path(getwd(), "dataset/WA_Fn-UseC_-Sales-Win-Loss.csv")
file.copy(from = path_to_original_file, to = path_to_move_to)
file.remove(path_to_original_file)
# Dataset
# load sales_dataset with downloaded data
sales_dataset <- read_csv(path_to_move_to)
#' ### 2.1.1 Initial Exploration
# Initial data exploration: shape, column types and a first look at the rows.
str(sales_dataset)
dim(sales_dataset)
head(sales_dataset)
glimpse(sales_dataset)
# this data set covers sales activities
#is there empty data
sum(is.na(sales_dataset))
#[1] 0
#no empty data in data set
#is there duplicate Ids - Opportunity Number
# BUG FIX: the original indexed sales_dataset$`OpportunityNumber` (space
# missing).  read_csv() returns a tibble, tibbles do not partial-match, so
# that column was NULL and no_unique was always empty instead of length 196.
# (The bare duplicated() call that echoed ~78k logicals to the console was
# dropped; no_unique carries the same information.)
no_unique <- sales_dataset$`Opportunity Number`[
duplicated(sales_dataset$`Opportunity Number`)]
length(no_unique)
#[1] 196
#there are 196 duplicates
#Remove duplicated rows on Opportunity Number
sales_dataset <- sales_dataset %>%
distinct(sales_dataset$`Opportunity Number`, .keep_all = TRUE)
nrow(sales_dataset)
#[1] 77829
# data preparation - column names, column format
# make column names without spaces (make.names turns "Opportunity Number"
# into "Opportunity.Number" etc.)
names(sales_dataset) <- make.names(names(sales_dataset))
# remove duplicate columns sales_dataset$`Opportunity Number`
# (distinct() above kept its key expression as an extra column; drop it and
# the raw id column)
sales_dataset = subset(sales_dataset, select =
-c(Opportunity.Number, sales_dataset..Opportunity.Number.))
str(sales_dataset)
# check column type and format columns
# For each categorical column: echo the observed level set (captured in the
# comments below), then convert the column to a factor.
levels(as.factor(sales_dataset$Supplies.Subgroup))
# [1] "Batteries & Accessories" "Car Electronics" "Exterior Accessories"
# [4] "Garage & Car Care" "Interior Accessories" "Motorcycle Parts"
# [7] "Performance Parts" "Replacement Parts" "Shelters & RV"
#[10] "Tires & Wheels" "Towing & Hitches"
# make as factor
sales_dataset$Supplies.Subgroup=factor(sales_dataset$Supplies.Subgroup)
levels(as.factor(sales_dataset$Supplies.Group))
#[1] "Car Accessories" "Car Electronics" "Performance & Non-auto"
#[4] "Tires & Wheels"
# make as factor
sales_dataset$Supplies.Group=factor(sales_dataset$Supplies.Group)
levels(as.factor(sales_dataset$Region))
#[1] "Mid-Atlantic" "Midwest" "Northeast" "Northwest" "Pacific"
#[6] "Southeast" "Southwest"
# make as factor
sales_dataset$Region=factor(sales_dataset$Region)
levels(as.factor(sales_dataset$Route.To.Market))
#[1] "Fields Sales" "Other" "Reseller" "Telecoverage" "Telesales"
# make as factor
# set levels "Fields Sales" "Reseller" "Telecoverage" "Telesales" "Other"
# (explicit order so "Other" sorts last instead of alphabetically)
sales_dataset$Route.To.Market <- factor(sales_dataset$Route.To.Market, levels=
c("Fields Sales", "Reseller", "Telecoverage", "Telesales", "Other"))
levels(as.factor(sales_dataset$Opportunity.Result))
#[1] "Loss" "Won"
# make as factor
# make base level for Opportunity.Result to Won ("Won" becomes the positive
# / first class for caret's confusionMatrix)
sales_dataset$Opportunity.Result <- factor(sales_dataset$Opportunity.Result,
levels=c("Won", "Loss"))
# move Opportunity.Result column to the start
sales_dataset <- sales_dataset %>% select(Opportunity.Result, everything())
levels(as.factor(sales_dataset$Competitor.Type))
#[1] "Known" "None" "Unknown"
# make as factor
sales_dataset$Competitor.Type=factor(sales_dataset$Competitor.Type)
levels(as.factor(sales_dataset$Deal.Size.Category))
#[1] "1" "2" "3" "4" "5" "6" "7"
# make as factor
sales_dataset$Deal.Size.Category=factor(sales_dataset$Deal.Size.Category)
levels(as.factor(sales_dataset$Client.Size.By.Revenue))
#[1] "1" "2" "3" "4" "5"
# make as factor
sales_dataset$Client.Size.By.Revenue=factor(sales_dataset$Client.Size.By.Revenue)
levels(as.factor(sales_dataset$Client.Size.By.Employee.Count))
#[1] "1" "2" "3" "4" "5"
# make as factor
sales_dataset$Client.Size.By.Employee.Count=
factor(sales_dataset$Client.Size.By.Employee.Count)
levels(as.factor(sales_dataset$Revenue.From.Client.Past.Two.Years))
#[1] "0" "1" "2" "3" "4"
# make as factor
sales_dataset$Revenue.From.Client.Past.Two.Years=
factor(sales_dataset$Revenue.From.Client.Past.Two.Years)
# structure of data set after all conversions
str(sales_dataset)
#tibble [77,829 x 18] (S3: tbl_df/tbl/data.frame)
# $ Opportunity.Result : Factor w/ 2 levels "Won","Loss": 1 2 1 2 2 2 1 2 2 2 ...
# $ Supplies.Subgroup : Factor w/ 11 levels "Batteries & Accessories",..: 3 3 6 9 3 9 4 3 1 3 ...
# $ Supplies.Group : Factor w/ 4 levels "Car Accessories",..: 1 1 3 3 1 3 1 1 1 1 ...
# $ Region : Factor w/ 7 levels "Mid-Atlantic",..: 4 5 5 2 5 5 5 5 4 5 ...
# $ Route.To.Market : Factor w/ 5 levels "Fields Sales",..: 1 2 2 2 2 2 1 1 1 2 ...
# $ Elapsed.Days.In.Sales.Stage : num [1:77829] 76 63 24 16 69 89 111 82 68 18 ...
# $ Sales.Stage.Change.Count : num [1:77829] 13 2 7 5 11 3 12 6 8 7 ...
# $ Total.Days.Identified.Through.Closing : num [1:77829] 104 163 82 124 91 114 112 70 156 50 ...
# $ Total.Days.Identified.Through.Qualified: num [1:77829] 101 163 82 124 13 0 112 70 156 50 ...
# $ Opportunity.Amount.USD : num [1:77829] 0 0 7750 0 69756 ...
# $ Client.Size.By.Revenue : Factor w/ 5 levels "1","2","3","4",..: 5 3 1 1 1 5 4 1 1 1 ...
# $ Client.Size.By.Employee.Count : Factor w/ 5 levels "1","2","3","4",..: 5 5 1 1 1 1 5 1 5 1 ...
# $ Revenue.From.Client.Past.Two.Years : Factor w/ 5 levels "0","1","2","3",..: 1 1 1 1 1 1 1 1 1 1 ...
# $ Competitor.Type : Factor w/ 3 levels "Known","None",..: 3 3 3 1 3 3 3 1 2 3 ...
# $ Ratio.Days.Identified.To.Total.Days : num [1:77829] 0.696 0 1 1 0 ...
# $ Ratio.Days.Validated.To.Total.Days : num [1:77829] 0.114 1 0 0 0.141 ...
# $ Ratio.Days.Qualified.To.Total.Days : num [1:77829] 0.154 0 0 0 0 ...
# $ Deal.Size.Category : Factor w/ 7 levels "1","2","3","4",..: 1 1 1 1 4 5 2 6 6 4 ...
#' ### 2.1.2 Project Data Sets
# preparing data sets for train, test and final validation
# final validation set (validation_set) will be 10% of the entire sales_dataset
# seed fixed (with the pre-3.6 "Rounding" sampler) for reproducible splits
set.seed(211120, sample.kind = "Rounding")
test_index <- createDataPartition(y = sales_dataset$Opportunity.Result,
times = 1,
p = 0.1,
list = FALSE)
# final validation sets (createDataPartition stratifies on the outcome)
sales_set <- sales_dataset[-test_index,]
validation_set <- sales_dataset[test_index,]
# split sales_set (80/20) to train and test algorithms
set.seed(211120, sample.kind = "Rounding")
test2_index <- createDataPartition(y = sales_set$Opportunity.Result,
times = 1,
p = 0.2,
list = FALSE)
train_sales_set <- sales_set[-test2_index,]
test_sales_set <- sales_set[test2_index,]
# data set used in projects
# summary tables: row counts and percentage shares of each split, rendered
# side by side with kable()
part1 <- nrow(sales_set) / nrow(sales_dataset)
part2 <- nrow(validation_set) / nrow(sales_dataset)
table_stes <- tibble(sales_dataset_split="sales_set",
rows=format(nrow(sales_set), big.mark= ',' ),
percentage = percent(part1, accuracy=1) )
table_stes <- bind_rows(table_stes, tibble(sales_dataset_split="validation_set",
rows=format(nrow(validation_set), big.mark= ',' ),
percentage = percent(part2, accuracy=1)) )
part3 <- nrow(train_sales_set) / nrow(sales_set)
part4 <- nrow(test_sales_set) / nrow(sales_set)
table_sets_b <- tibble(sales_set_split="train_sales_set",
rows=format(nrow(train_sales_set), big.mark= ',' ),
percentage = percent(part3, accuracy=1) )
table_sets_b <- bind_rows(table_sets_b, tibble(sales_set_split="test_sales_set",
rows=format(nrow(test_sales_set), big.mark= ',' ),
percentage = percent(part4, accuracy=1) ))
kable( list(table_stes, table_sets_b), caption = 'Data sets',
booktabs = TRUE, valign = 't') %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.2 Exploration and Vizualization
# Column-name vectors reused throughout the exploration and modeling below:
# pred_col = every predictor, categ_col = the categorical ones (plus the
# outcome), cont_col = the continuous ones.
# all predictors, all columns except Opportunity.Result
pred_col <- c("Supplies.Subgroup",
"Supplies.Group",
"Region",
"Route.To.Market",
"Elapsed.Days.In.Sales.Stage",
"Sales.Stage.Change.Count",
"Total.Days.Identified.Through.Closing",
"Total.Days.Identified.Through.Qualified",
"Opportunity.Amount.USD",
"Client.Size.By.Revenue" ,
"Client.Size.By.Employee.Count",
"Revenue.From.Client.Past.Two.Years",
"Competitor.Type",
"Ratio.Days.Identified.To.Total.Days",
"Ratio.Days.Validated.To.Total.Days",
"Ratio.Days.Qualified.To.Total.Days",
"Deal.Size.Category")
# categorical variables (includes the outcome Opportunity.Result)
categ_col <- c("Opportunity.Result",
"Supplies.Subgroup",
"Supplies.Group",
"Region",
"Route.To.Market",
"Competitor.Type",
"Deal.Size.Category",
"Client.Size.By.Revenue" ,
"Client.Size.By.Employee.Count",
"Revenue.From.Client.Past.Two.Years" )
# continuous variables
cont_col <- c( "Elapsed.Days.In.Sales.Stage",
"Sales.Stage.Change.Count",
"Total.Days.Identified.Through.Closing",
"Total.Days.Identified.Through.Qualified",
"Opportunity.Amount.USD",
"Ratio.Days.Identified.To.Total.Days",
"Ratio.Days.Validated.To.Total.Days",
"Ratio.Days.Qualified.To.Total.Days")
#' ### 2.2.1 Correlation
# heatmap
# set for the correlation: keep only the numeric columns (drop the outcome
# and every factor column)
cor_sales_set = subset(sales_set,
select = -c(Opportunity.Result, Supplies.Subgroup,
Supplies.Group, Region, Route.To.Market,
Competitor.Type, Deal.Size.Category,
Client.Size.By.Revenue,
Client.Size.By.Employee.Count,
Revenue.From.Client.Past.Two.Years ))
# correlation matrix
cor_mat_sales <- round(cor(cor_sales_set),2)
# create the correlation heatmap
# library(reshape2)
# NOTE(review): melted_cor_mat_sales is never used below - ggcorrplot()
# consumes the matrix directly; this line looks like a leftover.
melted_cor_mat_sales <- melt(cor_mat_sales)
# save correlation heatmap in figures
png(file="figs/corr_1.png", width=480, height=240)
# show the correlation matrix (lower triangle, hierarchically ordered)
ggcorrplot(cor_mat_sales,
hc.order = TRUE,
type = "lower",
lab = TRUE, lab_size = 2.5) +
theme(axis.text.x = element_text(size = 8)) +
theme(axis.text.y = element_text(size = 8))
dev.off()
# call heatmap graph in report
include_graphics("figs/corr_1.png",
auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
# drop Total.Days.Identified.Through.Closing from every split and from the
# predictor lists - presumably because of the high correlation seen in the
# heatmap (with Total.Days.Identified.Through.Qualified) - TODO confirm
sales_set = subset(sales_set, select = -c(Total.Days.Identified.Through.Closing))
train_sales_set = subset(train_sales_set, select =
-c(Total.Days.Identified.Through.Closing))
test_sales_set = subset(test_sales_set, select =
-c(Total.Days.Identified.Through.Closing))
validation_set = subset(validation_set, select =
-c(Total.Days.Identified.Through.Closing))
pred_col <- pred_col[pred_col != "Total.Days.Identified.Through.Closing"]
cont_col <- cont_col[cont_col != "Total.Days.Identified.Through.Closing"]
#' ### 2.2.2 Near Zero Variance Predictors
# identification of near zero variance predictors using nearZeroVar
# (saveMetrics = TRUE returns the full metrics table instead of indices)
kable(nearZeroVar(sales_set[, pred_col],
saveMetrics = TRUE), caption = 'nearZeroVar', digits = 4, booktabs=TRUE) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
# count values for Revenue.From.Client.Past.Two.Years
tbl1 <- sales_set %>% count(Revenue.From.Client.Past.Two.Years)
kable( tbl1, digits = 3, booktabs = TRUE, align = c("r", "r")) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.2.3 Statistics Summary
# data set variables summary statistics
#summary(sales_set)
# rendered in four chunks of columns so each table fits the page width
temp <- sales_set[ , 1:5]
kable( summary(temp), booktabs = TRUE) %>%
kable_styling(latex_options = "scale_down", font_size = 8)
temp <- sales_set[ , 6:9]
kable( summary(temp), booktabs = TRUE) %>%
kable_styling(latex_options = "scale_down", font_size = 8)
temp <- sales_set[ , 10:13]
kable( summary(temp), booktabs = TRUE) %>%
kable_styling(latex_options = "scale_down", font_size = 8)
temp <- sales_set[ , 14:17]
kable( summary(temp), booktabs = TRUE) %>%
kable_styling(latex_options = "scale_down", font_size = 8)
#' ### 2.2.4 QQ Plot
# save qq plot in figures
png(file="figs/qq_1.png", width=480, height=270)
# QQ plots to look up if the feature is normally distributed
# (one panel per continuous predictor, standardized with scale())
qq_grid <- lapply(cont_col, FUN=function(var) {
sales_set %>%
dplyr::select(all_of(var)) %>%
ggplot(data = ., aes(sample = scale(.))) +
stat_qq() +
stat_qq_line(colour = "red") +
theme(axis.text.x = element_text(hjust = 1)) +
ggtitle(var)+
theme(title =element_text(size=8),
axis.text=element_text(size=9),
axis.title=element_text(size=9))
})
do.call(grid.arrange, args=c(qq_grid, list(ncol=3)))
dev.off()
# call qq plots in report
include_graphics("figs/qq_1.png",
auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
#' ### 2.2.5 Density Plot
# save density plot in figures
png(file="figs/density_grid_1.png", width=480, height=330)
# grid of density plots, one per continuous predictor, filled by outcome
# NOTE(review): aes_string() is deprecated in newer ggplot2; aes(.data[[var]])
# is the modern equivalent - worth migrating.
dens_grid <- lapply(cont_col, FUN=function(var) {
# Build the plots
ggplot(sales_set) +
geom_density(aes_string(x = var, fill = "Opportunity.Result"), alpha = 0.5) +
ggtitle(var)+
theme(title =element_text(size=8),
axis.text=element_text(size=6),
axis.title=element_text(size=8), axis.title.x=element_blank())
})
do.call(grid_arrange_shared_legend, args=c(dens_grid, nrow = 3, ncol = 3))
dev.off()
# call density plots in report
include_graphics("figs/density_grid_1.png",
auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
#' ### 2.2.6 Prevalence
# distribution of Won Loss Opportunity.Result
# class-balance table: row counts and percentage shares of each outcome
sales_set_won <- sales_set[which(sales_set$Opportunity.Result == "Won"),]
sales_set_loss <- sales_set[which(sales_set$Opportunity.Result == "Loss"),]
part1 <- nrow(sales_set_won) / nrow(sales_set)
part2 <- nrow(sales_set_loss) / nrow(sales_set)
table_stes <- tibble(Opportunity.Result="Won",
rows=format(nrow(sales_set_won), big.mark= ',' ),
percentage = percent(part1, accuracy=1) )
table_stes <- bind_rows(table_stes, tibble(Opportunity.Result="Loss",
rows=format(nrow(sales_set_loss), big.mark= ',' ) ,
percentage = percent(part2, accuracy = 1) ))
kable( table_stes, booktabs = TRUE, valign = 't') %>%
kable_styling(latex_options = "hold_position", font_size = 8)
#' ### 2.2.7 Class Distribution
# save categorical plots in figures
# FIXES vs. original: p8's title said "Client.Size.By.Revenue" although it
# plots Client.Size.By.Employee.Count; and geom_bar() plots raw counts, so
# the y axis is labelled "Count" instead of the misleading "Proportion".
png(file="figs/class_1.png", width=480, height=300)
# categorical variables class distribution, bars stacked by Opportunity.Result
p1 <- ggplot(data = sales_set) + geom_bar(aes(x=Supplies.Subgroup,
fill=Opportunity.Result)) + labs(title = "Supplies.Subgroup") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x = element_blank()) +
theme(legend.position = "none") + ylab("Count")
p2 <- ggplot(data = sales_set) + geom_bar(aes(x=Supplies.Group,
fill=Opportunity.Result)) + labs(title = "Supplies.Group") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank(), axis.title.y = element_blank()) +
theme(legend.position = "none") + ylab("Count")
p3 <- ggplot(data = sales_set) + geom_bar(aes(x=Region,
fill=Opportunity.Result)) + labs(title = "Region") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank(), axis.title.y = element_blank()) +
theme(legend.position = "none") + ylab("Count")
p4 <- ggplot(data = sales_set) + geom_bar(aes(x=Route.To.Market,
fill=Opportunity.Result)) + labs(title = "Route.To.Market") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank()) +
theme(legend.position = "none") + ylab("Count")
p5 <- ggplot(data = sales_set) + geom_bar(aes(x=Competitor.Type,
fill=Opportunity.Result)) + labs(title = "Competitor.Type") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank(), axis.title.y = element_blank()) +
theme(legend.position = "none") + ylab("Count")
p6 <- ggplot(data = sales_set) + geom_bar(aes(x=Deal.Size.Category,
fill=Opportunity.Result)) + labs(title = "Deal.Size.Category") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank(), axis.title.y = element_blank()) +
theme(legend.position = "none") + ylab("Count")
p7 <- ggplot(data = sales_set) + geom_bar(aes(x=Client.Size.By.Revenue,
fill=Opportunity.Result)) + labs(title = "Client.Size.By.Revenue") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank()) +
theme(legend.position = "none") + ylab("Count")
# BUG FIX: title previously said "Client.Size.By.Revenue" for this panel
p8 <-ggplot(data = sales_set) + geom_bar(aes(x=Client.Size.By.Employee.Count,
fill=Opportunity.Result)) + labs(title = "Client.Size.By.Employee.Count") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank(), axis.title.y = element_blank()) +
theme(legend.position = "none") + ylab("Count")
p9 <- ggplot(data = sales_set) + geom_bar(aes(x=Revenue.From.Client.Past.Two.Years,
fill=Opportunity.Result))+labs(title = "Revenue.From.Client.Past.Two.Years") +
theme(title =element_text(size=9), axis.text.x=element_blank(),
axis.title.x=element_blank(), axis.title.y = element_blank()) +
theme(legend.position = "none") + ylab("Count")
ggarrange(p1, p2, p3, p4, p5, p6, p7, p8, p9, nrow=3, ncol=3,
common.legend = TRUE, legend="bottom")
dev.off()
# call class plots in report
include_graphics("figs/class_1.png",
auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
#' ### 2.2.8 Feature Selections
# variable importance for Opportunity.Result
# PERF FIX: the original called filterVarImp() four times (once to print,
# three times inside the tibble); it is expensive on ~70k rows, so compute
# it once and reuse the result - output is identical.
var_imp <- filterVarImp(x = sales_set[,pred_col],
y = sales_set$Opportunity.Result)
var_imp
# variable importance table, sorted by the "Won" column (descending)
tbl_varimp <- tibble(Feature = rownames(var_imp),
Won = var_imp$Won,
Loss = var_imp$Loss) %>%
arrange(desc(Won))
kable( tbl_varimp, digits = 3, caption='variable importance',
booktabs = TRUE, valign = 't' ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.3 Modeling Approach
#' The goal is to successfully predict a successful sale ("Won"),
#' which we try to achieve by testing different algorithms.
#' Each algorithm is trained on train_sales_set with ten-fold cross validation.
#' The best trained fit is then evaluated on test_sales_set
#' and the resulting measures are calculated with the confusionMatrix function.
#' As a basic measure, we use a baseline model.
#' ## 3 Results
#' ### 3.1 Baseline Model
# baseline model: a fair coin flip between "Won" and "Loss" - any real model
# should beat these numbers.
set.seed(211120, sample.kind = "Rounding")
# guess with equal probability of Opportunity.Result Won, Loss
baseline <- sample(c("Loss", "Won"), nrow(train_sales_set), replace = TRUE)
baseline <- factor(baseline, levels = c("Won", "Loss"))
# use confusionMatrix to view the results
# (cm_*_1 gives accuracy/sensitivity/specificity; cm_*_2 gives
# precision/recall/F1 with "Won" as the positive class)
cm_baseline_1 <- confusionMatrix(data = factor(baseline),
reference = factor(train_sales_set$Opportunity.Result))
cm_baseline_2 <- confusionMatrix(data = factor(baseline),
reference = factor(train_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for baseline
baseline_results <- tibble(Model = "baseline",
Accuracy = cm_baseline_1$overall["Accuracy"],
Sensitivity = cm_baseline_1$byClass["Sensitivity"],
Specificity = cm_baseline_1$byClass["Specificity"],
Precision = cm_baseline_2$byClass["Precision"],
F1_Score = cm_baseline_2$byClass["F1"])
#baseline_results
kable( baseline_results, caption = "baseline model result" ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.2 Generalized Linear Model
# glm model, fitted on all predictors
set.seed(211120, sample.kind = "Rounding")
# train glm model with 10-fold cross validation
train_glm_all <- train(Opportunity.Result ~ .,
method = "glm",
data = train_sales_set,
trControl = trainControl(method = "cv", number = 10, p = 0.9))
# predict on test data set
glm_all_preds <- predict(train_glm_all, test_sales_set)
## use confusionMatrix to view the results
cm_glm_1 <- confusionMatrix(data = factor(glm_all_preds),
reference = factor(test_sales_set$Opportunity.Result))
cm_glm_2 <- confusionMatrix(data = factor(glm_all_preds),
reference = factor(test_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for glm model
glm_results <- tibble(Model = "glm",
Accuracy = cm_glm_1$overall["Accuracy"],
Sensitivity = cm_glm_1$byClass["Sensitivity"],
Specificity = cm_glm_1$byClass["Specificity"],
Precision = cm_glm_2$byClass["Precision"],
F1_Score = cm_glm_2$byClass["F1"])
kable( glm_results, caption = "glm model result" ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.3 Naive Bayes Model
# naive_bayes model, fitted on all predictors
set.seed(211120, sample.kind = "Rounding")
# train naive_bayes model with 10-fold cross validation
train_nb_all <- train(Opportunity.Result ~ . ,
data = train_sales_set,
method = "naive_bayes",
trControl = trainControl(method = "cv", number = 10, p = 0.9))
# predict on test data set
nb_preds_all <- predict(train_nb_all, test_sales_set)
## use confusionMatrix to view the results
cm_nb_1 <- confusionMatrix(data = factor(nb_preds_all), reference =
factor(test_sales_set$Opportunity.Result))
cm_nb_2 <- confusionMatrix(data = factor(nb_preds_all), reference =
factor(test_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for naivebayes
# NOTE(review): labelled "naivebayes" here but "naive_bayes" in the combined
# results table built in section 3.9 - the labels could be unified.
naivebayes_results <- tibble(Model = "naivebayes",
Accuracy = cm_nb_1$overall["Accuracy"],
Sensitivity = cm_nb_1$byClass["Sensitivity"],
Specificity = cm_nb_1$byClass["Specificity"],
Precision = cm_nb_2$byClass["Precision"],
F1_Score = cm_nb_2$byClass["F1"])
kable( naivebayes_results, caption = "naive bayes model result" ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.4 K-Nearest Neighbors
# train and test set normalization for knn
# function to normalize values
# Min-max rescale a numeric vector to the [0, 1] interval.
# Note: yields NaN for a constant vector and propagates NA, exactly like the
# original formulation.
normalize <- function(x) {
  bounds <- range(x)
  (x - bounds[1]) / (bounds[2] - bounds[1])
}
# --- Normalize the train and test sets for knn -------------------------------
# knn is distance based, so every predictor must be numeric and on a common
# [0, 1] scale.  The original code repeated the identical conversions for the
# train and the test set; they are factored into one helper applied to both.
# NOTE(review): each set is rescaled with its own min/max (as in the original)
# rather than with the training set's range - worth confirming this is intended.
encode_and_rescale <- function(df) {
  # categorical predictors become their numeric factor codes
  # (factor() on an existing factor keeps its level order, e.g. the custom
  # order set earlier for Route.To.Market)
  factor_cols <- c("Supplies.Subgroup", "Supplies.Group", "Region",
                   "Route.To.Market", "Client.Size.By.Revenue",
                   "Client.Size.By.Employee.Count",
                   "Revenue.From.Client.Past.Two.Years",
                   "Competitor.Type", "Deal.Size.Category")
  for (col in factor_cols) {
    df[[col]] <- as.numeric(factor(df[[col]]))
  }
  # column 1 is the outcome Opportunity.Result; min-max scale the 16 predictors
  data.frame(df[1], lapply(df[2:17], normalize))
}
#normalized train set n_
n_train_sales_set <- encode_and_rescale(train_sales_set)
#normalized test set n_
n_test_sales_set <- encode_and_rescale(test_sales_set)
#normalized train and test sets for knn:
# n_train_sales_set
# n_test_sales_set
# knn model
set.seed(211120, sample.kind = "Rounding")
# train knn model on the six most important predictors (per section 2.2.8),
# using the normalized all-numeric data sets built above
train_knn_bp <- train(Opportunity.Result ~ Total.Days.Identified.Through.Qualified +
Deal.Size.Category + Ratio.Days.Qualified.To.Total.Days +
Revenue.From.Client.Past.Two.Years +
Ratio.Days.Identified.To.Total.Days + Opportunity.Amount.USD,
method = "knn",
data = n_train_sales_set,
trControl = trainControl(method = "cv", number = 10, p = 0.9))
# predict on test data set (the normalized copy)
knn_preds_bp <- predict(train_knn_bp, n_test_sales_set)
### use confusionMatrix to view the results
cm_knn_1 <- confusionMatrix(data = factor(knn_preds_bp), reference =
factor(n_test_sales_set$Opportunity.Result))
cm_knn_2 <- confusionMatrix(data = factor(knn_preds_bp), reference =
factor(n_test_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for knn
knn_results <- tibble(Model = "knn",
Accuracy = cm_knn_1$overall["Accuracy"],
Sensitivity = cm_knn_1$byClass["Sensitivity"],
Specificity = cm_knn_1$byClass["Specificity"],
Precision = cm_knn_2$byClass["Precision"],
F1_Score = cm_knn_2$byClass["F1"])
kable( knn_results, caption = "knn model result" ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.5 Quadratic Discriminant Analysis
# qda model
set.seed(211120, sample.kind = "Rounding")
# train qda model on the same six top predictors as knn, but on the raw
# (non-normalized) training set
train_qda_bp <- train(Opportunity.Result ~ Total.Days.Identified.Through.Qualified +
Deal.Size.Category + Ratio.Days.Qualified.To.Total.Days +
Revenue.From.Client.Past.Two.Years +
Ratio.Days.Identified.To.Total.Days + Opportunity.Amount.USD,
data = train_sales_set,
method = "qda",
trControl = trainControl(method = "cv", number = 10, p = 0.9))
# predict on test data set
qda_preds_bp <- predict(train_qda_bp, test_sales_set)
### use confusionMatrix to view the results
cm_qda_1 <- confusionMatrix(data = factor(qda_preds_bp), reference =
factor(test_sales_set$Opportunity.Result))
cm_qda_2 <- confusionMatrix(data = factor(qda_preds_bp), reference =
factor(test_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for qda
qda_results <- tibble(Model = "qda",
Accuracy = cm_qda_1$overall["Accuracy"],
Sensitivity = cm_qda_1$byClass["Sensitivity"],
Specificity = cm_qda_1$byClass["Specificity"],
Precision = cm_qda_2$byClass["Precision"],
F1_Score = cm_qda_2$byClass["F1"])
kable( qda_results, caption = "qda model result") %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.6 Random Forest
# !!!this can take long execution time
# rf model
set.seed(211120, sample.kind = "Rounding")
# train rf model: 100 trees, tuning mtry over 1..10 with 10-fold CV
train_rf_all <- train(Opportunity.Result ~ . ,
data = train_sales_set,
method = "rf",
ntree = 100, #ntree = 100
tuneGrid = data.frame(mtry = seq(1:10)),
trControl = trainControl(method = "cv", number = 10, p = 0.9))
# predict on test data set
rf_preds_all <- predict(train_rf_all, test_sales_set)
# use confusionMatrix to view the results
cm_RF_1 <- confusionMatrix(data = factor(rf_preds_all), reference =
factor(test_sales_set$Opportunity.Result))
cm_RF_2 <- confusionMatrix(data = factor(rf_preds_all), reference =
factor(test_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for rf
rf_results <- tibble(Model = "rf",
Accuracy = cm_RF_1$overall["Accuracy"],
Sensitivity = cm_RF_1$byClass["Sensitivity"],
Specificity = cm_RF_1$byClass["Specificity"],
Precision = cm_RF_2$byClass["Precision"],
F1_Score = cm_RF_2$byClass["F1"])
kable( rf_results, caption = "rf model result" ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.7 AdaBoost
# !!!this can take very long execution time
# adaboost model
set.seed(211120, sample.kind = "Rounding")
# train adaboost model on the six top predictors with 10-fold CV
train_adab_bp <- train(Opportunity.Result ~ Total.Days.Identified.Through.Qualified +
Deal.Size.Category + Ratio.Days.Qualified.To.Total.Days +
Revenue.From.Client.Past.Two.Years +
Ratio.Days.Identified.To.Total.Days + Opportunity.Amount.USD,
data = train_sales_set,
method = "adaboost",
trControl = trainControl(method = "cv", number = 10, p = 0.9))
# predict on test data set
adab_preds_bp <- predict(train_adab_bp, test_sales_set)
# use confusionMatrix to view the results
cm_adab_1 <- confusionMatrix(data = factor(adab_preds_bp), reference =
factor(test_sales_set$Opportunity.Result))
cm_adab_2 <- confusionMatrix(data = factor(adab_preds_bp), reference =
factor(test_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for adaboost
adaboost_results <- tibble(Model = "adaboost",
Accuracy = cm_adab_1$overall["Accuracy"],
Sensitivity = cm_adab_1$byClass["Sensitivity"],
Specificity = cm_adab_1$byClass["Specificity"],
Precision = cm_adab_2$byClass["Precision"],
F1_Score = cm_adab_2$byClass["F1"])
kable( adaboost_results, caption = "adaboost model result" ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.8 Ensemble Model
# Majority-vote ensemble over the six already-trained models:
# "glm", "naive_bayes", "knn", "qda", "rf", "adaboost"
models <- c( "glm", "naive_bayes", "knn", "qda", "rf", "adaboost")
set.seed(211120, sample.kind = "Rounding")
# Map each model name to its fitted caret object (trained in sections 3.2-3.7);
# replaces the original if/else chain.
fits <- list(glm         = train_glm_all,   # glm: all predictors
             naive_bayes = train_nb_all,    # naive bayes: all predictors
             knn         = train_knn_bp,    # knn: top predictors, normalized data
             qda         = train_qda_bp,    # qda: top predictors
             rf          = train_rf_all,    # random forest: all predictors
             adaboost    = train_adab_bp)   # adaboost: top predictors
# Predict with every model on the test set; knn was trained on the normalized
# copy, so it must also predict on it.
# ROBUSTNESS FIX: as.character() guarantees a character matrix.  sapply-style
# simplification of a list of factor predictions can (depending on the R
# version) collapse to integer level codes, which would make the
# `preds == "Won"` comparison below silently all-FALSE.
preds <- vapply(models, function(model) {
  newdata <- if (model == "knn") n_test_sales_set else test_sales_set
  as.character(predict(object = fits[[model]], newdata = newdata))
}, character(nrow(test_sales_set)))
# Combine all models: a row is "Won" when more than half of the six models
# vote "Won" (a 3-3 tie therefore goes to "Loss").
votes <- rowMeans(preds == "Won")
ens_preds <- factor(ifelse(votes > 0.5, "Won", "Loss"))
ens_preds <- factor(ens_preds, levels=c("Won", "Loss"))
# use confusionMatrix to view the results
cm_ens_1 <- confusionMatrix(data = factor(ens_preds), reference =
factor(test_sales_set$Opportunity.Result))
cm_ens_2 <- confusionMatrix(data = factor(ens_preds), reference =
factor(test_sales_set$Opportunity.Result),
mode = "prec_recall", positive="Won")
# results table for Ensemble
ensemble_results <- tibble(Model = "ensemble",
Accuracy = cm_ens_1$overall["Accuracy"],
Sensitivity = cm_ens_1$byClass["Sensitivity"],
Specificity = cm_ens_1$byClass["Specificity"],
Precision = cm_ens_2$byClass["Precision"],
F1_Score = cm_ens_2$byClass["F1"])
kable(ensemble_results, caption = "ensemble model result") %>%
kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.9 Best Model Selection
# all model results in table model_results
# NOTE(review): this re-builds rows that already exist as the individual
# *_results tibbles above; also the model is labelled "naive_bayes" here but
# "naivebayes" in naivebayes_results - the labels could be unified.
# results baseline
model_results <- tibble(Model = "baseline",
Accuracy = cm_baseline_1$overall["Accuracy"],
Sensitivity = cm_baseline_1$byClass["Sensitivity"],
Specificity = cm_baseline_1$byClass["Specificity"],
Precision = cm_baseline_2$byClass["Precision"],
F1_Score = cm_baseline_2$byClass["F1"])
# glm
model_results <- model_results %>% add_row(Model = "glm",
Accuracy = cm_glm_1$overall["Accuracy"],
Sensitivity = cm_glm_1$byClass["Sensitivity"],
Specificity = cm_glm_1$byClass["Specificity"],
Precision = cm_glm_2$byClass["Precision"],
F1_Score = cm_glm_2$byClass["F1"])
# naive_bayes
model_results <- model_results %>% add_row(Model = "naive_bayes",
Accuracy = cm_nb_1$overall["Accuracy"],
Sensitivity = cm_nb_1$byClass["Sensitivity"],
Specificity = cm_nb_1$byClass["Specificity"],
Precision = cm_nb_2$byClass["Precision"],
F1_Score = cm_nb_2$byClass["F1"])
# knn
model_results <- model_results %>% add_row(Model = "knn",
Accuracy = cm_knn_1$overall["Accuracy"],
Sensitivity = cm_knn_1$byClass["Sensitivity"],
Specificity = cm_knn_1$byClass["Specificity"],
Precision = cm_knn_2$byClass["Precision"],
F1_Score = cm_knn_2$byClass["F1"])
# qda
model_results <- model_results %>% add_row(Model = "qda",
Accuracy = cm_qda_1$overall["Accuracy"],
Sensitivity = cm_qda_1$byClass["Sensitivity"],
Specificity = cm_qda_1$byClass["Specificity"],
Precision = cm_qda_2$byClass["Precision"],
F1_Score = cm_qda_2$byClass["F1"])
# rf
model_results <- model_results %>% add_row(Model = "rf",
Accuracy = cm_RF_1$overall["Accuracy"],
Sensitivity = cm_RF_1$byClass["Sensitivity"],
Specificity = cm_RF_1$byClass["Specificity"],
Precision = cm_RF_2$byClass["Precision"],
F1_Score = cm_RF_2$byClass["F1"])
# adaboost
model_results <- model_results %>% add_row(Model = "adaboost",
Accuracy = cm_adab_1$overall["Accuracy"],
Sensitivity = cm_adab_1$byClass["Sensitivity"],
Specificity = cm_adab_1$byClass["Specificity"],
Precision = cm_adab_2$byClass["Precision"],
F1_Score = cm_adab_2$byClass["F1"])
#ensemble
model_results <- model_results %>% add_row(Model = "ensemble",
Accuracy = cm_ens_1$overall["Accuracy"],
Sensitivity = cm_ens_1$byClass["Sensitivity"],
Specificity = cm_ens_1$byClass["Specificity"],
Precision = cm_ens_2$byClass["Precision"],
F1_Score = cm_ens_2$byClass["F1"])
#model_results
kable( model_results, caption = "Model results", digits = 4 ) %>%
kable_styling(latex_options = "HOLD_position", font_size = 8) %>%
row_spec(0,bold=TRUE)
# ROC and Precision-Recall curves for all models, using the precrec package.
# precrec expects numeric scores and 0/1 labels, so the "Won"/"Loss"
# predictions and ground truths are recoded as Won -> 1, Loss -> 0.
as_binary <- function(v) {
  as.numeric(str_replace_all(str_replace_all(v, 'Won', '1'), 'Loss', '0'))
}
scores_glm <- as_binary(glm_all_preds)
labels_glm <- as_binary(test_sales_set$Opportunity.Result)
scores_nb <- as_binary(nb_preds_all)
labels_nb <- as_binary(test_sales_set$Opportunity.Result)
# knn was fit on the normalized data, so its truth comes from the normalized
# test set
scores_knn <- as_binary(knn_preds_bp)
labels_knn <- as_binary(n_test_sales_set$Opportunity.Result)
scores_qda <- as_binary(qda_preds_bp)
labels_qda <- as_binary(test_sales_set$Opportunity.Result)
scores_rf <- as_binary(rf_preds_all)
labels_rf <- as_binary(test_sales_set$Opportunity.Result)
scores_adab <- as_binary(adab_preds_bp)
labels_adab <- as_binary(test_sales_set$Opportunity.Result)
scores_ens <- as_binary(ens_preds)
labels_ens <- as_binary(test_sales_set$Opportunity.Result)
# draw ROC and Precision-Recall side by side and save the figure under figs/
png(file="figs/result_plot_1.png", width=480, height=180)
# one plot covering "glm", "naive_bayes", "knn", "qda", "rf", "adaboost" and
# the ensemble: join the per-model score and label vectors
all_scores <- join_scores(scores_glm, scores_nb, scores_knn, scores_qda,
                          scores_rf, scores_adab, scores_ens)
all_labels <- join_labels(labels_glm, labels_nb, labels_knn, labels_qda,
                          labels_rf, labels_adab, labels_ens)
# attach model names and test-dataset ids
curve_data <- mmdata(all_scores, all_labels, modnames =
  c("glm", "naive_bayes", "knn", "qda", "rf", "adaboost", "ensemble" ),
  dsids = c(1, 2, 3, 4, 5, 6, 7))
# evaluate curves for all models / datasets
curve_fits <- evalmod(curve_data)
# average ROC curves
roc_plot <- autoplot(curve_fits, "ROC") + labs(title="ROC")
# average Precision-Recall curves
prc_plot <- autoplot(curve_fits, "PRC") + labs(title="Precision-Recall")
ggarrange(roc_plot, prc_plot, nrow=1, ncol=2, common.legend = TRUE, legend="right" )
dev.off()
# embed the saved figure in the report
include_graphics("figs/result_plot_1.png",
                 auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
#' ### 3.10 Final Validation
# !!!this can take long execution time
# Refit the best-performing model (random forest) on the whole sales_set and
# score it once against the held-out validation_set.
set.seed(211120, sample.kind = "Rounding")
# train final rf model on sales_set: 100 trees, mtry tuned over 1..10 with
# 10-fold cross-validation
train_final_rf_all <- train(Opportunity.Result ~ . ,
                            data = sales_set,
                            method = "rf",
                            ntree = 100, #ntree = 100
                            tuneGrid = data.frame(mtry = seq(1:10)),
                            trControl = trainControl(method = "cv", number = 10, p = 0.9))
# predict for final rf model on validation set
final_rf_preds_all <- predict(train_final_rf_all, validation_set)
# two confusion matrices: default mode for accuracy/sensitivity/specificity,
# and precision-recall mode (positive class "Won") for precision/F1
final_cm_RF_1 <- confusionMatrix(data = factor(final_rf_preds_all),
                                 reference = factor(validation_set$Opportunity.Result))
final_cm_RF_2 <- confusionMatrix(data = factor(final_rf_preds_all),
                                 reference = factor(validation_set$Opportunity.Result),
                                 mode = "prec_recall", positive="Won")
# final validation results table for rf model on validation set
final_val_results <- tibble(Model = "final validation rf",
                            Accuracy = final_cm_RF_1$overall["Accuracy"],
                            Sensitivity = final_cm_RF_1$byClass["Sensitivity"],
                            Specificity = final_cm_RF_1$byClass["Specificity"],
                            Precision = final_cm_RF_2$byClass["Precision"],
                            F1_Score = final_cm_RF_2$byClass["F1"])
kable(final_val_results, caption = "rf final model results") %>%
  kable_styling(latex_options = "HOLD_position", font_size = 8)
# end Time ALL
endTime_All_2 <- Sys.time()
# total elapsed time for the whole run (startTime_ALL_2 is set earlier in the
# script); the difference auto-prints at top level
endTime_All_2 - startTime_ALL_2
# end.
|
940d22f65479ab5142b050d2f24a890f128c6c8e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/LearnGeom/examples/Homothety.Rd.R
|
0b64ee366a41953229b22ab863f0ba6760798ff0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
Homothety.Rd.R
|
library(LearnGeom)
### Name: Homothety
### Title: Creates an homothety from a given polygon
### Aliases: Homothety
### ** Examples

# Drawing window large enough to hold the base triangle and both of its
# homothetic images.
x_min <- -2
x_max <- 6
y_min <- -3
y_max <- 5
CoordinatePlane(x_min, x_max, y_min, y_max)

# Base triangle with vertices (0,0), (1,1) and (2,0).
triangle <- CreatePolygon(c(0, 0), c(1, 1), c(2, 0))
Draw(triangle, "blue")

# Center of the homothety.
center <- c(-1, -2)

# Shrink by a ratio of 1/2, then enlarge by a ratio of 2, drawing the
# construction lines each time.
for (ratio in c(0.5, 2)) {
  scaled <- Homothety(triangle, center, ratio, lines = TRUE)
  Draw(scaled, "orange")
}
|
86846ba0159b0a43988798921fdeb71690ec70ab
|
ccd06ad4d52aec1366f03d2d73a95f4bfaed328e
|
/F0101-BvZINB4.test.R
|
eed1e8f7846aad7329bb6dd45580ae2425f2877d
|
[] |
no_license
|
Hunyong/SCdep
|
7e2fd15fec411108f811f6ef39cf9dd5678736d5
|
fa1184fc9291aa0b5113122cb2d2760d9de28945
|
refs/heads/master
| 2021-01-23T00:48:40.483227
| 2019-01-04T02:33:34
| 2019-01-04T02:33:34
| 92,848,572
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,575
|
r
|
F0101-BvZINB4.test.R
|
library(tidyverse)
#### basis functions ####
# Term-level building blocks for the E-step sums of the bivariate
# zero-inflated negative binomial (BvZINB4) model. Each returns a single
# summand; callers sum these over k = 0..x and/or m = 0..y.
# H0: summand for the mixture component in which both counts are observed
# (weighted by p1 in the callers).
H0 <- function(k, m, a0, a1, a2, b1, b2, x, y) {
  gamma(a1 + k) * gamma(k+1)^-1 * gamma(a1)^-1 * gamma(x + y + a0 -m -k) *
    gamma(x - k +1)^-1 * gamma(a0 + y - m)^-1 *
    b1^x * b2^y * (b1 + b2 + 1)^(k + m - x - y -a0) *
    (b1 + 1)^-(k + a1) * (b2 + 1)^-(m + a2)}
# H1: summand for the degenerate component used when y == 0.
H1 <- function(k, a0, a1, a2, b1, b2, x, y) { # being lazy (y, a2, b2 not needed)
  gamma(a0 + x - k) * gamma(a0)^-1 * gamma(x - k +1)^-1 *
    gamma(a1 + k) * gamma(k + 1)^-1 * gamma(a1)^-1 *
    b1^x * (b1 + 1)^-(x + a0 + a1)}
# H2: summand for the degenerate component used when x == 0.
# NOTE(review): `b2^x` breaks the symmetry with H1's `b1^x`; by analogy this
# factor would be `b2^y` — confirm against the written derivation. Also note
# that the logR*.H2 helpers below call H1, not H2, so H2 appears unused in
# this file.
H2 <- function(m, a0, a1, a2, b1, b2, x, y) {
  gamma(a0 + y - m) * gamma(a0)^-1 * gamma(y - m +1)^-1 *
    gamma(a2 + m) * gamma(m + 1)^-1 * gamma(a2)^-1 *
    b2^x * (b2 + 1)^-(y + a0 + a2)}
#### functions for ElogR0 ####
# Summands for E[log R0 | x, y], split by mixture component: .H0 covers the
# fully-observed part (weight p1), .H1 the y == 0 part (weight p2), and .H2
# the x == 0 part (weight p3). Callers sum these over k and/or m.
logR0.H0 <- function(k, m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  H0(k, m, a0, a1, a2, b1, b2, x, y) *
    (digamma(x + y - k - m + a0) + log(b1/(1 + b1 + b2))) * p1
}
logR0.H1 <- function(k, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  # contributes only on the y == 0 component
  if (y == 0) {
    H1(k, a0, a1, a2, b1, b2, x, y) *
      (digamma(x - k + a0) + log(b1/(1 + b1))) * p2
  } else {0}
}
# NOTE(review): this calls H1 (with m passed in H1's k slot) rather than H2,
# and the log term mixes b1 and b2 (`b1/(1 + b2)`). That may be deliberate
# reuse given x == 0, but it should be confirmed against the E-step formulas.
logR0.H2 <- function(m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  # contributes only on the x == 0 component
  if (x == 0) {
    H1(m, a0, a1, a2, b1, b2, x, y) *
      (digamma(y - m + a0) + log(b1/(1 + b2))) * p3
  } else {0}
}
#### functions for ElogR1 ####
# Summands for E[log R1 | x, y], using the same three-component split as the
# ElogR0 helpers (weights p1, p2, p3).
logR1.H0 <- function(k, m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  H0(k, m, a0, a1, a2, b1, b2, x, y) *
    (digamma(k + a1) + log(b1/(1 + b1))) * p1
}
logR1.H1 <- function(k, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  # contributes only on the y == 0 component
  if (y == 0) {
    H1(k, a0, a1, a2, b1, b2, x, y) *
      (digamma(k + a1) + log(b1/(1 + b1))) * p2
  } else {0}
}
# x == 0 component: with no k index left the digamma argument collapses to
# a1. NOTE(review): calls H1 with m in its k slot rather than H2 — confirm
# this is intended.
logR1.H2 <- function(m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  if (x == 0) {
    H1(m, a0, a1, a2, b1, b2, x, y) *
      (digamma(a1) + log(b1)) * p3
  } else {0}
}
#### functions for ElogR2 ####
# Summands for E[log R2 | x, y], same three-component split (weights p1, p2,
# p3). NOTE(review): b1 (not b2) appears inside every log term here; that is
# consistent with the p4 correction terms in dBvZINB4.Expt.test, which also
# use log(b1), but worth confirming against the model's parametrization.
logR2.H0 <- function(k, m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  H0(k, m, a0, a1, a2, b1, b2, x, y) *
    (digamma(m + a2) + log(b1/(1 + b2))) * p1
}
logR2.H1 <- function(k, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  # y == 0 component: no m index left, digamma argument collapses to a2
  if (y == 0) {
    H1(k, a0, a1, a2, b1, b2, x, y) *
      (digamma(a2) + log(b1)) * p2
  } else {0}
}
# x == 0 component. NOTE(review): calls H1 with m in its k slot rather than
# H2 — confirm this is intended.
logR2.H2 <- function(m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y) {
  if (x == 0) {
    H1(m, a0, a1, a2, b1, b2, x, y) *
      (digamma(m + a2) + log(b1/(1 + b2))) * p3
  } else {0}
}
# E-step expectations for the BvZINB4 model at a single observation (x, y).
#
# Arguments:
#   x, y       - the observed pair of counts
#   a0, a1, a2 - gamma shape parameters of the latent factors R0, R1, R2
#   b1, b2     - scale parameters
#   p1..p4     - mixture weights (both observed / y == 0 / x == 0 / both zero)
# Returns a named vector: the log-density, E[R_j], E[log R_j] (j = 0, 1, 2),
# the component memberships EE1..EE4, and EX2.
# Relies on dBvZINB4() being defined elsewhere in the project.
#
# Fix: the zero-pair corrections were written as `... + if (x + y == 0) {...}`
# with no `else`. In R `if (FALSE) {...}` evaluates to NULL, and numeric +
# NULL is numeric(0), so for every observation with x + y > 0 the ElogR*
# values degenerated to length 0 (and then to NA via the guards below),
# discarding the sums computed above. An explicit `else 0` keeps them.
dBvZINB4.Expt.test <- function(x, y, a0, a1, a2, b1, b2, p1, p2, p3, p4) {
  # component densities used to weight the expectations
  dBNB = dBvZINB4(x, y, a0, a1, a2, b1, b2, 1, 0, 0, 0, log=FALSE)
  dNB.x = dnbinom(x, a0 + a1, b1/(1+b1))
  dNB.y = dnbinom(y, a0 + a2, b2/(1+b2))
  dBZINB = dBvZINB4(x, y, a0, a1, a2, b1, b2, p1, p2, p3, p4, log=FALSE)
  # densities with one shape parameter bumped by 1 (used for E[R_j])
  dBZINB.0 = dBvZINB4(x, y, a0 + 1, a1, a2, b1, b2, p1, p2, p3, p4, log=FALSE)
  dBZINB.1 = dBvZINB4(x, y, a0, a1 + 1, a2, b1, b2, p1, p2, p3, p4, log=FALSE)
  dBZINB.2 = dBvZINB4(x, y, a0, a1, a2 + 1, b1, b2, p1, p2, p3, p4, log=FALSE)
  #### ER0 - ER2 ####
  # NOTE(review): b1 is used as the scale for all three — presumably a shared
  # scale in this parametrization; confirm against the derivation.
  ER0 = a0 * b1 * dBZINB.0 / dBZINB
  ER1 = a1 * b1 * dBZINB.1 / dBZINB
  ER2 = a2 * b1 * dBZINB.2 / dBZINB
  #### EE1 - EE4 (component memberships) ####
  EE1 = dBNB / dBZINB * p1
  EE2 = if (y == 0) {dNB.x / dBZINB * p2} else {0}
  EE3 = if (x == 0) {dNB.y / dBZINB * p3} else {0}
  EE4 = if (x + y == 0) {p4 / dBZINB} else {0}
  EX2 = ifelse(x + y > 0, y, 0) + (a0 + a2) * b1 /dBZINB * (ifelse(y == 0, dNB.x * p2, 0) + ifelse(x + y == 0, p4, 0))
  #### ElogR0 ####
  ElogR0.H0 =
    sapply(0:y, function(m) {
      sapply(0:x, function(k) {
        logR0.H0(k, m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
      }) %>% sum
    }) %>% sum
  ElogR0.H1 =
    sapply(0:x, function(k) {
      logR0.H1(k, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
    }) %>% sum
  ElogR0.H2 =
    sapply(0:y, function(m) {
      logR0.H2(m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
    }) %>% sum
  # `else 0` is essential here (see header note)
  ElogR0 = ElogR0.H0 + ElogR0.H1 + ElogR0.H2 +
    if (x + y == 0) {p4 *(digamma(a0) + log(b1))} else 0
  #### ElogR1 ####
  ElogR1.H0 =
    sapply(0:y, function(m) {
      sapply(0:x, function(k) {
        logR1.H0(k, m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
      }) %>% sum
    }) %>% sum
  ElogR1.H1 =
    sapply(0:x, function(k) {
      logR1.H1(k, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
    }) %>% sum
  ElogR1.H2 =
    sapply(0:y, function(m) {
      logR1.H2(m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
    }) %>% sum
  ElogR1 = ElogR1.H0 + ElogR1.H1 + ElogR1.H2 +
    if (x + y == 0) {p4 *(digamma(a1) + log(b1))} else 0
  #### ElogR2 ####
  ElogR2.H0 =
    sapply(0:y, function(m) {
      sapply(0:x, function(k) {
        logR2.H0(k, m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
      }) %>% sum
    }) %>% sum
  ElogR2.H1 =
    sapply(0:x, function(k) {
      logR2.H1(k, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
    }) %>% sum
  ElogR2.H2 =
    sapply(0:y, function(m) {
      logR2.H2(m, a0, a1, a2, b1, b2, p1, p2, p3, p4, x, y)
    }) %>% sum
  ElogR2 = ElogR2.H0 + ElogR2.H1 + ElogR2.H2 +
    if (x + y == 0) {p4 *(digamma(a2) + log(b1))} else 0
  # Defensive: with the `else 0` fix these should no longer trigger; keep the
  # original NA fallback in case a length-0 value slips through.
  if (length(ElogR0) == 0) {ElogR0 = NA}
  if (length(ElogR1) == 0) {ElogR1 = NA}
  if (length(ElogR2) == 0) {ElogR2 = NA}
  return(c(logdensity = log(dBZINB),
           ER0 = ER0, ER1 = ER1, ER2 = ER2, ElogR0 = ElogR0, ElogR1 = ElogR1, ElogR2 = ElogR2,
           EE1 = EE1, EE2 = EE2, EE3 = EE3, EE4 = EE4, EX2 = EX2))
}
dBvZINB4.Expt.test.vec <- Vectorize(dBvZINB4.Expt.test)
|
035f135e5db35bb2acc5d4cc2dc0ceaba39f759c
|
85b2acb2cfbd82733cdac4647a875e54b1f9a031
|
/pollutantmean.r
|
a856070817cfec96c95fb4337e4c4a475887ca0b
|
[] |
no_license
|
vacuoyoung/Data-Science-Learning
|
77ba3b867260721b36d10175f072f6dba7ecf6dc
|
21cb2242752dc95dea9d54d95efebf0d2761ee27
|
refs/heads/master
| 2020-05-31T22:39:15.351591
| 2019-08-04T10:20:30
| 2019-08-04T10:20:30
| 190,525,010
| 0
| 1
| null | 2019-06-23T16:44:41
| 2019-06-06T06:14:32
| null |
UTF-8
|
R
| false
| false
| 764
|
r
|
pollutantmean.r
|
# R Programming Week2 - Assignment 1
#
# pollutantmean(directory, pollutant, id) computes the mean of a pollutant
# ("sulfate" or "nitrate") across the monitor files <directory>/NNN.csv
# (three-digit, zero-padded monitor ids), ignoring NA values.
#
# Arguments:
#   directory - path to the folder containing the monitor CSV files
#   pollutant - name of the pollutant column to average (character)
#   id        - integer vector of monitor ids (default 1:332)
# Returns: a single numeric, the mean over all pooled non-NA measurements.
#
# Fixes relative to the original draft:
#   * file names now include `directory` (the old code only looked in the
#     working directory);
#   * the pollutant column is selected with [[pollutant]] — `temp$pullutant`
#     was both a typo and a literal-name `$` lookup, so it always gave NULL;
#   * results are accumulated correctly (the old `t+1` never incremented the
#     index, and the `pollutant` argument was clobbered by a results vector);
#   * values are pooled across monitors before averaging, matching the
#     assignment spec (a mean of per-file means weights monitors unequally).
pollutantmean <- function(directory, pollutant, id = 1:332) {
  values <- unlist(lapply(id, function(i) {
    # build "<directory>/NNN.csv" with a zero-padded three-digit monitor id
    path <- file.path(directory, sprintf("%03d.csv", i))
    column <- read.csv(path)[[pollutant]]
    # keep only the non-missing measurements from this monitor
    column[!is.na(column)]
  }))
  mean(values)
}
|
76fa195e8667d0d62b72160acbf91272dcfddaa8
|
d7fe441cd0c9732484f12ef307d3ea8a6f6c78f9
|
/Fit_Vecchia_Model.R
|
1a4f2fc6b44011d2333beb001fca3ce6c658d98f
|
[] |
no_license
|
SebastienCoube/Palourdes
|
7baf940aadac957dcac388d119b63ffcbe8e5df9
|
3873893eae5143de240287b905acc7eee5312183
|
refs/heads/master
| 2020-12-26T07:55:28.608120
| 2020-02-14T14:46:48
| 2020-02-14T14:46:48
| 237,439,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,618
|
r
|
Fit_Vecchia_Model.R
|
####################
# fit_mcmc_Vecchia #
####################
### Description ###
# Function that fits the Nearest Neighbor Gaussian Process using
# delayed acceptance
# chromatic sampling
# parallel chains in order to improve convergence diagnostic
# automatic kernel variance adjustment and log-parametrized covariance parameters such as range and scale
# estimating model : field ~ N (beta_0, covmat), observed_field ~ N(beta_0, covmat + noise_variance I), observed_field/field ~ N(0, noise_variance I)
# covmat (i, j) = GpGp's parametrization of no-nugget Matérn covariance
### Arguments ###
# benchmark is a list that gathers
# geographic locations : a 2-column matrix under the name "locs"
# the observed field at the geographic locations : a numeric vector under the name "noisy_field"
# n_chains is the number of parallel MCMC chains
# m is the number of nearest neighbors in Vecchia approximation
# convergence_break is a vector of two stopping criterion for Gelman-Rubin-Brooks test
# first criterion : multivariate, second : univariate
# if only one criterion is met, the chain breaks
# applies to high-level parameters (covariance, intercept)
# pc_prior_scale, pc_prior_range : PC prior parameters
# see for example Fuglstad and al. https://arxiv.org/pdf/1503.00256.pdf
# if NULL, default guesses are picked from data
# n_cores : number of cores used to compute parallel chains
# if NULL : set equal to n_chains
# n_join is the number of iterations after which Gelman-Rubin-Brooks diagnostic is computed
# field_thinning is the proportion of field samples that are kept, it can save a lot of RAM...
# please keep it a divisor of n_join
# burn_in : the proportion of observations discarded to compute Gelman-Rubin-Brooks diagnostic
# n_delayed_acceptance : the number of observations that are used to do delayed acceptance
# if NULL it is set to the fifth of the number of observations, according to our experiments
# if you really want to touch it, keep it between m+1 and the number of observations...
### Outputs ###
# A list containing 3 objects
# chains : the result of the MCMC run. It contains n_chains lists, each corresponding to one chain. Each sub-list has the following objects :
# iteration is a 2-colums matrix that records the iteration at the end of each chains join and the associated CPU time
# transition_kernel_sd is a list that stocks the (current) automatically-tuned transition kernels standard deviations
# params is a list that stocks the (current) parameters of the model, including covariance parameters, the value of the sampled field, etc
# records is a list that stocks the recorded parameters of the model, including covariance parameters, the value of the sampled field, etc. In terms of RAM, those are the biggest bit !
# estimates is the model's answer. It contains :
# Information about the marginal distribution of each observation of the Gaussian field
# Information about the marginal distribution of the high-level parameters, with 2 parametrization
# INLA's parametrization
# log-parametrization of covariance parameters. In GpGp's parametrization, log(\sigma), log(\alpha), log(\sigma \tau)
fit_mcmc_Vecchia = function(benchmark, n_iterations = 40000, n_chains = 3, m = 10, convergence_break = c(1.1, 1.1),
pc_prior_range = NULL, pc_prior_sd = NULL,
n_cores = NULL, n_join = 800, field_thinning = .05,
burn_in = .5, n_delayed_acceptance = NULL
)
{
#######################
# SOME INITIALIZATION #
#######################
# time
t_begin = Sys.time()
# seed
set.seed(1)
# cleaning RAM
gc()
# ordering locations using max-min ordering. according to https://arxiv.org/pdf/1609.05372.pdf this works better
locs_sample = GpGp::order_maxmin(benchmark$locs)
locs = benchmark$locs[locs_sample,]
observed_field = c(benchmark$noisy_field)[locs_sample]
# just creating a variable for the number of observations
n_obs = length(observed_field)
# burn-in proportion, when i iterations are done the (1 - burn_in) * i last iterations are used
if(is.null(n_delayed_acceptance)) n_delayed_acceptance = round(nrow(locs)/5) #number of observations used to "taste" Vecchia approximation
# Computation parameters
if(is.null(n_cores)) n_cores = n_chains #number of cores for parallel computing of chains
# PC prior parameters
if(is.null(pc_prior_range))pc_prior_range = c(30, .5)
if(is.null(pc_prior_sd))pc_prior_sd = c(sqrt(var(observed_field)/2), .1)
###############################
# Vecchia approximation setup #
###############################
# This object gathers the NNarray table used by GpGp package and related objects
vecchia_approx = list()
#extracting NNarray = nearest neighbours for Vecchia approximation
vecchia_approx$NNarray = GpGp::find_ordered_nn(locs, m)
#computations from vecchia_approx$NNarray in order to create sparse Cholesky using Matrix::sparseMatrix
#non_NA indices from vecchia_approx$NNarray
vecchia_approx$NNarray_non_NA = !is.na(vecchia_approx$NNarray)
#column idx of the uncompressed sparse Cholesky factor
vecchia_approx$sparse_chol_column_idx = vecchia_approx$NNarray[vecchia_approx$NNarray_non_NA]
#row idx of the uncompressed sparse Cholesky factor
vecchia_approx$sparse_chol_row_idx = row(vecchia_approx$NNarray)[vecchia_approx$NNarray_non_NA]
#color the graph iduced by Vecchia approx
vecchia_approx$coloring = naive_greedy_coloring(vecchia_approx$NNarray)$cols
#########################
# Parallel chains setup #
#########################
chains = list()
# creating sub-list : each sub-list is a chain
for(i in seq(n_chains))
{
chains[[paste("chain", i, sep = "_")]] = list()
}
# for each chain, creating sub-lists in order to stock all the stuff that is related to one chain, including :
# iteration is a 2-colums matrix that records the iteration at the end of each chains join and the associated CPU time
# transition_kernel_sd is a list that stocks the (current) automatically-tuned transition kernels standard deviations
# params is a list that stocks the (current) parameters of the model, including covariance parameters, the value of the sampled field, etc
# records is a list that stocks the recorded parameters of the model, including covariance parameters, the value of the sampled field, etc. In terms of RAM, those are the biggest bit !
for(i in seq(n_chains))
{
chains[[i]]$iterations =c(0, 0)
names(chains[[i]]$iterations) = c("iteration", "time")
# Starting points for transition kernels, will be adaptatively tuned
chains[[i]]$idx = i
chains[[i]]$transition_kernel_sd = list()
chains[[i]]$transition_kernel_sd$covariance_params = .02
chains[[i]]$transition_kernel_sd$log_noise_variance = .02
chains[[i]]$transition_kernel_sd$beta_0 = .05
#starting points for covariance parameters
chains[[i]]$params$beta_0 = mean(observed_field)+rnorm(1, 0, sqrt(var(observed_field)/sqrt(n_obs)))
chains[[i]]$params$log_scale = sample(log(var(observed_field))-log(seq(1, 50, 1)), 1)
chains[[i]]$params$log_noise_variance = sample(log(var(observed_field))-log(seq(1, 50, 1)), 1)
chains[[i]]$params$log_range = sample(log(max(dist(locs[1:100,])))-log(seq(10, 100, 10)), 1)
#the field will be smoothed in order to match the randomized covariance parameters
chains[[i]]$params$field = observed_field
# storing chain results
chains[[i]]$records$params$beta_0 = rep(0, n_iterations)
chains[[i]]$records$params$log_scale = rep(0, n_iterations)
chains[[i]]$records$params$log_noise_variance = rep(0, n_iterations)
chains[[i]]$records$params$log_range = rep(0, n_iterations)
#matrix instead of vector since there is n field parameters = better thin the field
chains[[i]]$records$params$field = matrix(0, nrow = round(n_iterations*field_thinning), ncol = n_obs)
}
#######################################
# Field filtering for starting points #
#######################################
# This part filters the field value according to the randomized higher level parameters. It makes the starting field value consistent with starting parameters value
mcmc_update = parallel::mclapply(mc.cores = n_chains, X = lapply(chains, function(chain)list(idx = chain$idx, params = chain$params, transition_kernel_sd = chain$transition_kernel_sd)), function(chain)
{
set.seed(chain$idx)
# Initializing compressed inverse Cholesky using GpGp package. This form is mostly used in parameter updating
compressed_sparse_chol = GpGp::vecchia_Linv(c(1, exp(chain$params$log_range), 1, 0), "matern_isotropic", locs, vecchia_approx$NNarray)
# From compressed inverse Cholesky, the diagonal of the approximated precision matrix is computed. It is used in field updating
sparse_precision_diag = (compressed_sparse_chol[vecchia_approx$NNarray_non_NA]^2)%*%Matrix::sparseMatrix(i = seq(length(vecchia_approx$sparse_chol_column_idx)), j = vecchia_approx$sparse_chol_column_idx, x = rep(1, length(vecchia_approx$sparse_chol_row_idx)))
# Uncompressed form of the inverse Cholesky. This form is mostly used in field updating
sparse_chol = Matrix::sparseMatrix(i = vecchia_approx$sparse_chol_row_idx, j = vecchia_approx$sparse_chol_column_idx, x = compressed_sparse_chol[vecchia_approx$NNarray_non_NA])
field = chain$params$field
for(i in seq(600))
{
for (color in unique(vecchia_approx$coloring))
{
color_idx = which(vecchia_approx$coloring == color)
#see Rue and Held's manual about GMRF : conditional distribution
gmrf_cond_precision = sparse_precision_diag[color_idx]/exp(chain$params$log_scale)+1/exp(chain$params$log_noise_variance)
gmrf_cond_mean = chain$params$beta_0 - (gmrf_cond_precision^(-1))*
(as.vector(Matrix::t(sparse_chol[,color_idx])%*%(sparse_chol[,-color_idx]%*%(field[-color_idx]-chain$params$beta_0)))/exp(chain$params$log_scale)
-(1/exp(chain$params$log_noise_variance))*(observed_field[color_idx]-chain$params$beta_0))
field[color_idx] = rnorm(length(color_idx), gmrf_cond_mean, 1/sqrt(gmrf_cond_precision))
}
}
return(field)
})
# replace field values for chains
for(i in seq(n_chains))
{
chains[[i]]$params$field = mcmc_update[[i]]
}
# End of initializing
for(i in seq(n_chains) ) chains[[i]]$iterations[2] = Sys.time()- t_begin
print(paste("Setup done,", as.numeric(Sys.time()- t_begin, units = "secs"), "s elapsed" ))
iter = 1
#################
# MCMC SAMPLING #
#################
while(iter<n_iterations)
{
print(iter)
# determine how many iterations will go on
n_iterations_update = min(iter + n_join, n_iterations+1)-iter
# export informations on number of update to the chains
#update chains
mcmc_update = parallel::mclapply(mc.cores = n_chains, X = lapply(chains, function(chain)list(idx = chain$idx, params = chain$params, transition_kernel_sd = chain$transition_kernel_sd)), function(chain)
{
####################
# Initialize stuff #
####################
# set seed
set.seed(100*iter+chain$idx)
# copy iteration variable
iter_begin = iter
##################
# Vecchia factor #
##################
# Initializing compressed inverse Cholesky using GpGp package. This form is mostly used in parameter updating
compressed_sparse_chol = GpGp::vecchia_Linv(c(1, exp(chain$params$log_range), 1, 0), "matern_isotropic", locs, vecchia_approx$NNarray)
# From compressed inverse Cholesky, the diagonal of the approximated precision matrix is computed. It is used in field updating
sparse_precision_diag = (compressed_sparse_chol[vecchia_approx$NNarray_non_NA]^2)%*%Matrix::sparseMatrix(i = seq(length(vecchia_approx$sparse_chol_column_idx)), j = vecchia_approx$sparse_chol_column_idx, x = rep(1, length(vecchia_approx$sparse_chol_row_idx)))
# Uncompressed form of the inverse Cholesky. This form is mostly used in field updating
sparse_chol = Matrix::sparseMatrix(i = vecchia_approx$sparse_chol_row_idx, j = vecchia_approx$sparse_chol_column_idx, x = compressed_sparse_chol[vecchia_approx$NNarray_non_NA])
###################
# Storing results #
###################
# this part re-creates a small portion of the $records objects of each chain. It fills it with chain states during the run, and then updates each chain with the new values
records = list()
# storing chain results
records$params$beta_0 = rep(0, n_iterations_update)
records$params$log_scale = rep(0, n_iterations_update)
records$params$log_noise_variance = rep(0, n_iterations_update)
records$params$log_range = rep(0, n_iterations_update)
#matrix instead of vector since there is n field parameters = better thin the field
records$params$field = matrix(0, nrow = sum(round(seq(iter_begin, iter_begin+n_iterations_update-1)*field_thinning) == (seq(iter_begin, iter_begin+n_iterations_update-1)*field_thinning)), ncol = n_obs)
# acceptance results, used to tune proposal kernels
records$acceptance$covariance_parameters = matrix(0, n_iterations_update, 2)
records$acceptance$log_noise_variance = rep(0, n_iterations_update)
records$acceptance$beta_0 = rep(0, n_iterations_update)
#################
# Gibbs sampler #
#################
# The Gibbs sampler is run for a given number of iterations
for(iter in seq(1, n_iterations_update))
{
###########################################
# covariance parameters : scale and range #
###########################################
#scale and range are block updated
# adaptative random walk kernel if iteration is smaller than 3000
if((iter_begin + iter <5000) & ((iter_begin + iter) / 200 == ((iter_begin+iter) %/% 200)))
{
if(mean(records$acceptance$covariance_parameters[seq(iter-199, iter), 2]) < .2) chain$transition_kernel_sd$covariance_params = chain$transition_kernel_sd$covariance_params * .8
if(mean(records$acceptance$covariance_parameters[seq(iter-199, iter), 2]) > .3) chain$transition_kernel_sd$covariance_params = chain$transition_kernel_sd$covariance_params / .8
}
# proposing new values
innovation = rnorm(2, 0, chain$transition_kernel_sd$covariance_params)
new_log_range= chain$params$log_range+innovation[1]
new_log_scale= chain$params$log_scale+innovation[2]
#delayed acceptance
# ll(obs[1:nobs]/theta) = ll(obs[1:n_delayed_acceptance]/ theta) + ll(obs[n_delayed_acceptance:nobs]/obs[1:n_delayed_acceptance], theta)
#step 1 : ll(obs[1:n_delayed_acceptance]/ theta)
new_compressed_sparse_chol = GpGp::vecchia_Linv(c(1, exp(new_log_range), 1, 0), "matern_isotropic", locs[1:n_delayed_acceptance,], vecchia_approx$NNarray[1:n_delayed_acceptance,])
ll_ratio = ll_compressed_sparse_chol(Linv= new_compressed_sparse_chol, log_scale = new_log_scale, field = chain$params$field[1:n_delayed_acceptance] - chain$params$beta_0, NNarray = vecchia_approx$NNarray[1:n_delayed_acceptance,]) -
ll_compressed_sparse_chol(Linv= compressed_sparse_chol[1:n_delayed_acceptance,], log_scale = chain$params$log_scale, field = chain$params$field[1:n_delayed_acceptance] - chain$params$beta_0, NNarray = vecchia_approx$NNarray[1:n_delayed_acceptance,])
#step 2 : ll(obs[n_delayed_acceptance+1:nobs]/obs[1:n_delayed_acceptance], theta)
pc_prior_ratio = (new_log_range-chain$params$log_range)*(-ncol(locs)/2-1) +
log(pc_prior_range[2])*pc_prior_range[1]^(ncol(locs)/2)*sqrt(8)*(exp(new_log_range)^(-ncol(locs)/2)-exp(chain$params$log_range)^(-ncol(locs)/2)) +
log(pc_prior_sd[2])/pc_prior_sd[1]*(exp(new_log_scale/2)-exp(chain$params$log_scale/2))
if(pc_prior_ratio+ll_ratio>log(runif(1)))
{
records$acceptance$covariance_parameters[iter,1] = 1
#ll ratio calculated by : ll(obs[n_delayed_acceptance:nobs]/obs[1:n_delayed_acceptance], theta) = ll(obs[1:nobs]/theta) - ll(obs[1:n_delayed_acceptance]/ theta)
new_compressed_sparse_chol = GpGp::vecchia_Linv(c(1, exp(new_log_range), 1, 0), "matern_isotropic", locs, vecchia_approx$NNarray)
ll_ratio =
# ll with new parameters
ll_compressed_sparse_chol(Linv= new_compressed_sparse_chol, log_scale = new_log_scale, field = chain$params$field - chain$params$beta_0, NNarray = vecchia_approx$NNarray)-
# ll with old parameters
ll_compressed_sparse_chol(Linv= compressed_sparse_chol, log_scale = chain$params$log_scale, field = chain$params$field - chain$params$beta_0, NNarray = vecchia_approx$NNarray)-
# ll in pre acceptance
ll_ratio
#final acceptance
if(ll_ratio>log(runif(1)))
{
records$acceptance$covariance_parameters[iter,2] = 1
#parameter updating
chain$params$log_range = new_log_range
chain$params$log_scale = new_log_scale
#updating Vecchia cholesky
compressed_sparse_chol = new_compressed_sparse_chol
#Used in field updating
sparse_chol = Matrix::sparseMatrix(i = vecchia_approx$sparse_chol_row_idx, j = vecchia_approx$sparse_chol_column_idx, x = compressed_sparse_chol[vecchia_approx$NNarray_non_NA])
sparse_precision_diag = (compressed_sparse_chol[vecchia_approx$NNarray_non_NA]^2)%*%Matrix::sparseMatrix(i = seq(length(vecchia_approx$sparse_chol_column_idx)), j = vecchia_approx$sparse_chol_column_idx, x = rep(1, length(vecchia_approx$sparse_chol_row_idx)))
}
}
##############
# Field mean #
##############
# adaptative random walk kernel
if((iter_begin + iter <5000) & ((iter_begin + iter) / 200 == ((iter_begin+iter) %/% 200)))
{
if(mean(records$acceptance$beta_0[seq(iter-199, iter)]) < .2) chain$transition_kernel_sd$beta_0 = chain$transition_kernel_sd$beta_0 * .8
if(mean(records$acceptance$beta_0[seq(iter-199, iter)]) > .3) chain$transition_kernel_sd$beta_0 = chain$transition_kernel_sd$beta_0 / .8
}
# proposing a new parameter
innovation = rnorm(1, 0, chain$transition_kernel_sd$beta_0)
ll_ratio =
# ll with new parameters
ll_compressed_sparse_chol(Linv= compressed_sparse_chol, log_scale = chain$params$log_scale, field = chain$params$field - chain$params$beta_0 - innovation, NNarray = vecchia_approx$NNarray)-
# ll with old parameters
ll_compressed_sparse_chol(Linv= compressed_sparse_chol, log_scale = chain$params$log_scale, field = chain$params$field - chain$params$beta_0 , NNarray = vecchia_approx$NNarray)
# acceptance step
if(ll_ratio >log(runif(1)))
{
records$acceptance$beta_0[iter] = 1
chain$params$beta_0 = chain$params$beta_0 + innovation
}
##################
# Noise variance #
##################
# adaptative random walk kernel
if((iter_begin + iter <5000) & ((iter_begin + iter) / 200 == ((iter_begin+iter) %/% 200)))
{
if(mean(records$acceptance$log_noise_variance[seq(iter-199, iter)]) < .2) chain$transition_kernel_sd$log_noise_variance = chain$transition_kernel_sd$log_noise_variance * .8
if(mean(records$acceptance$log_noise_variance[seq(iter-199, iter)]) > .3) chain$transition_kernel_sd$log_noise_variance = chain$transition_kernel_sd$log_noise_variance / .8
}
# proposing a new parameter
innovation = rnorm(1, 0, chain$transition_kernel_sd$log_noise_variance)
# acceptance step
#ll ratio : observed signal ~ n_obs(mu = simulated signal, sigma = noise variance * In)
if(sum(dnorm(x = observed_field, mean = chain$params$field, sd = exp(0.5*(chain$params$log_noise_variance + innovation)), log = T)-
dnorm(x = observed_field, mean = chain$params$field, sd = exp(0.5*chain$params$log_noise_variance) , log = T))
>log(runif(1)))
{
records$acceptance$log_noise_variance[iter] = 1
chain$params$log_noise_variance = chain$params$log_noise_variance+innovation
}
#########
# Field #
#########
for (color in unique(vecchia_approx$coloring))
{
color_idx = which(vecchia_approx$coloring == color)
#see Rue and Held's manual about GMRF : conditional distribution
gmrf_cond_precision = sparse_precision_diag[color_idx]/exp(chain$params$log_scale)+1/exp(chain$params$log_noise_variance)
gmrf_cond_mean = chain$params$beta_0 - (gmrf_cond_precision^(-1))*
(as.vector(Matrix::t(sparse_chol[,color_idx])%*%(sparse_chol[,-color_idx]%*%(chain$params$field[-color_idx] - chain$params$beta_0)))/exp(chain$params$log_scale)
-(1/exp(chain$params$log_noise_variance))*(observed_field[color_idx]-chain$params$beta_0))
chain$params$field[color_idx] = rnorm(length(color_idx), gmrf_cond_mean, 1/sqrt(gmrf_cond_precision))
}
######################
# Saving chain state #
######################
records$params$log_scale[iter] = chain$params$log_scale
records$params$beta_0[iter] = chain$params$beta_0
records$params$log_noise_variance[iter] = chain$params$log_noise_variance
records$params$log_range[iter] = chain$params$log_range
if(round((iter_begin+iter)*field_thinning) == ((iter_begin+iter)*field_thinning)) records$params$field[match(0, records$params$field[,1])[1],] = chain$params$field - chain$params$beta_0
}
chain$records = records
return(chain)
})
#################################
# End of 1 Gibbs sampler update #
#################################
###################
# Updating chains #
###################
for(i in seq(n_chains))
{
chains[[i]]$transition_kernel_sd = mcmc_update[[i]]$transition_kernel_sd
chains[[i]]$params = mcmc_update[[i]]$params
for(a in ls(chains[[i]]$records))
{
for(b in ls(chains[[i]][[a]]))
{
if(is.vector(chains[[i]][["records"]][[a]][[b]]))
{
chains[[i]][["records"]][[a]][[b]][seq(iter, iter+n_iterations_update-1)] = mcmc_update[[i]][["records"]][[a]][[b]]
}
if(is.matrix(chains[[i]][["records"]][[a]][[b]])&(b!="field"))
{
chains[[i]][["records"]][[a]][[b]][seq(iter, iter+n_iterations_update-1),] = mcmc_update[[i]][["records"]][[a]][[b]]
}
if("field" == b)
{
field_idx = ((seq(n_iterations_update) + iter)*field_thinning)[sapply(seq(n_iterations_update) + iter, function(i)(round(i*field_thinning) ==( i*field_thinning)))]
chains[[i]][["records"]][[a]][[b]][field_idx,] = mcmc_update[[i]][["records"]][[a]][[b]]
}
}
}
}
iter = iter+n_iterations_update
for(i in seq(n_chains) ) chains[[i]][["iterations"]] = rbind(chains[[i]][["iterations"]], c(iter, as.numeric(Sys.time()-t_begin, units = "secs")))
##################################
# Gelman-Brooks-Rubin diagnostic #
##################################
# computing within and between variance matrices for higher level parameters
within_variance = lapply(chains, function(chain)
var(sapply(c("log_range", "log_scale", "log_noise_variance", "beta_0"), function(name)chain$records$params[[name]])[seq(burn_in*(iter-1), iter-1),])
)
within_variance = Reduce("+", within_variance)/n_chains
means = sapply(chains, function(chain)
apply(sapply(c("log_range", "log_scale", "log_noise_variance", "beta_0"), function(name)chain$records$params[[name]])[seq(burn_in*(iter-1), iter-1),], 2, mean)
)
between_variance = var(t(means))
# multivariate diagnostic
MPSRF = (iter - 2) / (iter - 1) + (n_chains + 1) / n_chains * svd(solve(within_variance)%*%between_variance)$d[1]
names(MPSRF) = "Multivariate"
# univariate diagnostics
Individual_PSRF = (iter - 2) / (iter - 1) + (n_chains + 1) / n_chains * 1/diag(within_variance)*diag(between_variance)
# show diagnostics
print ("Gelan-Rubin-Brooks diag : ")
print(c(MPSRF, Individual_PSRF))
#########################
# EFFECTIVE SAMPLE SIZE #
#########################
# prints Effective Sample Size for each high-level parameter
ESS = sapply(c("log_range", "log_scale", "log_noise_variance", "beta_0"), function(name)sapply(chains, function(chain)coda::effectiveSize(
chain$records$params[[name]][seq(burn_in*(iter-1), iter-1)])))
ESS = rbind(ESS, apply(ESS, 2, sum))
row.names(ESS) = c(sapply(seq(n_chains), function(i) paste("chain", i, sep = "_")), "total")
print("ESS")
print(ESS)
#########
# PLOTS #
#########
# plots chains of each high-level parameter. There will be as many plots as high-level parameters, and as many curves in each plot as chains.
# plot starts at burn-in
par(mfrow = c(4, 1))
# loop over parameters
for(name in c("log_range", "log_scale", "log_noise_variance", "beta_0"))
{
to_be_plotted = lapply(chains, function(chain)chain$records$params[[name]][seq(burn_in*(iter-1), iter-1)])
plot(seq(burn_in*(iter-1), iter-1), seq(burn_in*(iter-1), iter-1), type = "n", xlab = "iteration", ylab = name, main = name,
ylim = c(min(unlist(to_be_plotted)), max(unlist(to_be_plotted))))
col = 1
# loop over chains
for(x in to_be_plotted)
{
lines(seq(burn_in*(iter-1), iter-1), x, col = col)
col = col+1
}
}
#############
# ESTIMATES #
#############
# creating an empty list to store the estimates
estimates = list()
# field
estimates$field = do.call(cbind, list(
"mean" = apply(FUN = mean, MARGIN = 2,
do.call(rbind,
lapply(chains, function(chain) chain$records$params$field[seq(field_thinning*burn_in*(iter-1), field_thinning*(iter-1)),])
)),
"q025" = apply(FUN = function(x)quantile(x, c(0.025)), MARGIN = 2,
do.call(rbind,
lapply(chains, function(chain) chain$records$params$field[seq(field_thinning*burn_in*(iter-1), field_thinning*(iter-1)),])
)),
"q975" = apply(FUN = function(x)quantile(x, c(0.975)), MARGIN = 2,
do.call(rbind,
lapply(chains, function(chain) chain$records$params$field[seq(field_thinning*burn_in*(iter-1), field_thinning*(iter-1)),])
)),
"sd" = apply(FUN = sd, MARGIN = 2,
do.call(rbind,
lapply(chains, function(chain) chain$records$params$field[seq(field_thinning*burn_in*(iter-1), field_thinning*(iter-1)),])
))))
#parameters
#log-params
range_sample = do.call(c, lapply(chains, function(chain) chain$records$params[["log_range"]][seq(burn_in*(iter-1), (iter-1))]))
scale_sample = do.call(c, lapply(chains, function(chain) chain$records$params[["log_scale"]][seq(burn_in*(iter-1), (iter-1))]))
noise_variance_sample = do.call(c, lapply(chains, function(chain) chain$records$params[["log_noise_variance"]][seq(burn_in*(iter-1), (iter-1))]))
beta_0_sample = do.call(c, lapply(chains, function(chain) chain$records$params[["beta_0"]][seq(burn_in*(iter-1), (iter-1))]))
estimates$log_params = t(sapply(list(range_sample, scale_sample, noise_variance_sample, beta_0_sample), function(x)
c(mean(x), quantile(x, c(.025, .975)), sd(x))))
colnames(estimates$log_params) =c("mean", "q025", "q975", "sd")
rownames(estimates$log_params) =c("log_range", "log_scale", "log_noise_variance", "beta_0")
#INLA parametrization
estimates$inla_params = t(sapply(list(exp(range_sample)*sqrt(8), exp(scale_sample/2), exp(-noise_variance_sample), beta_0_sample), function(x)
c(mean(x), quantile(x, c(.025, .975)), sd(x))))
colnames(estimates$inla_params) =c("mean", "q025", "q975", "sd")
rownames(estimates$inla_params) =c("Range_for_spat", "Stdev_for_spat", "Precision_for_he_Gaussian_observations", "beta_0")
#print the estimates
print("Estimates")
print(estimates$log_params)
cat("\n")
cat("\n")
#################
# Saving chains #
#################
# just saves the chains at each 10 joins
if(iter / n_join == 10) saveRDS(chains, "Veccchia_run_chains.RDS")
if((MPSRF<convergence_break[1])|all(Individual_PSRF<convergence_break[2]))break
}
return(list("chains" = chains, "estimates" = estimates, "locs_sample" = locs_sample))
}
###############################
# Vecchia likelihood function #
###############################
#Gaussian likelihood from Vecchia linv, for a 0-mean GP
# Gaussian log-likelihood of a zero-mean GP evaluated through the compressed
# sparse inverse-Cholesky factor produced by Vecchia's approximation.
#
# Linv      : compressed rows of the sparse inverse Cholesky factor (GpGp format)
# field     : field values at the observation locations (assumed mean zero)
# NNarray   : nearest-neighbour ordering array matching Linv
# log_scale : log of the marginal variance multiplier
#
# Returns the log-density up to the additive constant -n/2 * log(2*pi).
ll_compressed_sparse_chol = function(Linv, field, NNarray, log_scale)
{
  # Whiten the field: z = L^{-1} %*% field (scale factor applied afterwards).
  whitened = GpGp::Linv_mult(Linv = Linv, z = field, NNarray)
  # Log-determinant contribution from the factor's diagonal entries.
  log_det = sum(log(Linv[NNarray[, 1], 1]))
  log_det - 0.5 * nrow(NNarray) * log_scale - 0.5 * sum(whitened^2) / exp(log_scale)
}
|
7a8316f1c4c26ad9e5beb3c7d8ebb8a329ed4316
|
fd21442b4e2a67a6b621f4bf0f33591f0f546512
|
/violations.R
|
4d54a716ab234e7f29b527a889471ff6337770a8
|
[] |
no_license
|
bethsecor/code-for-america
|
b4f54acc735f0e8eda5361563a0ba9bbd54e2b7f
|
ea166a5ef53c0b15d265e0fb85aed05420fc524f
|
refs/heads/master
| 2021-01-19T04:29:52.648365
| 2016-07-07T00:03:46
| 2016-07-07T00:03:46
| 62,761,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
violations.R
|
## violations.R -- summarise code-violation records by category.
## Reads the 2012 violations CSV and writes one row per violation category
## with its count and the earliest/latest violation dates.
violations.data <- read.csv("/Users/elizabethsecor/Documents/code-for-america/Violations-2012.csv", header=TRUE)
# Parse the date column so min/max compare chronologically.
violations.data$violation_date <- as.Date(violations.data$violation_date)
# NOTE(review): 9 rows assumes exactly nine distinct violation categories in
# the input file -- confirm against the data.
results <- matrix(NA, 9, 4)
colnames(results) <- c("Category", "Number of Violations", "Earliest Violation Date", "Latest Violation Date")
# Count of records per category; names(category.summary) are the category labels.
category.summary <- table(violations.data$violation_category)
results[,1] <- names(category.summary)
results[,2] <- category.summary
# Earliest violation date per category. by() yields Dates that come back as
# numerics, hence the round-trip through as.numeric/as.Date with the epoch origin.
minimums <- c(by(violations.data$violation_date, violations.data$violation_category, min))
results[,3] <- as.character(as.Date(as.numeric(minimums), origin = "1970-01-01"))
# Latest violation date per category (same conversion).
maximums <- by(violations.data$violation_date, violations.data$violation_category, max)
results[,4] <- as.character(as.Date(as.numeric(maximums), origin = "1970-01-01"))
# Echo the summary table, then write it out as CSV.
results
write.csv(results, "/Users/elizabethsecor/Documents/code-for-america/Violation-Statistics.csv", quote=FALSE, row.names=FALSE)
|
bf4cf576c26a18eb2d16c543c92f9b858256eace
|
92e597e4ffc9b52cfb6b512734fb10c255543d26
|
/man/mainClass.Rd
|
d3afa41210badf370f2f2beb490d91973f6a5fcc
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.utils
|
3b978dba2a86a01d3c11fee1fbcb965dd15a710d
|
0930eaeb9303cd9359892c1403226a73060eed5b
|
refs/heads/master
| 2023-05-12T15:26:14.529039
| 2023-04-21T04:28:29
| 2023-04-21T04:28:29
| 60,531,844
| 9
| 1
|
MIT
| 2023-04-21T04:28:30
| 2016-06-06T13:52:43
|
R
|
UTF-8
|
R
| false
| true
| 442
|
rd
|
mainClass.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mainClass.R
\name{mainClass}
\alias{mainClass}
\title{Main Class of an Object}
\usage{
mainClass(x)
}
\arguments{
\item{x}{any R object}
}
\value{
name of main class of \code{x} (vector of character of length one)
}
\description{
Returns the first element of what class(x) returns
}
\examples{
class(as.POSIXct("2022-06-02"))
mainClass(as.POSIXct("2022-06-02"))
}
|
95d5013a8a140536efd3238ecd37d66ddff81628
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkToggleButtonSetMode.Rd
|
4762ac2574b1922ee8f481b4166cd499d7d5357f
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 899
|
rd
|
gtkToggleButtonSetMode.Rd
|
\alias{gtkToggleButtonSetMode}
\name{gtkToggleButtonSetMode}
\title{gtkToggleButtonSetMode}
\description{Sets whether the button is displayed as a separate indicator and label.
You can call this function on a checkbutton or a radiobutton with
\code{draw.indicator} = \code{FALSE} to make the button look like a normal button}
\usage{gtkToggleButtonSetMode(object, draw.indicator)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkToggleButton}}}
\item{\verb{draw.indicator}}{if \code{TRUE}, draw the button as a separate indicator
and label; if \code{FALSE}, draw the button like a normal button}
}
\details{This function only affects instances of classes like \code{\link{GtkCheckButton}}
and \code{\link{GtkRadioButton}} that derive from \code{\link{GtkToggleButton}},
not instances of \code{\link{GtkToggleButton}} itself.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
037e928f01a930974140acc0c439a3c254262ab3
|
7c46e33aba1307a27da5fd1c37356d6849ed0357
|
/read_data.R
|
7c094db911667eb1e5d923d7093022c4f022def0
|
[] |
no_license
|
wmeler/ExData_Plotting1
|
6e53c177db9928d7921c61f35e239aa4f115c681
|
e2560d2c02e7b779ad3f7c252313d436181afa0b
|
refs/heads/master
| 2021-01-15T09:09:25.370644
| 2014-11-09T13:52:34
| 2014-11-09T13:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
read_data.R
|
## Load the household power consumption data and keep only the two target days.
library(dplyr)
# Semicolon-separated file (read.csv2); "?" marks missing values.
epc <- read.csv2("../household_power_consumption.txt", na.strings="?", stringsAsFactors=FALSE)
# Keep 2007-02-01 and 2007-02-02 only, and build a combined POSIXct timestamp
# from the separate Date (d/m/Y) and Time columns.
# NOTE(review): tbl_df() is deprecated in current dplyr in favour of as_tibble().
data <- tbl_df(epc) %>%
  filter(Date=="1/2/2007" | Date=="2/2/2007") %>%
  mutate(datetime = as.POSIXct(strptime(paste(Date, Time, sep=" "), format="%d/%m/%Y %H:%M:%S")))
# Drop the raw import to free memory.
rm("epc")
|
4e1bd2b055f61337c580061222a89fb45435eb49
|
d063f29f26575dcbc658c79fc8d1fd264d81b2f4
|
/clustering/test.R
|
478a20d2d4e58b76891b36ed495ed6c864369add
|
[
"MIT"
] |
permissive
|
jiahao95/hypertension_clustering
|
81a2f479f0d71b99e85cfdec450b4462adc21ea6
|
db26ebb5e763681dfad910b42a441bffd99868f7
|
refs/heads/main
| 2023-04-01T22:23:56.592164
| 2021-04-17T07:53:08
| 2021-04-17T07:53:08
| 352,771,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,401
|
r
|
test.R
|
# setRepositories()
# check packages installed installed.packages()[1:8,]
# get library path .libPaths()
# append new libarry to the existing one .libPaths( c( .libPaths(), "/sc/arion/projects/rg_HPIMS/user/jiahao01/anaconda3/envs/r_env/lib/R/library") )
# Sys.getenv()[ grep("LIB|PATH", names(Sys.getenv())) ]
# R_LIBS_USER = '/sc/arion/projects/rg_HPIMS/user/jiahao01/anaconda3/envs/r_env/lib/R/library'
# install.packages('tidyverse')
# install.packages('data.table')
# install.packages('dplyr')
#install.packages('gplots')
# install.packages('corrplot')
# Setup: load libraries and the diagnosis/cluster table, and define the
# per-cluster patient counts used as expected proportions by every
# chi-square goodness-of-fit loop below.
library(tidyverse)
library(data.table)
library(dplyr)
library(gplots)
library(graphics)
library(chisq.posthoc.test)
# BUG FIX: setwd() with no argument throws "argument 'dir' is missing, with no
# default" and aborted the script here. Set the data directory explicitly if
# needed, e.g.:
# setwd("/path/to/data")
getwd()
# One row per (patient, diagnosis), with the patient's cluster label (0..5).
dat <- fread('disease_cluster_table_w_etn.csv', stringsAsFactors = FALSE)
# Number of patients in each of the six clusters, fixed from the clustering
# step (name typo fixed: CLuster_6 -> Cluster_6; values unchanged).
expected_counts <- c(Cluster_1 = 205,
                     Cluster_2 = 358,
                     Cluster_3 = 1690,
                     Cluster_4 = 881,
                     Cluster_5 = 1097,
                     Cluster_6 = 1566)
total <- sum(expected_counts)       # total cohort size
code_list <- unique(dat$phecode)    # every distinct diagnosis code to test
getwd()
# Per-diagnosis enrichment test: for every phecode, compare the observed number
# of cases in each cluster against the fixed cluster sizes with a goodness-of-fit
# chi-square test, streaming one tab-separated row per code into a text file.
sink("diagnosis_chisq_test.txt")
# Header row. NOTE(review): 'CLuster_6' is a typo in the column header; kept
# here to preserve the existing output format.
cat('phecode','n' ,"pct" ,
    'Cluster_1', 'Cluster_2', 'Cluster_3', 'Cluster_4', 'Cluster_5', 'CLuster_6',
    'Cluster_1_pct', 'Cluster_2_pct', 'Cluster_3_pct', 'Cluster_4_pct', 'Cluster_5_pct', 'Cluster_6_pct',
    "N_Non_Zero_Clusters", 'p','disease_list' ,'\n', sep='\t')
for (i in code_list) {
  sub <- dat %>% filter(phecode == i)
  n <- nrow(sub)  # number of records carrying this code
  # Frequency of this code per cluster label (labels are 0..5).
  tmp <- as.data.frame(table(sub$labels))
  # Observed count per cluster; absent clusters get 0.
  observed_counts <- c(Cluster_1 = if (length(tmp$Freq[tmp$Var1 == 0])>0) {tmp$Freq[tmp$Var1 == 0]} else {0},
                       Cluster_2 = if (length(tmp$Freq[tmp$Var1 == 1])>0) {tmp$Freq[tmp$Var1 == 1]} else {0},
                       Cluster_3 = if (length(tmp$Freq[tmp$Var1 == 2])>0) {tmp$Freq[tmp$Var1 == 2]} else {0},
                       Cluster_4 = if (length(tmp$Freq[tmp$Var1 == 3])>0) {tmp$Freq[tmp$Var1 == 3]} else {0},
                       Cluster_5 = if (length(tmp$Freq[tmp$Var1 == 4])>0) {tmp$Freq[tmp$Var1 == 4]} else {0},
                       Cluster_6 = if (length(tmp$Freq[tmp$Var1 == 5])>0) {tmp$Freq[tmp$Var1 == 5]} else {0}
  )
  # Run the chi-square test only when the code occurs in more than one cluster.
  N_Non_Zero_Clusters <- which(observed_counts != 0) %>% length()
  if (N_Non_Zero_Clusters>1) {
    erg <- chisq.test(x = observed_counts, p = expected_counts, rescale.p = TRUE)
    p <- erg$p.value
  } else {
    p <- NA
  }
  # One output row; the argument names passed to cat() are ignored by cat itself.
  cat(phecode=i, n=n, pct= scales::percent(n/total), observed_counts, scales::percent(observed_counts/expected_counts), N_Non_Zero_Clusters, p, paste(sub$dx_name %>% unique(), collapse = ';'), '\n', sep='\t')
  rm(tmp, erg, p, sub, n)
}
sink()
# Import the ethnicity-and-gender contingency table and explore the
# association between ethnic group and cluster assignment.
eth <- read.csv('gender_race_table.csv', header=T, stringsAsFactors=F)
eth
colnames(eth)
str(eth)
# Keep only the ethnic-group rows / cluster columns.
# NOTE(review): the [6:11, 2:7] slice hard-codes the layout of
# gender_race_table.csv -- confirm if the table format changes.
rownames(eth) <- eth$X
filtered_eth <- eth[6:11,2:7]
filtered_eth
# Convert the data frame to a table for the plotting functions below.
dt <- as.table(as.matrix(filtered_eth))
# Balloon plot of the contingency table.
png('ballonplot.png')
balloonplot(t(dt), main ="ethnic groups", xlab ="", ylab="",
            label = FALSE, show.margins = FALSE)
dev.off()
# Mosaic plot (shaded by Pearson residuals).
png('mosaicplot.png')
mosaicplot(dt, shade = TRUE, las=2,
           main = "ethnic groups")
dev.off()
# Chi-square test of independence between ethnic group and cluster.
chisq <- chisq.test(filtered_eth)
chisq
# Observed counts
chisq$observed
# Expected counts
round(chisq$expected, 2)
# Pearson residuals
round(chisq$residuals,3)
# Visualise the residuals.
library(corrplot)
png('residualplot.png')
corrplot(chisq$residuals, is.cor = FALSE)
dev.off()
# Contribution of each cell to the chi-square statistic, in percent.
contrib <- 100*chisq$residuals^2/chisq$statistic
round(contrib, 3)
# Visualise the contributions.
png('residual_in_pct.png')
corrplot(contrib, is.cor = FALSE)
dev.off()
# Post-hoc pairwise tests with Bonferroni correction.
chisq.posthoc.test(filtered_eth, method = "bonferroni")
# Per-medication enrichment test: the same chi-square goodness-of-fit procedure
# as the diagnosis loop above, run over uppercased medication names.
med <- fread('medication_cluster_table_w_etn.csv')
med$meds_upper = toupper(med$event)   # normalise drug names for grouping
med_tbl = table(med$event, med$labels)
colnames(med_tbl) <- c('cluster_1', 'cluster_2', 'cluster_3', 'cluster_4', 'cluster_5', 'cluster_6')
med_list <- unique(med$meds_upper)
sink("meds_test.txt")
# Tab-separated header row for the output file.
cat('medications','n' ,"pct" ,
    'Cluster_1', 'Cluster_2', 'Cluster_3', 'Cluster_4', 'Cluster_5', 'Cluster_6',
    'Cluster_1_pct', 'Cluster_2_pct', 'Cluster_3_pct', 'Cluster_4_pct', 'Cluster_5_pct', 'Cluster_6_pct',
    "N_Non_Zero_Clusters", 'p','description' ,'\n', sep='\t')
for (i in med_list) {
  sub <- med %>% filter(meds_upper == i)
  n <- nrow(sub)  # number of records for this medication
  tmp <- as.data.frame(table(sub$labels))  # counts per cluster label 0..5
  # Observed count per cluster; absent clusters get 0.
  observed_counts <- c(Cluster_1 = if (length(tmp$Freq[tmp$Var1 == 0])>0) {tmp$Freq[tmp$Var1 == 0]} else {0},
                       Cluster_2 = if (length(tmp$Freq[tmp$Var1 == 1])>0) {tmp$Freq[tmp$Var1 == 1]} else {0},
                       Cluster_3 = if (length(tmp$Freq[tmp$Var1 == 2])>0) {tmp$Freq[tmp$Var1 == 2]} else {0},
                       Cluster_4 = if (length(tmp$Freq[tmp$Var1 == 3])>0) {tmp$Freq[tmp$Var1 == 3]} else {0},
                       Cluster_5 = if (length(tmp$Freq[tmp$Var1 == 4])>0) {tmp$Freq[tmp$Var1 == 4]} else {0},
                       Cluster_6 = if (length(tmp$Freq[tmp$Var1 == 5])>0) {tmp$Freq[tmp$Var1 == 5]} else {0})
  # Chi-square only when the medication appears in more than one cluster.
  N_Non_Zero_Clusters <- which(observed_counts != 0) %>% length()
  if (N_Non_Zero_Clusters>1) {
    erg <- chisq.test(x = observed_counts, p = expected_counts, rescale.p = TRUE)
    p <- erg$p.value
  } else {
    p <- NA
  }
  # One output row; the argument names passed to cat() are ignored by cat itself.
  cat(event=i, n=n, pct= scales::percent(n/total), observed_counts, scales::percent(observed_counts/expected_counts), N_Non_Zero_Clusters, p, paste(sub$description %>% unique(), collapse = ';'), '\n', sep='\t')
  rm(tmp, erg, p, sub, n)
}
sink()
# Per-procedure enrichment test: the same chi-square goodness-of-fit procedure
# run over procedure codes.
proc <- fread('procedure_table_w_etn.csv')
head(proc)
procedure_list <- unique(proc$proc_code)
# NOTE(review): this prints the length of the *diagnosis* code list from the
# section above, not procedure_list -- looks like a leftover; confirm intent.
length(code_list)
procedure_tab <- as.data.frame(table(proc$proc_code))  # computed but not used below
sink("procedure_test.txt")
# NOTE(review): header spells 'CLuster_6' (typo); kept to preserve output format.
cat('proc_code','n' ,"pct" ,
    'Cluster_1', 'Cluster_2', 'Cluster_3', 'Cluster_4', 'Cluster_5', 'CLuster_6',
    'Cluster_1_pct', 'Cluster_2_pct', 'Cluster_3_pct', 'Cluster_4_pct', 'Cluster_5_pct', 'Cluster_6_pct',
    "N_Non_Zero_Clusters", 'p','description' ,'\n', sep='\t')
for (i in procedure_list) {
  sub <- proc %>% filter(proc_code == i)
  n <- nrow(sub)  # number of records with this procedure code
  tmp <- as.data.frame(table(sub$labels))  # counts per cluster label 0..5
  # Observed count per cluster; absent clusters get 0.
  observed_counts <- c(Cluster_1 = if (length(tmp$Freq[tmp$Var1 == 0])>0) {tmp$Freq[tmp$Var1 == 0]} else {0},
                       Cluster_2 = if (length(tmp$Freq[tmp$Var1 == 1])>0) {tmp$Freq[tmp$Var1 == 1]} else {0},
                       Cluster_3 = if (length(tmp$Freq[tmp$Var1 == 2])>0) {tmp$Freq[tmp$Var1 == 2]} else {0},
                       Cluster_4 = if (length(tmp$Freq[tmp$Var1 == 3])>0) {tmp$Freq[tmp$Var1 == 3]} else {0},
                       Cluster_5 = if (length(tmp$Freq[tmp$Var1 == 4])>0) {tmp$Freq[tmp$Var1 == 4]} else {0},
                       Cluster_6 = if (length(tmp$Freq[tmp$Var1 == 5])>0) {tmp$Freq[tmp$Var1 == 5]} else {0}
  )
  # Chi-square only when the procedure appears in more than one cluster.
  N_Non_Zero_Clusters <- which(observed_counts != 0) %>% length()
  if (N_Non_Zero_Clusters>1) {
    erg <- chisq.test(x = observed_counts, p = expected_counts, rescale.p = TRUE)
    p <- erg$p.value
  } else {
    p <- NA
  }
  # One output row; the argument names passed to cat() are ignored by cat itself.
  cat(phecode=i, n=n, pct= scales::percent(n/total), observed_counts, scales::percent(observed_counts/expected_counts), N_Non_Zero_Clusters, p, paste(sub$proc_description %>% unique(), collapse = ';'), '\n', sep='\t')
  rm(tmp, erg, p, sub, n)
}
sink()
|
97749b65c8c884f3cff53fabe659bc0190e6b8d5
|
8be29ea4c080f939a73b868c80218e0792be4a65
|
/man/mass_and_order.Rd
|
3089e5682173e6021f8bfb659a751769b3c30d97
|
[] |
no_license
|
econtijoch/FaithLabTools
|
5ebe7ea3dcadcdd6d4169a368e832e8f04d94c99
|
e315a8f285b9ebaa2d338df04771b7b7d4dc9a35
|
refs/heads/master
| 2022-07-20T08:48:56.768749
| 2020-03-04T03:05:42
| 2020-03-04T03:05:42
| 126,224,736
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,141
|
rd
|
mass_and_order.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mass_and_order.R
\name{mass_and_order}
\alias{mass_and_order}
\title{Generate sample mass and final order of tubes. Will attempt to identify BarcodeID (length 29 generated sample description) and TubeBarcodes (length 10 number found on the Matrix Tubes) in files if possible}
\usage{
mass_and_order(empty_weights, full_weights, order = NULL, plate_name = NA)
}
\arguments{
\item{empty_weights}{.txt file with barcodes and weights of empty tubes}
\item{full_weights}{.txt file with barcodes and weights of full tubes}
\item{order}{file from 96 barcode scanner that gives final order of tubes (optional. if not provided, uses the order of tubes in the full tube weight file)}
\item{plate_name}{name of plate. If not provided, will pull name from the plate scanner file (RackID)}
}
\value{
table of weights and tubes with order of samples
}
\description{
Generate sample mass and final order of tubes. Will attempt to identify BarcodeID (length 29 generated sample description) and TubeBarcodes (length 10 number found on the Matrix Tubes) in files if possible
}
|
ad81694baf9311986d4532a6a10af144e6e3c909
|
65c5d93eec90e7e6d9b761347db40ba29cc0119d
|
/run_analysis.R
|
3c0a1416d17e7dbd0e6875b53599ca150f282ac2
|
[] |
no_license
|
jorgeiglopez/tidyDataAssignment
|
2a84e958032832edfb08a254c8e0c167d7a1884b
|
4c932face8382d7681e69fba1729e2ed82571c17
|
refs/heads/master
| 2021-05-12T02:21:34.066526
| 2018-01-16T12:07:45
| 2018-01-16T12:07:45
| 117,586,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,815
|
r
|
run_analysis.R
|
## Configuration of working directory
## setwd("./coursera-wd") << put your own wd, where the files are.
## Import the libraries we are going to use
library(dplyr)
library(reshape2)
## Check that every required input file is present before reading anything.
featuresExist <- file.exists("UCI HAR Dataset/features.txt")
activitiesExist <- file.exists("UCI HAR Dataset/activity_labels.txt")
testXExist <- file.exists("UCI HAR Dataset/test/X_test.txt")
testYExist <- file.exists("UCI HAR Dataset/test/y_test.txt")
testSubjectExist <- file.exists("UCI HAR Dataset/test/subject_test.txt")
trainXExist <- file.exists("UCI HAR Dataset/train/X_train.txt")
trainYExist <- file.exists("UCI HAR Dataset/train/y_train.txt")
trainSubjectExist <- file.exists("UCI HAR Dataset/train/subject_train.txt")
if (featuresExist & activitiesExist & testXExist & testYExist & testSubjectExist
    & trainXExist & trainYExist & trainSubjectExist) {
  ## features: feature index/name pairs; activities: activity id/label pairs.
  features <- read.table("UCI HAR Dataset/features.txt")
  activities <- read.table("UCI HAR Dataset/activity_labels.txt")
  testX <- read.table("UCI HAR Dataset/test/X_test.txt")
  testY <- read.table("UCI HAR Dataset/test/y_test.txt")
  testSubject <- read.table("UCI HAR Dataset/test/subject_test.txt")
  trainX <- read.table("UCI HAR Dataset/train/X_train.txt")
  trainY <- read.table("UCI HAR Dataset/train/y_train.txt")
  trainSubject <- read.table("UCI HAR Dataset/train/subject_train.txt")
} else {
  ## BUG FIX: a bare return() is an error at top level ("no function to return
  ## from"); stop() aborts the script with the intended message instead.
  stop("Couldn't find some files. Please unzip all the files again")
}
## 1 - Set the proper column names (features) on the measurement data
colnames(testX) <- features$V2
colnames(trainX) <- features$V2
## 2 - Keep only the mean() and std() measurement columns
testX1 <- testX[, grep("mean\\(|std\\(", colnames(testX))]
trainX1 <- trainX[, grep("mean\\(|std\\(", colnames(trainX))]
## 3 - Attach descriptive activity labels to each record.
## BUG FIX: the previous merge(testY, activities) sorts its output by the join
## column, scrambling row order and misaligning the labels with the measurement
## rows; a direct match() lookup preserves the original row order.
actTest <- data.frame(Activity = activities$V2[match(testY$V1, activities$V1)])
actTrain <- data.frame(Activity = activities$V2[match(trainY$V1, activities$V1)])
colnames(testSubject) <- "SubjectId"
colnames(trainSubject) <- "SubjectId"
## 4 - Bind the columns, then stack the test and train sets
testComplete <- cbind(actTest, testSubject, testX1)
trainComplete <- cbind(actTrain, trainSubject, trainX1)
merged <- rbind(testComplete, trainComplete)
## 5 - Average of each variable grouped by activity and subject
## (columns 3:68 are the 66 retained mean/std measures)
meltValues <- melt(merged, id = c("Activity", "SubjectId"), measure.vars = 3:68)
decasted <- dcast(meltValues, Activity + SubjectId ~ variable, mean)
head(decasted)
message("The tidy data set joining train and test is ready! It's called merged.")
message("Average of variables grouped by activity and subject is Ready!")
message("To check the full list, try: View(decasted)")
## NOTE(review): this writes the merged record-level data set; if the step-5
## summary is wanted instead, write `decasted` here -- confirm against the
## assignment requirements.
write.table(merged, file = paste0(getwd(), "/tidy_data5.txt"), row.names = FALSE)
|
ac51eb2a4ecd9606b8ab20e960acc3a395deed1e
|
c32c54f47c35737ea4ba3a026c81b594fd02b1cf
|
/R/naToZeroFunction.R
|
8fc4b608e06dc02d7143cafcc36a3f50b9e04fb8
|
[] |
no_license
|
quinnpertuit/rDailyFantasy
|
cd46596122d979b5c389d67b19bc354109fa0722
|
fb00d802573c855f58d5b7b4d84f96d6724a66a6
|
refs/heads/master
| 2022-10-24T06:04:42.025973
| 2020-01-11T15:12:36
| 2020-01-11T15:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 194
|
r
|
naToZeroFunction.R
|
#' Replace NA values with zero
#'
#' @param x an object whose missing (\code{NA}) entries should become 0
#'
#' @return \code{x} with every \code{NA} entry replaced by 0
#' @export
#'
#' @examples na.zero(c(1, NA, 3))
na.zero <- function (x) {
  replace(x, is.na(x), 0)
}
|
6654747ae5131929f580332929d41eaa70cf298a
|
7610db967cf364d632ed688930d3db13a1e779bd
|
/Big Mart/Mega Random Forest.R
|
83aa9670683876836a26b55dcc845aae7fda71e5
|
[] |
no_license
|
Prbn/Big-mart-Sales-Practice-Problem
|
ab79f6edb05625ff5469716f013bb756c9c7a084
|
8851f5c9c3cb6620224bccc6b151d289f053b699
|
refs/heads/master
| 2021-05-14T16:18:32.728712
| 2018-01-02T13:20:02
| 2018-01-02T13:20:02
| 116,017,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,954
|
r
|
Mega Random Forest.R
|
# Mega RANDOM FOREST REGRESSION #
#===============================#
# Second-stage ("stacking") model: a random forest trained on the predictions
# of the first-stage models plus the mean-sales features.
# Setting up training and test data:
# trainA keeps the target (Item_Outlet_Sales) alongside the stacked features;
# testA / traintestA hold only the feature columns for test and train rows.
trainA <- Train.Results[c('Item_Outlet_Sales','Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression2')]
testA <- Test.Results[c('Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression2')]
traintestA <-Train.Results[c('Overall_Mean_Sales','Outlet_Mean_Sales','Item_Mean_Sales','Mlinearmodel','OptimalMlinearmodel','SVMRegression','DecisionRegression','RandomForestRegression2')]
# Feature Scaling
# ---------------
# dataset[,] <- scale(dataset[,])
# Tree-based models do not require any feature scaling, so none is applied.
# Regression model
# ----------------
# Using the randomForest package
library(randomForest)
# Setting seed for reproducible tree construction
set.seed(1234)
# Fitting Random Forest Regression to the dataset
# using the randomForest() function from the randomForest library.
# The x argument is the independent variables, taken as a data frame.
# The y argument is the dependent variable, taken as a vector.
# ntree is the number of trees to grow; nodesize limits tree depth.
regressor <- randomForest(x = traintestA, y = trainA$Item_Outlet_Sales, ntree = 500, nodesize = 20)
# Info about the fitted forest
summary(regressor)
# Prediction
# ----------
# Predicting new results with the fitted forest
y_pred = predict(regressor,testA)
# Saving Test results
Test.Results$MegaRandomForestRegression2 <- y_pred
# NOTE(review): the train-set predictions below are in-sample (the forest was
# fit on traintestA), so they will look optimistic.
Train.Results$MegaRandomForestRegression2 <- predict(regressor,traintestA)
# Saving results for submission
# NOTE(review): makecsvsubmission() is defined elsewhere in the project.
makecsvsubmission(Test.Results,'MegaRandomForestRegression2')
|
a511108cbad6e2566e20bfddd7b7d54831629304
|
2e19633c231d7be84c6ad0115ac166740749b2f2
|
/run_analysis.R
|
64f7527c2bd7c4b2199e9c958a80c44f74ca03f0
|
[] |
no_license
|
gmcgarvin/GettingCleaningFinal
|
1f0c2297cd4c3e3e5f28eabb743c355cb2c2b53a
|
a183377771e6448122e1cb74bbfa444f455c99f5
|
refs/heads/master
| 2021-01-19T13:31:39.809869
| 2017-04-12T23:32:37
| 2017-04-12T23:32:37
| 88,097,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,491
|
r
|
run_analysis.R
|
## Gerald.McGarvin@hotmail.com
## Getting and Cleaning Data Course Project
## modify the setwd statement below as needed for your environment:
## copy this file to the setwd location.
## To run this script in RStudio: source("./final_assignment.R")
setwd("C:/Users/F/Documents/Personal/Interests/Training/JHU_Data_Science_Courses/Course_3_Getting_and_Cleaning_Data/assignments")
## Setup needed env:
library(dplyr)
options(digits=8)
## create directories if not present:
if(!file.exists("./data")){dir.create("./data")}
if(!file.exists("./data/HAR")){dir.create("./data/HAR")}
## Download and unzip file:
## NOTE(review): the zip is re-downloaded on every run; consider guarding with
## file.exists() to avoid repeated downloads.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/HAR/har.zip")
unzip(zipfile = "./data/HAR/har.zip", exdir = "./data/HAR")
## Import Column Names and Activity Labels into DFs:
columnNames <- read.table("./data/HAR/UCI HAR Dataset/features.txt", sep = " ", stringsAsFactors=FALSE)
activityLabels <- read.table("./data/HAR/UCI HAR Dataset/activity_labels.txt", sep = " ", stringsAsFactors=FALSE, col.names=c("activityid","activitylabel"))
## TEST Data - Import activity ID for each TEST data record into DF:
activityIdTest <- read.table("./data/HAR/UCI HAR Dataset/test/y_test.txt", sep = " ", col.names="activityid")
## TEST Data - Import subject ID for each TEST data record into DF:
subjectIdTest <- read.table("./data/HAR/UCI HAR Dataset/test/subject_test.txt", sep = " ", col.names="subject")
## TEST Data - Add activity label to each activity ID (row-order preserving
## lookup by activity id):
activityTest <- mutate(activityIdTest, activitylabel = activityLabels[activityIdTest$activityid,2])
## TEST Data - Combine DF of activity ID and label with the DF of subject ID:
actSubTest <- cbind(subjectIdTest, activityTest)
## TRAIN Data - Import activity ID for each TRAIN data record into DF:
activityIdTrain <- read.table("./data/HAR/UCI HAR Dataset/train/y_train.txt", sep = " ", col.names="activityid")
## TRAIN Data - Import subject ID for each TRAIN data record into DF:
subjectIdTrain <- read.table("./data/HAR/UCI HAR Dataset/train/subject_train.txt", sep = " ", col.names="subject")
## TRAIN Data - Add activity label to each activity ID:
activityTrain <- mutate(activityIdTrain, activitylabel = activityLabels[activityIdTrain$activityid,2])
## TRAIN Data - Combine DF of activity ID and label with the DF of subject ID:
actSubTrain <- cbind(subjectIdTrain, activityTrain)
## The measurement files are fixed-width: 561 columns of 16 characters each.
colWidths <- rep.int(16, 561)
## TEST Data - import TEST data metrics:
TestMeasures <- read.fwf("./data/HAR/UCI HAR Dataset/test/X_test.txt", widths=colWidths, blank.lines.skip=TRUE, colClasses="numeric", col.names=columnNames[,2])
## TEST Data - create a vector of mean and std columns and use it to isolate them:
## (case-insensitive match on "mean" or "std" anywhere in the feature name)
colsMeanStd <- grep(".*[Mm][Ee][Aa][Nn]|[Ss][Tt][Dd].*", columnNames[,2])
TestMeanStd <- TestMeasures[,colsMeanStd]
## TEST Data - Now that the number of columns is far less, add the subject and
## activity columns, and strip the dots read.fwf introduced into column names:
TestMeanStd2 <- cbind(actSubTest, TestMeanStd)
colnames(TestMeanStd2) <- gsub("[.]","",colnames(TestMeanStd2))
## TRAIN Data - import TRAIN data metrics and combine with subject and activity columns:
TrainMeasures <- read.fwf("./data/HAR/UCI HAR Dataset/train/X_train.txt", widths=colWidths, blank.lines.skip=TRUE, colClasses="numeric", col.names=columnNames[,2])
TrainMeanStd <- TrainMeasures[,colsMeanStd]
TrainMeanStd2 <- cbind(actSubTrain, TrainMeanStd)
colnames(TrainMeanStd2) <- gsub("[.]","",colnames(TrainMeanStd2))
## Combine the TEST and TRAIN data:
AllMeanStd <- rbind(TestMeanStd2, TrainMeanStd2)
## Calculate the mean of each measure, grouped by subject and activity:
AllMeanStdBySubAct <- group_by(AllMeanStd, subject, activityid, activitylabel)
meanBySubjectActivity <- summarize_all(AllMeanStdBySubAct, mean, na.rm = TRUE)
## Write the new data to a file:
write.table(meanBySubjectActivity, file="./meanBySubjectActivity.csv", sep=",", row.names = FALSE)
## Remove objects no longer needed and call garbage collection:
rm(list = c("TestMeasures", "TestMeanStd", "TestMeanStd2", "TrainMeasures", "TrainMeanStd", "TrainMeanStd2", "AllMeanStd", "AllMeanStdBySubAct"))
rm(list = c("activityIdTest", "activityIdTrain", "activityLabels", "activityTest", "activityTrain", "actSubTest", "actSubTrain"))
rm(list = c("colsMeanStd", "columnNames", "colWidths", "fileUrl", "meanBySubjectActivity", "subjectIdTest", "subjectIdTrain"))
gc()
|
0c0b31d4a995060cd7a8dea159593cfdd9a5e1be
|
79b5bb6f63870f25b36976565814ad39348b478a
|
/shooting.r
|
5e28ea7bc4a78a19e23f6337976656756d55a2e4
|
[] |
no_license
|
akirilov/cse446
|
b296bf8ab02d35595804013b125c1da9c1bd1e2c
|
af14cd7027461ba06da4ab04c626b02369d86f6b
|
refs/heads/master
| 2020-03-30T01:19:03.647130
| 2013-06-10T13:18:26
| 2013-06-10T13:18:26
| 10,236,743
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,767
|
r
|
shooting.r
|
# Define SOFT function
# Soft-thresholding operator used by the LASSO coordinate-descent update:
# shrinks a toward zero by d, clamping at zero.
#
# Args:
#   a: numeric scalar or vector.
#   d: non-negative threshold (scalar or vector recycled against a).
# Returns: sign(a) * max(|a| - d, 0), element-wise.
# Fix: the original used max(), which collapses a vector input to a single
# value; pmax() applies the threshold element-wise while leaving the
# scalar case (the only use in this file) unchanged.
soft = function(a, d)
{
  sign(a) * pmax((abs(a) - d), 0)
}
# Define LASSO function
# LASSO regression via coordinate descent ("shooting" algorithm).
#
# Args:
#   lambda: L1 penalty weight.
#   X: numeric design matrix (n rows, d columns).
#   Y: numeric response vector of length n.
# Returns:
#   list(one = intercept w_0, two = d x 1 matrix of coefficients).
#
# Fixes vs. original:
#  - A leftover "### TEST FUNCTION" fragment (`magicFun = function()` /
#    `magicX =`) parsed the coordinate-update for-loop into the body of an
#    unused closure, so the weights never changed and the while loop exited
#    after one iteration, returning the ridge initialization. Removed.
#  - W_last was hard-coded to 90 rows; now sized by ncol(X).
#  - Local `diff` renamed to `delta` to avoid shadowing base::diff.
shooting = function(lambda, X, Y)
{
  # Threshold of convergence on the change in the weight vector
  epsilon = 1e-6
  # Keep uncentered copies for the intercept calculation below
  Y.orig = Y
  X.orig = X
  X = scale(X, scale=FALSE)
  Y = scale(Y, scale=FALSE)
  # Initialize weights with the ridge-regression solution
  W_last = matrix(0, ncol(X), 1)
  W_cur = solve(t(X) %*% X + lambda * diag(ncol(X))) %*% t(X) %*% Y
  X.temp = cbind(rep(1, dim(X)[1]), X)
  w_0 = (solve(t(X.temp) %*% X.temp - diag(0.5, ncol(X.temp))) %*% t(X.temp) %*% Y)[1]
  delta = 999999
  iters = 0
  iterlim = 10 * ncol(X)
  d = ncol(X)
  while (delta > epsilon && iters < iterlim)
  {
    iters = iters + 1
    # Store last position
    W_last = W_cur
    # Coordinate-wise soft-thresholding updates
    for (j in 1:d)
    {
      a_j = 2 * sum(X[,j]^2)
      c_j = 2 * sum(t(X[,j]) %*% (Y - X %*% W_cur + W_cur[j] * X[,j]))
      W_cur[j] = soft(c_j/a_j, lambda/a_j)
    }
    # Recover the intercept on the original (uncentered) scale
    w_0 = mean(Y.orig) - sum(colMeans(X.orig)*W_cur)
    # Update difference (L2 norm of the weight change)
    delta = sqrt(sum((W_last - W_cur)^2))
  }
  result = list(one=w_0, two=W_cur)
  return(result)
}
# Count effectively nonzero entries of x (|x| >= 0.01).
# Fix: the original tested x >= 0.01, which missed negative coefficients;
# LASSO weights can be nonzero in either sign, so compare on |x|.
nnzero = function(x) {
  return(sum(abs(x) >= 0.01))
}
# Driver script: fMRI word-feature prediction using the shooting LASSO.
# Reads train/test matrices, reduces dimensionality with PCA, fits one
# LASSO per word feature, and reports 2-alternative classification error.
# Read data from files
# NOTE(review): setwd() hard-codes a local path; the script only runs on
# this machine as-is.
setwd("~/Documents/Homework/cse446/proj")
# READ FILES - UNCOMMENT FOR FIRST READ, COMMENT OUT TO AVOID LOADING AGAIN WHEN VARIABLES IN MEMORY
##### FILES #####
# trainX = read.table("data/mri_data_train.txt")
# trainY = read.table("data/wordid_train.txt")
# trainY = trainY[,1]
#
# testX = read.table("data/mri_data_test.txt")
# testChoices = read.table("data/wordid_test.txt")
# testCorrect = as.matrix(testChoices[1])
# testWrong = as.matrix(testChoices[2])
#
# featureDict = read.table("data/wordfeature_centered.txt")
##### END FILES #####
##### BEGIN PROCESSING #####
# Keep the first 1000 principal-component directions as features
colsToUse = 1000
trainXfinal = t(prcomp(trainX, scale = TRUE)$rotation)[,1:colsToUse]
testXfinal = t(prcomp(testX, scale = TRUE)$rotation)[,1:colsToUse]
##### BREAK #####
# Alternative feature selection by per-column variance (disabled):
# trainXvars = apply(trainX, 2, var)
# colsToUse = sapply(trainXvars, function(x){x > 1.07})
# trainXfinal = matrix(nrow = 300, ncol = 0)
# testXfinal = matrix(nrow = 60, ncol = 0)
# for (i in seq(ncol(trainX))) {
#   if (colsToUse[i]) {
#     trainXfinal = cbind(trainXfinal, trainX[i])
#     testXfinal = cbind(testXfinal, testX[i])
#   }
# }
##### END PROCESSING #####
##### SHOOTING #####
# One weight vector and intercept per word feature (218 features);
# yhat holds predictions for the 60 test cases.
W = matrix(nrow=ncol(trainXfinal), ncol=218)
w_0 = matrix(nrow=1, ncol=218)
yhat = matrix(nrow=60, ncol=218)
for (i in seq(218)) {
print("-----")
print(paste("Feature: ", i))
# Map each training word id to its value for feature i
trainYtranslated = sapply(trainY, function(x){featureDict[x,i]})
result = shooting(0.01, trainXfinal, trainYtranslated)
w_0[i] = result$one
W[,i] = result$two
# Get errors
# NOTE(review): this adds the intercept to every weight BEFORE the matrix
# product; the conventional prediction is X %*% W[,i] + w_0[i] — confirm
# this was intended.
yhat[,i] = as.matrix(testXfinal) %*% (W[,i] + w_0[i])
mistake = 0;
# For each test case, compare squared distance (over features 1..i) of the
# prediction to the correct word vs. the distractor; ties count as 0.5.
for (j in seq(nrow(testCorrect))) {
yCorrect = testCorrect[j]
yWrong = testWrong[j]
dist1 = 0
dist2 = 0
for (ii in seq(i)) {
yCorrectTranslated = featureDict[yCorrect,ii]
yWrongTranslated = featureDict[yWrong,ii]
dist1 = dist1 + (yhat[j,ii] - yCorrectTranslated)^2
dist2 = dist2 + (yhat[j,ii] - yWrongTranslated)^2
}
if (dist1 > dist2) {
mistake = mistake + 1
} else if (dist1 == dist2) {
mistake = mistake + 0.5
}
}
print(paste("Mistakes: ", mistake))
print(paste("Error: ", mistake/60))
}
##### END SHOOTING #####
# create testCorrect and testWrong matrices
# then for each testX, get the smallest distance between correct
# and wrong, and return 1 if wrong, 0 o.w.
# finally sum the number of mistakes
# and divide my number of test cases (60)
#print(W)
#print(w_0)
|
875eb829692105cbf91fb5efdb589b979510e5c8
|
c51347680754745733293e00aacf7b633334c1fc
|
/R/plot.yplocation.R
|
63867c6a29945634e03f969b169b6de475958cce
|
[] |
no_license
|
cran/YplantQMC
|
771c341d00e410a0e61dbdadc02af8866d5cd198
|
dc62bfc247ba9d6dd92498e8afa00d511a36e00e
|
refs/heads/master
| 2021-01-21T21:47:33.241377
| 2016-05-23T06:34:50
| 2016-05-23T06:34:50
| 17,694,152
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
plot.yplocation.R
|
#'@method plot yplocation
#'@S3method plot yplocation
#'@importFrom maps map
# S3 plot method for "yplocation" objects: draws the site's coordinates
# (x$long, x$lat) as a red point, on top of a world basemap unless the
# caller asks to add to an existing plot.
plot.yplocation <- function(x, add=FALSE, col="black", ...){
  # Draw the basemap first when starting a fresh plot
  if (!add) {
    map('world', col = col, ...)
  }
  # Mark the location
  points(x$long, x$lat, pch = 19, col = "red", cex = 1.1)
}
|
4ce9b2a6895d9e82a279f8eecb9cb1142894e844
|
0a0b89d070e515f6c4bd13a9d29f0e83197c6690
|
/R/Lecture1MimicYourBrowser/Example1_MimicGetMethod.R
|
594d5094d49d8e30313a3a09d90c24da976681e0
|
[] |
no_license
|
platoyowl/crawler101-yotta
|
49d18123f88333786fcbaabdbddcff37ea4e1842
|
e5b461fa69240bc930595f4dff40276c9cd9fd8a
|
refs/heads/master
| 2020-06-18T12:57:50.626564
| 2018-04-27T13:23:59
| 2018-04-27T13:24:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 386
|
r
|
Example1_MimicGetMethod.R
|
# Lesson 1, Example 1: mimic a browser GET request against the PChome
# search API and reshape the parsed JSON payload into a data frame.
library(httr)
url = "http://ecshweb.pchome.com.tw/search/v3.3/all/results?q=sony&page=1&sort=rnk/dc"
# Issue the request and print the response object
res = GET(url)
res
# Inspect the body three ways: raw bytes, text, and parsed (JSON -> R list)
content(res,as = "raw")
content(res,as = "text")
content(res)
library(magrittr)
# Pipe: take the parsed payload, extract $prods (a list of product records),
# row-bind them, and coerce to a data frame (printed, not stored)
res %>%
content %>%
.$prods %>%
do.call(rbind,.) %>%
as.data.frame()
# Same pipeline again, this time keeping the result
df = res %>%
content %>%
.$prods %>%
do.call(rbind,.) %>%
as.data.frame()
# Open the result in the data viewer
df %>% View
|
0851b054f60adafc5d99c5a7d533aed9d96a48be
|
f42171db8d60b05629790e47560a6f237d285a3b
|
/R/run-2Locus-determSims.R
|
fdd1f83934962b1eb891b4d228597da65ec256b6
|
[
"MIT"
] |
permissive
|
colin-olito/inversionSize-ProtoSexChrom
|
9f0e3e8d3caf1824abce9811c81af7d9e787f1c4
|
a28f3661eff1e4e621774e330dd057ea81f58755
|
refs/heads/master
| 2023-04-06T21:09:00.834863
| 2023-02-07T13:42:28
| 2023-02-07T13:42:28
| 251,228,015
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,683
|
r
|
run-2Locus-determSims.R
|
################################################################
# RUN Deterministic Simulations for FIG.2
#
# R code for deterministic forward simulations. Generates
# output data as .csv files saved to ./output/data/simResults/SA/
#
#
# Author: Colin Olito
#
# NOTES:
#
#####################
## Dependencies
# NOTE(review): rm(list=ls()) wipes the user's workspace; generally avoided
# in shared scripts.
rm(list=ls())
############
# Y -linked
############
# Run Y-linked SA equilibrium-frequency sweeps for three recombination rates
source('R/functions-SA-Y-2Locus-determSims.R')
# r = 1/2
detYSAInversionEQFreqs(selRange = c(0,1), by=0.01, generations=5000,
dom = c(1/2, 1/2), r = 1/2)
# r = 0.02
detYSAInversionEQFreqs(selRange = c(0,1), by=0.01, generations=10000,
dom = c(1/2, 1/2), r = 0.02)
# r = 0.002
detYSAInversionEQFreqs(selRange = c(0,1), by=0.01, generations=10000,
dom = c(1/2, 1/2), r = 0.002)
############
# X-linked
############
rm(list=ls())
source('R/functions-SA-X-2Locus-determSims.R')
# r = 1/2
detXSAInversionEQFreqs(selRange = c(0,1), by=0.01, generations = 30000,
dom = c(1/2, 1/2), r = 1/2, eq.threshold = 1e-7)
# r = 0.02
detXSAInversionEQFreqs(selRange = c(0,1), by=0.01, generations = 20000,
dom = c(1/2, 1/2), r = 0.02, eq.threshold = 1e-7)
# r = 0.002
detXSAInversionEQFreqs(selRange = c(0,1), by=0.01, generations = 10000,
dom = c(1/2, 1/2), r = 0.002, eq.threshold = 1e-7)
# --- Interactive exploration of a single X-linked parameter combination ---
selRange = c(0.02,1)
by=0.02
generations = 10000
dom = c(1/2, 1/2)
r = 0.5
eq.threshold = 1e-7
sfs <- seq(from=selRange[1], to=selRange[2], by=by)
hf=dom[1]
hm=dom[2]
# NOTE(review): the first sf/sm pair is immediately overwritten below;
# only sfs[30]/sfs[33] are actually used.
sf <- sfs[6]
sm <- sfs[11]
sf <- sfs[30]
sm <- sfs[33]
par.list <- list(
selType = "SA",
selPars = c(hf, sf, hm, sm),
gen = 10000,
r = r
)
test <- detSimXInversion(par.list=par.list)
test$eqInvFreq
test$gen
str(test)
# Inversion frequency at generation 1 vs. its full trajectory
# (weighted sum over X genotype classes plus Y classes, divided by 3 X's)
initInvFreq <- (2*sum(test$Fx.gen[1, c(2,3,5)]*c(1/2, 1 ,1/2)) + sum(test$Fy.gen[1, c(2,4)]))/3
invFreqDyn <- (2*((test$Fx.gen[,2]*1/2) + (test$Fx.gen[,3]) + (test$Fx.gen[,5]*1/2)) + (test$Fy.gen[,2] + test$Fy.gen[,4]))/3
initInvFreq - invFreqDyn[length(invFreqDyn)]
# Plot genotype-class trajectories for X (left) and Y (right) backgrounds
par(mfrow=c(1,2))
plot(invFreqDyn, type='l', lwd=2, ylim=c(0,1), col=2)
lines(test$Fx.gen[,1], lwd=2, col=1)
lines(test$Fx.gen[,2], lwd=2, col=3) #AI
lines(test$Fx.gen[,3], lwd=2, col=4)# II
lines(test$Fx.gen[,4], lwd=2, col=5)
lines(test$Fx.gen[,5], lwd=2, col=6) #aI
lines(test$Fx.gen[,6], lwd=2, col=9)
lines(invFreqDyn, lwd=2, col=2)
plot(invFreqDyn, type='l', lwd=2, ylim=c(0,1), col=2)
lines(test$Fy.gen[,1], lwd=2, col=1)
lines(test$Fy.gen[,2], lwd=2, col=2) #AI
lines(test$Fy.gen[,3], lwd=2, col=3)
lines(test$Fy.gen[,4], lwd=2, lty=2, col=2) #aI
lines(test$Fy.gen[,5], lwd=2, col=4)
lines(test$Fy.gen[,6], lwd=2, col=5)
|
edd131201d034c3c91fe30e23c8f8760e8411bd1
|
7f2c9d01bfb23d4446a9241ac57613175015e471
|
/Etapa_2/Lesson_3/Reto_1.R
|
8a886a4eec6c83bf1acd118630232430efdfc844
|
[] |
no_license
|
AnaNava1996/BEDU-Banco-Santander-3-caminos-Data-Analytics
|
ce462e18fe254231b52c7a6bf631a07f74961b38
|
cf7d3dc74376a433505e3986c9572250f4ab8812
|
refs/heads/master
| 2023-02-23T11:36:41.753840
| 2021-01-31T02:01:36
| 2021-01-31T02:01:36
| 308,101,201
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 772
|
r
|
Reto_1.R
|
# Reto 1: histogram of student heights with base R and with ggplot2.
# Fixes vs. original:
#  - The script used %>% and ggplot() without attaching any package (its own
#    comment noted ggplot2 was required); load ggplot2 and magrittr up front.
#  - aes(Aluno) plotted the student-id column, while the title, axis labels,
#    and the base-R version all use the height column; changed to aes(Altura).
library(ggplot2)
library(magrittr)
# Load the BD_Altura_Alunos.csv data set (semicolon-separated)
data_set <- read.csv("/home/ana/Desktop/BEDU/BEDU-Banco-Santander-3-caminos-Data-Analytics/Etapa_2/Lesson_3/Reto-01/BD_Altura_Alunos.csv", sep=";")
# Inspect the first rows
head(data_set)
# Histogram using base R's hist()
hist(data_set$Altura,
breaks = 20,
main = " Histograma de alturas",
ylab = "Frecuencia",
xlab = "Altura",
col = "lightblue")
# Now the same histogram with ggplot2
data_set %>%
ggplot() +
aes(Altura) +
geom_histogram(binwidth = 20, col="black", fill = "lightpink") +
ggtitle("Altura de Alumnos") +
ylab("Frecuencia") +
xlab("Altura") +
theme_light()
|
7d27775659a69503acc4232ea012384736f6d81f
|
a41bd0086c12624bff1fd6661d140075f87d5229
|
/R/qq.R
|
53c5b719eba5c64511457bc765f50a968308d1a6
|
[] |
no_license
|
cran/regclass
|
e0e019d36dbc15d05fda4965e0dc26ff5cf1ffc1
|
548f2ed1dc66fbf1a58b86f9ada26f9da05483c4
|
refs/heads/master
| 2021-01-11T23:18:12.857264
| 2020-02-21T17:00:07
| 2020-02-21T17:00:07
| 78,563,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 996
|
r
|
qq.R
|
# Normal quantile-quantile plot with a 95% pointwise confidence envelope.
#
# Args:
#   x: numeric vector to assess for normality.
#   ax: optional x-axis label; defaults to the deparsed name of `x`.
#   leg: optional legend text drawn top-left; skipped when NA.
#   cex.leg: character expansion for the legend text.
# Returns: NULL invisibly (called for its plotting side effect).
qq <-
function(x,ax=NA,leg=NA,cex.leg=0.8) {
  # is.numeric() is TRUE for both integer and double vectors, replacing the
  # original explicit class whitelist with the idiomatic check
  if (!is.numeric(x)) { stop(paste("Error: only allows numeric variables. You passed a",class(x))) }
  if(is.na(ax)) { x.label <- deparse(substitute(x)) } else { x.label <- ax }
  x <- sort(x)
  n <- length(x)
  P <- ppoints(n)
  # Theoretical quantiles under Normal(mean(x), sd(x))
  z <- qnorm(P,mean(x),sd(x))
  plot(z, x, xlab = paste("Values of",x.label,"if Normal"), ylab = "Observed Values",pch=20,cex=.8)
  # Reference line through the first and third quartiles
  Q.x <- quantile(x, c(0.25, 0.75))
  Q.z <- qnorm(c(0.25, 0.75), mean(x),sd(x) )
  b <- as.numeric( (Q.x[2] - Q.x[1])/(Q.z[2] - Q.z[1]) )
  a <- as.numeric( Q.x[1] - b * Q.z[1] )
  abline(a, b, lwd=1,col="red")
  # 95% pointwise confidence band around the reference line
  conf <- 0.95
  zz <- qnorm(1 - (1 - conf)/2)
  SE <- (b/dnorm(z,mean(x),sd(x)) ) * sqrt(P * (1 - P)/n)
  fit.value <- a + b * z
  upper <- fit.value + zz * SE
  lower <- fit.value - zz * SE
  lines(z, upper, lty = 2,col="red",lwd=1)
  lines(z, lower, lty = 2,col="red",lwd=1)
  if(!is.na(leg)) { legend("topleft",c(leg),cex=cex.leg) }
  invisible(NULL)
}
|
07271e01d5f701a9c5101c5db91b04a9e19576ff
|
5ab37442ba2d651fb08e9e04f0dbc89d82fb91a9
|
/homework 1- Text Mining - Similarites/HW1_preprocess.r
|
1e075c4165123df69f882da653d4c664b21fbd1b
|
[] |
no_license
|
Weerdhawal/Spatial-and-Temporal-Data-Mining
|
4a0d7034c545e33f9a0a6defde2ae60982971657
|
5f18d1b215dcb539e0fe4c662c027773f536ad9f
|
refs/heads/master
| 2020-03-11T17:37:20.964190
| 2018-04-19T04:29:18
| 2018-04-19T04:29:18
| 130,152,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,439
|
r
|
HW1_preprocess.r
|
# HW1: preprocess the 20-newsgroups corpus with tm, build a document-term
# matrix, select frequent terms, and compare euclidean/cosine/jaccard
# document similarities.
#install readtext and quanteda
library(ggplot2)
library(reshape2)
require(tm)
require(SnowballC)
require(reshape2)
require(ggplot2)
library(tm)
library(gplots)
library(RColorBrewer)
library(wordcloud)
library(proxy)
#load the corpus
all <- Corpus(DirSource("I:\\Masters\\SPRING 18\\SPATIAL AND TEMPORAL\\spatial\\homework1\\20news-18828\\20news-18828\\",
encoding="UTF-8",recursive=TRUE),readerControl=list(reader=readPlain,language="en"))
#check values
all[[1]]
#lower case
# NOTE(review): recent tm versions expect tm_map(all, content_transformer(tolower));
# passing tolower directly may drop the corpus class — confirm against the tm
# version in use.
all<- tm_map(all, tolower)
#remove punctuations
all<-tm_map(all,removePunctuation)
#strip extra white spaces
all <- tm_map(all,stripWhitespace)
#remove numbers from files
all<-tm_map(all,removeNumbers)
#now remove the standard list of stopwords, like you've already worked out
all.nostopwords <- tm_map(all, removeWords, stopwords(kind = "en"))
#remove additional custom stopwords (prepositions, pronouns, adverbs)
all.nostopwords<-tm_map(all.nostopwords,removeWords,c("on","in","next to","infront of","behind","between","under","through",
" around","i","me","my","mine","you","your","yours","he","him","his","she",
"her","hers","it","its","we","us","our","ours","they","their","theirs","them","easily",
"loudly","quickly","quietly","sadly","silently","slowly","always","frequently","often","once"))
#making TF matrix
tdm<-TermDocumentMatrix(all.nostopwords,control =list(weighting= weightTf,normalize = TRUE))
#REMOVE SPARSE TERMS AND CONVERT TO MATRIX
final_tdm<- removeSparseTerms(tdm, sparse = 0.99)
final_matrix<- as.matrix(final_tdm)
#TRANSPOSE MATRIX FOR ROWS=ARTICLE , COLUMN=TERM i.e to get Document term matrix
fsb<-t(final_matrix)
#FEATURE SELECTION
#SORT VALUES to get top 100 words
fsb_s<-sort(colSums(fsb),decreasing = TRUE)
fsb_d<-data.frame(word=names(fsb_s),freq=fsb_s)
top100<-head(fsb_d,100)
#create word cloud
wordcloud(words=names(fsb_s),freq=fsb_s,min.freq=1000,random.order=F)
# NOTE(review): this assignment is dead — colna is overwritten on the next line.
colna<-names(fsb_s)
#
colna<-findFreqTerms(final_tdm, lowfreq = 2284,highfreq = 20334)
a<-data.frame(fsb)
top100_sel<-a[,colna]
#top100_sel1<-as.matrix(top100_sel)
# NOTE(review): the similarity analysis below uses rows 1000:2000 of the FULL
# matrix, not the frequent-term selection computed above (top100_sel is unused).
top100_sel1<-as.matrix(fsb)
top100_sel1<- top100_sel1[1000:2000,]
#histogram
barplot(top100[1:100,]$freq, las = 2, names.arg = top100[1:100,]$word,col ="lightblue", main ="Most Frequent Words",ylab = "Word frequencies")
#Calculating similarities: pairwise document distances, melted for heatmaps
euc_dist<-dist(top100_sel1,method = "euclidean")
melted_eud_d<-melt(as.matrix(euc_dist))
ggplot(data = melted_eud_d, aes(x=Var1, y=Var2, fill=value)) +
geom_tile()+ scale_fill_gradient(low = "yellow", high = "red")
cos_dist<-dist(top100_sel1,method = "cosine")
melted_cos_d<-melt(as.matrix(cos_dist))
ggplot(data = melted_cos_d, aes(x=Var1, y=Var2, fill=value)) +
geom_tile()+ scale_fill_gradient(low = "yellow", high = "red")
jac_dist<-dist(top100_sel1,method = "jaccard")
melted_jac_d<-melt(as.matrix(jac_dist))
ggplot(data = melted_jac_d, aes(x=Var1, y=Var2, fill=value)) +
geom_tile()+ scale_fill_gradient(low = "yellow", high = "red")
#step 7
#correlations between the three distance measures
cos_euc<-cor(euc_dist,cos_dist,method = "pearson")
euc_jac<-cor(jac_dist,euc_dist,method = "pearson")
jac_cos<-cor(cos_dist,jac_dist,method = "pearson")
#linear regression between distance measures (first 500 pairs)
vec_cos<-as.vector(head(cos_dist,500))
vec_euc<-as.vector(head(euc_dist,500))
vec_jac<-as.vector(head(jac_dist,500))
df_top100<-data.frame(head(top100_sel1,500))
lr_cos_euc<-lm(vec_cos~vec_euc,df_top100)
lr_euc_jac<-lm(vec_euc~vec_jac,df_top100)
lr_jac_cos<-lm(vec_jac~vec_cos,df_top100)
#plots for similarities
scatter.smooth(x=vec_euc,y=vec_cos,pch=21,col="blue",lpars =list(col = "red", lwd = 3, lty = 3),xlim=c(0,20),xlab="Euclidean",ylab="Cosine")
scatter.smooth(x=vec_cos,y=vec_jac,pch=21,col="blue",lpars =list(col = "red", lwd = 3, lty = 3),xlim=c(0,1),xlab="Cosine",ylab="Jaccard")
scatter.smooth(x=vec_jac,y=vec_euc,pch=21,col="blue",lpars =list(col = "red", lwd = 3, lty = 3),xlim=c(0,1),ylim = c(0,30),xlab="Jaccard",ylab="Euclidean")
#step 9: the ten most-distant document pairs under each measure
trial_melted_cos <- melted_cos_d
tail(trial_melted_cos[order(trial_melted_cos$value),],10)
trial_melted_jac <- melted_jac_d
tail(trial_melted_jac[order(trial_melted_jac$value),],10)
trial_melted_euc <- melted_eud_d
tail(trial_melted_euc[order(trial_melted_euc$value),],10)
#convert to csv
write.csv(top100_sel1,"top100Features.csv")
# NOTE(review): this overwrites fsb from a file not created above — confirm
# "input.csv" is produced elsewhere.
fsb<-read.csv("input.csv")
|
42b27e97aba243a56f3f7d882541b94ca67e23c4
|
07da3b195428c02f30907665be497e05460c79fd
|
/libraryimporttest.R
|
deddf0cb93e675de492e43f32bf3224832b69b5b
|
[] |
no_license
|
CyanHeTest/Insights0303
|
575412b16449c7f717393f5ceb70a1e2e8fa43ca
|
5d74856d7108da9ed7ec20e6186078075626228d
|
refs/heads/master
| 2023-03-14T11:16:39.535471
| 2021-03-03T06:51:11
| 2021-03-03T06:51:11
| 344,028,044
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 402
|
r
|
libraryimporttest.R
|
# Smoke test: attach a set of packages and report their versions.
# Fix: R package names are case-sensitive, so the original calls
# library(Shiny), library(Lubridate), library(Knitr), library(Mlr), and
# library(RCrawler) all fail; the correct names are shiny, lubridate,
# knitr, mlr, and Rcrawler. packageVersion("rcrawler") was likewise
# corrected to the real package name "Rcrawler".
library(DBI)
message(packageVersion("DBI"))
#library(Ggplot2)
#packageVersion("ggplot2")
#library(BioConductor)
#message(packageVersion("bioconductor"))
library(shiny)
message(packageVersion("shiny"))
library(lubridate)
message(packageVersion("lubridate"))
library(knitr)
message(packageVersion("knitr"))
library(mlr)
message(packageVersion("mlr"))
library(Rcrawler)
message(packageVersion("Rcrawler"))
|
fc2eb2b422bc706fc1d5f04ed6df374e5450c8a7
|
a780373151d932f841e17eed14614b949cc248b6
|
/SEM_script_ArrowtoothFlounder.R
|
59690a67a2e04abd13bf760383f60e02685db281
|
[] |
no_license
|
NCEAS/dmx-linkages
|
56816c309aaa08277670faacec3ecabafcf08a52
|
d79983fbfba8cb86280da0c93a64c2cccb1c866f
|
refs/heads/master
| 2020-12-25T17:14:32.804002
| 2016-09-22T21:06:19
| 2016-09-22T21:06:19
| 39,415,949
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,902
|
r
|
SEM_script_ArrowtoothFlounder.R
|
# Communities of Practice
# Structural Equation Modelling Script
# Arrowtooth Flounder case study
# Colette Ward 23 Jan 2016
################################################################
library(plyr)
library(dplyr)
library(lavaan)
library(AICcmodavg)
library(psych)
# call the data assembly script
#source("commPracticeDataFormat.R")
#CPrD <- CoPrct
# or load the data from local source
CPrD <- read.csv("CoPrct.csv", header=T)
head(CPrD)
# select 1998 - 2010
CPrD1 <- CPrD %>%
filter(Year > 1997 & Year < 2011)
# log-transform and rename some variables:
# NOTE(review): logAnnChl is a rename of AnnChl with no log() applied, and
# the models below also reference an ENSO column that is not kept by the
# select() here — verify both against the source data.
CPrD2 <- CPrD1 %>%
mutate(logEuphausiids = log(SewardLineMayEuphausiids),
logPinkShrimp = log(Pink_Shrimp),
logPlckRecruits = log(Poll_Age1_recruits_millions),
logPlckAdults = log(Poll_Yr3plus_TtlBmss_1000Tons),
logArrAdult = log(ArrAdult),
logPlckTons = log(plck_tons),
logPlckVessels = log(plck_vessels),
logHlbtPounds = log(hlbt_pounds),
logArrTons = log(arth_tons),
logArrRev = log(arth_real_rev),
logArrVessels = log(arth_vessels),
logArrProcess = log(arth_processors),
logArrPrice = log(arth_real_price)) %>%
rename(NPGO = NPGO_anul_mn,
PDO = PDO_anul_mn,
WTemp = WTemp_C_AnnMn,
logAnnChl = AnnChl,
Capelin = CapeDAFIndex) %>%
select(Year, logEuphausiids, logPinkShrimp, logPlckRecruits, logPlckAdults, logHlbt, logArrAdult, logPlckTons, logPlckVessels,
logHlbtPounds, logArrTons, logArrRev, logArrVessels, logArrProcess, logArrPrice, NPGO, PDO, WTemp, logAnnChl, Capelin)
names(CPrD2)
# look at correlations among Arrowtooth variables
pairs.panels(CPrD2[,c(2:20)],smooth=F,density=T,ellipses=F,lm=T,digits=3,scale=T)
# standardize each variable to zero mean and unit variance
# (plyr::colwise applies scale() to every column)
CPrD3 <- CPrD2 %>% colwise(scale)()
#############################################
# 1. Arrowtooth model
# see slide at: https://docs.google.com/a/noaa.gov/presentation/d/1GPMfcLRIXkg1ZEndSmMcdMFaa3IkU6Lbbs7QabwFewU/edit?usp=sharing
#############################################
# look at relationship between Adult Arrowtooth and Water Temperature
plot(CPrD2$logArrAdult ~ CPrD2$WTemp, pch=16, cex=2.5)
# Candidate models
# full model, if we had all the data:
# (lavaan model syntax: each line is a regression; never fitted because
# several of these variables are unavailable)
mod.1 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logCapelin + ArrowtoothJuv
logPlckRecruits ~ logEuphausiids + ArrowtoothJuv
logCapelin ~ logEuphausiids
ArrowtoothJuv ~ logCapelin + logEuphausiids
logEuphausiids ~ logAnnChl
ArrowtoothJuv ~ LarvalAbundance
logAnnChl ~ WTemp
WTemp ~ ENSO + PDO
LarvalAbundance ~ ENSO + PDO + NPGO'
############################################################
# full model after removing nodes for which we have little/no data (Wind, Capelin, ATF larvae & juveniles)
# also, cannot have both ENSO & PDO in the model because they're highly correlated; keep ENSO because it's less correlated with the NPGO
mod.2 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logEuphausiids + ENSO + NPGO
logPlckRecruits ~ logEuphausiids
logEuphausiids ~ logAnnChl
logAnnChl ~ WTemp
WTemp ~ ENSO'
# Fit with lavaan and inspect standardized estimates / R-squared
mod.2.fit <- sem(mod.2, data=CPrD3)
summary(mod.2.fit, stand=T, rsq=T)
# Pasted output from the fit (chi-square badness of fit):
#lavaan (0.5-20) converged normally after 19 iterations
#Number of observations 13
#Estimator ML
#Minimum Function Test Statistic 50.442
#Degrees of freedom 18
#P-value (Chi-square) 0.000
# Modification indices suggest candidate paths/covariances to add next
modindices(mod.2.fit)
# Residual covarations to consider with modificaiton index:
# logArrTons ~~ logPlckRecruits 7.898
# logArrAdult ~~ logAnnChl 5.786
# logArrAdult ~~ logEuphausiids 4.002
# Direct links to consider, with modification index:
#logArrTons ~ logPlckRecruits 7.666
#logArrTons ~ ENSO 4.254
#logArrAdult ~ logAnnChl 4.002
#logPlckRecruits ~ ENSO 3.409
#logAnnChl ~ NPGO 4.140
#WTemp ~ NPGO 3.105
############################################################
# Add residual covariation between Arrowtooth harvest and Pollock recruitment:
mod.3 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logEuphausiids + ENSO + NPGO
logPlckRecruits ~ logEuphausiids
logEuphausiids ~ logAnnChl
logAnnChl ~ WTemp
WTemp ~ ENSO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.3.fit <- sem(mod.3, data=CPrD3)
summary(mod.3.fit, stand=T, rsq=T)
#Minimum Function Test Statistic 38.156
#Degrees of freedom 17
#P-value (Chi-square) 0.002
modindices(mod.3.fit)
# Residual covarations to consider, with modificaiton index:
#logArrAdult ~~ logEuphausiids 4.002
#logArrAdult ~~ logAnnChl 5.786
# Direct links to consider, with modification index:
# logArrAdult ~ logAnnChl 4.002 (but it's hard to imagine how there is a DIRECT effect of Chl a on Adult Arrowtooth biomass)
# logArrAdult ~ WTemp 2.211
# logAnnChl ~ NPGO 4.140 (suggests effect is direct instead of via Water Temperature)
# logAnnChl ~ ENSO 1.860 (suggests effect is direct instead of via Water Temperature)
# WTemp ~ logAnnChl 1.860
# WTemp ~ NPGO 3.105
# greatest modification index for direct links to consider was logAnnChl ~ NPGO. Also note mod index for WTemp ~ NPGO
# therefore for next model: add NPGO as driver of Water Temperature and look at mod index for logAnnChl ~ NPGO
############################################################
mod.4 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logEuphausiids + ENSO + NPGO
logPlckRecruits ~ logEuphausiids
logEuphausiids ~ logAnnChl
logAnnChl ~ WTemp
WTemp ~ ENSO + NPGO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.4.fit <- sem(mod.4, data=CPrD3)
summary(mod.4.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 34.608
# Degrees of freedom 16
# P-value (Chi-square) 0.003
modindices(mod.4.fit)
# Residual covarations to consider, with modificaiton index:
# logArrAdult ~~ logEuphausiids 4.053
# logArrAdult ~~ logAnnChl 5.786
# logArrAdult ~~ WTemp 2.904
# logAnnChl ~~ WTemp 4.702
# Direct links to consider, with modification index:
# logArrAdult ~ logAnnChl 4.052
# logArrAdult ~ WTemp 2.904
# logAnnChl ~ NPGO 5.414
# ENSO effect on WaterTemp is not significant (p = 0.622), therefore remove it
# NPGO has a significant effect on WaterTemp (p = 0.043)
# modification index suggests NPGO also has direct effect on Chl a
# therefore add this in next model
############################################################
mod.5 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logEuphausiids + ENSO + NPGO
logPlckRecruits ~ logEuphausiids
logEuphausiids ~ logAnnChl
logAnnChl ~ WTemp + NPGO
WTemp ~ NPGO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.5.fit <- sem(mod.5, data=CPrD3)
summary(mod.5.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 27.847
# Degrees of freedom 16
# P-value (Chi-square) 0.033
# Note big improvement in model fit, though still not statistically significant
modindices(mod.5.fit)
# Residual covarations to consider, with modificaiton index:
# logArrAdult ~~ logEuphausiids 7.084
# logArrAdult ~~ logAnnChl 5.999
# logArrAdult ~~ WTemp 2.851 (does any)
# Direct links to consider, with modification index:
# logArrAdult ~ logAnnChl 7.084
# logArrAdult ~ WTemp 2.851
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 0.848 0.085 9.952 0.000 0.848 0.851
# logArrAdult ~
# logPlckRecruts 0.087 0.227 0.384 0.701 0.087 0.086
# logEuphausiids 0.402 0.230 1.749 0.080 0.402 0.390
# ENSO -0.183 0.259 -0.705 0.481 -0.183 -0.177
# NPGO -0.538 0.259 -2.077 0.038 -0.538 -0.523
# logPlckRecruits ~
# logEuphausiids 0.168 0.180 0.932 0.351 0.168 0.166
# logEuphausiids ~
# logAnnChl 0.062 0.277 0.223 0.824 0.062 0.062
# logAnnChl ~
# WTemp 0.120 0.236 0.509 0.611 0.120 0.120
# NPGO 0.719 0.236 3.046 0.002 0.719 0.719
# WTemp ~
# NPGO -0.488 0.242 -2.018 0.044 -0.488 -0.488
# From the above, looks like WaterTemperature is not important as a link between NPGO and Chl a (p = 0.61)
# therefore in next model, remove this link
############################################################
mod.6 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logEuphausiids + ENSO + NPGO
logPlckRecruits ~ logEuphausiids
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.6.fit <- sem(mod.6, data=CPrD3)
summary(mod.6.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 20.390
# Degrees of freedom 11
# P-value (Chi-square) 0.040
modindices(mod.6.fit)
# Residual covarations to consider, with modificaiton index:
# logArrAdult ~~ logEuphausiids 7.084
# logArrAdult ~~ logAnnChl 7.084
# Direct links to consider, with modification index:
# logArrAdult ~ logAnnChl 7.084 (how do I know this is not associated with effect of NPGO?)
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 0.848 0.085 9.952 0.000 0.848 0.851
# logArrAdult ~
# logPlckRecruts 0.087 0.227 0.384 0.701 0.087 0.086
# logEuphausiids 0.402 0.230 1.749 0.080 0.402 0.390
# ENSO -0.183 0.259 -0.705 0.481 -0.183 -0.177
# NPGO -0.538 0.259 -2.077 0.038 -0.538 -0.523
# logPlckRecruits ~
# logEuphausiids 0.168 0.180 0.932 0.351 0.168 0.166
# logEuphausiids ~
# logAnnChl 0.062 0.277 0.223 0.824 0.062 0.062
# logAnnChl ~
# NPGO 0.661 0.208 3.174 0.002 0.661 0.661
# Covariances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~~
# logPlckRecruts 0.347 0.156 2.222 0.026 0.347 0.784
# and no significant direct effect of ENSO on Arrowtooth Adult biomass
# therefore remove it from next model
############################################################
mod.7 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logEuphausiids + NPGO
logPlckRecruits ~ logEuphausiids
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.7.fit <- sem(mod.7, data=CPrD3)
summary(mod.7.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 12.614
# Degrees of freedom 7
# P-value (Chi-square) 0.082
modindices(mod.7.fit)
# Residual covarations to consider, with modificaiton index:
# logArrAdult ~~ logEuphausiids 6.908
# logArrAdult ~~ logAnnChl 6.909
# Direct links to consider, with modification index:
# logArrAdult ~ logAnnChl 6.909
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 0.848 0.085 9.956 0.000 0.848 0.833
# logArrAdult ~
# logPlckRecruts 0.169 0.230 0.732 0.464 0.169 0.164
# logEuphausiids 0.380 0.233 1.631 0.103 0.380 0.366
# NPGO -0.460 0.230 -2.005 0.045 -0.460 -0.444
# logPlckRecruits ~
# logEuphausiids 0.168 0.180 0.934 0.350 0.168 0.166
# logEuphausiids ~
# logAnnChl 0.062 0.277 0.223 0.824 0.062 0.062
# logAnnChl ~
# NPGO 0.661 0.208 3.174 0.002 0.661 0.661
# from the above, looks like Pollock Recruits direct pathway is not important
# (but residual covariation with Arrowtooth Harvest suggests it's important via another route)
# next model: what happens if I remove the residual covariation between pollock recruits & Arrowtooth Harvest?
############################################################
mod.8 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logPlckRecruits + logEuphausiids + NPGO
logPlckRecruits ~ logEuphausiids
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO
'
# ---- SEM model selection (lavaan), candidate models 8-14 ----
# Each candidate structural equation model is fit to the CPrD3 data frame
# (defined earlier in this script) and judged by the chi-square model-fit
# statistic; the pasted console output is kept as comments to record results.
# NOTE(review): stand=T / rsq=T use the reassignable shortcut T for TRUE.
mod.8.fit <- sem(mod.8, data=CPrD3)
summary(mod.8.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 24.900
# Degrees of freedom 8
# P-value (Chi-square) 0.002
# Note model fit is worse. Therefore leave in logArrTons ~~ logPlckRecruits
# next step: remove Pollock Recruits pathway
############################################################
# note we are only left with the Euphausiid pathway
mod.9 <- 'logArrTons ~ logArrAdult
logArrAdult ~ logEuphausiids + NPGO
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.9.fit <- sem(mod.9, data=CPrD3)
summary(mod.9.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 13.990
# Degrees of freedom 9
# P-value (Chi-square) 0.123
# Compare to mod.7:
# Test statistic 12.614, df = 7, p-value = 0.082
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 0.828 0.084 9.866 0.000 0.828 0.867
# logArrAdult ~
# logEuphausiids 0.394 0.234 1.684 0.092 0.394 0.387
# NPGO -0.432 0.234 -1.847 0.065 -0.432 -0.424
# logEuphausiids ~
# logAnnChl 0.062 0.277 0.223 0.824 0.062 0.062
# logAnnChl ~
# NPGO 0.661 0.208 3.174 0.002 0.661 0.661
# Covariances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~~
# logPlckRecruts 0.344 0.157 2.201 0.028 0.344 0.771
# Direct effect of Chl a on Euphausiids is not significant ... so the food web path cannot be mediating here ...?
# (but then when I remove the food web pathway below in mod.10, NPGO direct effect on ArrowtoothAdults is not significant ...)
# Modification indices suggest which missing paths/covariances would most improve fit.
modindices(mod.9.fit)
# Residual covariations to consider, with modification index:
# logArrAdult ~~ logAnnChl 5.837
# # Direct links to consider, with modification index:
# logArrAdult ~ logAnnChl 5.837
############################################################
# minimum model (without Euphausiid pathway):
mod.10 <- 'logArrTons ~ logArrAdult
logArrAdult ~ NPGO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.10.fit <- sem(mod.10, data=CPrD3)
summary(mod.10.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 1.840
# Degrees of freedom 3
# P-value (Chi-square) 0.606
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 0.828 0.086 9.675 0.000 0.828 0.863
# logArrAdult ~
# NPGO -0.370 0.258 -1.437 0.151 -0.370 -0.370
# Covariances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~~
# logPlckRecruts 0.344 0.157 2.201 0.028 0.344 0.771
# Note that logArrAdult ~ NPGO direct effect is not significant ...
############################################################
# try adding Adult Pollock Biomass to look at pollock fishery hypothesis,
# and remove residual covariation between Arrowtooth harvest and Pollock recruits
mod.11 <- 'logArrTons ~ logArrAdult + logPlckAdults
logArrAdult ~ logEuphausiids + NPGO
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO
'
mod.11.fit <- sem(mod.11, data=CPrD3)
summary(mod.11.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 12.817
# Degrees of freedom 8
# P-value (Chi-square) 0.118
# model fit is similar to mod.9, worse than mod.10
# direct effect of Adult Pollock on Arrowtooth harvest is not significant (p = 0.22)
# NB model fit is worse when residual covariation between Pollock recruits and Arrowtooth harvest is added back in
modindices(mod.11.fit)
############################################################
# try adding Pollock harvest (tons)
# Adult Pollock Biomass and Pollock Harvest (tons) are not correlated so it's OK to add both
mod.12 <- 'logArrTons ~ logArrAdult + logPlckAdults + logPlckTons
logArrAdult ~ logEuphausiids + NPGO
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO
'
mod.12.fit <- sem(mod.12, data=CPrD3)
summary(mod.12.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 32.744
# Degrees of freedom 11
# P-value (Chi-square) 0.001
# Note model fit is much worse than most previous models
# fit is just as poor when first line is logArrTons ~ logArrAdult + logPlckTons
# However, direct effect of Pollock harvest (tons) on Arrowtooth harvest is significant (p = 0.042), although weak (0.19)
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 1.071 0.115 9.314 0.000 1.071 0.885
# logPlckAdults -0.169 0.118 -1.436 0.151 -0.169 -0.137
# logPlckTons 0.239 0.117 2.035 0.042 0.239 0.194
# logArrAdult ~
# logEuphausiids 0.394 0.234 1.684 0.092 0.394 0.387
# NPGO -0.432 0.234 -1.847 0.065 -0.432 -0.424
# logEuphausiids ~
# logAnnChl 0.062 0.277 0.223 0.824 0.062 0.062
# logAnnChl ~
# NPGO 0.661 0.208 3.174 0.002 0.661 0.661
modindices(mod.12.fit)
# direct links to consider, with modification index:
# logArrAdult ~ logPlckTons 4.602
# logPlckTons ~ logEuphausiids 4.011 (yet for mod.11 above, with Adult Pollock instead of Pollock Harvest, modification index for logPlckAdults ~ logEuphausiids 0.126)
############################################################
# remove Adult Pollock Biomass
# and try removing NPGO -> Chl a -> Euphausiid pathway
mod.13 <- 'logArrTons ~ logArrAdult + logPlckTons
logArrAdult ~ logEuphausiids + NPGO
'
mod.13.fit <- sem(mod.13, data=CPrD3)
summary(mod.13.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 10.668
# Degrees of freedom 3
# P-value (Chi-square) 0.014
# Still not a great fit - even though nodes are all marginally significant?
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 1.026 0.127 8.062 0.000 1.026 0.925
# logPlckTons 0.224 0.127 1.759 0.078 0.224 0.202
# logArrAdult ~
# logEuphausiids 0.394 0.237 1.664 0.096 0.394 0.394
# NPGO -0.432 0.237 -1.826 0.068 -0.432 -0.432
modindices(mod.13.fit)
# direct links to consider:
# logPlckTons ~ logArrAdult 4.788 (consider Arrowtooth predation on pollock?)
# logPlckTons ~ logArrTons 4.080
# logArrAdult ~ logPlckTons 6.731
############################################################
# Add back residual covariation between Arrowtooth harvest and Pollock Recruits
mod.14 <- 'logArrTons ~ logArrAdult + logPlckTons
logArrAdult ~ logEuphausiids + NPGO
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.14.fit <- sem(mod.14, data=CPrD3)
summary(mod.14.fit, stand=T, rsq=T)
# Minimum Function Test Statistic 10.987
# Degrees of freedom 7
# P-value (Chi-square) 0.139
# much better fit now
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 0.997 0.069 14.549 0.000 0.997 0.918
# logPlckTons 0.255 0.069 3.724 0.000 0.255 0.235
# logArrAdult ~
# logEuphausiids 0.394 0.237 1.664 0.096 0.394 0.394
# NPGO -0.432 0.237 -1.826 0.068 -0.432 -0.432
# Covariances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~~
# logPlckRecruts 0.355 0.153 2.327 0.020 0.355 0.845
modindices(mod.14.fit)
# direct links to consider, with modification indices:
# logEuphausiids ~ logArrAdult 2.454 (predation?)
############################################################
# ---- SEM candidate models 15-18 and model comparison ----
# BUGFIX(review): the console output in this section had been pasted in
# WITHOUT comment markers, so the whole script failed to parse. The output
# is preserved below, now commented out, so the file can be sourced again.
# feedback from last group meeting: replace ENSO with PDO, and look at direct effect of Water Temp on ATF adult biomass
# add these to base model from mod.11 (ie mod.13, which is the best so far, before cutting out the Euphausiid link)
mod.15 <- 'logArrTons ~ logArrAdult + logPlckTons
logArrAdult ~ logEuphausiids + NPGO + PDO + WTemp
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO + PDO
'
mod.15.fit <- sem(mod.15, data=CPrD3)
summary(mod.15.fit, stand=T, rsq=T)
# model fit is much worse
# lavaan (0.5-20) converged normally after 19 iterations
# Number of observations 13
# Estimator ML
# Minimum Function Test Statistic 38.167
# Degrees of freedom 13
# P-value (Chi-square) 0.000
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 1.026 0.127 8.086 0.000 1.026 0.906
# logPlckTons 0.224 0.126 1.781 0.075 0.224 0.200
# logArrAdult ~
# logEuphausiids 0.312 0.209 1.493 0.135 0.312 0.315
# NPGO -0.707 0.274 -2.585 0.010 -0.707 -0.714
# PDO -0.156 0.247 -0.631 0.528 -0.156 -0.157
# WTemp -0.420 0.239 -1.753 0.080 -0.420 -0.423
# logEuphausiids ~
# logAnnChl 0.062 0.277 0.223 0.824 0.062 0.062
# logAnnChl ~
# NPGO 0.563 0.241 2.340 0.019 0.563 0.563
# PDO -0.184 0.241 -0.763 0.445 -0.184 -0.184
# Variances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons 0.189 0.074 2.550 0.011 0.189 0.163
# logArrAdult 0.524 0.205 2.550 0.011 0.524 0.578
# logEuphausiids 0.920 0.361 2.550 0.011 0.920 0.996
# logAnnChl 0.498 0.195 2.550 0.011 0.498 0.539
# R-Square:
# Estimate
# logArrTons 0.837
# logArrAdult 0.422
# logEuphausiids 0.004
# logAnnChl 0.461
############################################################
# no significant effect of PDO on logArrAdult nor logAnnChl, therefore remove it:
mod.16 <- 'logArrTons ~ logArrAdult + logPlckTons
logArrAdult ~ logEuphausiids + NPGO + WTemp
logEuphausiids ~ logAnnChl
logAnnChl ~ NPGO
'
mod.16.fit <- sem(mod.16, data=CPrD3)
summary(mod.16.fit, stand=T, rsq=T)
# model is not significant
# lavaan (0.5-20) converged normally after 19 iterations
# Number of observations 13
# Estimator ML
# Minimum Function Test Statistic 26.710
# Degrees of freedom 11
# P-value (Chi-square) 0.005
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 1.026 0.126 8.127 0.000 1.026 0.908
# logPlckTons 0.224 0.126 1.781 0.075 0.224 0.199
# logArrAdult ~
# logEuphausiids 0.331 0.212 1.559 0.119 0.331 0.332
# NPGO -0.625 0.243 -2.571 0.010 -0.625 -0.628
# WTemp -0.415 0.243 -1.707 0.088 -0.415 -0.417
# logEuphausiids ~
# logAnnChl 0.062 0.277 0.223 0.824 0.062 0.062
# logAnnChl ~
# NPGO 0.661 0.208 3.174 0.002 0.661 0.661
# Variances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons 0.189 0.074 2.550 0.011 0.189 0.161
# logArrAdult 0.539 0.212 2.550 0.011 0.539 0.589
# logEuphausiids 0.920 0.361 2.550 0.011 0.920 0.996
# logAnnChl 0.520 0.204 2.550 0.011 0.520 0.563
# R-Square:
# Estimate
# logArrTons 0.839
# logArrAdult 0.411
# logEuphausiids 0.004
# logAnnChl 0.437
modindices(mod.16.fit)
# covariation:
# logArrAdult ~~ logEuphausiids 5.972
# logArrAdult ~~ logAnnChl 5.972
# direct links:
# logArrAdult ~ logAnnChl 5.972
# suggests link should be direct, not via covariation???
# logEuphausiids ~ NPGO 0.316
# logEuphausiids ~~ logAnnChl 0.316
# note there is no suggestion of a direct link between NPGO and Euphausiids (which is the pathway from NPGO to Arrowtooth adults)
############################################################
# remove NPGO -> Chl a -> Euphausiid pathway because Chl a -> Euphausiids is not significant
# (very similar to mod.13, but with Water Temp addition)
mod.17 <- 'logArrTons ~ logArrAdult + logPlckTons
logArrAdult ~ logEuphausiids + NPGO + WTemp
'
mod.17.fit <- sem(mod.17, data=CPrD3)
summary(mod.17.fit, stand=T, rsq=T)
# model is still not significant
# lavaan (0.5-20) converged normally after 20 iterations
# Number of observations 13
# Estimator ML
# Minimum Function Test Statistic 9.961
# Degrees of freedom 4
# P-value (Chi-square) 0.041
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 1.026 0.130 7.908 0.000 1.026 0.941
# logPlckTons 0.224 0.130 1.726 0.084 0.224 0.205
# logArrAdult ~
# logEuphausiids 0.331 0.218 1.518 0.129 0.331 0.331
# NPGO -0.625 0.243 -2.569 0.010 -0.625 -0.625
# WTemp -0.415 0.247 -1.682 0.093 -0.415 -0.415
# Variances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons 0.189 0.074 2.550 0.011 0.189 0.172
# logArrAdult 0.539 0.212 2.550 0.011 0.539 0.584
# R-Square:
# Estimate
# logArrTons 0.828
# logArrAdult 0.416
############################################################
# add back residual covariation between Pollock Recruits and Arrowtooth Adult Biomass
mod.18 <- 'logArrTons ~ logArrAdult + logPlckTons
logArrAdult ~ logEuphausiids + NPGO + WTemp
#Residual covaration
logArrTons ~~ logPlckRecruits
'
mod.18.fit <- sem(mod.18, data=CPrD3)
summary(mod.18.fit, stand=T, rsq=T)
# Model is now significant
# lavaan (0.5-20) converged normally after 25 iterations
# Number of observations 13
# Estimator ML
# Minimum Function Test Statistic 10.686
# Degrees of freedom 9
# P-value (Chi-square) 0.298
# Regressions:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~
# logArrAdult 0.997 0.070 14.273 0.000 0.997 0.935
# logPlckTons 0.255 0.070 3.653 0.000 0.255 0.239
# logArrAdult ~
# logEuphausiids 0.331 0.218 1.518 0.129 0.331 0.331
# NPGO -0.625 0.243 -2.569 0.010 -0.625 -0.625
# WTemp -0.415 0.247 -1.682 0.093 -0.415 -0.415
# Covariances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons ~~
# logPlckRecruts 0.355 0.153 2.327 0.020 0.355 0.845
# Variances:
# Estimate Std.Err Z-value P(>|z|) Std.lv Std.all
# logArrTons 0.191 0.075 2.550 0.011 0.191 0.183
# logArrAdult 0.539 0.212 2.550 0.011 0.539 0.584
# logPlckRecruts 0.923 0.362 2.550 0.011 0.923 1.000
# R-Square:
# Estimate
# logArrTons 0.817
# logArrAdult 0.416
# Chi Square Difference Test *** ONLY for nested models! *** (the following are nested)
# Compare model fits:
anova(mod.16.fit, mod.17.fit, mod.18.fit)
# Chi Square Difference Test
# Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq)
# mod.17.fit 4 190.89 194.84 9.9609
# mod.18.fit 9 214.65 219.73 10.6863 0.7254 5 0.9815430
# mod.16.fit 11 232.51 238.72 26.7103 16.0239 2 0.0003315 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Warning message:
# In lavTestLRT(object = <S4 object of class "lavaan">, SB.classic = TRUE, :
# lavaan WARNING: some models are based on a different set of observed variables
# AICc comparison across the (non-nested) earlier candidate models.
aictab(list(mod.7.fit, mod.9.fit, mod.10.fit, mod.11.fit, mod.12.fit, mod.13.fit, mod.14.fit), second.ord=T, sort=T)
# now calculate and plot residuals ...
|
9093c237faaa65d84b9f02ac879e3c562f5b81c6
|
37493450800f7ca3c3a9454fd546e0c62d1e2302
|
/src/CleanPredict.R
|
175d700aabcf265e746df072efaa691b1986e617
|
[
"MIT"
] |
permissive
|
FoolBridgeRectifier/Predict-Future-Sales
|
eafe0686df4d78e873ce9c42ea9a27fdb8c09f1c
|
baa39826fc1baab9946149ad4370fadef85a05f4
|
refs/heads/master
| 2021-02-25T19:25:23.966144
| 2020-03-06T16:01:38
| 2020-03-06T16:01:38
| 245,461,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,950
|
r
|
CleanPredict.R
|
# ---- Load, merge and clean the sales data (Predict Future Sales) ----
library(ggplot2)
library(dplyr)
# rm(list = ls())
# NOTE(review): `cat` shadows base::cat() for the rest of this script.
shop = read.csv("shops.csv")
item = read.csv("items.csv")
sales = read.csv("sales_train_v2.csv")
cat = read.csv("item_categories.csv")
# Merging Datasets
out=merge(sales,item,by="item_id")
out=select(out,-c(item_name))
# Eliminating Missing Values
# Drop returns (negative daily counts) and the sentinel price of -1.
out=out[out$item_cnt_day>=0,]
out=out[out$item_price!=-1,]
out2=out
# Eliminating Extreme Outliers.
out=out[out$item_cnt_day<200,]
# Plotting Outliers
plot(out$item_cnt_day,out$date_block_num)
plot(out2$item_cnt_day,out2$date_block_num)
# Getting Date and Day values
# Dates appear to be "dd.mm.yyyy": characters 4-5 hold the month number.
out$date=as.character(out$date)
month = substr(out$date, start = 4, stop = 5)
month=as.numeric(month)
out=cbind(out,month)
# Aggregating per month
outmon=aggregate(out$item_cnt_day, list(out$item_id,out$shop_id,out$date_block_num,out$item_category_id,out$month,out$item_price), sum)
colnames(outmon)=c("item_id","shop_id","date_block_num","item_category_id","month","item_price","item_cnt_month")
# Hold out the last month (date_block_num == 33) as the test set.
train=outmon[outmon$date_block_num!=33,]
test=outmon[outmon$date_block_num==33,]
outmonstor=outmon
# Creating Features
# Average according to respective features (all computed on training months
# only, i.e. excluding the held-out block 33) and merged back onto outmon.
outmon=outmonstor
# Mean monthly count per item.
itemavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$item_id),mean)
colnames(itemavg)=c("item_id","itemavg")
outmon=merge(outmon,itemavg,by="item_id")
# Per-month total, scaled by the overall number of distinct items.
len=length(unique(out$item_id))
monavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$month),sum)
colnames(monavg)=c("month","monavg")
monavg$monavg=monavg$monavg/len
outmon=merge(outmon,monavg,by="month")
# Per-shop total, scaled the same way.
len=length(unique(out$item_id))
shopavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$shop_id),sum)
colnames(shopavg)=c("shop_id","shopavg")
shopavg$shopavg=shopavg$shopavg/len
outmon=merge(outmon,shopavg,by="shop_id")
# Per-price-point total, scaled the same way.
len=length(unique(out$item_id))
costavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$item_price),sum)
colnames(costavg)=c("item_price","costavg")
costavg$costavg=costavg$costavg/len
outmon=merge(outmon,costavg,by="item_price")
# Per-category total, scaled by the number of distinct items in THAT category.
catavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$item_category_id),sum)
colnames(catavg)=c("item_category_id","catavg")
# BUGFIX: the original loop was `for(i in nrow(catavg))`, which executes a
# single iteration with i == nrow(catavg) and rescaled the WHOLE column by
# that one category's item count. Iterate over every row and rescale each
# category by its own distinct-item count instead.
for (i in seq_len(nrow(catavg))) {
  n_items <- length(unique(out[out$item_category_id == catavg$item_category_id[i], ]$item_id))
  catavg$catavg[i] <- catavg$catavg[i] / n_items
}
outmon=merge(outmon,catavg,by="item_category_id")
# Decide the order based on above averages
# Each *ord feature is the rank (1 = smallest) of the corresponding average.
itemavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$item_id),mean)
colnames(itemavg)=c("item_id","itemord")
itemavg=itemavg[order(itemavg$itemord) , ]
itemavg$itemord=seq_len(nrow(itemavg))
outmon=merge(outmon,itemavg,by="item_id")
len=length(unique(out$item_id))
monavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$month),sum)
colnames(monavg)=c("month","monord")
monavg$monord=monavg$monord/len
monavg=monavg[order(monavg$monord) , ]
monavg$monord=seq_len(nrow(monavg))
outmon=merge(outmon,monavg,by="month")
len=length(unique(out$item_id))
shopavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$shop_id),sum)
colnames(shopavg)=c("shop_id","shopord")
shopavg$shopord=shopavg$shopord/len
shopavg=shopavg[order(shopavg$shopord) , ]
shopavg$shopord=seq_len(nrow(shopavg))
outmon=merge(outmon,shopavg,by="shop_id")
len=length(unique(out$item_id))
costavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$item_price),sum)
colnames(costavg)=c("item_price","costord")
costavg$costord=costavg$costord/len
costavg=costavg[order(costavg$costord) , ]
costavg$costord=seq_len(nrow(costavg))
outmon=merge(outmon,costavg,by="item_price")
catavg = aggregate(outmon[outmon$date_block_num!=33,]$item_cnt_month,list(outmon[outmon$date_block_num!=33,]$item_category_id),sum)
colnames(catavg)=c("item_category_id","catord")
# BUGFIX: same single-iteration loop bug as the catavg feature above —
# `for(i in nrow(catavg))` ran once and rescaled the whole column. Rescale
# each category's total by its own distinct-item count, then rank.
for (i in seq_len(nrow(catavg))) {
  n_items <- length(unique(out[out$item_category_id == catavg$item_category_id[i], ]$item_id))
  catavg$catord[i] <- catavg$catord[i] / n_items
}
catavg=catavg[order(catavg$catord) , ]
catavg$catord=seq_len(nrow(catavg))
outmon=merge(outmon,catavg,by="item_category_id")
# Normalizing values
outnorm=outmon
# Decompose the monthly count into its decimal digits (hundreds/tens/ones)
# so each digit can be predicted as a separate classification target.
outnorm$count_1=as.integer(outnorm$item_cnt_month%%10) # Separated to 100s, 10s and 1s for easier classification
outnorm$count_10=as.integer(outnorm$item_cnt_month/10)%%10
outnorm$count_100=as.integer(outnorm$item_cnt_month/100)
# Rescale each average feature to a common 0-100000 range.
outnorm$shopavg=(outnorm$shopavg*100000)/max(outnorm$shopavg)
outnorm$catavg=(outnorm$catavg*100000)/max(outnorm$catavg)
outnorm$monavg=(outnorm$monavg*100000)/max(outnorm$monavg)
outnorm$costavg=(outnorm$costavg*100000)/max(outnorm$costavg)
outnorm$itemavg=(outnorm$itemavg*100000)/max(outnorm$itemavg)
# Re-split into train/test on the held-out month (block 33).
trainnorm=outnorm[outnorm$date_block_num!=33,]
testnorm=outnorm[outnorm$date_block_num==33,]
testnormstor=testnorm
# Model 1 - Random Forest
# One forest per count digit (ones/tens/hundreds); predictions are recombined
# into a monthly count below. r1/r2 bracket the training time.
library(randomForest)
# NOTE(review): require() only warns on failure and caTools does not appear
# to be used below — confirm whether this dependency can be dropped.
require(caTools)
r1=proc.time()
dectrain=trainnorm[trainnorm$date_block_num>30,] # For best results we took from month 30 to 32
# Keep only the scaled average features as predictors; drop ids, raw columns,
# rank features and the other digit targets.
dectrain=select(dectrain,-c(item_cnt_month,count_1,count_100,itemord,catord,monord,shopord,costord,item_category_id,item_price,shop_id,month,item_id,date_block_num))
rf2 <- randomForest(
count_10 ~ .,
data=dectrain
)
dectrain=trainnorm[trainnorm$date_block_num>30,]
dectrain=select(dectrain,-c(item_cnt_month,count_1,count_10,itemord,catord,monord,shopord,costord,item_category_id,item_price,shop_id,month,item_id,date_block_num))
rf3 <- randomForest(
count_100 ~ .,
data=dectrain
)
dectrain=trainnorm[trainnorm$date_block_num>30,]
# NOTE(review): count_100 is listed twice in this select(); harmless but
# presumably unintended.
dectrain=select(dectrain,-c(item_cnt_month,count_10,count_100,count_100,itemord,catord,monord,shopord,costord,item_category_id,item_price,shop_id,month,item_id,date_block_num))
rf5 <- randomForest(
count_1 ~ .,
data=dectrain
)
r2=proc.time()
# Predict each digit on the held-out month and round to the nearest integer.
output=testnorm
dectest=testnorm
dectest=select(dectest,-c(item_cnt_month,count_1,count_10,count_100,item_category_id,item_price,shop_id,month,item_id,date_block_num))
pred100 = round(predict(rf3, newdata=dectest))
output=cbind(output,pred100)
dectest=testnorm
dectest=select(dectest,-c(item_cnt_month,count_1,count_10,count_100,item_category_id,item_price,shop_id,month,item_id,date_block_num))
pred10 = round(predict(rf2, newdata=dectest))
output=cbind(output,pred10)
dectest=testnorm
dectest=select(dectest,-c(item_cnt_month,count_1,count_10,count_100,item_category_id,item_price,shop_id,month,item_id,date_block_num))
pred1 = round(predict(rf5, newdata=dectest))
output=cbind(output,pred1)
# Recombine digits: hundreds*100 + tens*10 + ones.
output$pred100=output$pred100*100
output$pred10=output$pred10*10
pred=output$pred1+output$pred10+output$pred100
output=cbind(output,pred)
# random forest output
# Aggregate actual and predicted counts per (item, shop) pair for comparison.
x=aggregate(output$item_cnt_month, list(output$item_id,output$shop_id), sum)
y=aggregate(output$pred, list(output$item_id,output$shop_id), sum)
plot(x[,3],type = 'l',col="red",xlab="item_id",ylab="count",main="Random Forest Actual Output")
plot(y[,3],type = 'l',col="blue",xlab="item_id",ylab="count",main="Random Forest Predicted Output")
# MSE and RMSE
# derr keeps the signed per-pair errors; dmeanerr is their mean (used later
# in the paired t statistic).
mse1=x[,3]-y[,3]
derr=mse1
dmeanerr=mse1
mse1=mse1^2
len=length(mse1)
dmeanerr=sum(dmeanerr)/len
mse1=sum(mse1)/len
rmse=sqrt(mse1)
# Model 2 - KNN
# One k-nearest-neighbour classifier per count digit, trained on the scaled
# average features; r3/r4 bracket the run time for comparison with the forest.
library(class)
rmsestor=10   # NOTE(review): appears unused below
r3=proc.time()
month=22 # For best results we took from month 22 to 32
# NOTE(review): this reassignment clobbers the `month` vector created during
# data cleaning; columns 13:17 / 18:20 are selected positionally — confirm
# they still line up with the feature/digit columns if outnorm changes.
knnout=outnorm
knntrain=knnout[knnout$date_block_num!=33&knnout$date_block_num>month,13:17]
knncl1=knnout[knnout$date_block_num!=33&knnout$date_block_num>month,18]
knntest=knnout[knnout$date_block_num==33,13:17]
kpred1=knn(knntrain,knntest,cl=knncl1,k=length(unique(knncl1)))
knntrain=knnout[knnout$date_block_num!=33&knnout$date_block_num>month,13:17]
knncl10=knnout[knnout$date_block_num!=33&knnout$date_block_num>month,19]
knntest=knnout[knnout$date_block_num==33,13:17]
kpred10=knn(knntrain,knntest,cl=knncl10,k=length(unique(knncl10)))
knntrain=knnout[knnout$date_block_num!=33&knnout$date_block_num>month,13:17]
knncl100=knnout[knnout$date_block_num!=33&knnout$date_block_num>month,20]
knntest=knnout[knnout$date_block_num==33,13:17]
kpred100=knn(knntrain,knntest,cl=knncl100,k=length(unique(knncl100)))
output=knnout[knnout$date_block_num==33,]
# Recombine the digit predictions (factors) into a numeric monthly count.
kpred=as.numeric(as.character(kpred1))+(as.numeric(as.character(kpred10))*10)+(as.numeric(as.character(kpred100))*100)
# BUGFIX: the original cbind bound the random-forest column `pred100`
# (already scaled by 100) here instead of the KNN hundreds prediction.
koutput=cbind(output,kpred1,kpred10,kpred100,kpred)
r4=proc.time()
# Output KNN
kx=aggregate(koutput$kpred, list(koutput$item_id,koutput$shop_id), sum)
ky=aggregate(koutput$item_cnt_month, list(koutput$item_id,koutput$shop_id), sum)
plot(ky[,3],type = 'l',col="red",xlab="item_id",ylab="count",main="KNN Actual Output")
plot(kx[,3],type = 'l',col="blue",xlab="item_id",ylab="count",main="KNN Predicted Output")
# MSE and RMSE
# kerr keeps the signed per-pair errors; kmeanerr is their mean.
kmse=kx[,3]-ky[,3]
kerr=kmse
kmeanerr=kmse
kmse=kmse^2
klen=length(kmse)
kmeanerr=sum(kmeanerr)/klen
kmse=sum(kmse)/klen
krmse=sqrt(kmse)
# ---- Report error metrics and compare the two models ----
# Random Forest MSE
print(mse1)
# KNN MSE
print(kmse)
# Random Forest RMSE
print(rmse)
# KNN RMSE
print(krmse)
# Random Forest Time taken in seconds
print((r2-r1)[3])
# KNN Time taken in seconds
print((r4-r3)[3])
# T value
# Hand-computed paired t statistic on the per-pair error differences.
# NOTE(review): the variance term subtracts kmeanerr and dmeanerr separately
# rather than their difference ((kerr-derr) - (kmeanerr - dmeanerr)) — confirm
# this is intended. Also, `var` and `t` shadow base::var() and base::t().
var=sum(((kerr-derr)-kmeanerr-dmeanerr)^2)/klen
t = (dmeanerr-kmeanerr)/sqrt((var)/klen)
print(t)
# T table value for 394 dof and 95 percent is 1.984
# by table p value is 1.645 for 90 percent
# 1.282 for 80 percent
# Cross-check with R's built-in paired t-test on the same error vectors.
t.test(kerr, derr, paired = TRUE, alternative = "two.sided",conf.level = 0.95,var.equal = TRUE)
# by R t.test p is 2.2e-16
# T value is greater
# Hence we ignore null hypothesis
# MODEL 1 and 2 are significantly different
|
cd7077be2b075c4a78cf4edff30bfb45e8910302
|
6100369c1e847c6cc16875ab919e8e22868b1094
|
/man/NumWordsRulesExtractor.Rd
|
3cfbb9b439f9b1da1c1111a1a5733b4d02448eb4
|
[
"Apache-2.0"
] |
permissive
|
mannau/boilerpipeR
|
47956f44d470c6dbb574530c4f0fe4d81daa8aea
|
5cbc092cac7717a776bef1b54d3021959c4bcc7b
|
refs/heads/master
| 2021-06-04T04:48:57.487486
| 2021-05-19T09:05:01
| 2021-05-19T09:05:01
| 15,524,787
| 22
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 744
|
rd
|
NumWordsRulesExtractor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Extractor.R
\name{NumWordsRulesExtractor}
\alias{NumWordsRulesExtractor}
\title{A quite generic full-text extractor solely based upon the number of words per block (the current, the previous and the next block).}
\usage{
NumWordsRulesExtractor(content, ...)
}
\arguments{
\item{content}{Text content as character}
\item{...}{additional parameters}
}
\value{
extracted text as character
}
\description{
A quite generic full-text extractor solely based upon the number of words per block (the current, the previous and the next block).
}
\examples{
data(content)
extract <- NumWordsRulesExtractor(content)
}
\seealso{
\code{\link{Extractor}}
}
\author{
Mario Annau
}
|
1f0beb5c51765097747c2e6c3108dd05910cfaa1
|
78e288ddbd2db83437def953c09765401751746e
|
/results_visualisation/rucs_align.R
|
072cbb9d52af2f6c893093aab4bba1a3e400969c
|
[] |
no_license
|
karelbilek/CzeRusT
|
d09ddb09ca550695e2276148bd6aee2af967398b
|
9a62772802d183695e6b58cb4d2a01ed31beba90
|
refs/heads/master
| 2021-05-28T02:29:53.827487
| 2014-05-24T15:56:59
| 2014-05-24T15:56:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
rucs_align.R
|
# Plot translation quality (BLEU) against the out-of-vocabulary rate for the
# lemma/stemming alignment variants, writing the figure to rucs_align.png.
png("rucs_align.png")

BLEU <- c(12.23, 12.25, 12.34, 12.3, 12.64, 12.23)
OOV <- c(1.8, 1.4, 1.6, 1.7, 1.8, 1.9)
# One label per point: lemmatisation, or stemming to 3..7 characters.
point_labels <- c("lemma", "stem3", "stem4", "stem5", "stem6", "stem7")

plot(BLEU, OOV, xlim = c(12, 13), ylim = c(1, 2), ylab = "OOV (in percents)")
text(BLEU, OOV, point_labels, pos = 4)  # annotate each point to its right

dev.off()
|
f2f5c771222bba0fa936b50c43c3aafdb3cbf3f4
|
f1190d86024c592454befe549429123891445749
|
/Plants/Curves_Light_ACi_MesoExpt2013.R
|
56312f9df2ed5396feace224a534d47b629d4386
|
[] |
no_license
|
reblake/MesoExpt2013
|
2a3f0b9782a7eb351edf939630dc4d03e2d32ddd
|
a3e03c27ae805b4e4a68ee884623e9c695bc371e
|
refs/heads/master
| 2022-06-13T01:06:39.835122
| 2022-06-07T19:08:14
| 2022-06-07T19:08:14
| 43,384,020
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
r
|
Curves_Light_ACi_MesoExpt2013.R
|
######################################################
### Light and A-Ci Curves
### Meso Expt 2013
### Script by Rachael Blake, May 2016
######################################################

# Load libraries
library(plyr)
library(ggplot2)
library(dplyr)
library(grid)
library(scales)
library(car)
library(gridExtra)

# Read the raw LI-COR gas-exchange curve measurements.
licor_file <- "C:/Users/rblake/Documents/LSU/MesoExp_2013/LICOR_Files_Meso_Expt/LICOR_CURVES_MesoExpt 2013.csv"
curves <- read.csv(licor_file, header=TRUE)

# Quick look at the data.
head(curves)
str(curves)

# A-Ci curves: keep only the buckets measured for this curve type.
aci_buckets <- c(67, 6, 70, 38, 10, 73, 84, 86, 77, 37)
a_ci <- filter(curves, Curve_type == "A-Ci", Bucket.Number %in% aci_buckets)

ggplot(data=a_ci, aes(x=Ci, y=Photo, group=Bucket.Number, color=Bucket.Number)) +
  geom_line(size=1)

# Light-response curves: a different subset of buckets.
light_buckets <- c(6, 74, 24, 73, 21, 76)
lite <- filter(curves, Curve_type == "Light", Bucket.Number %in% light_buckets)

ggplot(data=lite, aes(x=Ci, y=Photo, group=Bucket.Number, color=Bucket.Number)) +
  geom_line(size=1)
|
ec84fde121bf0e7ccc587cd128a24686bf80eefe
|
16efe6aebcbf49e0baccf17439f25a2236b3c397
|
/temp/r-shiny-sports-analytics-app-master/server_fourfactors.R
|
6da237516415ee196a7e635fd104b93c9f11f479
|
[] |
no_license
|
yourfriend-gaurav-gurjar/nba-datavis
|
794fb64400c8b7d9fd71fa33979bde3c9fb6c10c
|
d120a7da08b4cf0b2ead677893f40b1ac484aa6a
|
refs/heads/main
| 2023-01-22T07:13:11.729303
| 2020-12-05T05:12:07
| 2020-12-05T05:12:07
| 318,691,495
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,068
|
r
|
server_fourfactors.R
|
## ======================================================== ##
# Copyright (C) 2017 Nicholas Canova
#
# This launches the server side code for the
# player xy charts tool
## ======================================================== ##
# the logo
# Renders the static team-logos banner, sized to the client's window.
output$ff.logo <- renderImage({
# Static banner image for the four-factors tab.
filename <- 'Images/teamlogos.jpg'
# NOTE(review): 'output_sc.logo_width' reads the shot-chart module's width —
# looks copied from that module; ff's own width would be output_ff.logo_width.
# Confirm before changing.
this.width <- session$clientData$output_sc.logo_width
this.height <- this.width / (1365 / 1024) # the file ratio
# Return a list containing the filename
list(src = filename,
width = this.width,
height = this.height)
}, deleteFile = FALSE)
# the ghostplot
# Empty placeholder plot; its rendered width is read below to size the chart.
output$ff.ghostplot <- renderPlot({
df <- data.frame()
ggplot(df) + geom_point() + xlim(0, 10) + ylim(0, 100)
})
# the reactive UI based on singleteam or not
# Single-team mode: a single-select team picker.
output$ff.teamid <- renderUI({
if(input$ff.singleteamindicator.input == TRUE) {
selectizeInput("ff.team.input", label = "Select Team:", choices = ff.teams,
selected = 'GSW', multiple = FALSE)
} else{
return(NULL)
}
})
# Multi-team mode: up to four teams can be compared.
output$ff.teamids <- renderUI({
if(input$ff.singleteamindicator.input == FALSE) {
selectizeInput("ff.teams.input", label = "Select Team(s):", choices = ff.teams,
selected = 'GSW', multiple = TRUE, options = list(maxItems = 4))
} else{
return(NULL)
}
})
# the four factors chart
# Builds the interactive four-factors chart for the selected team(s).
output$fourfactors <- renderPlotly({
# set the input values
req(input$ff.singleteamindicator.input); # req(input$ff.checkbox)
single_team_evaluation <- input$ff.singleteamindicator.input
window.width <- session$clientData$output_ff.ghostplot_width
show.labels <- input$ff.checkbox
# NOTE(review): debug prints left in — consider removing for production.
print(input$ff.checkbox)
print(show.labels)
# Pick the team id(s) from whichever selector is active in this mode.
if(single_team_evaluation == TRUE) {
req(input$ff.team.input)
team.ids <- input$ff.team.input
} else {
req(input$ff.teams.input)
team.ids <- input$ff.teams.input
}
# call the chart
drawFourFactorsChart(team.stats, team.ids, single_team_evaluation, show.labels, color.pal.df, window.width)
})
|
2d3aac638e6b87fcf037ad8baeabf059339d49d9
|
a95598a8ee7fd6159ac4fa926ee877ce31138793
|
/r/V2_data_cleaning/00_run_all_data_cleaning.R
|
cbde29cd4d9536cbf22d11dd38d0bab2c20f3546
|
[
"CC0-1.0"
] |
permissive
|
amygimma/comix_shared_analyses
|
4b2a72bac3a4531843a1c5ab7e9cfaec97d96579
|
9c1ba7d2d0e8a132d006f1d16d95051303b8099b
|
refs/heads/master
| 2023-04-25T23:54:24.347367
| 2021-05-10T10:38:47
| 2021-05-10T10:38:47
| 287,931,327
| 0
| 2
|
CC0-1.0
| 2021-05-10T10:35:10
| 2020-08-16T11:35:13
|
R
|
UTF-8
|
R
| false
| false
| 2,437
|
r
|
00_run_all_data_cleaning.R
|
# Run every V2 data-cleaning script for each survey panel/wave in turn,
# then combine the cleaned per-wave files into one survey data set.
library(data.table)

if(file.exists("r/user_setup.R")) source("r/user_setup.R")

data_path <- "data"
# BUGFIX: USER_DATA_PATH is only defined when r/user_setup.R was sourced
# above; referencing it unconditionally errored for users without a local
# setup file. Guard with exists().
if (exists("USER_DATA_PATH") && !is.null(USER_DATA_PATH)) data_path <- USER_DATA_PATH
# =================
# =================
# CLEAN & SAVE
# =================
# =================
# OPTIONAL: SAVING TO FILR MAY THROW ERRORS IF CONNECTION IS SLOW, MANUALLY MOVE TO FILR
SAVE_LOCAL <- TRUE
# One entry per (panel, wave); spss_ref_ identifies the raw SPSS export.
# BUGFIX: the original list was missing a comma after the first "PFW7" entry
# (a parse error) and then repeated the panel_f wave_1..wave_5 entries (with
# wave_5 twice); the duplicates are removed so each wave is cleaned once.
waves_list <- list(
  list(panel_ = "panel_e", wave_ = "wave_1", spss_ref_ = "PEW1"),
  list(panel_ = "panel_e", wave_ = "wave_2", spss_ref_ = "PEW2"),
  list(panel_ = "panel_e", wave_ = "wave_3", spss_ref_ = "PEW3"),
  list(panel_ = "panel_e", wave_ = "wave_4", spss_ref_ = "PEW4"),
  list(panel_ = "panel_e", wave_ = "wave_5", spss_ref_ = "PEW5"),
  list(panel_ = "panel_e", wave_ = "wave_6", spss_ref_ = "PEW6"),
  list(panel_ = "panel_e", wave_ = "wave_7", spss_ref_ = "PEW7"),
  list(panel_ = "panel_e", wave_ = "wave_8", spss_ref_ = "PEW8"),
  list(panel_ = "panel_f", wave_ = "wave_1", spss_ref_ = "PFW1"),
  list(panel_ = "panel_f", wave_ = "wave_2", spss_ref_ = "PFW2"),
  list(panel_ = "panel_f", wave_ = "wave_3", spss_ref_ = "PFW3"),
  list(panel_ = "panel_f", wave_ = "wave_4", spss_ref_ = "PFW4"),
  list(panel_ = "panel_f", wave_ = "wave_5", spss_ref_ = "PFW5"),
  list(panel_ = "panel_f", wave_ = "wave_6", spss_ref_ = "PFW6"),
  list(panel_ = "panel_f", wave_ = "wave_7", spss_ref_ = "PFW7")
)
scripts_path <- file.path("r", "V2_data_cleaning")
country_code_ <- "uk"
CLEANING_SCRIPT <- TRUE
SAVE_LOCAL <- TRUE
for (wave_list in waves_list) {
  # The sourced scripts read panel_/wave_/spss_ref_ from the global env.
  panel_ <- wave_list$panel_
  wave_ <- wave_list$wave_
  spss_ref_ <- wave_list$spss_ref_
  message(spss_ref_)
  source(file.path(scripts_path, "dm01_rename_spss.R"))
  #Clean adult data
  message("Cleaning adult data")
  panel_ <- wave_list$panel_
  source(file.path(scripts_path, "dm02_data_clean.R"))
  # Clean child data (child panels use the adult panel id suffixed with "c")
  message("Cleaning child data")
  panel_ <- paste0(panel_, "c")
  source(file.path(scripts_path, "dm02_data_clean.R"))
}
source(file.path("r", "V2_data_cleaning", "dm04_combine_survey_files.R"))
|
3a5e84b763edac62dd633af0ea6fc55be2801cbf
|
1348830c4ac089d25b841bbb87283937d6362769
|
/MDS 570 - 2019 Fall A/code snippets.R
|
390ed4cb4e805375c025a4457fb9373e4a8fd5ba
|
[] |
no_license
|
megancusey/DataScienceCoursework
|
1a14938bc2070f21c64af44e92881e1ebe3e9c60
|
77b0799055e2d09f3fa98a318b52b96f27f5744c
|
refs/heads/master
| 2020-08-26T22:43:12.674912
| 2020-06-15T02:49:39
| 2020-06-15T02:49:39
| 217,170,696
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
code snippets.R
|
## Received temp directory error below indicating the tmp directory was deleted
## fix: dir.create(tempdir())
## remove scientic notation
## options(scipen = 999)
## remove variables/clear workspace:
## rm(list = ls())
|
8ee2e8d43504fcdf2bb41d2dcfe659ea8d650e6b
|
9c2296c877a283325c3998f4b4574cf7574c0512
|
/test_multcomp.R
|
fa1f6ce598baf9cc50bc6db920aa085242f40831
|
[] |
no_license
|
gosianow/drimseq_package_devel_tests
|
e7bee32bc8111d295f4e3c8598aad98cf1a9a178
|
c555beaf708f38f984e015e6bf53114ce129481f
|
refs/heads/master
| 2021-01-18T15:08:32.488731
| 2017-07-18T13:04:03
| 2017-07-18T13:04:03
| 56,255,862
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 543
|
r
|
test_multcomp.R
|
# Demo of multcomp::glht() general linear hypothesis tests on an lm() fit.
library(multcomp)
# NOTE(review): http URL to the old ATS/UCLA stats site — confirm the data
# source is still reachable before running.
hsb2 <- read.csv("http://www.ats.ucla.edu/stat/data/hsb2.csv")
hsb2$ses <- factor(hsb2$ses)
hsb2$female <- factor(hsb2$female)
# Reading score on social-studies score with an ses x female interaction.
m1 <- lm(read ~ socst + ses * female, data = hsb2)
summary(m1)
# Inspect the design matrix to see the coefficient order used by the
# contrast vectors K below (7 columns: intercept, socst, ses2, ses3,
# female1, ses2:female1, ses3:female1 — verify against the printout).
model.matrix(read ~ socst + ses * female, data = hsb2)
# difference between ses = 2 and ses =3 when female = 0
# K is a 1-row contrast matrix over the model coefficients.
K <- matrix(c(0, 0, 1, -1, 0, 0, 0), 1)
t <- glht(m1, linfct = K)
summary(t)
# difference between ses = 2 and ses =3 when female = 1
# The interaction coefficients enter the contrast when female = 1.
K <- matrix(c(0, 0, 1, -1, 0, 1, -1), 1)
t <- glht(m1, linfct = K)
summary(t)
|
f7afff7d29604aba7fa245a29cc41d37b7ceba77
|
ef6d2589cafddaad55752dbfd0b1a5d75f875b86
|
/man/watersupply.Rd
|
9a14812b7250978a6b1076fd946796accf91ebe8
|
[] |
no_license
|
lauraing/climatechangeimpacts
|
33a47f4ea1e2efa01c937c7a988f4528b601ea2c
|
fd0cf528e27a48b87941974c066be2fa396dd217
|
refs/heads/master
| 2021-03-13T04:44:51.136307
| 2020-03-23T20:12:47
| 2020-03-23T20:12:47
| 246,639,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,088
|
rd
|
watersupply.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/watersupply.R
\name{watersupply}
\alias{watersupply}
\title{watersupply}
\usage{
watersupply(
data = "",
years = 1952:2018,
area = 100,
total_depth = 200,
start_depth = 100
)
}
\arguments{
\item{data}{data frame with columns year, month, day, daily_rain}
\item{years}{the years in the data frame you want to specify for the time period (default is 1952 to 2018)}
\item{area}{the surface area of the reservoir}
\item{total_depth}{the total depth of the reservoir}
\item{start_depth}{the depth at the beginning of the year range in years}
}
\value{
Returns a list containing,
\describe{
\item{end_vol}{Volume of water in reservoir at the end of the time period (square feet)}
\item{pct_full}{Percent of reservoir volume occupied at the end of the time period}
\item{pct_change}{Percent change in reservoir volume occupied over the time period}
}
}
\description{
Calculate the end-of-period volume, percent full, and percent change in volume of a reservoir over a specified time period, based on daily precipitation data.
}
\author{
Laura Ingulsrud & Keene Morrow
}
|
c6137167375f299a788f3ce85d02508a66bb9da0
|
438a5ea1725e733c9fedde81ad0c039eb82843e1
|
/App.R
|
496d45ec8245f70fb6dfcb5c4150f46c5fba73de
|
[] |
no_license
|
mefacer/simba2
|
4f2e2d9c67ec8383ea32f35e60948e25662c2e68
|
c97a719547a32d91fc169838242a08da82df7c5f
|
refs/heads/master
| 2020-05-02T14:47:57.386290
| 2020-04-15T09:48:08
| 2020-04-15T09:48:08
| 178,021,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
App.R
|
# Entry point: loads globals, UI and server, then launches the Shiny app.
# (Comments translated from Catalan.) This is for working from RStudio.
source('global.R') # Script that loads all the required packages
source("ui.R") # User Interface
source("server.R") #Server
# NOTE(review): warn=-1 suppresses ALL warnings globally; consider removing.
options(warn=-1)
shinyApp(ui = ui, server = server, options = c(launch.browser=T))
|
519be97bbae93836a927d89ce731a8af68e93e84
|
210882441930a37964e124a71f71ae4636cf2ad9
|
/plot3.R
|
f74d42ea43577d0c0daf49a46405c0bf78e82e79
|
[] |
no_license
|
erwinetorres/Exploratory_data_analysis_Project-2
|
c0c84267b9c3635544ad6d901f1763dca61aec15
|
9fededecbc0f4b423a5c39714121f4ac1f165131
|
refs/heads/master
| 2021-09-10T05:32:20.518217
| 2018-03-21T05:40:14
| 2018-03-21T05:40:14
| 126,127,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,505
|
r
|
plot3.R
|
# PLOT 3. Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable, which of these four sources have seen decreases in emissions from 1999–2008 for Baltimore City? Which have seen increases in emissions from 1999–2008? Use the ggplot2 plotting system to make a plot answer this question.
## Preliminaries. Set working directory. Make sure it contains the two .rds files.
getwd()
## NOTE(review): machine-specific absolute path; anyone else running this must
## point setwd() at their own copy of the two .rds files.
setwd("C:/Users/Acer/Documents/DATA SCIENCE_DOST and COURSERA/Exploratory Data Analysis/Project 2")
## Load the PM2.5 and source classification code data sets using readRDS.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset emissions of Baltimore City (fips "24510") and aggregate the sum by year
Baltimore_NEI <- NEI[NEI$fips=="24510",]
Agg_Baltimore_NEI <- aggregate(Emissions ~ year, Baltimore_NEI,sum)
# Open a PNG graphics device in the working directory
png("plot3.png",width=600,height=500,units="px")
# Sum emissions of Baltimore City by type of emission and year.
Baltimore_Source <- aggregate(Baltimore_NEI[c("Emissions")],
list(type = Baltimore_NEI$type, year = Baltimore_NEI$year), sum)
# Generate line graph using the ggplot2 plotting system.
library(ggplot2)
qplot(year, Emissions, data = Baltimore_Source,
color = type, geom= "line")+
ggtitle("Figure 3. Total PM2.5 emissions in Baltimore City by source, 1999 - 2008.") +
xlab("Year") + ylab("PM2.5 emissions (Tons)")
# Close the device so the PNG file is written
dev.off()
# End
|
22de074ba2ef28f2d083f20ea4cbb15e94cecae1
|
031954f00cb6a0c0761d3f3040711350524d8fbf
|
/man/getTargetAnnotationStats-methods.Rd
|
1d587d7c7e1e718556c364f14eabc36c7318e836
|
[] |
no_license
|
katwre/genomation
|
881c4f4fa716f0f9befc31181a69ddb831332d47
|
d18cad2e4e45d097631ff843d4927bfaf99b6f64
|
refs/heads/master
| 2021-01-18T17:14:41.553781
| 2018-01-30T16:35:52
| 2018-01-30T16:35:52
| 33,183,415
| 2
| 0
| null | 2015-03-31T12:11:28
| 2015-03-31T12:11:27
| null |
UTF-8
|
R
| false
| true
| 1,648
|
rd
|
getTargetAnnotationStats-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readAnnotate.R
\docType{methods}
\name{getTargetAnnotationStats}
\alias{getTargetAnnotationStats}
\alias{getTargetAnnotationStats,AnnotationByFeature-method}
\title{Get the percentage of target features overlapping with annotation from AnnotationByFeature}
\usage{
getTargetAnnotationStats(x,percentage=TRUE,precedence=TRUE)
\S4method{getTargetAnnotationStats}{AnnotationByFeature}(x, percentage = TRUE,
precedence = TRUE)
}
\arguments{
\item{x}{a \code{AnnotationByFeature} object}
\item{percentage}{TRUE|FALSE. If TRUE percentage of target
features will be returned. If FALSE,
number of target features will be returned}
\item{precedence}{TRUE|FALSE. If TRUE there will be a hierachy of annotation
features when calculating numbers
(with promoter>exon>intron precedence)
That means if a feature overlaps with a promoter it will be counted as
promoter overlapping only, or if it is overlapping with a an exon
but not a promoter, #' it will be counted as exon overlapping only whether or
not it overlaps with an intron.}
}
\value{
a vector of percentages or counts showing quantity of target features
overlapping with annotation
}
\description{
This function retrieves percentage/number of target features
overlapping with annotation
}
\examples{
data(cage)
bed.file=system.file("extdata/chr21.refseq.hg19.bed", package = "genomation")
gene.parts = readTranscriptFeatures(bed.file)
cage.annot=annotateWithGeneParts(cage, gene.parts, intersect.chr=TRUE)
getTargetAnnotationStats(cage.annot)
}
|
f113081c8999cc3e59132b294a9450fce13cf521
|
dde2b8dd95bb8fca23616ce7989926d9f0d2aef0
|
/man/train_test_split.Rd
|
3c9762eb5cb25faa4d2dc9535d77356865d4d2ae
|
[
"MIT"
] |
permissive
|
bioinfo00/xnnet0
|
29ca5cd464840aaffa6f7691fe5d406eadb2ddc1
|
d09d0de6fa3b1fc6361875265453bb8e2f2078c7
|
refs/heads/master
| 2022-12-12T02:59:56.067214
| 2020-09-11T17:04:07
| 2020-09-11T17:04:07
| 294,013,766
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 794
|
rd
|
train_test_split.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xnnet_functions_v0.1.R
\name{train_test_split}
\alias{train_test_split}
\title{Splitting data in training and test set}
\usage{
train_test_split(X, y, training_fraction = 0.7, rnd_seed = 666)
}
\arguments{
\item{X}{matrix with samples as rows and genes as features}
\item{y}{binary label for each sample in X}
\item{training_fraction}{fraction of data for training}
\item{rnd_seed}{random seed for reproducible results}
}
\value{
A list containing \code{(X_train, y_train, X_test, y_test)}.
}
\description{
This function splits a dataset X and corresponding
labels y into a training and test set
}
\examples{
data("GSE37250") #load Tubercolosis dataset
GSE37250_split = train_test_split(GSE37250$X, GSE37250$y)
}
|
18fa981b9ff6677ca32fbeba08e98c196c169de4
|
9bf5d376a037ea97e6e5bab796ef6bc5ec6afd86
|
/R Tutorial - #12 - Repeated measures and mixed design ANOVA.R
|
2c920c9507231bdd79a501eda26d43381e860cb0
|
[] |
no_license
|
Sergiommrr/R-Tutorial-All-Files
|
828df4a50aefecbeb423de4d73d97fc219114707
|
7694598cc2f0442f15d88b7f07fea7065fdf71e8
|
refs/heads/main
| 2023-03-20T05:26:12.832613
| 2020-12-16T02:09:33
| 2020-12-16T02:09:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,453
|
r
|
R Tutorial - #12 - Repeated measures and mixed design ANOVA.R
|
# R Tutorial - #12 - Repeated measures and mixed design ANOVA
# By: Aaron Prosser, MD MSc | Last edited: 14-Dec-2020
# The following code is open-source. Feel free to edit/share at your pleasure.
# All the data sets and R code for this tutorial series can be downloaded here:
# https://github.com/Statistics4Doctors
# Part 1 - Example: Psychotherapy Trial
# Part 2 - Descriptive statistics and preliminary plot
# Part 3 - One-way repeated measures ANOVA
# Part 4 - Mixed design ANOVA
# Install these packages, if you haven't already:
install.packages("openxlsx")
install.packages("ggplot2")
install.packages("psych")
install.packages("emmeans")
install.packages("dplyr")
install.packages("nortest")
install.packages("afex")
install.packages("ggfortify")
install.packages("ggResidpanel")
# Load the packages we're using in this tutorial:
library(openxlsx)
library(ggplot2)
library(psych)
library(emmeans)
library(dplyr)
library(nortest)
library(afex)
library(ggfortify)
library(ggResidpanel)
# Disable scientific notation:
options(scipen=999, digits=4)
# Set directory:
dir <- "C:/Users/Aaron/iCloudDrive/Projects/Statistics for Doctors/R Tutorial/"
# -------- #
# Part 1 # Example: Psychotherapy Trial
# -------- #
# This is NOT real data. It is simulated data for teaching purposes.
# N = 134 in total, 67 in each intervention group
# RCT to assess the efficacy of outpatient psychotherapy vs. wait list control
# to improve psychosocial functioning (i.e., capacity for work, love, and play)
# in adult patients with chronic depression.
# Outcome measure:
# - Psychosocial functioning: 0 to 100 point scale (higher score = better functioning)
# Predictor variables:
# - Intervention: Wait list control group vs. Psychotherapy group
# - Time: baseline (t0), 1 mo (t1), 6 mo (t6), 12 mo (t12)
# Import data and create a data frame:
file <- "psychotherapy_trial.xlsx"
path <- paste(dir,file,sep="")
import <- read.xlsx(path, sheet = 1, startRow = 1)
dat <- as.data.frame(import)
head(dat, 20)
# Factor the predictors:
dat$intervention <- factor(dat$intervention, levels=c("Wait list", "Psychotherapy"),
c("Wait list", "Psychotherapy"))
dat$intervention <- relevel(dat$intervention, ref="Wait list")
dat$time <- factor(dat$time, levels=c("t0", "t1", "t6", "t12"),
labels=c("t0", "t1", "t6", "t12"))
dat$time <- relevel(dat$time, ref="t0")
attributes(dat$intervention)
attributes(dat$time)
# -------- #
# Part 2 # Descriptive statistics and preliminary plot
# -------- #
# Is the data balanced or unbalanced (i.e., same #s per level of factor or not)?
table(dat$intervention, dat$time)
# Any missing data?
isTRUE(is.na(dat))
# Descriptive statistics of all the time points and intervention groups
wl_t0 <- subset(dat$psychosocial_fx, dat$intervention=="Wait list" & dat$time=="t0")
wl_t1 <- subset(dat$psychosocial_fx, dat$intervention=="Wait list" & dat$time=="t1")
wl_t6 <- subset(dat$psychosocial_fx, dat$intervention=="Wait list" & dat$time=="t6")
wl_t12 <- subset(dat$psychosocial_fx, dat$intervention=="Wait list" & dat$time=="t12")
psy_t0 <- subset(dat$psychosocial_fx, dat$intervention=="Psychotherapy" & dat$time=="t0")
psy_t1 <- subset(dat$psychosocial_fx, dat$intervention=="Psychotherapy" & dat$time=="t1")
psy_t6 <- subset(dat$psychosocial_fx, dat$intervention=="Psychotherapy" & dat$time=="t6")
psy_t12 <- subset(dat$psychosocial_fx, dat$intervention=="Psychotherapy" & dat$time=="t12")
dat_descriptives <- data.frame(wl_t0, wl_t1, wl_t6, wl_t12,
psy_t0, psy_t1, psy_t6, psy_t12)
# Get the descriptive stats for each time point and intervention group
dat_descriptives <- as.data.frame(describe(dat_descriptives))
# Add some helpful columns to the data frame in preparation for the plot
dat_descriptives$time <- rep(c("t0", "t1", "t6", "t12"), 2)
dat_descriptives$time_num <- rep(c(0, 1, 6, 12), 2)
dat_descriptives$intervention <- c(rep(c("Wait list"), 4),
rep(c("Psychotherapy"), 4))
# Plot the group-level effects
ggplot() +
coord_cartesian(ylim=c(0,100), xlim=c(0,12)) +
scale_y_continuous(expand=c(0, 0), breaks=seq(from=0, to=100, by=10)) +
scale_x_continuous(breaks=c(0, 1, 6, 12), minor_breaks=NULL) +
ylab('Psychosocial functioning') +
xlab('Time (Months)') +
theme(legend.position="bottom") +
geom_jitter(data=dat, shape=16, size=1, width=0.2, alpha=0.4,
aes(x=time_num, y=psychosocial_fx, col=intervention)) +
geom_point(data=dat_descriptives, size=4,
position=position_dodge(0.25),
aes(x=time_num, y=mean, col=intervention)) +
geom_line(data=dat_descriptives, size=1,
aes(x=time_num, y=mean, col=intervention))
# -------- #
# Part 3 # One-way repeated measures ANOVA
# -------- #
# Select only the patients in the psychotherapy arm of the RCT
dat_psy <- subset(dat, dat$intervention=="Psychotherapy")
table(dat_psy$time)
# How to specify a repeated measures using aov_4():
# Data must be in LONG format.
# Outcome = dependent variable
# Factor A = within-subject factor
# Factor B = between-subject factor
# ID = participant ID column
# aov_4(outcome ~ A*B + (rm predictors | ID), type=3, data=my_data)
m <- aov_4(psychosocial_fx ~ time + (time|ID), type=3, data=dat_psy)
summary(m)
# Get the Sphericity-corrected degrees of freedom (df)
nice(m,
intercept=TRUE,
correction=
"GG") # Greenhouse-Geisser correction (default)
#"HF") # Huynh-Feldt correction
#"none") # No df correction
# Plot the data:
emmip(m, ~ time, CIs=TRUE,
xlab="Time (Months)",
ylab="Psychosocial functioning")
# IMPORTANT: Read the afex package PDF, p. 30, regarding why model should be set to
# "multivariate" for repeated measures data: https://cran.r-project.org/web/packages/afex/afex.pdf
# Post-hoc comparisons and estimated marginal means:
posthocs <- emmeans(m, specs=pairwise ~ time, model="multivariate",
adjust="tukey") # Tukey's HSD
#adjust="bonferroni") # Bonferroni
#adjust="fdr") # FDR (Benjamini–Hochberg) adjustment
#adjust="none") # No adjustment
posthocs$emmeans # Estimated marginal means (EMM)
posthocs$contrasts %>% summary(infer = TRUE) # Post-hoc comparisons
# Extract residuals/predicted values:
new_dat <- fitted(m, append=TRUE)
residuals_tmp <- residuals(m, append=TRUE)
new_dat$.residuals <- residuals_tmp$.residuals
head(new_dat, 20)
# Parametric test assumptions:
# Histogram of the residuals
ggplot(data=new_dat, aes(x=.residuals)) +
ylab('Frequency') +
xlab(bquote("Residuals")) +
geom_histogram(color="black", fill="white", binwidth=7)
# Diagnostic plots
# Unfortunately, at present, someone needs to develop code so we can run the
# same autoplot() function we did to get the diagnostic plots for the one-way
# and factorial ANOVAs and linear regressions:
autoplot(m$lm, which=c(1,2,3,4))
# Similarly:
plot(m$lm)
# But! We can use the resid_auxpanel() function from the ggResidpanel package
# to get a nice plot similar to the autoplot() function:
resid_auxpanel(residuals=new_dat$.residuals,
predicted=new_dat$.fitted,
smoother=TRUE, qqbands=TRUE, bins = 50,
plots=c("resid", "qq", "index", "hist"))
# If you wanted to, you could manually plot the scale-location and Cook's distance
# graphs yourself since you have all the data to calculate & plot these.
# Calculate skew and kurtosis of the residuals
describe(new_dat$.residuals, type=1)
# Statistical tests of the normality of the residuals
sf.test(new_dat$.residuals) # Shapiro-Francia test
ad.test(new_dat$.residuals) # Anderson-Darling test
lillie.test(new_dat$.residuals) # Lilliefors (Kolmogorov-Smirnov) test
# Planned contrasts
# Step 1 - Run the model
m <- aov_4(psychosocial_fx ~ time + (time|ID), type=3, data=dat_psy)
# Step 2 - Get the estimated marginal means (EMM)
EMM <- emmeans(m, specs= ~ time, model="multivariate")
# Step 3 - Define the contrasts using the Seven Rules
# Max. number of orthogonal contrasts:
# k - 1 = 4 - 1 = max. of 3 contrasts
# What's the order of the group means?
attributes(dat_psy$time)
EMM
# OK, so we need to build our max. 3 orthogonal contrasts based on this order:
contr <- list(
baseline_vs_FU = c(-1, 1/3, 1/3, 1/3), # Baseline vs. any follow-up time
early_vs_late = c(0, -1, 0.5, 0.5), # Early (t1) vs. late (t6 or t12) follow-up times
t6_vs_t12 = c(0, 0, -1, 1) # t6 vs. t12 month follow-up
)
# Step 4 - Apply the contrasts to the EMM
contrast(EMM, method=contr) %>% summary(infer = TRUE)
# -------- #
# Part 4 # Mixed design ANOVA
# -------- #
m <- aov_4(psychosocial_fx ~ time*intervention + (time|ID), type=3, data=dat)
summary(m)
# Get the Sphericity-corrected degrees of freedom (df)
nice(m,
intercept=TRUE,
correction=
"GG") # Greenhouse-Geisser correction (default)
#"HF") # Huynh-Feldt correction
#"none") # No df correction
# Plot the data:
emmip(m, intervention ~ time, CIs=TRUE,
xlab="Time (Months)",
ylab="Psychosocial functioning")
# Post-hoc comparisons and estimated marginal means:
posthocs <- emmeans(m, specs=pairwise ~ time*intervention, model="multivariate",
adjust="tukey") # Tukey's HSD
#adjust="bonferroni") # Bonferroni
#adjust="fdr") # FDR (Benjamini–Hochberg) adjustment
#adjust="none") # No adjustment
posthocs$emmeans # Estimated marginal means (EMM)
posthocs$contrasts %>% summary(infer = TRUE) # Post-hoc comparisons
# Extract residuals/predicted values:
new_dat <- fitted(m, append=TRUE)
residuals_tmp <- residuals(m, append=TRUE)
new_dat$.residuals <- residuals_tmp$.residuals
head(new_dat, 20)
# Parametric test assumptions:
# Histogram of the residuals
ggplot(data=new_dat, aes(x=.residuals)) +
ylab('Frequency') +
xlab(bquote("Residuals")) +
geom_histogram(color="black", fill="white", binwidth=7)
# Diagnostic plots
resid_auxpanel(residuals=new_dat$.residuals,
predicted=new_dat$.fitted,
smoother=TRUE, qqbands=TRUE, bins = 50,
plots=c("resid", "qq", "index", "hist"))
# Calculate skew and kurtosis of the residuals
describe(new_dat$.residuals, type=1)
# Statistical tests of the normality of the residuals
sf.test(new_dat$.residuals) # Shapiro-Francia test
ad.test(new_dat$.residuals) # Anderson-Darling test
lillie.test(new_dat$.residuals) # Lilliefors (Kolmogorov-Smirnov) test
# Planned contrasts
# Step 1 - Run the model
m <- aov_4(psychosocial_fx ~ time*intervention + (time|ID), type=3, data=dat)
# Step 2 - Get the estimated marginal means (EMM)
EMM <- emmeans(m, specs= ~ time*intervention, model="multivariate")
# Step 3 - Define the contrasts using the Seven Rules
# Max. number of orthogonal contrasts:
# k - 1 = (4 x 2 group means) - 1 = max. of 7 contrasts
# What's the order of the group means?
EMM
# OK, so we need to build our max. 7 orthogonal contrasts based on this order:
contr <- list(
psy_vs_wl = c(-0.25, -0.25, -0.25, -0.25, # Psychotherapy vs. wait list control conditions
0.25, 0.25, 0.25, 0.25),
psy_baseline_vs_FU = c(0, 0, 0, 0, # Psychotherapy: Baseline vs. any follow-up time
-1, 1/3, 1/3, 1/3),
wl_baseline_vs_FU = c(-1, 1/3, 1/3, 1/3, # Wait list: Baseline vs. any follow-up time
0, 0, 0, 0),
psy_vs_wl_earlyTx = c(0,-1, 0, 0, # Difference in functioning early in treatment?
0, 1, 0, 0),
psy_vs_wl_lateTx = c(0, 0, -0.5, -0.5, # Difference in functioning late in treatment?
0, 0, 0.5, 0.5),
psy_vs_wl_6moTx = c(0, 0, -1, 0, # Difference in functioning after 6 months of treatment?
0, 0, 1, 0),
psy_vs_wl_12moTx = c(0, 0, 0, -1, # Difference in functioning after 12 months of treatment?
0, 0, 0, 1)
)
# Step 4 - Apply the contrasts to the EMM
contrast(EMM, method=contr) %>% summary(infer = TRUE)
|
d0ef4048f7d8524cb60e89093b3b8ec05274c34e
|
175d4dd001f06ce44f615ae76c6b7ac35376dcf3
|
/Behavioural and CORT data scripts/CORT/Analysis_with_CORT.R
|
ca21d081481252e437a4e0a71a20aad97352ac81
|
[] |
no_license
|
LenaGschossmann/RT-qPCR-quantification-project
|
cba1386fb4130de69c124d5ec9148defd1b92ac6
|
5a02974ac4e69cc712ea9c98bb222bde11504b5c
|
refs/heads/master
| 2020-03-23T10:44:09.828322
| 2018-09-19T08:32:44
| 2018-09-19T08:32:44
| 141,459,495
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 4,639
|
r
|
Analysis_with_CORT.R
|
##### Include CORT
# source('C:/Users/Gschossmann/Dropbox/studies/Osnabrück/Universität/Bachelorarbeit_DroBo/experiments/Analysis Code/PCR/qPCR_GeneExpressionAssay/getCORT.R')
source('C:/Users/lena_/Dropbox/studies/Osnabrück/Universität/Bachelorarbeit_DroBo/experiments/Analysis Code/CORT/getCORT.R')
savepath = 'C:/Users/lena_/Dropbox/studies/Osnabrück/Universität/Bachelorarbeit_DroBo/experiments/PCR/Mito_gene_expression/analysis/R_analysis'
############## Get CORT data
fileX = 'Cohort VR/Data Males Project 1-CORT.csv'
filepathCORT = paste('C:/Users/lena_/Dropbox/studies/Osnabrück/Universität/Bachelorarbeit_DroBo/experiments/Behavioural tests', fileX, sep='/')
cort = getCORT(filepathCORT)
############## Gene Expression data: get data_corrected_tot: data.frame with final data from all genes
#...after all parts have been saved separately:
x1=read.csv(paste(savepath, 'part1_data_corrected.csv', sep='/'))
x2=read.csv(paste(savepath, 'part2_data_corrected.csv', sep='/'))
x3=read.csv(paste(savepath, 'part3_data_corrected.csv', sep='/'))
data_corrected_tot = rbind(x1, rbind(x2, x3))
############## Behavioural Data
#xxx
############## Write CORT data into the gene-expression data frame
# Work with all data: attach each animal's CORT value to the expression table.
# Vectorized match() lookup replaces the original row-by-row loop. It maps each
# Animal ID to the first matching row of `cort` in one pass, and yields NA for
# an animal with no CORT record (the original loop crashed with
# "replacement has length zero" in that case).
data_with_CORT <- data_corrected_tot
data_with_CORT$CORT <- cort$CORT[match(data_with_CORT$Animal, cort$ID)]
####################################### LinRegression & Pearson Correlation
tmpGOI = unique(data_with_CORT$Gene)
LinRegGOI = data.frame(matrix(ncol=4, nrow=length(tmpGOI)))
colnames(LinRegGOI) = c('Gene', 'Slope', 'Intercept', 'Observations')
corr_coeffsGOI = data.frame(matrix(ncol=4, nrow=length(tmpGOI)))
colnames(corr_coeffsGOI) = c('Gene', 'R', 'p', 'Observations')
for(iG in 1:length(tmpGOI)){
tmpData = data_with_CORT[data_with_CORT$Gene == tmpGOI[iG],]
#Pearson
pearson_corr = rcorr(tmpData$Rel_quantity,tmpData$CORT, type='pearson')
corr_coeffsGOI$Gene[iG] = as.character(tmpGOI[iG])
corr_coeffsGOI$R[iG] = pearson_corr$r[1,2]
corr_coeffsGOI$p[iG] = round(pearson_corr$P[1,2], digits=6)
corr_coeffsGOI$Observations[iG] = nrow(tmpData)
#LinReg
mod = lm(Rel_quantity ~ CORT, data=tmpData)
LinRegGOI$Gene[iG] = as.character(tmpGOI[iG])
LinRegGOI$Slope[iG] = mod$coefficients[2]
LinRegGOI$Intercept[iG] = mod$coefficients[1]
LinRegGOI$Observations[iG] = nrow(tmpData)
}
#Output
savepath = 'C:/Users/lena_/Dropbox/studies/Osnabrück/Universität/Bachelorarbeit_DroBo/experiments/PCR/Mito_gene_expression/analysis/R_analysis'
write.xlsx(LinRegGOI, paste(savepath,'LinReg_CORT_GOI.xlsx',sep='/'))
write.xlsx(corr_coeffsGOI, paste(savepath,'Corr_coefficients_CORT_GOI.xlsx',sep='/'))
#
############ SPLIT GROUPS - LinRegression & Pearson Correlation
tmpGOI = as.character(unique(data_with_CORT$Gene))
tmpGroups = as.character(unique(data_with_CORT$Group))
LinRegGOI_groups= data.frame(matrix(ncol=5, nrow=length(tmpGOI)*length(tmpGroups)))
colnames(LinRegGOI_groups) = c('Gene', 'Group', 'Slope', 'Intercept', 'Observations')
corr_coeffsGOI_groups = data.frame(matrix(ncol=5, nrow=length(tmpGOI)*length(tmpGroups)))
colnames(corr_coeffsGOI_groups) = c('Gene','Group', 'R', 'p', 'Observations')
idx=0
for(iG in tmpGOI){
for(iGr in tmpGroups){
idx=idx+1
tmpData = data_with_CORT[data_with_CORT$Gene == iG & data_with_CORT$Group == iGr,]
#Pearson
if(nrow(tmpData) < 5){
corr_coeffsGOI_groups$Gene[idx] = iG
corr_coeffsGOI_groups$Group[idx] = iGr
corr_coeffsGOI_groups$R[idx] = NA
corr_coeffsGOI_groups$p[idx] = NA
corr_coeffsGOI_groups$Observations[idx] = nrow(tmpData)
}else{
pearson_corr = rcorr(tmpData$Rel_quantity,tmpData$CORT, type='pearson')
corr_coeffsGOI_groups$Gene[idx] = iG
corr_coeffsGOI_groups$Group[idx] = iGr
corr_coeffsGOI_groups$R[idx] = pearson_corr$r[1,2]
corr_coeffsGOI_groups$p[idx] = round(pearson_corr$P[1,2], digits=6)
corr_coeffsGOI_groups$Observations[idx] = nrow(tmpData)
}
#LinReg
mod = lm(Rel_quantity ~ CORT, data=tmpData)
LinRegGOI_groups$Gene[idx] = iG
LinRegGOI_groups$Group[idx] = iGr
LinRegGOI_groups$Slope[idx] = mod$coefficients[2]
LinRegGOI_groups$Intercept[idx] = mod$coefficients[1]
LinRegGOI_groups$Observations[idx] = nrow(tmpData)
}
}
#Output
savepath = 'C:/Users/lena_/Dropbox/studies/Osnabrück/Universität/Bachelorarbeit_DroBo/experiments/PCR/Mito_gene_expression/analysis/R_analysis'
write.xlsx(LinRegGOI_groups, paste(savepath,'LinReg_CORT_GOI_groups.xlsx',sep='/'))
write.xlsx(corr_coeffsGOI_groups, paste(savepath,'Corr_coefficients_CORT_GOI_groups.xlsx',sep='/'))
#
|
bee469cea818932b4aac6c62e78d5cb13b6e6030
|
b9f566ec503bae8a4ff7de204bb51f7e10efad85
|
/data/2021/2021-09-01/ejemplo.R
|
d3cd0d769b6715c1a6e0deed0ed52844d957d7a3
|
[] |
no_license
|
BESTDATASCIENCE/manos-a-la-data
|
11b759e7f5b0c7fb160b68e071e65bf6b2f6c726
|
609c63f060ac917d827d1450ab3f2f5a5febb24b
|
refs/heads/master
| 2021-09-11T14:59:03.524628
| 2021-09-04T06:50:21
| 2021-09-04T06:50:21
| 244,521,797
| 8
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
ejemplo.R
|
# Setup: load data-import/wrangling packages, then read the provider registry.
library(readxl)
library(dplyr)
library(tidyr)
library(readr)
library(dplyr)
# NOTE(review): library(dplyr) is loaded twice above (harmless, but redundant).
library(vroom)
library(readstata13)
library(foreign)
library(here)
# Disable scientific notation in printed output.
options(scipen=999)
library(XML)
library(RCurl)
library(rlist)
library(htmltab)
# Side effect: changes the working directory to the session's data folder.
setwd(here::here("data/2021/2021-09-01/"))
# Read the supplier registry workbook (first sheet by default).
bd <- readxl::read_xlsx("2-Registro de Proveedores ME.xlsx")
|
f801b9b63753dc7c680ffb2782d2736b495b0ef5
|
80e10d3af490e953600abc36743dea3e034db595
|
/scripts/variation_coefficient.R
|
635b5dec56c3bb2c52b99b1f3922131c0ee39ca6
|
[] |
no_license
|
dmitrav/reGRiE
|
486f46203a60e88990a28a440110559bce4647d3
|
9d1ce444ba04a6caba11bf7cd2f293bff06bd6c3
|
refs/heads/master
| 2021-01-21T11:15:10.744604
| 2018-02-21T10:39:01
| 2018-02-21T10:39:01
| 83,536,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
variation_coefficient.R
|
# Compare the coefficient of variation (sd / mean) over time for simulations
# with repression on vs. off. Input files are space-separated with two columns:
# V1 and V2 — presumably variance and mean respectively (sqrt(V1)/V2 below);
# TODO confirm against the simulation output format.
rep_data = read.csv("/Volumes/Seagate Expansion Drive/simulations/reGRiE2/Kr_60sec_repression_on/rep_variances_related_to_mean.txt", header = F, sep = " ")
norep_data = read.csv("/Volumes/Seagate Expansion Drive/simulations/reGRiE2/Kr_60sec_repression_off/variances_related_to_mean.txt", header = F, sep = " ")
# Coefficient of variation: sd / mean.
rep_data$values = sqrt(rep_data$V1) / rep_data$V2
norep_data$values = sqrt(norep_data$V1) / norep_data$V2
# Stack both conditions in long format; time points every 5 s up to 60 s.
# (Type labels are in Russian: "with repression" / "without repression".)
joint_df = data.frame(time = seq(5,60,5), values = rep_data$values, type = "С репрессией")
joint_df = rbind(joint_df, data.frame(time = seq(5,60,5), values = norep_data$values, type = "Без репрессии"))
library(ggplot2)
# Line plot of CV vs. time, one colored line per condition
# (axis/legend labels are in Russian: time (s), CV values, experiments).
ggplot(joint_df, aes(x = time, y = values, colour = type)) +
geom_line() + geom_point() +
scale_x_continuous(breaks = seq(5,60,5)) +
scale_y_continuous(limits = c(0.05,0.13), breaks = seq(0.05,0.13,0.01)) +
xlab("Время, с") +
ylab("Значения коэффициента вариации") +
guides(colour = guide_legend(title = "Эксперименты:"))
|
4805fdd406240b443cd2072a3b71f18450c79398
|
83a90e05e4124f5cf85f7ba300b0f3180acfc9a3
|
/man/nn_classification_cpp.Rd
|
591f148b8fd20d7572baa15263ded63dc6fdf859
|
[
"MIT"
] |
permissive
|
kmacdon/kd-tree-rcpp
|
ca358845a4e120dc18d1616d321aba6e192b99fe
|
11d12887f653652e064918c47ffce491a0d90bc6
|
refs/heads/master
| 2021-03-02T16:01:03.090755
| 2020-03-08T20:30:21
| 2020-03-08T20:30:21
| 245,882,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 540
|
rd
|
nn_classification_cpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{nn_classification_cpp}
\alias{nn_classification_cpp}
\title{Nearest Neighbor Classification}
\usage{
nn_classification_cpp(train, test, classes)
}
\arguments{
\item{train}{A numeric matrix of training points}
\item{test}{A numeric matrix of test points}
\item{classes}{An integer vector of training point classifications}
}
\description{
Finds the closest neighbor for each point in the test matrix
and returns a vector of classifications
}
|
879b44dedb40e2a0517a0325d131625073f646f8
|
e33bcfe36f738c73163d3c197660f159eaf7e2e7
|
/R Cheatsheet/package doc/mirt/Examples/Example_02.R
|
073c72bb7de592b9434159c89426881536981431
|
[] |
no_license
|
kingmbc/TestGit
|
e6237f5f45c2109ebe83384890e49853c7bae82d
|
7a1508aa945cf6433d70efc2d7c6e4cc505ef444
|
refs/heads/master
| 2020-12-02T21:24:40.732679
| 2017-07-05T11:38:47
| 2017-07-05T11:38:47
| 96,312,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,833
|
r
|
Example_02.R
|
#----------------------------------------------------------------
# Example 2 - Multidimensional IRT, itemtypes, and fit statistics
#----------------------------------------------------------------
library(mirt)
#use SAT12 data, adopted from the TESTFACT software, which has not yet been scored.
# (treat the 8's, which are the code for missing, as incorrect for now)
help(SAT12)
head(SAT12)
key <- c(1,4,5,2,3,1,2,1,3,1,2,4,2,1,5,3,4,4,1,4,3,3,4,1,3,5,1,3,1,5,4,5)
data <- key2binary(SAT12, key = key)
head(data)
#extract 1 and 2 traits (exploratory) with 3PL models, but fix guessing parameters to .1
mod1 <- mirt(data, 1, guess = .1)
mod2 <- mirt(data, 2, guess = .1)
mod1
mod2
#compare models
anova(mod1, mod2)
#summary function provides rotating options/factor loadings metric
help('summary-method')
summary(mod1)
summary(mod2)
summary(mod2, rotate = 'varimax')
summary(mod2, suppress = .2)
# coef
coef(mod2)
coef(mod2, simplify=TRUE)
coef(mod2, simplify=TRUE, rotate = 'oblimin') #also accepts rotate option
# some 3d item plots
itemplot(mod1, 1)
itemplot(mod2, 1)
itemplot(mod2, 1, rotate = 'oblimin')
# define a confirmatory model from summary(mod2, suppress = .2)
help(mirt.model)
model <- mirt.model('F1 = 1-11,13-32
F2 = 4,7,9-15,21,23-25,27,29
COV = F1*F2')
cmod <- mirt(data, model, guess = .1, SE=TRUE)
coef(cmod)
summary(cmod)
itemplot(cmod, 1)
itemplot(cmod, 1, drop.zeros = TRUE) #drop zeros from plot, lowers dimensions
#--------------------------------------------------------------------------
# item 12, 30, and 32 appears to have very small commonalities and should be investigated.
# For diagnostic reasons we should look to see if these have been miss-scored, or if
# the distractors provide any useful information to warrant keeping them in the test.
newdata <- data
newdata[,c(12, 30, 32)] <- as.matrix(SAT12[,c(12, 30, 32)])
newdata[newdata == 8] <- NA #treat the 8's as missing data
head(newdata)
#fit a nominal model to suspicious items, and 2PL with fixed guessing elsewhere
itemtype <- rep('2PL', 32)
pick <- c(12,30,32)
itemtype[pick] <- 'nominal'
itemtype
mod <- mirt(newdata, 1, itemtype = itemtype, guess = .1)
key[pick]
coef(mod)[pick]
itemplot(mod, 12) #generally poor, categories 3 and 4 appear to be functioning the same (both correct?)
itemplot(mod, 30) #correctly scored, distractor patterns give next to no information
itemplot(mod, 32) #looks to be incorrectly scored! key was 5, but 3rd category appears correct
# all at once
plot(mod, type = 'trace', which.items = c(12,30,32))
#--------------------
# possible to use more exploratory investigation with non-parametric kernel smoothing
library(KernSmoothIRT)
help(ksIRT)
# on scored data
ks_scored <- ksIRT(data, key=1, format=1)
plot(ks_scored, item=c(1,2,12,30,32))
# on hybrid data
key2 <- key
key2[-c(12,30,32)] <- 1
ks <- ksIRT(na.omit(newdata), key=key2, format=1)
plot(ks, item=c(1,12,30,32))
# fix the key according to what mirt suggested
key2[32] <- 3
ks <- ksIRT(na.omit(newdata), key=key2, format=1)
plot(ks, item=32)
ks <- ksIRT(key2binary(SAT12, key2), key=1, format=1)
plot(ks, item=32)
# Did TESTFACT provide the wrong SAT12 key for item 32? Does item 12 may contain 2 correct answers?
# Worth checking out the original items that were administered for clues.
###########
# same result when considering multidimensional model (run if you don't believe me!)
# newmodel to let suspicious items load on both factors, for diagnostic purposes
newmodel <- mirt.model('F1 = 1-32
F2 = 4,7,9-15,21,23-25,27,29,30,32
COV = F1*F2')
mod2 <- mirt(newdata, newmodel, itemtype = itemtype, guess = .1)
coef(mod2) #again, cagetory 3 (i.e., ak2) is the highest for item 32, indicating it is the most correct
###########
#use corrected key for item 32
key[32] <- 3
data <- key2binary(SAT12, key)
newmod <- mirt(data, 1, guess = .1)
coef(newmod)[['Item.32']]
summary(newmod, suppress = .2)
itemplot(newmod, 32)
#--------------------------------------------------------------------------
## Diagnostics
#### How well do the above models fit the data?
# pretty well actually
help(M2)
M2(mod1)
M2(mod2)
fscores(mod1, method = 'EAPsum')
# item fit statistics
help(itemfit)
(ii <- itemfit(mod1))
p.adjust(ii$p.S_X2, 'fdr')
(ii <- itemfit(newmod))
p.adjust(ii$p.S_X2, 'fdr')
# S-X2 total score tables
SX2_tabs <- itemfit(newmod, S_X2.tables = TRUE)
SX2_tabs$O.org[[1]]
SX2_tabs$O[[1]]
SX2_tabs$E[[1]]
# item residual covaration
help('residuals-method')
residuals(newmod)
residuals(newmod, tables=TRUE)
mod_Rasch <- mirt(data, 1, 'Rasch')
anova(mod_Rasch, mod1) #expect a bad fit given how much the slopes vary
M2(mod_Rasch)
itemfit(mod_Rasch, method = 'ML')
itemfit(mod_Rasch, method = 'ML', empirical.plot = 2, empirical.CI = .99)
|
1fd49432fb25e7760c1cb09be0d6446ffda4c951
|
ecbdf787eba311c4629b2ce230818af98289dc46
|
/cachematrix.R
|
3f1de0373422209656d5a59e767c2b178086042b
|
[] |
no_license
|
michelle-marie/ProgrammingAssignment2
|
0d3e2b93cded837c6a8a59a1d25ee438a28afa10
|
61af4a7d58e5dcda6e8ca01fe8c31cfb179977b3
|
refs/heads/master
| 2021-01-15T23:12:15.579833
| 2015-08-23T16:21:13
| 2015-08-23T16:21:13
| 41,165,093
| 0
| 0
| null | 2015-08-21T16:07:43
| 2015-08-21T16:07:43
| null |
UTF-8
|
R
| false
| false
| 1,060
|
r
|
cachematrix.R
|
## the makeCacheMatrix function is used to create a matrix
## the cacheSolve function is used to return the inverse of that matrix
## the makeCacheMatrix function sets the value of the matrix, gets the value of the matrix, sets the value of the inverse, and gets the value of the inverse
makeCacheMatrix <- function(x = matrix) {
## main function
m <- NULL
set <- function(y) { ## set is a function that changes the matrix stored in the main function
x <<- y
m <<- NULL
}
get <- function() x
setmatrix <- function(solve)
m <<- solve
getmatrix <- function() m
list(set = set, get = get,
setmatrix = setmatrix,
getmatrix = getmatrix)
}
## Returns a matrix that is the inverse of 'x'
cacheSolve <- function(x, ...)
{
m <- x$getmatrix()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setmatrix(m)
m
}
|
b9cdd97051593aa770e91ab8e484c290876407d9
|
ddb0e8f036a03a58d5db82391ff438c9c944c92e
|
/man/get_heroes.Rd
|
d768431adb2b17e7b2eaf1e3e21a43d10bde709a
|
[] |
no_license
|
rosdyana/ROpenDota
|
7e38ec628acd77285da6c9dad80999a93c4514cf
|
00c75deae31f9dbd1d9bfb5b7781b362326e6a18
|
refs/heads/master
| 2021-01-20T08:30:44.660653
| 2018-10-31T03:49:13
| 2018-10-31T03:49:13
| 90,157,568
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 303
|
rd
|
get_heroes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_heroes.R
\name{get_heroes}
\alias{get_heroes}
\title{Get total status for specific player id}
\usage{
get_heroes()
}
\description{
Get total status for specific player id
}
\examples{
\dontrun{
heroes <- get_heroes()
}
}
|
d0186f282f8425a4b377c6d7cfaca8320e48ab5d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rcdd/examples/lpcdd.Rd.R
|
9c268e727e145b6764b28e26511990d94a383490
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 767
|
r
|
lpcdd.Rd.R
|
library(rcdd)
### Name: lpcdd
### Title: linear programming with exact arithmetic
### Aliases: lpcdd
### Keywords: misc
### ** Examples
# first two rows are inequalities, second two equalities
hrep <- rbind(c("0", "0", "1", "1", "0", "0"),
c("0", "0", "0", "2", "0", "0"),
c("1", "3", "0", "-1", "0", "0"),
c("1", "9/2", "0", "0", "-1", "-1"))
a <- c("2", "3/5", "0", "0")
lpcdd(hrep, a)
# primal inconsistent problem
hrep <- rbind(c("0", "0", "1", "0"),
c("0", "0", "0", "1"),
c("0", "-2", "-1", "-1"))
a <- c("1", "1")
lpcdd(hrep, a)
# dual inconsistent problem
hrep <- rbind(c("0", "0", "1", "0"),
c("0", "0", "0", "1"))
a <- c("1", "1")
lpcdd(hrep, a, minimize = FALSE)
|
b87723608a81d285904d97108b57b05ab183e7e4
|
4a84b2982614109deb230cfb6f9513a9dc2516f1
|
/old:unusedRscripts/lecture10.R
|
2aa2c21e5bcfbc188392bda54e65cda719ae82a3
|
[] |
no_license
|
M0nica/topic-detection
|
42f29fde2804b821de71abcef58e904ee062bf11
|
f0d5c8e6c8b307d58a460608b58f19d09ea5734d
|
refs/heads/master
| 2021-01-21T01:46:58.701808
| 2016-06-09T14:28:43
| 2016-06-09T14:28:43
| 55,653,334
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
lecture10.R
|
library(ggplot2)
df <- data.frame(x = c(1:100))
df$y <- 2 + 3 * df$x + rnorm(100, sd = 40)
#data has a x and y, aes define x and y variable - layers added to plot w/ pluses
#smoothing geometry -
p <- ggplot(data = df, aes(x = x, y = y)) + geom_smooth(method = "lm", se=TRUE, color="black", formula = y ~ x) + geom_point()
p
|
29a937efeb8b229e14c58f6eca8053c732170ded
|
3486b7a9d9a2035f053c748f9d94aa6898e3e3dd
|
/PoinsonousMushrooms_RuleLearners.R
|
e0eee965d000580d1cd6618733b7496de123f691
|
[] |
no_license
|
mhlaghari/portfolio
|
0a924f1beb71e006e8fc7118f47407b49b8da336
|
a1d4338ecc20624c6bf09443dd4f1b319092336a
|
refs/heads/main
| 2023-06-28T22:18:06.365827
| 2021-07-31T15:11:33
| 2021-07-31T15:11:33
| 376,900,793
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,567
|
r
|
PoinsonousMushrooms_RuleLearners.R
|
#Identifying Poisonous Mushrooms with Rule Learners
# Each year, many people fall ill and sometimes even die
# from ingesting poisonous wild mushrooms.
# Since many mushrooms are very similar to each other
# in appearance, occasionally even experienced mushroom
# gatherers are poisoned.
# Unlike the identification of harmful plants,
# such as a poison oak or poison ivy,
# there are no clear rules like "leaves of three,
# let them be" for identifying whether a wild mushroom
# is poisonous or edible. Complicating matters, many
# traditional rules such as "poisonous mushrooms are
# brightly colored" provide dangerous or misleading
# information. If simple, clear, and consistent rules
# were available for identifying poisonous mushrooms,
# they could save the lives of foragers.
#
# Lantz, Brett. Machine Learning with R: Expert techniques for predictive modeling, 3rd Edition (p. 158). Packt Publishing. Kindle Edition.
mushrooms <- read.csv('mushrooms.csv', stringsAsFactors = TRUE)
str(mushrooms)
str(mushrooms$veil_type)
View(mushrooms)
mushrooms$veil_type <- NULL
(mushrooms$type=='edible')
mushrooms$type<- factor(mushrooms$type, levels = c('e','p'), labels = c('edible','poisonous'))
table(mushrooms$type)
install.packages("OneR")
library(OneR)
mushroom_1R <- OneR(type ~ ., data = mushrooms)
mushroom_1R
mushroom_1R_pred <- predict(mushroom_1R, mushrooms)
table(actual = mushrooms$type, predicted = mushroom_1R_pred)
library(RWeka)
install.packages('RWeka')
mushroom_jrip <- JRip(type ~ ., data=mushrooms)
mushroom_jrip
-say
|
5d96e573b4234917f6c7487b3a7a95b90a4212bd
|
be6e0a5b11fe08b1ff89468821cf121bfbe34877
|
/run_analysis.R
|
a9a938fe1db5d7b5a9f7d117e20d806c4688857c
|
[] |
no_license
|
jguerra000/Coursera-GCLD
|
b5e374106071feb6048572638bd882ee6c7ef388
|
ff2154d0ae2de62465f0992b1c2259c22edc7cb5
|
refs/heads/master
| 2016-09-06T01:21:22.230891
| 2014-08-25T09:56:13
| 2014-08-25T09:56:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,982
|
r
|
run_analysis.R
|
#Solution to the project of Getting and Cleaning data subject
start.time <- Sys.time()
#Train data
X_train_data <- read.table("./UCI HAR Dataset//train//X_train.txt")
Y_train_data <- read.table("./UCI HAR Dataset//train//y_train.txt")
Subject_train_data <- read.table("./UCI HAR Dataset/train/subject_train.txt")
Body_acc_X_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//body_acc_x_train.txt")
Body_acc_Y_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//body_acc_y_train.txt")
Body_acc_Z_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//body_acc_z_train.txt")
Body_gyr_X_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//body_gyro_x_train.txt")
Body_gyr_Y_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//body_gyro_y_train.txt")
Body_gyr_Z_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//body_gyro_z_train.txt")
Total_acc_X_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//total_acc_x_train.txt")
Total_acc_Y_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//total_acc_y_train.txt")
Total_acc_Z_train_data <- read.table("./UCI HAR Dataset/train/Inertial Signals//total_acc_z_train.txt")
#Test data
X_test_data <- read.table("./UCI HAR Dataset//test//X_test.txt")
Y_test_data <- read.table("./UCI HAR Dataset//test//y_test.txt")
Subject_test_data <- read.table("./UCI HAR Dataset/test/subject_test.txt")
Body_acc_X_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//body_acc_x_test.txt")
Body_acc_Y_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//body_acc_y_test.txt")
Body_acc_Z_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//body_acc_z_test.txt")
Body_gyr_X_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//body_gyro_x_test.txt")
Body_gyr_Y_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//body_gyro_y_test.txt")
Body_gyr_Z_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//body_gyro_z_test.txt")
Total_acc_X_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//total_acc_x_test.txt")
Total_acc_Y_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//total_acc_y_test.txt")
Total_acc_Z_test_data <- read.table("./UCI HAR Dataset/test/Inertial Signals//total_acc_z_test.txt")
tictoc <- Sys.time() - start.time
## Put all train data together
train_data <- cbind(Y_train_data, Subject_train_data, X_train_data)
## Put all test data together
test_data <- cbind(Y_test_data, Subject_test_data, X_test_data)
## Put train and test data together
all_data <- rbind (train_data, test_data)
## Select features of mean or std
features_data <- read.table("./UCI HAR Dataset//features.txt")
features_selected <- features_data[grepl( "mean()", features_data[,2]) | grepl( "std()", features_data[,2]),]
##Extract only the selected features data
all_selected_data <- all_data[,c(1,2, 2 +features_selected[,1])]
## Translate code of activities by their name
all_selected_data[(all_selected_data[,1] == 1),1] <- "WALKING"
all_selected_data[(all_selected_data[,1] == 2),1] <- "WALKING_UPSTAIRS"
all_selected_data[(all_selected_data[,1] == 3),1] <- "WALKING_DOWNSTAIRS"
all_selected_data[(all_selected_data[,1] == 4),1] <- "SITTING"
all_selected_data[(all_selected_data[,1] == 5),1] <- "STANDING"
all_selected_data[(all_selected_data[,1] == 6),1] <- "LAYING"
# Appropriately labels the data set with descriptive variable names.
# Using the names included in features.txt
colnames(all_selected_data) <- c("Activity", "Subject", as.character(features_selected[,2]))
# Creates a second, independent tidy data set with the average of each variable
# for each activity and each subject.
melted_data <- melt(all_selected_data, id = c("Activity", "Subject"))
clean_data <- dcast(melted_data, Activity + Subject ~ variable, mean)
write.table(clean_data, file = "./clean_data.txt", row.names = FALSE)
clean_data
|
3f836df2f98203c6fee019f7532307adb2af3ba3
|
6216f5a073bc205aa05206244bf4a5abcc0cdf20
|
/man/matW.Rd
|
113f088cffd82c4a9d87670762c37be254c40c13
|
[] |
no_license
|
cran/TAHMMAnnot
|
d94ee791c436d1cebb58c899566231407a4ab8ee
|
0a528c554d48d6ad0564ba5c99187a0b829ef0b7
|
refs/heads/master
| 2021-01-22T06:53:50.323937
| 2013-01-07T00:00:00
| 2013-01-07T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 666
|
rd
|
matW.Rd
|
\name{matW}
\alias{matW}
\title{Estimator of the dispersion matrix of a group k. }
\description{
Compute the product matrices for the estimator of the dispersion matrix of a group k.
}
\usage{
matW(x,k,mu)
}
\arguments{
\item{x }{The matrix (n*2) of the data. }
\item{k }{The number of the component. }
\item{mu }{The mean vector of the bidimensionnal Gaussian density. }
}
\details{
We compute only the product of matrices. It is not exactly the estimator of W because the product with tau is missing.
}
\value{
The product of matrices for the estimator of the dispersion matrix of a group k.
}
\keyword{internal}
|
ac8603d115d378358ad59776ce29aa78f128ce41
|
97f1e3e6e908a83489e4243268ba539316196176
|
/data-raw/mutli_component_image.R
|
b974986d9754385aa993f129ccf772fdfcb37e76
|
[
"Apache-2.0"
] |
permissive
|
ANTsX/ANTsRCore
|
1c3d1da3bea84859da7d18f54c34ae13d2af8619
|
8e234fd1363c0d618f9dc21c9566f3d5464655a2
|
refs/heads/master
| 2023-05-24T23:53:30.886217
| 2023-05-22T02:52:39
| 2023-05-22T02:52:39
| 83,897,912
| 8
| 22
| null | 2023-05-22T02:52:40
| 2017-03-04T14:09:48
|
C++
|
UTF-8
|
R
| false
| false
| 502
|
r
|
mutli_component_image.R
|
library(ANTsRCore)
set.seed(1234)
dims = c(30, 30)
n = prod(dims)
r <- runif( n, min = 0, max = 255)
g = runif( n, min = 0, max = 255)
b = runif( n, min = 0, max = 255)
sums = (r + g + b)
r = floor(r/sums * 255)
g = floor(g/sums * 255)
b = floor(b/sums * 255)
dim(r) <- dims
dim(g) = dims
dim(b) = dims
r <- as.antsImage(r)
g <- as.antsImage(g)
b <- as.antsImage(b)
multi_component_image = mergeChannels( list(r,g,b) )
antsImageWrite(multi_component_image, "inst/extdata/multi_component_image.nii.gz")
|
d46106df06bca845f91d2cd2d1f456ab3fb4a5bd
|
c4fa15f5f9b5859efe33857e5cce3878b01220de
|
/Occupancy_Mod_28.05.R
|
1656ca533bfb66e0d49e96b7e823453b4da8d0d8
|
[
"MIT"
] |
permissive
|
icorei/Occup-model
|
d27537671efaba841ec8c67f7f5cc88cc940f76a
|
21841e2ea42a455c5ad2a0448e9e8b5b9655d3a8
|
refs/heads/master
| 2022-12-06T13:01:17.044665
| 2020-08-25T08:08:07
| 2020-08-25T08:08:07
| 290,153,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,367
|
r
|
Occupancy_Mod_28.05.R
|
####################### sp ###################################
setwd("D:/PHD_statistic/Occupancy")
data.dir <- "~D:/PHD_statistic/Occupancy"
EventosGranSabana_mam <- read.csv2("D:/PHD_statistic/EventosGranSabana/Eventos_GS_mambird4.csv")
camerasGS <- read.csv2("D:/PHD_statistic/TODO/Cameras_short_2019.csv")
GS <- merge(EventosGranSabana_mam,camerasGS,by.x=c("bloque","periodo","camara"),
by.y=c("bloque","period","camera"),all.y=T)
tt <- table(paste(GS$bloque,GS$periodo,GS$camara),GS$species)
head(tt)
###2# data from camera traps dividied in weeks
require(chron)
fecha1 <-chron(dates.=as.character(camerasGS[,"fecha.act"]),
times.=as.character(camerasGS[,"hora.act"]),
format = c(dates = "y-m-d", times = "h:m:s"),)
dias.mes <-cumsum(c(0,31,28,31,30,31,30,31,31,30,31,30))
dayofyear <- as.numeric(as.character(years(fecha1))) +
((dias.mes[months(fecha1)]+as.numeric(days(fecha1)))/365)
table(cut(dayofyear,seq(1,365,16)))
fecha2 <-chron(dates.=as.character(camerasGS[,"fecha.desact.real"]),
times.=as.character(camerasGS[,"hora.desact.real"]),
format = c(dates = "y-m-d", times = "h:m:s"),)
dayofyear <- dias.mes[months(fecha2)]+as.numeric(days(fecha2))
cut(dayofyear,seq(1,365,16))
periodos <- paste(rep(c(2016,2015),c(14,9)),levels(cut(dayofyear,c(seq(1,365,16),366),include.lowest=T)))
periodos <- periodos[c(15:23,1:14)]
periodo.inst <- paste(as.character(years(fecha1)),
cut(dias.mes[months(fecha1)]+as.numeric(days(fecha1)),c(seq(1,365,16),366),include.lowest=T))
table(factor(periodo.inst,levels=periodos[3:16]))
camerasGS$inst <- factor(periodo.inst,levels=periodos[3:16])
periodo.des <- paste(as.character(years(fecha2)),
cut(dias.mes[months(fecha2)]+as.numeric(days(fecha2)),c(seq(1,365,16),366),include.lowest=T))
camerasGS$des <- factor(periodo.des,levels=periodos[3:16])
table(factor(periodo.des,levels=periodos[3:16]))
EventosGranSabana_mam$cdg <- paste(EventosGranSabana_mam$bloque,EventosGranSabana_mam$periodo,EventosGranSabana_mam$camara)
EventosGranSabana_mam$fecha <- with(EventosGranSabana_mam,
chron(dates.=sprintf("%s/%s/%s",mes,dia,ano),times.=EventosGranSabana_mam$hora.ini),
format = c(dates = "m-d-Y", times = "h:m:s"))
EventosGranSabana_mam$periodo.evento <- paste(as.character(years(EventosGranSabana_mam$fecha)),
cut(dias.mes[months(EventosGranSabana_mam$fecha)]+as.numeric(days(EventosGranSabana_mam$fecha)),c(seq(1,365,16),366),include.lowest=T))
#require(chron)
fecha1 <-chron(dates.=as.character(camerasGS[,"fecha.act"]),
times.=as.character(camerasGS[,"hora.act"]),
format = c(dates = "y-m-d", times = "h:m:s"),)
dias.mes <-cumsum(c(0,31,28,31,30,31,30,31,31,30,31,30))
dayofyear <- as.numeric(as.character(years(fecha1))) +
((dias.mes[months(fecha1)]+as.numeric(days(fecha1)))/365)
table(cut(dayofyear,seq(1,365,16)))
fecha2 <-chron(dates.=as.character(camerasGS[,"fecha.desact.real"]),
times.=as.character(camerasGS[,"hora.desact.real"]),
format = c(dates = "y-m-d", times = "h:m:s"),)
dayofyear <- dias.mes[months(fecha2)]+as.numeric(days(fecha2))
cut(dayofyear,seq(1,365,16))
periodos <- paste(rep(c(2016,2015),c(14,9)),levels(cut(dayofyear,c(seq(1,365,16),366),include.lowest=T)))
periodos <- periodos[c(15:23,1:14)]
periodo.inst <- paste(as.character(years(fecha1)),
cut(dias.mes[months(fecha1)]+as.numeric(days(fecha1)),c(seq(1,365,16),366),include.lowest=T))
table(factor(periodo.inst,levels=periodos[3:16]))
camerasGS$inst <- factor(periodo.inst,levels=periodos[3:16])
periodo.des <- paste(as.character(years(fecha2)),
cut(dias.mes[months(fecha2)]+as.numeric(days(fecha2)),c(seq(1,365,16),366),include.lowest=T))
camerasGS$des <- factor(periodo.des,levels=periodos[3:16])
table(factor(periodo.des,levels=periodos[3:16]))
EventosGranSabana_mam$cdg <- paste(EventosGranSabana_mam$bloque,EventosGranSabana_mam$periodo,EventosGranSabana_mam$camara)
EventosGranSabana_mam$fecha <- with(EventosGranSabana_mam,
chron(dates.=sprintf("%s/%s/%s",mes,dia,ano),times.=EventosGranSabana_mam$hora.ini),
format = c(dates = "m-d-Y", times = "h:m:s"))
EventosGranSabana_mam$periodo.evento <- paste(as.character(years(EventosGranSabana_mam$fecha)),
cut(dias.mes[months(EventosGranSabana_mam$fecha)]+as.numeric(days(EventosGranSabana_mam$fecha)),c(seq(1,365,16),366),include.lowest=T))
############## elecion de especie
with(subset(EventosGranSabana_mam,species %in% "N.nasua"),table(cdg,periodo.evento))
slc <- subset(EventosGranSabana_mam,species %in% "N.nasua")
mtz <- matrix(NA,nrow=nrow(camerasGS),ncol=14)
for (k in 1:nrow(mtz)) {
mtz[k,as.numeric(camerasGS$inst[k]):as.numeric(camerasGS$des[k])] <- 0
}
rownames(mtz) <- paste(camerasGS$bloque,camerasGS$period,camerasGS$camera)
colnames(mtz) <- periodos[3:16]
for (k in 1:nrow(slc)) {
mtz[slc$cdg[k],slc$periodo.evento[k]]<- 1
}
### Model of occupancy of MacKenzie
## grafica
require(unmarked)
UMF <- unmarkedFrameOccu(mtz)
plot(UMF, panels=4)
fm00 <- occu(~ 1 ~ 1, UMF)
ranef(fm00)
# psi solo
logit.psi <- beta[1]
psi <- exp(logit.psi) / (1 + exp(logit.psi))
psi
##3## VARIABLES ##
##bosque
load("D:/PROJECTS/Gran Sabana/Metodologia/redisenomuestral/rasters_GS.rda")
vbsq <- raster("D:/PROJECTS/Gran Sabana/Metodologia/GS_studyarea_Izza/TREE/MOD44B.2010.GS.TREE.tif")
camerasGS$bosque <- extract(vbsq,camerasGS[,c("lon","lat")])
##fuego
camerasGS$fuego <- (camerasGS$fuego.celda)
##caza
camerasGS$caza <- (camerasGS$caza.celda2)
##conuco
camerasGS$conuco <- (camerasGS$conuco.dist.m)
## Covariables sitio
covar3 <- data.frame(bosque=camerasGS$bosque,
caza=camerasGS$caza.celda2,
fuego=camerasGS$fuego.celda,
conuco=camerasGS$ln.conuco.dis,
bufer=camerasGS$ln.buf.frag,
com=camerasGS$ln.comun)
siteCovs(UMF) <- covar3
##UMF@siteCovs$bosque <- scale(UMF@siteCovs$bosque, center = TRUE, scale = TRUE)
##modelos
fm00 <- occu(~ 1 ~ 1, UMF)
#fm0b <- occu(~ 1 ~ bosque, UMF)
#fm0f <- occu(~ 1 ~ fuego, UMF)
fm0c <- occu(~ 1 ~ caza, UMF)
fm0m <- occu(~ 1 ~ com, UMF)
fm0u <- occu(~ 1 ~ bufer, UMF)
fm0n <- occu(~ 1 ~ conuco, UMF)
fm.uc <- occu(~ 1 ~ bufer+caza, UMF)
#fm.uf <- occu(~ 1 ~ bufer+fuego, UMF)
fm.un <- occu(~ 1 ~ bufer+conuco, UMF)
fm.um <- occu(~ 1 ~ bufer+com, UMF)
fmm0 <- occu(~ com ~ 1, UMF)
fmmu <- occu(~ com ~ bufer, UMF)
#fmcf <- occu(~ caza ~ fuego, UMF)
fmList <- fitList(Null=fm00,
.caza=fm0c, .conuco=fm0n, .com=fm0m, .bufer=fm0u,
.bufercaza=fm.uc, .buferconuco=fm.un, com.bufer=fmmu)
# Model selection
modSel(fmList, nullmod="Null")
# Extract coefficients and standard errors
coef(fmList)
SE(fmList)
|
6af42f51c4774ebd1bf7dbbe57d390d78bc296d5
|
9da7ff9073e6775a34e9432ae33f6dac36891a42
|
/R/subway.R
|
a927ba1c815eb9d751c9679269cbffe27a005b80
|
[] |
no_license
|
eik4862/Subway
|
9f2cb24e611769226275aa209589042c257c42c0
|
64c672132a8619afe30338942bca560d5646a6c7
|
refs/heads/master
| 2023-08-15T05:16:10.235607
| 2020-05-25T17:35:02
| 2020-05-25T17:35:02
| 266,563,864
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 832
|
r
|
subway.R
|
#' Passenger count data of Seoul subway
#'
#' A dataset containing the number of passengers who get on and off
#' from Seoul subway.
#' Data is collected automatically by credit card DB system at each station
#' and recorded daily from 1/1/2015 to 30/4/2020.
#'
#' @format A data frame with 1111007 rows and 6 variables:
#' \describe{
#' \item{date}{recorded date}
#' \item{code}{station code}
#' \item{on}{number of passengers who get on the subway}
#' \item{off}{number of passengers who get off the subway}
#' \item{total}{total number of passengers who get on and off the subway, sum of \code{on} and \code{off}}
#' \item{diff}{difference in the number of passengers who get on and off the subway, \code{on} - \code{off}}
#' }
#' @source \url{https://data.seoul.go.kr/dataList/OA-12914/S/1/datasetView.do#}
"subway"
|
d30fa5325b64b6578aa0320aaac0645658eb5e8d
|
04db7b70be3aa96405647e51b0da7119d010ff8c
|
/packages/deloitteUtility/R/create_division.R
|
b9eef11385a43995871efe81fa5cffe4a8896fbd
|
[] |
no_license
|
jeff1evesque/interview-deloitte
|
529c88ee30b5b9bfebc12448cc4ff385aaaca9de
|
d3bc833e5033f78d758da5a8b7b8864903124d79
|
refs/heads/master
| 2020-03-19T04:21:20.150302
| 2018-06-07T22:43:02
| 2018-06-07T22:43:02
| 135,819,176
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
create_division.R
|
##
## strReverse.R, reverses a string.
##
## Note: https://stackoverflow.com/a/13613183
##
create_division <- function(vector) {
return(
lapply(
vector,
function (x) {
if (!as.numeric(trimws(x))) {
return(NA)
} else if (x < 14) {
return(1)
} else if (x >= 14 & x < 19) {
return(2)
} else {
return(x%/%10)
}
}
)
)
}
|
b82659b66631ff3d9c0039c66b052b5bb5554c30
|
8c1333fb9fbaac299285dfdad34236ffdac6f839
|
/financial-analytics/ch1/solution-04.R
|
9be7e4c6ee165935fd367e1721e06baa7adaa38f
|
[
"MIT"
] |
permissive
|
cassiopagnoncelli/datacamp-courses
|
86b4c2a6d19918fc7c6bbf12c51966ad6aa40b07
|
d05b74a1e42b119efbbf74da3dfcf71569c8ec85
|
refs/heads/master
| 2021-07-15T03:24:50.629181
| 2020-06-07T04:44:58
| 2020-06-07T04:44:58
| 138,947,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 467
|
r
|
solution-04.R
|
# Calculate income statement
income_statement <- assumptions
income_statement$revenue <- income_statement$unit_sales * price_per_unit
income_statement$expenses <- income_statement$unit_sales * (cogs_per_unit + labor_per_unit)
income_statement$earnings <- income_statement$revenue - income_statement$expenses - income_statement$depreciation
# Summarize cumulative earnings
sum(income_statement$earnings)
sum(income_statement$earnings) / sum(income_statement$revenue)
|
2dcacb968cd320bf4ef38f42c55beec68b9106b5
|
73df6be0fec5bac3ec5ca552438568a663e769e3
|
/man/is.nan.data.frame.Rd
|
4061834754d34941f85c0256e8c1dc6f4aaab6e3
|
[] |
no_license
|
kasaha1/kasaBasicFunctions
|
1fecefd905520ce5e0ad7de07d8f8c66fda9e6d3
|
d572d2953842cbc82d98ad2730a18dd90c7a1dd7
|
refs/heads/master
| 2022-07-13T09:37:40.737163
| 2022-06-27T05:18:33
| 2022-06-27T05:18:33
| 90,565,815
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 271
|
rd
|
is.nan.data.frame.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FunctionsForGenes.R
\name{is.nan.data.frame}
\alias{is.nan.data.frame}
\title{checkig NaN}
\usage{
\method{is.nan}{data.frame}(x)
}
\arguments{
\item{x}{datafram}
}
\description{
checkig NaN
}
|
cb19def928eb1486f873bc8620198935ed9a04af
|
86ec024e1424b69cc1d7d4470c66e5d1e7c52fdc
|
/man/schart_dist.Rd
|
ac7da41ea67ceda38af31003e268115873a189b1
|
[] |
no_license
|
wanglaboratory/SChart
|
e8e8ed79bb66b3330e64b43c168667d9bad92ea8
|
7e0ebfe2f57616f810dd739fd0aed3ff641788e5
|
refs/heads/main
| 2023-07-02T19:11:21.296733
| 2021-08-10T02:42:47
| 2021-08-10T02:42:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,138
|
rd
|
schart_dist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schart.R
\name{schart_dist}
\alias{schart_dist}
\title{Title Calculate the distance between sc and st}
\usage{
schart_dist(
st_sc_int,
int_assay = "traint",
reduction = "pca",
intp = T,
intp_pnt = 10000,
intp_lin = F,
nPCs = 30,
ntree = 1000,
keep_model = T
)
}
\arguments{
\item{st_sc_int}{Seurat traint object}
\item{int_assay}{Name of integration assay}
\item{reduction}{Dimension reduction method used, usually pca}
\item{intp}{If TRUE, do interpolation}
\item{intp_pnt}{Interpolation point number}
\item{intp_lin}{If TRUE, use linear interpolation}
\item{nPCs}{Number of PCs used for SChart}
\item{ntree}{Number of trees in random forest}
\item{keep_model}{If TRUE, return the trained random forest model}
}
\value{
A list of 1. schart_distance matrix; 2. trained random forest model (optional)
}
\description{
Title Calculate the distance between sc and st
}
\examples{
dist_test <- schart_dist(st_sc_int=st_sc_int, int_assay='traint', reduction='pca', intp = T, intp_pnt=10000, intp_lin=F, nPCs=30, ntree=1000, keep_model=T)
}
|
367691d8fc1f22cad884d61b79f1da4eefa4d895
|
dd0694436304f9c6d4ac43a3e668e46131dfa2a5
|
/plot2.R
|
ea80f7c232207472cf6a54f3f2f01c9dc401ed15
|
[] |
no_license
|
mycheong/ExData_Plotting1
|
6d350ed9ce68fec536a750d1b2c889a233a083bc
|
0cb86064214069d14b38598b80873c5c876eb3a3
|
refs/heads/master
| 2021-01-22T12:18:45.497855
| 2015-12-09T16:53:02
| 2015-12-09T16:53:02
| 47,702,370
| 0
| 0
| null | 2015-12-09T16:06:18
| 2015-12-09T16:06:15
| null |
UTF-8
|
R
| false
| false
| 608
|
r
|
plot2.R
|
header <- read.csv("household_power_consumption.txt", header=T, sep=";", nrow=1)
header <- names(header)
y <- read.table("household_power_consumption.txt", sep=";", skip=66000, nrows=4000, na.strings = "?")
colnames(y)<- header
y$Date <- as.Date(y$Date, format="%d/%m/%Y")
y <- subset(y, y$Date>"2007-01-31" & y$Date<"2007-02-03")
xlabel <- c("Thu", "Fri", "Sat")
## Plot 2
##
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(y$Global_active_power, type="l", ylab="Active Global Power (kilowatts)", xaxt="n", xlab="")
axis(side=1, at =c(1, 1441, 2880), labels=xlabel)
dev.off()
|
e83d92af104c01fd20780d5918b4d7a4cce0cc59
|
fa331c18551d255f3be4ccfd253ad2febcae73ca
|
/pre-processing/step_xx_transform_by_GCP_vector.R
|
2689258e294b7b5b8a5bd84b7e9a680c84106c36
|
[] |
no_license
|
nailayasmin/safe_pc_tchad
|
eec511e131e04a14909dd7b8ffedbe6309f58b15
|
b02ba009236180074b8c58050bf9f53a4e0acb52
|
refs/heads/master
| 2020-03-23T05:00:32.603389
| 2017-09-22T09:57:51
| 2017-09-22T09:57:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,445
|
r
|
step_xx_transform_by_GCP_vector.R
|
####################################################################################
####### Object: Shift imagery by vectorial lines
####### Author: remi.dannunzio@fao.org
####### Update: 2017/09/04
####################################################################################
##########################################################################################
#### Shift is VISUALLY ASSESSED (GCP in QGIS)
# #################### DETERMINE DATASET DIR FOR EACH SAME DATE ACQUISITIONS
# qb04_east_dir <- paste0(rawimgdir,"GSI_MDA/056880814010_01/056880814010_01_P001_PSH/")
# qb04_west_dir <- paste0(rawimgdir,"GSI_MDA/056880814010_01/056880814010_01_P002_PSH/")
#
# #################### MERGE SAME DATES ACQUISITIONS INTO MOSAICS
# system(sprintf("gdal_merge.py -o %s -co COMPRESS=LZW -co BIGTIFF=YES -v %s",
# paste0(procimgdir,"tmp_aoi1_2004_east.tif"),
# paste0(qb04_east_dir,"*.TIF")
# ))
#
# #################### COMPRESS - EAST
# system(sprintf("gdal_translate -co COMPRESS=LZW -ot Byte -co BIGTIFF=YES %s %s",
# paste0(procimgdir,"tmp_aoi1_2004_east.tif"),
# paste0(procimgdir,"aoi1_2004_east.tif")
# ))
#
# #################### REPROJECT IN UTM - EAST
# system(sprintf("gdalwarp -t_srs EPSG:32633 -co COMPRESS=LZW %s %s",
# paste0(procimgdir,"aoi1_2004_east.tif"),
# paste0(procimgdir,"aoi1_2004_east_utm.tif")
# ))
#
# #################### MERGE SAME DATES ACQUISITIONS INTO MOSAICS - WEST
# system(sprintf("gdal_merge.py -o %s -co COMPRESS=LZW -co BIGTIFF=YES -v %s",
# paste0(procimgdir,"tmp_aoi1_2004_west.tif"),
# paste0(qb04_west_dir,"*.TIF")
# ))
#
# #################### COMPRESS - WEST
# system(sprintf("gdal_translate -co COMPRESS=LZW -ot Byte -co BIGTIFF=YES %s %s",
# paste0(procimgdir,"tmp_aoi1_2004_west.tif"),
# paste0(procimgdir,"aoi1_2004_west.tif")
# ))
#
# #################### REPROJECT IN UTM - WEST
# system(sprintf("gdalwarp -t_srs EPSG:32633 -co COMPRESS=LZW %s %s",
# paste0(procimgdir,"aoi1_2004_west.tif"),
# paste0(procimgdir,"aoi1_2004_west_utm.tif")
# ))
#
# #################### CLEAN TMP FILES
# system(sprintf("rm -r %s",
# paste0(procimgdir,"tmp_aoi1_2004_*.tif")
# ))
#################### READ SHIFT FILE AS SHAPEFILE
#################### READ SHIFT FILE AS SHAPEFILE
# GCP_lines.shp holds one line feature per ground-control-point pair: the
# line runs from the point in the 2004 image (origin) to the matching point
# in the 2016 SPOT reference (destination).  The "image" attribute tags each
# line with the mosaic block ("east"/"west") it belongs to.
shp_shift <- readOGR(paste0(shift_dir,"GCP_lines.shp"),"GCP_lines")
##### Run for one block first, to check all runs fine
block <- "west"
#################### Loop through blocks of image
for(block in c("west","east")){
### Select only shifts for that block
shift <- shp_shift[shp_shift@data$image == block,]
nb_vec <- length(shift)
### Initialize shifting data.frame and translate shapefile into "origin >> destination" coordinates set
v <- as.data.frame(matrix(nrow = 0,ncol=4))
for(i in 1:nb_vec){
# First coordinate of each line = origin point, second = destination point.
line <- shift[i,]@lines[[1]]@Lines[[1]]@coords
start_x <- line[1,1]
start_y <- line[1,2]
stop_x <- line[2,1]
stop_y <- line[2,2]
v <- rbind(v,c(start_x,start_y,stop_x,stop_y))
}
names(v) <- c("start_x","start_y","stop_x","stop_y")
#################### Check that the GCP points are correct
# Quick sanity plot of the shift vectors; an outlier here usually means a
# mis-digitised GCP line.
shift_x <- v[,1]-v[,3]
shift_y <- v[,2]-v[,4]
plot(shift_x,shift_y)
#################### Find a display for 2 times the number of available vectors
# Two panels per GCP (2004 chip + 2016 reference chip), laid out on a grid
# derived from the prime factorisation of 2*nb_vec.
# NOTE(review): if 2*nb_vec is prime, p[1:(length(p)-1)] is empty and the
# layout degenerates - confirm nb_vec values used in practice.
p <- primeFactors(nb_vec*2)
lines <- prod(p[1:(length(p)-1)])
cols <- p[length(p)]
dev.off()
par(mfrow = c(lines,cols))
par(mar=c(0,0,0,0))
#################### Plot each GCP pair point in a 100m square box
for(i in 1:nb_vec){
lp <- list()
# 100 m x 100 m extent centred on the midpoint of the GCP pair.
e<-extent((v[i,1]+v[i,3])/2-50,
(v[i,1]+v[i,3])/2+50,
(v[i,2]+v[i,4])/2-50,
(v[i,2]+v[i,4])/2+50)
poly <- Polygons(list(Polygon(cbind(
c(e@xmin,e@xmin,e@xmax,e@xmax,e@xmin),
c(e@ymin,e@ymax,e@ymax,e@ymin,e@ymin))
)),"box")
lp <- append(lp,list(poly))
## Transform the list into a SPDF PRIMER ERROR
box <-SpatialPolygonsDataFrame(
SpatialPolygons(lp,1:length(lp)),
data.frame("box"),
match.ID = F
)
# False-colour (NIR/red/green) chip from the 2004 mosaic, with origin
# (yellow) and destination (green) points overlaid.
rasname <- paste0(procimgdir,"aoi1_2004_",block,"_utm.tif")
nir <- crop(raster(rasname,4),box)
grn <- crop(raster(rasname,2),box)
red <- crop(raster(rasname,1),box)
stack <- stack(nir,red,grn)
plot(box)
plotRGB(stack,stretch="hist",add=T)
points(v[i,1],v[i,2],col="yellow")
points(v[i,3],v[i,4],col="green")
# Same chip from the 2016 SPOT reference for visual comparison.
rasname <- paste0(procimgdir,"aoi1_2016_spot.TIF")
nir <- crop(raster(rasname,4),box)
grn <- crop(raster(rasname,2),box)
red <- crop(raster(rasname,1),box)
stack <- stack(nir,red,grn)
plot(box)
plotRGB(stack,stretch="hist",add=T)
points(v[i,1],v[i,2],col="yellow")
points(v[i,3],v[i,4],col="green")
}
#################### Generate gdaltransform equation from all initial points
# Shell snippet "(echo x1 y1; echo x2 y2; ...)" piped into gdaltransform -i
# to convert the origin map coordinates into pixel/line coordinates.
equ_1 <- paste0("(",
paste0(
lapply(1:nb_vec,function(i){paste("echo",v[i,1],v[i,2])}),
collapse = ";"),
")"
)
#################### COMPUTE LOCAL COORDINATES OF ORIGIN POINTS
system(sprintf("%s | gdaltransform -i %s > %s",
equ_1,
paste0(procimgdir,"aoi1_2004_",block,"_utm.tif"),
paste0(procimgdir,"coord_",block,"_to_spot.txt")
))
local <- read.table(paste0(procimgdir,"coord_",block,"_to_spot.txt"))
#################### Generate gdal_translate equation from all local initial points to final points
# One "-gcp pixel line map_x map_y" clause per control point.
equ_2 <- paste("-gcp",
lapply(1:nb_vec,function(i){paste(local[i,1],local[i,2],v[i,3],v[i,4],sep=" ")}),
collapse = " "
)
#################### TRANSLATE ORIGIN IMG
system(sprintf("gdal_translate %s %s %s",
equ_2,
paste0(procimgdir,"aoi1_2004_",block,"_utm.tif"),
paste0(procimgdir,"tmp_aoi1_2004_",block,"_utm_shift.tif")
))
#################### FINAL REWARP
system(sprintf("gdalwarp -r bilinear -t_srs EPSG:32633 %s %s",
paste0(procimgdir,"tmp_aoi1_2004_",block,"_utm_shift.tif"),
paste0(procimgdir,"aoi1_2004_",block,"_utm_shift.tif")))
}
#################### Clean
system(sprintf("rm -r %s",
paste0(procimgdir,"tmp_aoi1_2004_*.tif")
))
|
3d227fd26890f8ecded6b9618140301699140267
|
4f28f7befb82c29503ba119205b6cae124ebb98b
|
/Shiny_Functions_v1.R
|
84b48bc285334082f1a9949be68ef14acd1c9136
|
[] |
no_license
|
gmw12/shiny_PD
|
45119d65c3c8d26d8c803b3da583e69b6c6c26bc
|
fd0cf94e65786e769a305657eb3623f7a0188bec
|
refs/heads/master
| 2023-08-05T08:57:29.328670
| 2023-07-20T18:04:58
| 2023-07-20T18:04:58
| 241,447,255
| 2
| 2
| null | 2023-05-19T14:40:42
| 2020-02-18T19:21:03
|
R
|
UTF-8
|
R
| false
| false
| 821
|
r
|
Shiny_Functions_v1.R
|
# Load every Shiny helper module used by the app.
cat(file=stderr(), "load Shiny functions", "\n")

# Source each "Shiny_<module>_v1.R" file, preserving the original order.
# local() keeps the helper vector out of the global environment; source()
# itself still evaluates each file in the global environment (its default).
local({
  modules <- c(
    "Startup", "UpdateWidgets", "Observers", "Setup", "Filter",
    "Transform", "Stats", "Norm", "Impute", "Rollup", "Plots",
    "Preprocess", "File", "QC", "Hide", "Render", "Overview",
    "MotifX", "Wiki", "ViSEAGO", "Pathway", "String", "MailMerge",
    "TMTSPQC", "Interactive", "MVA", "Tables"
  )
  for (m in modules) {
    source(paste0("Shiny_", m, "_v1.R"))
  }
})
|
04cbb2cf745b0160305f8b01e6b5b443aa760c6d
|
fff0cb76ba6061982d0bd1f44a3739512162ee7a
|
/man/COUSCOus-internal.Rd
|
c03b672584583e7a607b4fd6c2c02d49b3246435
|
[] |
no_license
|
cran/COUSCOus
|
28378f906ddd936e2ec9ea4e7b85ead4737a03c5
|
78b56b63445cd54cf69a631401c41aeb53f694b0
|
refs/heads/master
| 2021-01-10T13:17:11.814006
| 2016-02-28T16:26:52
| 2016-02-28T16:26:52
| 52,727,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
rd
|
COUSCOus-internal.Rd
|
\name{COUSCOus-internal}
\title{Internal COUSCOus functions}
\alias{generate.rho}
\alias{precision}
\alias{prediction}
\alias{preprocessing}
\alias{shrink.S}
\description{Internal COUSCOus functions}
\author{
Reda Rawi \cr
Maintainers: Reda Rawi <redarawi@gmx.de>
}
\details{These are not intended for usage. Please refer to \code{COUSCOus()}
}
|
5a770eb3c534bc39ce8251fb249d9761b8372ece
|
35e702005356610d62e60fcf11efdaaca770398e
|
/checkWGS.R
|
9a09cd23a4f2df77c45be4027e90090b3b5a6cc7
|
[] |
no_license
|
jingg46/Research-oral-cleft
|
744d1a23ca59f4aec4acacd8eaf52961b49e8f83
|
db42552e66cad11ec3cebb220858800a866507fb
|
refs/heads/master
| 2020-03-11T03:38:46.903604
| 2018-07-21T18:29:59
| 2018-07-21T18:29:59
| 129,754,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,280
|
r
|
checkWGS.R
|
## Check Hardy-Weinberg equilibrium for variants in the colTDT output from WGS.
## Input: chr8_geno_filt, a genotype matrix coded 0/1/2 with each trio stored
## as three consecutive rows (the first two rows of each trio are the parents).
load("/dcl01/beaty/data/gmkf/euro/vcfs/filtered/chr8_geno_filt_12_06_2017.rda")
## which rows of the genotype matrix contain the parents?
parRows<-c(seq(1, nrow(chr8_geno_filt), by=3), seq(2, nrow(chr8_geno_filt), by=3))
## Check minor-allele frequencies and fraction of missing calls among parents.
mafs<-colSums(chr8_geno_filt[parRows, ], na.rm=TRUE)/(2*colSums(!is.na(chr8_geno_filt[parRows,])))
fracMissing<-colSums(is.na(chr8_geno_filt[parRows,]))/length(parRows)
summary(fracMissing)
sum(fracMissing > 0.05)
## Calculate Hardy-Weinberg equilibrium p-value (1-df chi-square goodness of fit).
## Allele frequencies for non-ref (p) and ref (q) alleles.
p<-mafs
q<-1-mafs
n<-colSums(!is.na(chr8_geno_filt[parRows,]))
## Observed hom non-ref, het, and hom-ref counts.
oaa<-colSums(chr8_geno_filt[parRows,] == 2, na.rm=TRUE)
oAa<-colSums(chr8_geno_filt[parRows,] == 1, na.rm=TRUE)
oAA<-colSums(chr8_geno_filt[parRows,] == 0, na.rm=TRUE)
## Expected hom non-ref, het, and hom-ref counts under HWE.
eaa<-n*p^2
eAa<-2*n*p*q
eAA<-n*q^2
## Chi-square statistic and its UPPER-tail probability.
## BUGFIX: the original called pchisq(stat, df=1), which returns the lower
## tail (the CDF), not a p-value; "HWOut < 0.05" then flagged variants that
## agreed BEST with HWE.  lower.tail=FALSE gives the proper p-value,
## equivalent to 1 - pchisq(stat, df=1).
chisqStat <- (oaa-eaa)^2/eaa + (oAa-eAa)^2/eAa + (oAA-eAA)^2/eAA
HWOut<-pchisq(chisqStat, df=1, lower.tail=FALSE)
## conservatively check p < 0.05 (usually it would not be this strict, but you would use a Bonferroni corrected p-value)
## this does not affect many positions
sum(HWOut < 0.05)
|
08f6a938b67e24dc09eb4e351cd1ceff48bea8b0
|
4dcd8b0721ac81767e7c57fa5530b4cc71592ac6
|
/ex/week2/checkOutlet_sigma.R
|
9dbbfb45a5d24275c4ae7f0e2616c8d59c017cd3
|
[] |
no_license
|
gdwangh/80240372XDataMining
|
1ceffe6ad6898d5435084c82a652899971af415e
|
a5ef7c8e0c4808ed3ba7614c10785f693af41b34
|
refs/heads/master
| 2020-04-18T03:49:08.470187
| 2016-12-18T16:30:20
| 2016-12-18T16:30:20
| 67,460,595
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 767
|
r
|
checkOutlet_sigma.R
|
# Flag or clip extreme values using the k-sigma rule (e.g. 3-sigma or 6-sigma).
#
# Args:
#   data:     numeric vector to screen for extreme values
#   k:        half-width of the normal range, in standard deviations; values
#             outside [mean - k*sd, mean + k*sd] are treated as outliers
#   out_type: 'logic' -> return a logical vector (TRUE = outlier);
#             any other value -> return `data` with outliers clipped to the
#             nearest bound (mean - k*sd or mean + k*sd)
#
# Returns: a vector the same length as `data` (logical or numeric).
checkOutlet_sigma<-function(data, k, out_type='logic') {
  m <- mean(data)
  s <- sd(data)             # renamed from `sd` to avoid shadowing stats::sd
  lo <- m - k * s
  hi <- m + k * s
  if (out_type == 'logic') {
    (data < lo) | (data > hi)
  } else {
    # Vectorized clipping; equivalent to the original element-wise sapply
    # (x < lo -> lo; x > hi -> hi; else x) but much faster.
    pmin(pmax(data, lo), hi)
  }
}
|
ca865e30fb6b60f44eaffe95570b5f44c2b18835
|
b2f01680e78cf5b64c779425790e10b31ab82fc0
|
/A_plot/heatmap_3.R
|
8277b2566970b2dcdf38e4bf04c353040adffe01
|
[] |
no_license
|
stormlovetao/rnaSeq
|
fcdfca39434aaa26dc5ef17ed84ea99b8743f7ff
|
61b5051024f701a983acb52278fef4a9a03af1dc
|
refs/heads/master
| 2021-01-10T03:30:55.952913
| 2017-03-07T21:22:35
| 2017-03-07T21:22:35
| 47,236,130
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,323
|
r
|
heatmap_3.R
|
# Based on heatmap_2.R
# Columns are sorted by SMGEBTCH (genotype/expression batch ID: the batch in
# which DNA/RNA from a sample was analyzed) and annotated by SMGEBTCH.
library(pheatmap)
# Read the virus read-count table (viruses x samples) into a data frame.
table = read.csv(file = '/Users/Tao/BrainMicrobiome/Tables/gtex_table_subjects_reads_1_withSampleID.xls', sep = '\t', header = FALSE, stringsAsFactors = FALSE)
# Transform the data frame into a matrix.
# The colnames are composed by "subjectID brainRegion" (row 2 of the file);
# the rownames are virus names (first column, from row 4 down).
table_mat = data.matrix(table[4:nrow(table),2:ncol(table)])
rownames(table_mat) = table[4:nrow(table),1]
cnames = table[2,]
colnames(table_mat) = cnames[-1]
# Shrink the matrix so each row and column contains at least one count > 10.
table_mat_selected = table_mat[,apply(table_mat,MARGIN = 2, function(x) any(x>10))]
table_mat_selected = table_mat_selected[apply(table_mat_selected, MARGIN = 1, function(x) any(x>10)),]
# Cap counts above 50 at 60 so the legend can show a "50+" bin.
table_mat_selected[table_mat_selected>50] = 60
# Filter out phages Geobacillus virus E3 and Lactococcus Phage ASCC191.
table_mat_selected = table_mat_selected[rownames(table_mat_selected) != "Geobacillus virus E3",]
table_mat_selected = table_mat_selected[rownames(table_mat_selected) != "Lactococcus Phage ASCC191",]
# Row annotation (taxonomy) is currently disabled:
# annotation_table = read.table(file = '/Users/Tao/BrainMicrobiome/Tables/gtex_virusesAnno.txt',
#                                sep = '\t', header = TRUE, stringsAsFactors = FALSE)
# rownames(annotation_table) = annotation_table[,1]
# annotation_table = annotation_table[,-1]
# annotation_table[annotation_table==""] = 'NA'
# annotation_row = annotation_table
# table_mat_selected = table_mat_selected[rownames(annotation_row),] # sort by solid order
# Read GTEx samples' attributes and keep only the samples present in the matrix.
gtex_table = read.csv(file = '/Users/Tao/BrainMicrobiome/Tables/GTEx_Data_V6_Annotations_SampleAttributesDS.txt', sep = '\t', header = TRUE, stringsAsFactors = FALSE)
rownames(gtex_table) = gtex_table[,1]
gtex_table = gtex_table[colnames(table_mat_selected),] # rownames(gtex_table)==colnames(table_mat_selected)
# Column annotation = batch ID (empty strings become "NoValue").
gtex_table_smgebtch = gtex_table$SMGEBTCH
gtex_table_smgebtch[gtex_table_smgebtch==""] = "NoValue"
annotation_col = data.frame(SMGEBTCH = gtex_table_smgebtch)
rownames(annotation_col) = rownames(gtex_table) # or = colnames(table_mat_selected)
# Sort table_mat_selected columns by SMGEBTCH; pheatmap matches annotation rows by name.
table_mat_selected = table_mat_selected[,order(gtex_table_smgebtch)]
# Custom annotation colors are currently disabled (see bugfix note below):
# ann_colors = list(
#   Taxonomy1 = c('Herpesviridae, Family' = 'forestgreen', ...),
#   Taxonomy2 = c('Roseolovirus, Genus' = "red", ...)
# )
# Legend breaks and colors (6 bins: 0-10, 10-20, ..., 50+).
my_breaks = c(0,10,20,30,40,50,max(table_mat_selected))
my_palette = colorRampPalette(c('white', "cyan","green","orange","yellow","red"))(6)
# Re-filter after capping/row removal (kept from the original script).
table_mat_selected = table_mat_selected[,apply(table_mat_selected,MARGIN = 2, function(x) any(x>10))]
table_mat_selected = table_mat_selected[apply(table_mat_selected,MARGIN = 1, function(x) any(x>10)),]
# Draw Heatmap.
# BUGFIX: the original call passed annotation_colors = ann_colors, but
# ann_colors is only defined in the commented-out block above, so the script
# failed with "object 'ann_colors' not found".  Dropping the argument uses
# pheatmap's default annotation colors.
pheatmap(table_mat_selected,color = my_palette, breaks = my_breaks,
fontsize_row = 5,fontsize_col = 5, border_color = 'white',
legend_breaks = c(10,20,30,40,50,60), legend_labels = c(10,20,30,40,50,'50+'),
cluster_rows = TRUE, cluster_cols = FALSE,
annotation_col = annotation_col, fontsize = 5,
main = "GTEx(reads>10)")
|
827cad634236c5c75759cbcea01963b50022a149
|
4a7989f482a9fe85370e9014b836e7c985b4ff66
|
/R/gui.R
|
9548c593e35bda1ad63ee00a107bf253fb25549e
|
[] |
no_license
|
lucrainville/PhD
|
bb3e5afbbe57d58e558dee0c0519f5a20831f68d
|
408174bd81e9676bc8d479fc5e6836727644358c
|
refs/heads/master
| 2023-07-09T18:59:52.086535
| 2021-08-03T23:48:51
| 2021-08-03T23:48:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,034
|
r
|
gui.R
|
source('functions.R')
library(gWidgets)
library(gWidgetsRGtk2)
require(RGtk2)
options("guiToolkit"="RGtk2")
# Draw a 3D point cloud with rgl in a fixed 800x600 window.
# Reuses the current rgl device if one is open; `...` is forwarded to
# plot3d(), with axes labelled X/Y/Z.  The default light is popped and
# replaced by a white specular light.
# NOTE(review): rgl.set(rgl.dev.list()) assumes at most one open device;
# with several open devices rgl.set() receives a vector - confirm intended.
myplot3d = function(...){
if (length(rgl.dev.list()))
rgl.set(rgl.dev.list())
par3d(windowRect=c(10,10,800,600))
plot3d(...,xlab='X',ylab='Y',zlab='Z')
rgl.pop("lights")
light3d(specular="white")
}
# gWidgets handler: read an ASCII PLY file selected in a file-browse widget
# and store the resulting mesh in the global environment under the name
# given by `var`.  On failure (e.g. a binary PLY), the global is left NULL
# and the user is warned via a gmessage dialog.
#
#   h   - gWidgets handler argument list; svalue(h$obj) is the chosen path
#   var - name (string) of the global variable to assign the mesh to
#   ... - unused; accepted for gWidgets handler-signature compatibility
read_ply = function(h, var, ...)
{
tryCatch(
{
f = svalue(h$obj)
if (substring(f,1,1) %in% c('"',"'")) {
# remove quotes if present
f = substring(f,2,nchar(f)-1)
}
# Reset first so a failed read leaves NULL rather than a stale mesh.
assign(var, NULL, envir = globalenv())
assign(var, read.ply(f, ShowSpecimen = F), envir = globalenv())
},
error = function(e) {
gmessage('Could not open file! Is it an ASCII ply file? (BINARY ply files can not be opened)')
}
)
}
panel1 = function()
{
win <- gwindow("3D Pneumatophores - Data Import")
allwin = ggroup(cont=win, horizontal=T)
BigDataGroup <- ggroup(cont=allwin, horizontal=F)
DataGroup <- gframe("Data", container = BigDataGroup, horizontal=FALSE)
grp.file <- ggroup(horizontal=FALSE, container = DataGroup)
lbl.file <- glabel("3D model: ", container = grp.file)
browse.file <- gfilebrowse(text = "Select ASCII ply file",
container = grp.file,
filter = list("ply files" = list(patterns = c("*.ply"))),
handler = function(h, ...){
enabled(browse.file) = F;
enabled(grp.file) = F;
read_ply(h, var='tst', ...);
enabled(txt_data_frame_name) = T;
enabled(chk_ground) = T;
enabled(btn_next) = T;
})
grp.file2 <- ggroup(horizontal=FALSE, container = DataGroup)
lbl.file2 <- glabel("Horizontal optimization: ", container = grp.file2)
chk_ground <- gcheckbox(
text = "Skip alignment with ground",
handler = function(h,...){
},
container = grp.file2
)
enabled(chk_ground) = F
SizeGroup <- gframe("Size", container = BigDataGroup, horizontal=FALSE)
grp_name <- ggroup(horizontal=FALSE, container = SizeGroup)
lbl_data_frame_name <- glabel(
"Enter the size of the area (in meters) : ",
container = grp_name
)
txt_data_frame_name <- gedit("1.0", container = grp_name)
enabled(txt_data_frame_name) = F
RightGroup <- ggroup(cont=allwin, horizontal=F)
addSpring(RightGroup)
btn_next <- gbutton(
text = "Go to next step",
container = RightGroup,
handler = function(h, ...)
{
enabled(btn_next) = F
assign('area_width', as.numeric(svalue(txt_data_frame_name)), envir = globalenv())
if (svalue(chk_ground) == 1) {
aligned = re_orient_normals(tst, plot=F)
aligned = optimize_horizontal(aligned)
} else {
aligned = tst
}
assign('scaled', do.scale(aligned, area_width), envir = globalenv())
panel3()
dispose(win)
}
)
enabled(btn_next) = F
}
panel3 = function() {
win <- gwindow("3D Pneumatophores - Ground Removal")
grp_name <- ggroup(container = win,horizontal=F)
lbl_data_frame_name2 <- glabel(
"Offset to align the ground with 0 (in meters): ",
container = grp_name
)
txt_data_frame_name2 <- gedit("0", container = grp_name)
lbl_data_frame_name <- glabel(
"Height of the first slice (relative to the ground, in meters): ",
container = grp_name
)
txt_data_frame_name <- gedit("0.1", container = grp_name)
btn_flip <- gbutton(
text = "Flip it yeah!",
container = grp_name,
handler = function(h, ...)
{
scaled$vb[3,] = -scaled$vb[3,]
assign('scaled',scaled, envir = globalenv()) }
)
btn_v <- gbutton(
text = "Update",
container = win,
handler = function(h, ...)
{
tryCatch(
{
scaled$vb[3,] = scaled$vb[3,] - as.numeric(svalue(txt_data_frame_name2))
assign('scaled',scaled, envir = globalenv())
svalue(txt_data_frame_name2) = '0'
assign('baseline',as.numeric(svalue(txt_data_frame_name)), envir = globalenv())
cat(paste('baseline is',baseline,'\n'))
thr = 0.3
hist(scaled$vb[3,scaled$vb[3,]<thr],breaks=seq(min(scaled$vb[3,]),thr+0.01,by=0.01), include.lowest = T, xlab='height', main='Distribution of point heights')
abline(v=baseline,col='red')
myplot3d(t(scaled$vb))
planes3d(a=0,b=0,c=-1,d=baseline,col='red')
planes3d(a=0,b=0,c=1,d=0,col='green')
enabled(btn_next) = T
},
error = function(e) {
enabled(btn_next) = F
gmessage('Could not read the relative height of the first slice or the ground level. Are they numerical value?')
})
}
)
grp_next <- ggroup(container = win)
btn_next <- gbutton(
text = "Go to next step",
container = grp_next,
handler = function(h, ...)
{
panel4()
dispose(win)
}
)
enabled(btn_next) = F
}
panel4 = function() {
win <- gwindow("3D Pneumatophores - Cylindrical Approximation")
allwin = ggroup(cont=win, horizontal=T)
BigDataGroup <- ggroup(cont=allwin, horizontal=F)
DataGroup <- gframe("Clustering Parameters", container = BigDataGroup, horizontal=FALSE)
grp_name <- ggroup(horizontal=FALSE, container = DataGroup)
lbl_data_frame_name <- glabel(
"Enter the height of a slice (in meters)\nValues like 0.005, 0.01 or 0.02 make sense:",
container = grp_name
)
txt_data_frame_name <- gedit("0.01", container = grp_name)
lbl_data_frame_name2 <- glabel(
"Enter the max number of points within a slice (lower = faster, but less accurate)\nValues like 1000, 10000 or Inf make sense:",
container = grp_name
)
txt_data_frame_name2 <- gedit("Inf", container = grp_name)
lbl_data_frame_name3 <- glabel(
"Enter the number of iterations for each step of approximation (lower = faster, but less accurate)\nValues like 100, 200 or 300 make sense:",
container = grp_name
)
txt_data_frame_name3 <- gedit("300", container = grp_name)
grp_bandwidth <- ggroup(cont=grp_name, horizontal=T)
lbl_data_frame_name4 <- glabel(
"Enter the bandwidth of the mean-shift kernel (lower = more small clusters)\nValues like 0.0025, 0.005 or 0.0075 make sense:",
container = grp_bandwidth
)
txt_data_frame_name4 <- gedit("0.005", container = grp_bandwidth)
btn_bandwidth <- gbutton(
text = "Guess optimal",
container = grp_bandwidth,
handler = function(h, ...)
{
tryCatch(
{
step = as.numeric(svalue(txt_data_frame_name))
cat(paste('step is',step,'\n'))
},
error = function(e) {
step = 0
gmessage('Could not read the slice width. Is it a numerical value?')
})
tryCatch(
{
resample.n = as.numeric(svalue(txt_data_frame_name2))
cat(paste('resample.n is',resample.n,'\n'))
},
error = function(e) {
resample.n = 0
gmessage('Could not read the max number of points. Is it a numerical value (or, "Inf")?')
})
tryCatch(
{
iter = as.numeric(svalue(txt_data_frame_name3))
cat(paste('iter is',iter,'\n'))
},
error = function(e) {
iter = 0
gmessage('Could not read the iter number. Is it a numerical value?')
})
if ((step != 0) & (iter != 0) & (resample.n != 0)) {
# tryCatch(
# {
assign('resample.n', resample.n, envir = globalenv())
assign('iter', iter, envir = globalenv())
assign('step', step, envir = globalenv())
assign('breaks', seq(baseline,max(scaled$vb[3,])+step,by=step), envir = globalenv())
tmp_list = do.slice(scaled, breaks)
assign('breaks_label', tmp_list[[1]], envir = globalenv())
assign('slices', tmp_list[[2]], envir = globalenv())
svalue(txt_data_frame_name4) = guess_optimal_bandwidth(slices,resample.n = resample.n, iter = iter)
# }, error = function(e) {
# enabled(txt_data_frame_name4) = T
# gmessage('Could not perform automatic bandwidth detection. Falling back into debug mode..')
# browser()
# }
# )
}
}
)
RightGroup <- ggroup(cont=allwin, horizontal=F)
addSpring(RightGroup)
lbl_data_frame_name_lab <- glabel(
"Expect processing time up to 30 minutes",
container = RightGroup
)
lbl_data_frame_name_lab2 <- glabel(
"using the default settings...",
container = RightGroup
)
btn_scale <- gbutton(
text = "Approximate",
container = RightGroup,
handler = function(h, ...)
{
tryCatch(
{
step = as.numeric(svalue(txt_data_frame_name))
cat(paste('step is',step,'\n'))
},
error = function(e) {
step = 0
gmessage('Could not read the slice width. Is it a numerical value?')
})
tryCatch(
{
resample.n = as.numeric(svalue(txt_data_frame_name2))
cat(paste('resample.n is',resample.n,'\n'))
},
error = function(e) {
resample.n = 0
gmessage('Could not read the max number of points. Is it a numerical value (or, "Inf")?')
})
tryCatch(
{
iter = as.numeric(svalue(txt_data_frame_name3))
cat(paste('iter is',iter,'\n'))
},
error = function(e) {
iter = 0
gmessage('Could not read the iter number. Is it a numerical value?')
})
tryCatch(
{
b = as.numeric(svalue(txt_data_frame_name4))
cat(paste('bandwidth is',b,'\n'))
},
error = function(e) {
b = 0
gmessage('Could not read the bandwidth parameter. Is it a numerical value?')
})
if ((step != 0) & (iter != 0) & (resample.n != 0) & (b != 0)) {
tryCatch(
{
assign('resample.n', resample.n, envir = globalenv())
assign('iter', iter, envir = globalenv())
assign('step', step, envir = globalenv())
assign('b', b, envir = globalenv())
assign('breaks', seq(baseline,max(scaled$vb[3,])+step,by=step), envir = globalenv())
tmp_list = do.slice(scaled, breaks)
assign('breaks_label', tmp_list[[1]], envir = globalenv())
assign('slices', tmp_list[[2]], envir = globalenv())
assign('mss',
approximate(slices,breaks=breaks,iter=iter,resample.n=resample.n,h=b,cores=8),
envir = globalenv())
tmp_list = merge.clusters(mss, threshold=0.1); new.mss=tmp_list[[1]];
assign('unique.n',tmp_list[[2]], envir = globalenv())
s = 8
new.mss2 = assign.sector.width(new.mss, slices, threshold=0.06, s=s)
assign('mss',new.mss2, envir = globalenv())
enabled(btn_upload2) = T
myplot3d(t(scaled$vb));
plotcyl(mss,breaks=breaks,maxn=unique.n)
# a few statistics
pneu_nb = sapply(1:length(mss), function(i.slice){
nrow(mss[[i.slice]]$cluster.center)
})
mean_rad = sapply(1:length(mss), function(i.slice){
mean(mss[[i.slice]]$cluster.ws)
})
sem_rad = sapply(1:length(mss), function(i.slice){
sqrt(var(c(mss[[i.slice]]$cluster.ws))/pneu_nb[i.slice]/s)
})
mean_diam = sapply(1:length(mss), function(i.slice){
mean(c(mss[[i.slice]]$cluster.ws[,1:(s/2)]+mss[[i.slice]]$cluster.ws[,(1+s/2):s]))
})
sem_diam = sapply(1:length(mss), function(i.slice){
sqrt(var(c(
mss[[i.slice]]$cluster.ws[,1:(s/2)]+mss[[i.slice]]$cluster.ws[,(1+s/2):s]
))/pneu_nb[i.slice]/(s/2))
})
min_diam = sapply(1:length(mss), function(i.slice){
if(nrow(mss[[i.slice]]$cluster.ws) == 1) {
min(mss[[i.slice]]$cluster.ws[1:(s/2)]+mss[[i.slice]]$cluster.ws[(1+s/2):s])
} else {
mean(apply(mss[[i.slice]]$cluster.ws[,1:(s/2)]+mss[[i.slice]]$cluster.ws[,(1+s/2):s],1,min))
}
})
max_diam = sapply(1:length(mss), function(i.slice){
if(nrow(mss[[i.slice]]$cluster.ws) == 1) {
max(mss[[i.slice]]$cluster.ws[1:(s/2)]+mss[[i.slice]]$cluster.ws[(1+s/2):s])
} else {
mean(apply(mss[[i.slice]]$cluster.ws[,1:(s/2)]+mss[[i.slice]]$cluster.ws[,(1+s/2):s],1,max))
}
})
median_diam = sapply(1:length(mss), function(i.slice){
if(nrow(mss[[i.slice]]$cluster.ws) == 1) {
median(mss[[i.slice]]$cluster.ws[1:(s/2)]+mss[[i.slice]]$cluster.ws[(1+s/2):s])
} else {
mean(apply(mss[[i.slice]]$cluster.ws[,1:(s/2)]+mss[[i.slice]]$cluster.ws[,(1+s/2):s],1,median))
}
})
ex_p = c(T,F)
bp = barplot(mean_rad*100, horiz=T,main='Mean radius as a function of height',xlab='Radius (cm)')
require('plotrix')
plotCI(mean_rad*100,bp,uiw=sem_rad*100, err='x',sfrac=0.005,add=T,pch=NA,xpd=T)
text(x=par('usr')[1],bp[ex_p],breaks_label[ex_p],adj=1, cex=0.75,xpd=T)
text(x=par('usr')[1],bp[length(bp)]+2*diff(bp[1:2]),'Height (m)',adj=1, cex=1,xpd=T)
bp = barplot(mean_diam*100, horiz=T,main='Mean diameter as a function of height',xlab='Diameter (cm)')
plotCI(mean_diam*100,bp,uiw=sem_diam*100, err='x',sfrac=0.005,add=T,pch=NA,xpd=T)
text(x=par('usr')[1],bp[ex_p],breaks_label[ex_p],adj=1, cex=0.75,xpd=T)
text(x=par('usr')[1],bp[length(bp)]+2*diff(bp[1:2]),'Height (m)',adj=1, cex=1,xpd=T)
bp = barplot(pneu_nb, horiz=T,main='Number of pneumatophores per height',xlab='Count')
text(x=par('usr')[1],bp[ex_p],breaks_label[ex_p],adj=1, cex=0.75,xpd=T)
text(x=par('usr')[1],bp[length(bp)]+2*diff(bp[1:2]),'Height (m)',adj=1, cex=1,xpd=T)
frontal = pneu_nb * mean_diam
sem_frontal = sem_diam * pneu_nb
tot_frontal = sum(frontal)
bp = barplot(frontal, horiz=T,
main=paste0('Mean +/- SEM frontal area densities\n(sum from ',breaks[1],' cm up to ',breaks[length(breaks)],' cm: ',round(tot_frontal,digits=3),' per meter)'),
xlab='Frontal area density (per meter and per slice)')
plotCI(frontal,bp,uiw=sem_frontal, err='x',sfrac=0.005,add=T,pch=NA,xpd=T)
text(x=par('usr')[1],bp[ex_p],breaks_label[ex_p],adj=1, cex=0.75,xpd=T)
text(x=par('usr')[1],bp[length(bp)]+2*diff(bp[1:2]),'Height (m)',adj=1, cex=1,xpd=T)
minfrontal = pneu_nb * min_diam
tot_minfrontal = sum(minfrontal)
bp = barplot(minfrontal, horiz=T,
main=paste0('Min frontal area densities\n(sum from ',breaks[1],' cm up to ',breaks[length(breaks)],' cm: ',round(tot_minfrontal,digits=3),' per meter)'),
xlab='Frontal area density (per meter and per slice)')
text(x=par('usr')[1],bp[ex_p],breaks_label[ex_p],adj=1, cex=0.75,xpd=T)
text(x=par('usr')[1],bp[length(bp)]+2*diff(bp[1:2]),'Height (m)',adj=1, cex=1,xpd=T)
maxfrontal = pneu_nb * max_diam
tot_maxfrontal = sum(maxfrontal)
bp = barplot(maxfrontal, horiz=T,
main=paste0('Max frontal area densities\n(sum from ',breaks[1],' cm up to ',breaks[length(breaks)],' cm: ',round(tot_maxfrontal,digits=3),' per meter)'),
xlab='Frontal area density (per meter and per slice)')
text(x=par('usr')[1],bp[ex_p],breaks_label[ex_p],adj=1, cex=0.75,xpd=T)
text(x=par('usr')[1],bp[length(bp)]+2*diff(bp[1:2]),'Height (m)',adj=1, cex=1,xpd=T)
medianfrontal = pneu_nb * median_diam
tot_medianfrontal = sum(medianfrontal)
bp = barplot(medianfrontal, horiz=T,
main=paste0('Median frontal area densities\n(sum from ',breaks[1],' cm up to ',breaks[length(breaks)],' cm: ',round(tot_medianfrontal,digits=3),' per meter)'),
xlab='Frontal area density (per meter and per slice)')
text(x=par('usr')[1],bp[ex_p],breaks_label[ex_p],adj=1, cex=0.75,xpd=T)
text(x=par('usr')[1],bp[length(bp)]+2*diff(bp[1:2]),'Height (m)',adj=1, cex=1,xpd=T)
## assign the newly computed statistics into the main environment
assign('pneu_nb', pneu_nb, envir = globalenv())
assign('mean_rad', mean_rad, envir = globalenv())
assign('sem_rad', sem_rad, envir = globalenv())
assign('mean_diam', mean_diam, envir = globalenv())
assign('sem_diam', sem_diam, envir = globalenv())
assign('min_diam', min_diam, envir = globalenv())
assign('max_diam', max_diam, envir = globalenv())
assign('median_diam', median_diam, envir = globalenv())
assign('frontal', frontal, envir = globalenv())
assign('sem_frontal', sem_frontal, envir = globalenv())
assign('minfrontal', minfrontal, envir = globalenv())
assign('maxfrontal', maxfrontal, envir = globalenv())
cat('\n\nSTART OUTPUT TO CSV FILE\n\nhm,hM,nb,aveDIA,semDIA,minDIA,maxDIA,aveFDA,semFDA,minFDA,maxFDA\n')
for (i.slice in 1:length(mss)){
m = mss[[i.slice]]
cat(paste0(m$cluster.hm,
',',
m$cluster.hM,
',',
nrow(m$cluster.ws),
',',
mean_diam[i.slice],
',',
sem_diam[i.slice],
',',
min_diam[i.slice],
',',
max_diam[i.slice],
',',
frontal[i.slice],
',',
sem_frontal[i.slice],
',',
minfrontal[i.slice],
',',
maxfrontal[i.slice],
'\n'
))
}
cat('\n\nEND OUTPUT TO CSV FILE\n\n')
},
error = function(e) {
gmessage('Could not perform the approximation :(')
})
}
}
)
btn_upload2 <- gbutton(
text = "Save Workspace",
container = RightGroup,
handler = function(h, ...)
{
gfile(
text = "Where to save the workspace?",
type = "open",
# action = function(){},
handler = function(h, ...)
{
tryCatch(
{
save.image(file=h$file)
},
error = function(e) {
gmessage('Could not save the workspace!')
}
)
},
filter = list(
"RData files" = list(patterns = c("*.RData"))
)
)
}
)
enabled(btn_upload2) = F
}
# Initialise the global state shared by the GUI panels.  All model/analysis
# objects start as NULL; the two numeric defaults follow.  local() keeps the
# helper vector and loop variable out of the global environment.
local({
  null_globals <- c(
    "tst", "ground", "scaled", "mss", "breaks", "breaks_label", "iter",
    "resample.n", "step", "slices", "b", "pneu_nb", "mean_rad", "sem_rad",
    "mean_diam", "sem_diam", "min_diam", "max_diam", "median_diam",
    "frontal", "sem_frontal", "minfrontal", "maxfrontal"
  )
  for (g in null_globals) {
    assign(g, NULL, envir = globalenv())
  }
})
area_width = 1.0  # side length of the scanned area, in meters
baseline = 0      # ground level offset, in meters

# Launch the first panel of the wizard.
panel1()
|
6f8696d0ceec11316f4c0569c12318be0e0fd3ed
|
836b133c87bce43ae20673cc1842ab20e9decc87
|
/paper_code/Figure_codes/SFigures/FigureS6_validation/FigureS6A_6B_mean_fitness_across_each stability_bin.R
|
6aaa20bde150795f79a4423139d8626473866f32
|
[
"MIT"
] |
permissive
|
sashaflevy/PPiSeq
|
b98836b99e1e78ccb54e02be46792c700cb7fa32
|
646dbe151e7b6044e762fff1cf36b185dffe3bdc
|
refs/heads/master
| 2021-07-01T14:47:49.241826
| 2020-10-05T15:29:52
| 2020-10-05T15:29:52
| 177,872,906
| 0
| 4
| null | 2020-10-05T15:29:53
| 2019-03-26T21:41:35
|
HTML
|
UTF-8
|
R
| false
| false
| 4,611
|
r
|
FigureS6A_6B_mean_fitness_across_each stability_bin.R
|
### Check the normalized fitness values for these PPIs
setwd("~/Desktop/PPiSeq_additional_data/")
source("function.R") # Load commonly used functions
#Commonly used colors
apple_colors = c("#5AC8FA", "#FFCC00", "#FF9500", "#FF2D55", "#007AFF", "#4CD964", "#FF3B30",
"#8E8E93", "#EFEFF4", "#CECED2", "#000000", "007AFF")
################################
# Figure S5A mean fitness for each stability bin
vScore = dataFrameReader_T("Datasets_generated_by_preprocessing/Variation_score_PPI_environment_neg_zero_SD_merge_filter.csv")
count_summary = csvReader_T("Datasets_generated_by_preprocessing/PPI_environment_count_summary_SD_merge_filter.csv")
vScore_fit = vScore[,4:ncol(vScore)]
### Take the mean fitness for all the positive PPIs
mean_fitness_pos = rep(0, nrow(vScore))
for(i in 1:length(mean_fitness_pos)){
pos_env_index = which(count_summary[i,3:ncol(count_summary)] == "1")
pos_env_index_fit = pos_env_index + 3
mean_fitness_pos[i] = mean(as.numeric(vScore[i, pos_env_index_fit]))
}
min(mean_fitness_pos) # 0.1121392
fitness_count = data.frame(mean_fitness_pos, count_summary[,2])
# Label the PPI fitness summary columns: mean fitness of each PPI across
# environments, and the number of environments in which that PPI was identified.
colnames(fitness_count) = c("Mean_fitness", "Env_count")
library(ggplot2)
# Figure S6A: box + dot plot of mean PPI fitness, binned by the number of
# environments in which the PPI was identified.
# NOTE(review): `apple_colors` is defined elsewhere in the project — confirm it
# is in scope before running this section.
ggplot() +
  geom_boxplot(aes(x = Env_count, y = Mean_fitness), fitness_count, outlier.shape=NA) +
  geom_dotplot(aes(x = Env_count, y = Mean_fitness), fitness_count,
               binaxis="y",stackdir="center",binwidth=0.002, alpha=0.2, col = apple_colors[8]) +
  xlab("Number of environments in which a PPI is identified") +
  ylab("Mean fitness of a PPI across different environments") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black")) +
  theme(axis.text.x = element_text(size = 10, color = "black"),
        axis.text.y.left = element_text(size = 10, color = "black"))
#theme(plot.margin = unit(c(1,1,2,1), "cm"))
ggsave("Figures/SFigures/SFigure6/FigureS6A_Mean_fitness_PPI_each_stability_bin.pdf", width=5, height =5)
##### Separate reported and unreported PPIs
################# Figure S5B can be directly Figure 3B
### Make barplot to show the percentage
### put the reported and unreported on to the same figure
# NOTE(review): `dataFrameReader_T` is a project helper (not base R) — presumably
# a read.csv wrapper with header=TRUE; confirm against its definition.
rep_PPI_matrix = dataFrameReader_T("Growth_curve_validation_data/Reported_validation_matrix_SD_merge.csv")
unrep_PPI_matrix = dataFrameReader_T("Growth_curve_validation_data/Unreported_validation_matrix_SD_merge.csv")
# Row 4 of each matrix holds the validation ratio per environment-count bin;
# interleave reported/unreported ratios so paired bars sit side by side.
ratio_rep = rep_PPI_matrix[4,-1]
ratio_unrep = unrep_PPI_matrix[4,-1]
ratio_all = as.numeric(c(ratio_rep[1], ratio_unrep[1], ratio_rep[2], ratio_unrep[2],
                         ratio_rep[3], ratio_unrep[3], ratio_rep[4], ratio_unrep[4],
                         ratio_rep[5], ratio_unrep[5], ratio_rep[6], ratio_unrep[6],
                         ratio_rep[7], ratio_unrep[7], ratio_rep[8], ratio_unrep[8],
                         ratio_rep[9], ratio_unrep[9]))
# Raw counts backing the ratios (row 1 = validated, row 3 = tested), recorded
# here as a reference for the hard-coded "validated/tested" bar labels below.
rep_PPI_matrix[1,] # 0 0 5 7 14 15 22 41 16
rep_PPI_matrix[3,] # 5 1 6 9 19 18 27 44 16
unrep_PPI_matrix[1,]# 55 22 21 13 28 37 30 37 26
unrep_PPI_matrix[3,]#99 32 31 20 33 45 32 38 27
counts_label = c("0/5", "55/99", "0/1", "22/32", "5/6", "21/31",
                 "7/9", "13/20", "14/19", "28/33", "15/18", "37/45",
                 "22/27", "30/32", "41/44", "37/38", "16/16", "26/27")
library(RColorBrewer)
#col_chosen = brewer.pal(3,"Dark2")[1:2]
col_chosen = apple_colors[c(1,4)]
# Figure S6B: paired bar plot of validation rate (%) for previously reported
# vs previously unreported PPIs, one pair per environment-count bin (1..9).
pdf("Figures/SFigures/SFigure6/FigureS6B_Validation_bar_plot_merge_reported_unreported.pdf",
    width= 6, height=5)
# Alternating spacing (0.4 between bins, 0.15 within a pair) groups each
# reported/unreported pair visually.
barCenter = barplot(ratio_all*100, horiz=F, beside=F, ylim=c(0,100), ylab="Validation rate (%)",
                    space= c(0.4, 0.15, 0.4, 0.15, 0.4, 0.15, 0.4, 0.15, 0.4, 0.15,
                             0.4, 0.15, 0.4, 0.15, 0.4, 0.15, 0.4, 0.15),
                    col= col_chosen , axisnames=F, border=NA, cex.axis=0.8)
legend(-0.5,120, legend=c("Previously reported", "Previously unreported"),fill=col_chosen, cex=0.8, bty="n",
       border=FALSE, xpd = TRUE)
# Annotate each bar with its validated/tested count just above the bar top.
text(x= barCenter, y = ratio_all*100 + 2, labels = counts_label, cex=0.5, xpd = TRUE)
# Place the bin number (1..9) centered under each pair of bars.
env_num_loc = rep(0, 9)
for(i in 1:9){
  env_num_loc[i] = mean(barCenter[(2*i-1):(2*i)])
}
text(x = env_num_loc, y = -8, labels = as.character(1:9), xpd = TRUE)
text(median(barCenter), y = -16, labels = "Number of environments in which a PPI is identified", xpd = TRUE)
dev.off()
|
4a9244e0aa4dff5f5162f51f118b8fc3364e9eaf
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Concepts_Of_Modern_Physics_by_Arthur_Beiser/CH6/EX6.4/Ex6_4.R
|
21f80782e7630bd23fe3a42e4cddb0a23c2b6e67
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 238
|
r
|
Ex6_4.R
|
# Normal Zeeman effect: wavelength splitting of a 450 nm spectral line in a
# 0.300 T magnetic field, d(lambda) = e * B * lambda^2 / (4 * pi * m * c).
#(Pg no. 226)
field_T <- 0.300                   # magnetic flux density (tesla)
lambda_nm <- 450                   # line wavelength (nm)
lambda_m <- lambda_nm * (10 ^ -9)  # wavelength in metres
charge_C <- 1.6e-19                # electron charge (coulomb)
mass_kg <- 9.1e-31                 # electron mass (kg)
c_ms <- 3e+8                       # speed of light (m/s)
# Splitting in metres, then converted back to nm and rounded for display.
del_wl <- (charge_C * field_T * (lambda_m ^ 2)) / (4 * pi * mass_kg * c_ms)
del_wl <- del_wl * (10 ^ 9)
del_wl <- round(del_wl, 5)
cat("Delta_Lamda =", del_wl, "nm\n")
|
babafdef7cb397654fdef2c7f8ce9843d5f5f214
|
92508c4c9a770f213dd9fabb925ef780e2588bf9
|
/course2_Rprogramming/Assignment3/best.R
|
837dff2297e3c6c5e260a1d6af15f28b966dd2a1
|
[] |
no_license
|
SaraRafi/John-Hopkins-Data-Science-Specialization
|
f212d1d5b5da6ba692b1983d68e5e22fe9d2d8ce
|
59817619b18bcfae6becea86e60cb3ce148237cb
|
refs/heads/main
| 2023-02-08T20:15:55.668232
| 2021-01-06T00:52:07
| 2021-01-06T00:52:07
| 325,727,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,023
|
r
|
best.R
|
best <- function(state, outcome)
{
  ## Return the name of the hospital in `state` with the lowest 30-day
  ## mortality rate for `outcome` ("heart attack", "heart failure" or
  ## "pneumonia"), reading from "outcome-of-care-measures.csv".
  ## Ties are broken alphabetically by hospital name.
  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv")
  ## Check that state and outcome are valid.
  ## (Previously only the literal "NN" was rejected; now any state absent
  ## from the data set is an error.)
  if (!state %in% data$State)
    stop("invalid state")
  ColName <- switch(outcome,
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia",
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    stop("invalid outcome"))
  statedata <- subset(data, State == state)
  ## Rates are read as text; "Not Available" coerces to NA — suppress the
  ## expected coercion warning rather than spamming the console.
  MortalityRates <- suppressWarnings(as.numeric(as.character(statedata[, ColName])))
  ## Return hospital name in that state with lowest 30-day death rate.
  MinMortalityRate <- min(MortalityRates, na.rm = TRUE)
  HospitalIndex <- which(MortalityRates == MinMortalityRate)
  ## Alphabetically first hospital wins on ties.
  HospitalName <- sort(as.character(statedata$Hospital.Name[HospitalIndex]))[1]
  HospitalName
}
|
b688a2da6375d832ebfc448ea9afe4bed1e60777
|
ca944551b97bd0b5470463c6b397b1977f158067
|
/man/plrm.cv.Rd
|
6283ac8f37c618cf298780331e638bb1469ad5b4
|
[] |
no_license
|
cran/PLRModels
|
e4a6aa634752b6d0f35eea736c11fdc00bf2190c
|
0a13da2e2920bcf1385c192fc9005a762304ee0f
|
refs/heads/master
| 2023-09-01T05:28:20.762717
| 2023-08-19T10:42:44
| 2023-08-19T11:30:35
| 17,681,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,261
|
rd
|
plrm.cv.Rd
|
\name{plrm.cv}
\alias{plrm.cv}
\title{
Cross-validation bandwidth selection in PLR models
}
\description{
From a sample \eqn{{(Y_i, X_{i1}, ..., X_{ip}, t_i): i=1,...,n}}, this routine computes, for each \eqn{l_n} considered, an optimal pair of bandwidths for estimating the regression function of the model
\deqn{Y_i= X_{i1}*\beta_1 +...+ X_{ip}*\beta_p + m(t_i) + \epsilon_i,}
where
\deqn{\beta = (\beta_1,...,\beta_p)}
is an unknown vector parameter and
\deqn{m(.)}
is a smooth but unknown function.
The random errors, \eqn{\epsilon_i}, are allowed to be time series. The optimal pair of bandwidths, (\code{b.opt, h.opt}), is selected by means of the leave-(\eqn{2l_n + 1})-out cross-validation procedure. The bandwidth \code{b.opt} is used in the estimate of \eqn{\beta}, while the pair of bandwidths \code{(b.opt, h.opt)} is considered in the estimate of \eqn{m}. Kernel smoothing, combined with ordinary least squares estimation, is used.
}
\usage{
plrm.cv(data = data, b.equal.h = TRUE, b.seq=NULL, h.seq=NULL,
num.b = NULL, num.h = NULL, w = NULL, num.ln = 1, ln.0 = 0,
step.ln = 2, estimator = "NW", kernel = "quadratic")
}
\arguments{
\item{data}{
\code{data[,1]} contains the values of the response variable, \eqn{Y};
\code{data[, 2:(p+1)]} contains the values of the "linear" explanatory variables,
\eqn{X_1, ..., X_p};
\code{data[, p+2]} contains the values of the "nonparametric" explanatory variable, \eqn{t}.
}
\item{b.equal.h}{if TRUE (the default), the same bandwidth is used for estimating both \eqn{\beta} and \eqn{m}.}
\item{b.seq}{sequence of considered bandwidths, \code{b}, in the CV function for estimating \eqn{\beta}. If \code{NULL} (the default), \code{num.b} equidistant values between zero and a quarter of the range of \eqn{{t_i}} are considered.}
\item{h.seq}{sequence of considered bandwidths, \code{h}, in the pair of bandwidths \code{(b, h)} used in the CV function for estimating \eqn{m}. If \code{NULL} (the default), \code{num.h} equidistant values between zero and a quarter of the range of \eqn{t_i} are considered.}
\item{num.b}{number of values used to build the sequence of considered bandwidths for estimating \eqn{\beta}. If \code{b.seq} is not \code{NULL}, \code{num.b=length(b.seq)}. Otherwise, if both \code{num.b} and \code{num.h} are \code{NULL} (the default), \code{num.b=50} is considered; if \code{num.b} is \code{NULL} (the default) but \code{num.h} is not \code{NULL}, then \code{num.b=num.h} is considered; if \code{b.equal.h=TRUE} (the default) and both \code{num.b} and \code{num.h} are not \code{NULL} and different, the maximum value of \code{num.b} and \code{num.h} is considered for both.}
\item{num.h}{pairs of bandwidths (\code{b, h}) are used for estimating \eqn{m}, \code{num.h} being the number of values considered for \code{h}. If \code{h.seq} is not \code{NULL}, \code{num.h=length(h.seq)}. Otherwise, if both \code{num.b} and \code{num.h} are \code{NULL} (the default), \code{num.h=50} is considered; if \code{num.h} is \code{NULL} (the default) but \code{num.b} is not \code{NULL}, \code{num.h=num.b} is considered; if \code{b.equal.h=TRUE} (the default) and both \code{num.b} and \code{num.h} are not \code{NULL} and different, the maximum value of \code{num.b} and \code{num.h} is considered for both.}
  \item{w}{support interval of the weight function in the CV function. If \code{NULL} (the default), \eqn{(q_{0.1}, q_{0.9})} is considered, where \eqn{q_p} denotes the quantile of order \eqn{p} of \eqn{{t_i}}.}
  \item{num.ln}{number of values for \eqn{l_n}: after estimating \eqn{\beta}, \eqn{2l_{n} + 1} observations around each point \eqn{t_i} are eliminated to estimate \eqn{m(t_i)} in the CV function. The default is 1.}
\item{ln.0}{minimum value for \eqn{l_n}. The default is 0.}
\item{step.ln}{distance between two consecutive values of \eqn{l_n}. The default is 2.}
\item{estimator}{allows us the choice between \dQuote{NW} (Nadaraya-Watson) or \dQuote{LLP} (Local Linear Polynomial). The default is \dQuote{NW}.}
\item{kernel}{allows us the choice between \dQuote{gaussian}, \dQuote{quadratic} (Epanechnikov kernel), \dQuote{triweight} or \dQuote{uniform} kernel. The default is \dQuote{quadratic}.}
}
\details{
A weight function (specifically, the indicator function \bold{1}\eqn{_{[w[1] , w[2]]}}) is introduced in the CV function to allow elimination (or at least significant reduction) of boundary effects from the estimate of \eqn{m(t_i)}.
As noted in the definition of \code{num.ln}, the estimate of \eqn{\beta} in the CV function is obtained from all data while, once \eqn{\beta} is estimated, \eqn{2l_{n} + 1} observations around each \eqn{t_i} are eliminated to estimate \eqn{m(t_i)} in the CV function. Actually, the estimate of \eqn{\beta} to be used in time \eqn{i} in the CV function could also be done eliminating those \eqn{2l_{n} + 1} observations; that possibility was not implemented because of both its computational cost and the known fact that the estimate of \eqn{\beta} is quite insensitive to the bandwidth selection.
The implemented procedure generalizes that one in expression (8) in Aneiros-Perez and Quintela-del-Rio (2001) by including a weight function (see above) and allowing two smoothing parameters instead of only one (see Aneiros-Perez \emph{et al.}, 2004).
}
\value{
\item{bh.opt}{dataframe containing, for each \code{ln} considered, the selected value for \code{(b,h)}.}
\item{CV.opt}{\code{CV.opt[k]} is the minimum value of the CV function when the k-th value of \code{ln} is considered.}
\item{CV}{an array containing the values of the CV function for each pair of bandwidths and \code{ln} considered.}
\item{b.seq}{sequence of considered bandwidths, \code{b}, in the CV function for estimating \eqn{\beta}.}
\item{h.seq}{sequence of considered bandwidths, \code{h}, in the pair of bandwidths \code{(b, h)} used in the CV function for estimating \eqn{m}.}
\item{w}{support interval of the weight function in the CV function.}
}
\references{
Aneiros-Perez, G., Gonzalez-Manteiga, W. and Vieu, P. (2004) Estimation and testing in a partial linear regression under long-memory dependence. \emph{Bernoulli} \bold{10}, 49-78.
Aneiros-Perez, G. and Quintela-del-Rio, A. (2001) Modified cross-validation in semiparametric regression models with dependent errors. \emph{Comm. Statist. Theory Methods} \bold{30}, 289-307.
Chu, C-K and Marron, J.S. (1991) Comparison of two bandwidth selectors with dependent errors. \emph{The Annals of Statistics} \bold{19}, 1906-1918.
}
\author{German Aneiros Perez \email{ganeiros@udc.es}
Ana Lopez Cheda \email{ana.lopez.cheda@udc.es}}
\seealso{
Other related functions are: \code{\link{plrm.beta}}, \code{\link{plrm.est}}, \code{\link{plrm.gcv}}, \code{\link{np.est}}, \code{\link{np.gcv}} and \code{\link{np.cv}}.
}
\examples{
# EXAMPLE 1: REAL DATA
data(barnacles1)
data <- as.matrix(barnacles1)
data <- diff(data, 12)
data <- cbind(data,1:nrow(data))
aux <- plrm.cv(data, step.ln=1, num.ln=2)
aux$bh.opt
plot.ts(aux$CV[,-2,])
par(mfrow=c(2,1))
plot(aux$b.seq,aux$CV[,-2,1], xlab="h", ylab="CV", type="l", main="ln=0")
plot(aux$b.seq,aux$CV[,-2,2], xlab="h", ylab="CV", type="l", main="ln=1")
# EXAMPLE 2: SIMULATED DATA
## Example 2a: independent data
set.seed(1234)
# We generate the data
n <- 100
t <- ((1:n)-0.5)/n
beta <- c(0.05, 0.01)
m <- function(t) {0.25*t*(1-t)}
f <- m(t)
x <- matrix(rnorm(200,0,1), nrow=n)
sum <- x\%*\%beta
epsilon <- rnorm(n, 0, 0.01)
y <- sum + f + epsilon
data_ind <- matrix(c(y,x,t),nrow=100)
# We apply the function
a <-plrm.cv(data_ind)
a$CV.opt
CV <- a$CV
h <- a$h.seq
plot(h, CV,type="l")
## Example 2b: dependent data and ln.0 > 0
set.seed(1234)
# We generate the data
x <- matrix(rnorm(200,0,1), nrow=n)
sum <- x\%*\%beta
epsilon <- arima.sim(list(order = c(1,0,0), ar=0.7), sd = 0.01, n = n)
y <- sum + f + epsilon
data_dep <- matrix(c(y,x,t),nrow=100)
# We apply the function
a <-plrm.cv(data_dep, ln.0=2)
a$CV.opt
CV <- a$CV
h <- a$h.seq
plot(h, CV,type="l")
}
\keyword{Statistical Inference}
\keyword{Regression}
\keyword{Time Series}
\keyword{Nonparametric Statistics}
|
1833c9502e26a7aa24595440e3d7bfcd90349124
|
727a052968125e92a3a0ff11154a1dcc00974627
|
/man/med.Rd
|
c33514e1639eea2c5e428e8d4a3c95993a04ddb7
|
[
"MIT"
] |
permissive
|
nbrosowsky/pwr2ppl
|
a358292901ec0939af53cbe9577d08570a8a44f2
|
197c89557655469dd46f9d1259dab978d18402f2
|
refs/heads/master
| 2020-05-04T22:55:47.099107
| 2019-04-02T19:37:37
| 2019-04-02T19:37:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,958
|
rd
|
med.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/med.R
\name{med}
\alias{med}
\title{Compute Power for Mediated (Indirect) Effects
Requires correlations between all variables as well as sample size.}
\usage{
med(rxm1, rxm2 = 0, rxm3 = 0, rxm4 = 0, rxy, rym1, rym2 = 0,
rym3 = 0, rym4 = 0, rm1m2 = 0, rm1m3 = 0, rm1m4 = 0,
rm2m3 = 0, rm2m4 = 0, rm3m4 = 0, alpha = 0.05, mvars, n)
}
\arguments{
\item{rxm1}{Correlation between predictor (x) and first mediator (m1)}
\item{rxm2}{Correlation between predictor (x) and second mediator (m2)}
\item{rxm3}{Correlation between predictor (x) and third mediator (m3)}
\item{rxm4}{Correlation between predictor (x) and fourth mediator (m4)}
\item{rxy}{Correlation between DV (y) and predictor (x)}
\item{rym1}{Correlation between DV (y) and first mediator (m1)}
\item{rym2}{Correlation between DV (y) and second mediator (m2)}
\item{rym3}{Correlation between DV (y) and third mediator (m3)}
\item{rym4}{Correlation between DV (y) and fourth mediator (m4)}
\item{rm1m2}{Correlation first mediator (m1) and second mediator (m2)}
\item{rm1m3}{Correlation first mediator (m1) and third mediator (m3)}
\item{rm1m4}{Correlation first mediator (m1) and fourth mediator (m4)}
\item{rm2m3}{Correlation second mediator (m2) and third mediator (m3)}
\item{rm2m4}{Correlation second mediator (m2) and fourth mediator (m4)}
\item{rm3m4}{Correlation third mediator (m3) and fourth mediator (m4)}
\item{alpha}{Type I error (default is .05)}
\item{mvars}{Number of Mediators}
\item{n}{Sample size}
}
\value{
Power for Mediated (Indirect) Effects
}
\description{
Compute Power for Mediated (Indirect) Effects
Requires correlations between all variables as well as sample size.
}
\examples{
med(rxm1=.25, rxy=-.35, rym1=-.5,mvars=1, n=150)
med(rxm1=.3, rxm2=.3, rxm3=.25, rxy=-.35, rym1=-.5,rym2=-.5, rym3 = -.5,
rm1m2=.7, rm1m3=.4,rm2m3=.4, mvars=3, n=150)
}
|
dcc5e88b4274afed15fc52c3b98c40a4e3927d5d
|
7180123ce9d90a5642d8595eff3bce61f3bdb3e5
|
/common/Measurements.R
|
243fed087f3248843c60c809c464a249f3d15403
|
[] |
no_license
|
isantabarbara/bcldndc
|
33f00b4893b9eed4c8406fe10b404ee23b076811
|
1a1b0e7aee54f7d70a46d846f33d9926bf99bee6
|
refs/heads/master
| 2020-07-16T02:00:52.005235
| 2019-11-24T13:15:41
| 2019-11-24T13:15:41
| 205,696,419
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,948
|
r
|
Measurements.R
|
# Responsibility:
# Read the measurements
# Scheme of how the measurements are structured in the `values` variable (a list of data.frames, one each):
#
#
#
# siteValues (list)
# +------------+------------+
# | n_n2o | n_no |
# |------------|------------|
# |meas (d.f) |meas (d.f) |
# |+--+--+--+-+|+--+--+--+-+|
# || | | | ||| | | | ||
# ||--|--|--|-|||--|--|--|-||
# ||1 |65|98|9|||44|98|33|7||
# ||34|68|89|9|||65|34|67|7||
# |+--+--+--+-+|+--+--+--+-+|
# +------------+------------+
#
# Reference class wrapping the reading, aggregation and filtering of
# per-compound measurement time series.  `values` is a list keyed by
# measurement name (e.g. "n_n2o"); each entry is a data.frame whose first
# columns are year and day, with the measurement value at column
# `valuesColumnIndex` and its SD in the following column.
Measurements <- setRefClass(
  "measurements"
  , fields = list(
    site = "list",                 # site configuration: measFolder, measSubfolder, measFiles
    dataDaysRange = "numeric",     # width (in days) of each aggregation window
    values = "list",               # one measurement data.frame per compound
    valuesAggregated = "list",     # aggregated measurements (filled externally)
    valuesColumnIndex = "numeric"  # column index of the value column (default 3)
  )
  , methods = list(
    #
    # Constructor: forwards all fields to the generated initializer.
    #
    initialize = function(site = list(),
                          dataDaysRange = 1,
                          values=list(),
                          valuesAggregated=list(),
                          valuesColumnIndex = 3,
                          ...)
    {
      callSuper(...,
                site = site,
                dataDaysRange = dataDaysRange,
                values = values,
                valuesAggregated = valuesAggregated,
                valuesColumnIndex = valuesColumnIndex)
    },
    #
    # List the measurement files configured for the site, keeping only
    # those whose path contains "PARSED".
    #
    getFilesToRead = function() {
      logdebug("METHOD IN: measurements$getFilesToRead")
      meas.path <- file.path(site$measFolder, site$measSubfolder)
      files <- c()
      for (file.name in site$measFiles) {
        # Each entry of site$measFiles is used as a filename pattern.
        files <- c(files, list.files(path = meas.path, pattern = file.name, full.names = T, recursive = T,
                                     ignore.case = T, include.dirs = F))
      }
      logdebug(files)
      files <- files[grepl("PARSED", files)]
      logdebug("METHOD OUT: measurements$getFilesToRead")
      return(files)
    },
    #
    # Read the configured measurements files into `values`, keyed by the
    # name of the value column of each file.
    # Return: the measurements
    #
    readMeasurements = function() {
      logdebug("METHOD IN: measurements$readMeasurements")
      files <- getFilesToRead()
      for (file in files) {
        logdebug(paste("Read measurement file: ", file, sep=" "))
        meas <- read.table(file, header=T, sep="\t",dec=".")
        # The value column's header names the compound (e.g. "n_n2o").
        measName <- colnames(meas)[[valuesColumnIndex]]
        values[[measName]] <<- meas
        #CHECKED!: Measurements are aggregated correctly
        #write.table(valuesAggregated[[measName]], file = paste0("meas",measName,proc.time()[3],".csv"), row.names=F)
      }
      logdebug("METHOD OUT: measurements$readMeasurements")
      # Including all years and all measurements.
      return(values)
    },
    #
    # Overwrite each measurement data.frame with its windowed average
    # (window width = dataDaysRange days) computed by weeklyAverage().
    #
    aggregate = function() {
      #years <- getMeasurementsYears()
      logdebug("METHOD IN: measurements$aggregate")
      for (measName in names(values)) {
        meas <- values[[measName]]
        # NOTE(review): `years` is computed but never used in this method.
        years <- as.numeric(unique(meas$year))
        values[[measName]] <<- weeklyAverage(meas)
        logdebug(head(values[[measName]]))
      }
      logdebug("METHOD OUT: measurements$aggregate")
    },
    #
    # Keep only the local-maximum (peak) rows, ignoring values at or below
    # the `probs.num` quantile.
    #
    peaks = function(probs.num=0.50) {
      logdebug("METHOD IN: measurements$peaks")
      for (measName in names(values)) {
        meas <- values[[measName]]
        idx <- getHighPeaks(meas[,3], probs.num=probs.num)
        meas <- meas[idx,]
        values[[measName]] <<- meas
      }
      logdebug("METHOD OUT: measurements$peaks")
    },
    #
    # Keep only the rows whose value is strictly above the `probs.num`
    # quantile of that compound's values.
    #
    highValues = function(probs.num=0.50) {
      logdebug("METHOD IN: measurements$highValues")
      for (measName in names(values)) {
        logdebug(paste("high values for",measName, probs.num))
        logdebug(capture.output(head(values)))
        meas <- values[[measName]]
        logdebug(capture.output(meas))
        med <- quantile(meas[,3], probs=probs.num)
        logdebug(paste("quantile", med))
        high.indexs <- which(meas[,3] > med)
        logdebug(paste("here1"))
        meas <- meas[high.indexs,]
        logdebug(paste("here2"))
        values[[measName]] <<- meas
      }
      logdebug("METHOD OUT: measurements$highValues")
    },
    #
    # Balance the number of above-quantile ("peak") and below-quantile
    # ("valley") rows by repeatedly thinning the valley rows until the
    # valley/peak ratio is at most 1.3.  Compounds whose name contains
    # "fake" are left unbalanced.
    #
    proportionate = function(probs.num=0.50) {
      logdebug("METHOD IN: measurements$highValues")
      for (measName in names(values)) {
        logdebug(paste("high values for",measName, probs.num))
        logdebug(capture.output(head(values)))
        is.fake <- grepl("fake",measName)
        meas <- values[[measName]]
        logdebug(capture.output(meas))
        med <- quantile(meas[,3], probs=probs.num)
        logerror(paste("quantile", med))
        high.indexs <- which(meas[,3] > med)
        low.indexs <- which(meas[,3] <= med)
        while ((length(low.indexs)/length(high.indexs) > 1.3) && !is.fake) {
          if (length(low.indexs)/length(high.indexs) < 2) {
            # Close to balanced: drop every fifth low index.
            low.indexs <- low.indexs[which(which(low.indexs==low.indexs) %% 5 != 0)]
          } else {
            #Remove half of the low indexes
            low.indexs <- low.indexs[which(which(low.indexs==low.indexs) %% 2 != 0)]
          }
        }
        logdebug(paste("Number of aggregated measurements: Low:", length(low.indexs), "High:", length(high.indexs)))
        indexs <- c(high.indexs, low.indexs)
        meas <- meas[indexs,]
        values[[measName]] <<- meas
      }
      logdebug("METHOD OUT: measurements$highValues")
    },
    #
    # Same balancing as proportionate(), but applied independently within
    # each year.  The tolerated valley/peak ratio is relaxed by 0.1 after
    # every 4 thinning iterations to guarantee termination.
    #
    proportionateByYear = function(probs.num=0.50) {
      logdebug("METHOD IN: measurements$proportionateByYear")
      for (measName in names(values)) {
        logdebug(paste("high values for",measName, probs.num))
        all.meas <- values[[measName]]
        logdebug(capture.output(head(all.meas)))
        all.index <- c()
        new.all.meas <- NULL
        for (year in unique(all.meas$year)) {
          meas <- all.meas[all.meas$year == year,]
          med <- quantile(meas[,3], probs=probs.num)
          logerror(paste("quantile", med))
          high.indexs <- which(meas[,3] > med)
          low.indexs <- which(meas[,3] <= med)
          logdebug(paste("lowlen:",length(low.indexs),"highlen",length(high.indexs)))
          its <- 0
          proportion <- 1.3
          while ((length(low.indexs)/length(high.indexs) > proportion)) {
            if (length(low.indexs)/length(high.indexs) < 2) {
              low.indexs <- low.indexs[which(which(low.indexs==low.indexs) %% 5 != 0)]
            } else {
              #Remove half of the low indexes
              low.indexs <- low.indexs[which(which(low.indexs==low.indexs) %% 2 != 0)]
            }
            logdebug(paste("lowlen:",length(low.indexs),"highlen",length(high.indexs)))
            if (its > 3) {
              # Relax the target ratio so the loop cannot spin forever.
              proportion <- proportion + 0.1
            }
            its <- its + 1
          }
          logerror(paste("Number of aggregated measurements: Year:", year, "Low:", length(low.indexs), "High:", length(high.indexs)))
          indexs <- c(high.indexs, low.indexs)
          if (is.null(new.all.meas)) {
            new.all.meas <- meas[indexs,]
          } else {
            new.all.meas <- rbind(new.all.meas, meas[indexs,])
          }
          #all.index <- c(all.index, indexs)
        }
        #all.meas <- all.meas[all.index,]
        values[[measName]] <<- new.all.meas
      }
      logdebug("METHOD OUT: measurements$proportionateByYear")
    },
    #
    # Remove from measurements years outside the [ini, end] interval.
    #
    removeYears = function(ini, end) {
      logdebug("METHOD IN: measurements$removeYears")
      for (measName in names(values)) {
        meas <- values[[measName]]
        valid.indexs <- which(meas$year >= ini & meas$year <= end)
        meas <- meas[valid.indexs,]
        values[[measName]] <<- meas
      }
      logdebug("METHOD OUT: measurements$removeYears")
    },
    #
    # Keep only the local maxima and local minima rows of each compound.
    #
    extremes = function() {
      logdebug("METHOD IN: measurements$extremes")
      for (measName in names(values)) {
        meas <- values[[measName]]
        idx <- getHighPeaks(meas[,3])
        idx.low <- getLowPeaks(meas[,3])
        meas <- meas[c(idx,idx.low),]
        values[[measName]] <<- meas
      }
      logdebug("METHOD OUT: measurements$extremes")
    },
    #
    # Get the indexes of local maxima in `data` that lie above the
    # `probs.num` quantile (values at or below it are zeroed first).
    #
    getHighPeaks = function(data, probs.num=0.75) {
      logerror("METHOD IN: measurementParer$getHighPeak")
      meas <- data
      med <- quantile(meas, probs=probs.num)
      logerror(paste("quantile",med))
      high.indexs <- which(meas > med)
      meas[-high.indexs] <- 0
      index <- peakIndexes(meas)
      logerror("METHOD OUT: measurementParer$getHighPeak")
      return(index)
    },
    #
    # Indexes of local maxima of x (sign change of the first difference).
    # NOTE(review): the thresh-filtered expression inside the `if` is
    # discarded — the method always returns the unfiltered `pks-1`;
    # confirm whether the threshold filter was meant to take effect.
    #
    peakIndexes = function(x, thresh = 0) {
      logerror("METHOD IN: measurementParer$peakIndexes")
      pks <- which(diff(sign(diff(x, na.pad=F)), na.pad=F) < 0) + 2
      if (!missing(thresh)) {
        pks[x[pks -1] -x[pks] > thresh]
      }
      else pks
      return(pks-1)
    },
    #
    # Get the indexes of local minima among the values above the
    # `probs.num` quantile (values at or below it are zeroed first).
    #
    getLowPeaks = function(data, probs.num=0.50) {
      logdebug("METHOD IN: measurementParer$getLowPeaks")
      meas <- data
      med <- quantile(meas, probs=probs.num)
      high.indexs <- which(meas > med)
      meas[-high.indexs] <- 0
      index <- inv.peakIndexes(meas)
      logdebug("METHOD OUT: measurementParer$getLowPeaks")
      return(index)
    },
    #
    # Indexes of local minima of x (mirror of peakIndexes).
    #
    inv.peakIndexes = function(x, thresh = 0) {
      pks <- which(diff(sign(diff(x, na.pad=F)), na.pad=F) > 0) + 2
      if (!missing(thresh)) {
        pks[x[pks -1] -x[pks] > thresh]
      }
      else pks
    },
    #
    # Keep, per period of median peak spacing, only the maximum and median
    # rows; then drop everything at or below the `probs.num` quantile.
    #
    discretize = function(probs.num=0.50) {
      for (measName in names(values)) {
        meas <- values[[measName]]
        med <- quantile(meas[,3], probs=probs.num)
        period <- peaksMedianPeriod(meas, probs.num=0.75)
        logerror(paste("Removing all data but maximun and minimum from periods of",period,"for compound",measName))
        years <- as.numeric(unique(meas$year))
        indexes <- maxMinFilter(meas, years, period)
        meas <- meas[indexes,]
        meas <- meas[meas[,3] > med,]
        values[[measName]] <<- meas
      }
    },
    #
    # Median number of days between consecutive high peaks of `meas`.
    #
    peaksMedianPeriod = function(meas, probs.num=0.50) {
      logerror("METHOD IN: measurementParer$peaksMedianPeriod")
      idx <- getHighPeaks(meas[,3], probs.num=probs.num)
      # Build calendar dates from the year + day-of-year columns.
      meas$date <- strptime(paste(meas$year, meas$day), "%Y %j")
      dates <- meas[idx,]$date
      periods.between.peaks <- c()
      for (i in 1:length(dates)) {
        if (i < length(dates)) {
          periods.between.peaks <- c(periods.between.peaks, (dates[i+1] - dates[i]))
        }
      }
      med.period <- median(periods.between.peaks)
      logerror("METHOD OUT: measurementParer$peaksMedianPeriod")
      return(med.period)
    },
    #
    # Get the years we have for all the measurements.
    # NOTE(review): BUG — the body is entirely commented out, so `years`
    # is undefined and `return(years)` will fail if this method is called.
    #
    getMeasurementsYears = function() {
      logdebug("METHOD IN: measurements$getMeasurementsYears")
      #      years <- c()
      #      for (measurements in values) {
      #        years <- c(years, levels(factor(measurements$year)))
      #      }
      #      years <- as.numeric(levels(factor(years)))
      #years <- as.numeric(unique(data$year))
      logdebug("METHOD OUT: measurements$getMeasurementsYears")
      return(years)
    },
    #
    # Get the unique names of the measurements values columns (ej: n_n2o, n_no...)
    #
    getMeasurementsNames = function() {
      logdebug("METHOD IN: measurements$getMeasurementsNames")
      names <- c()
      for (meas in values) {
        name <- names(meas)[[valuesColumnIndex]]
        names <- c(names, name)
      }
      logdebug("METHOD OUT: measurements$getMeasurementsNames")
      return(unique(names))
    },
    #
    # Aggregate `meas` over non-overlapping windows of dataDaysRange days,
    # returning mean values and combined SDs (via mean of variances).
    # TODO: We should save the aggregated measurements in a file, to check future errors
    #
    aggregation = function(meas, years) {
      logdebug("METHOD IN: measurements$aggregation")
      aggMean <- c()
      aggSD <- c()
      sdColumnIndex <- valuesColumnIndex + 1
      for(year in years) {
        aggregationsInTheYear <- round(365/dataDaysRange)
        for (aggNum in 0:(aggregationsInTheYear-1)) {
          combinedMeasurements <- getAggregatedMeasurementsData(meas, aggNum, year)
          # na.rm=TRUE: if some day is missing we can ignore it.
          aggmean <- mean(combinedMeasurements[[valuesColumnIndex]],na.rm=TRUE)
          # NA SDs should have been replaced by a value during parsing.
          combinedVariancesMean <- mean((combinedMeasurements[[sdColumnIndex]])^2,na.rm=TRUE)
          aggsd <- sqrt(combinedVariancesMean)
          aggMean <- c(aggMean, aggmean)
          aggSD <- c(aggSD, aggsd)
        }
      }
      logdebug("METHOD OUT: measurements$aggregation")
      return(list(meas=aggMean, measSD=aggSD))
    },
    #
    # Aggregate data with a moving-average criterion: one window of
    # dataDaysRange days starting at every day of every year.  Windows
    # whose mean is NA (no data) are dropped.
    #
    movingAverage= function(values, years) {
      logdebug("METHOD IN: measurements$movingAverage")
      aggMean <- c()
      aggSD <- c()
      aggDays <- c()
      aggYears <- c()
      sdColumnIndex <- valuesColumnIndex + 1
      for(year in years) {
        for (prevDay in c(0:364)) {
          combinedMeasurements <- movingAverageAux(values, prevDay, year)
          # na.rm=TRUE: if some day is missing we can ignore it.
          aggmean <- mean(combinedMeasurements[[valuesColumnIndex]],na.rm=TRUE)
          # NA SDs should have been replaced by a value during parsing.
          combinedVariancesMean <- mean((combinedMeasurements[[sdColumnIndex]])^2,na.rm=TRUE)
          aggsd <- sqrt(combinedVariancesMean)
          if (!is.na(aggmean)) {
            aggMean <- c(aggMean, aggmean)
            aggSD <- c(aggSD, aggsd)
            aggDays <- c(aggDays,prevDay+1)
            aggYears <- c(aggYears, year)
          }
        }
      }
      new.values <- data.frame(year=aggYears, day=aggDays, value=aggMean, sd=aggSD)
      logdebug("finish aggregation")
      logdebug(capture.output(new.values))
      # Preserve the caller's original column names.
      colnames(new.values) <- colnames(values)
      logdebug("METHOD OUT: measurements$getMovingAverage")
      return(new.values)
    },
    #
    # Rows of `values` falling in the dataDaysRange-day window starting
    # the day after `prevStartDay`, for the given year.
    #
    movingAverageAux = function(values, prevStartDay, year) {
      #logdebug("METHOD IN: measurements$movingAverageAux")
      startDay <- prevStartDay + 1
      endDay <- prevStartDay + dataDaysRange
      selectorCriteria <-(values$year == year
                          & values$day >= startDay
                          & values$day <= endDay)
      aggMeasurements <- values[selectorCriteria,]
      #logdebug(capture.output(aggMeasurements))
      # logdebug("METHOD OUT: measurements$movingAverageAux")
      return(aggMeasurements)
    },
    #
    # Rows of `meas` falling in the aggNumber-th non-overlapping
    # dataDaysRange-day window of the given year.
    #
    getAggregatedMeasurementsData = function(meas, aggNumber, year) {
      #logdebug("METHOD IN: measurements$getAggregatedMeasurementsData")
      startDay <- 1 + aggNumber*dataDaysRange
      endDay <- dataDaysRange + aggNumber*dataDaysRange
      selectorCriteria <-(meas$year == year
                          & meas$day >= startDay
                          & meas$day <= endDay)
      aggMeasurements <- meas[selectorCriteria,]
      #logdebug("METHOD OUT: measurements$getAggregatedMeasurementsData")
      return(aggMeasurements)
    },
    #
    # Indexes of the per-period max/median rows for all years (see
    # getMaxMinDataIndex).
    #
    maxMinFilter = function(meas, years, period) {
      logdebug("METHOD IN: measurements$aggregation")
      indexes <- c()
      for(year in years) {
        aggregationsInTheYear <- round(365/period)
        for (aggNum in 0:(aggregationsInTheYear-1)) {
          indexes <- c(indexes,getMaxMinDataIndex(meas, aggNum, year))
        }
      }
      logdebug("METHOD OUT: measurements$aggregation")
      return(indexes)
    },
    #
    # Indexes of the maximum-value and median-value rows within one
    # window.  NOTE(review): despite the name, `min.val` is the window
    # median, not the minimum — confirm that is intentional.
    #
    getMaxMinDataIndex = function(meas, aggNumber, year) {
      #logdebug("METHOD IN: measurements$getAggregatedMeasurementsData")
      startDay <- 1 + aggNumber*dataDaysRange
      endDay <- dataDaysRange + aggNumber*dataDaysRange
      selectorCriteria <-(meas$year == year
                          & meas$day >= startDay
                          & meas$day <= endDay)
      max.val <- max(meas[selectorCriteria,3])
      min.val <- median(meas[selectorCriteria,3])
      maxCriteria <-(meas$year == year
                     & meas$day >= startDay
                     & meas$day <= endDay
                     & meas[,3] == max.val)
      minCriteria <-(meas$year == year
                     & meas$day >= startDay
                     & meas$day <= endDay
                     & meas[,3] == min.val)
      max.idx <- which(maxCriteria)
      min.idx <- which(minCriteria)
      #logdebug("METHOD OUT: measurements$getAggregatedMeasurementsData")
      return(c(max.idx,min.idx))
    },
    #
    # Average over fixed (non-moving) windows of dataDaysRange days using
    # data.table grouping; averages all columns from valuesColumnIndex on.
    #
    weeklyAverage = function(values) {
      logdebug("METHOD IN: measurements$weeklyAverage")
      values.dt <- data.table(values)
      agg.res <- data.frame()
      rangesNumr <- round(365/dataDaysRange)
      # Nested function used in the `by` clause of data.table.
      # Returns a window number for aggregation by the configured day range;
      # the -0.00001 keeps exact multiples in the previous window, and days
      # past the last window are clamped into it.
      day <- function(day) {
        aggNum <- as.integer((day/dataDaysRange)-0.00001)+1
        aggNum[aggNum > rangesNumr] <- rangesNumr
        return(aggNum)
      }
      colIdxs <- c(valuesColumnIndex:length(colnames(values.dt)))
      agg.res <- values.dt[, lapply(.SD, mean), by=list(year,day(day)), .SDcols=colIdxs]
      logdebug("METHOD OUT: measurements$weeklyAverage")
      return(as.data.frame(agg.res))
    }
  )#End methods List
)#End RefClass
|
a15ce22ceaf0c33a6e4379f5fee24bbc26d55559
|
0f77b988691f6de6d6d2b360bcda644302f5844f
|
/R/plotScatterAcd.R
|
ae3e1b2eb592de3fe0c74d5be54f910a8f76502a
|
[] |
no_license
|
cran/ACDm
|
665b66ec52358449c391574924dcf57978375644
|
ffee40a02ba834b0ed201036a3aa7f2307221cbc
|
refs/heads/master
| 2022-11-23T05:09:39.138507
| 2022-11-16T11:11:48
| 2022-11-16T11:11:48
| 39,196,192
| 6
| 1
| null | 2019-12-14T13:09:11
| 2015-07-16T12:43:39
|
R
|
UTF-8
|
R
| false
| false
| 3,421
|
r
|
plotScatterAcd.R
|
plotScatterAcd <- function(fitModel, x = "muHats", y = "residuals", xlag = 0, ylag = 0,
                           colour = NULL, xlim = NULL, ylim = NULL, alpha = 1/10,
                           smoothMethod = "auto"){
  # Scatter plot (ggplot2) of two series extracted from a fitted ACD model,
  # optionally lagged and coloured by a third series, with a smoothing line.
  #
  # Improvement vs. original: the series-extraction switch was copy-pasted
  # three times (x, y, colour); it is now a single local helper, so the set
  # of supported series names lives in one place.
  choices <- c("muHats", "residuals", "durations", "adjDur", "dayTime", "time", "index")
  x <- match.arg(x, choices)
  y <- match.arg(y, choices)
  if(length(colour) != 0) colour <- match.arg(colour, choices)
  contTime = TRUE
  # Extract the series named `what` from fitModel.
  extractSeries <- function(what) {
    switch(what,
           muHats = fitModel$muHats,
           residuals = fitModel$residuals,
           durations = fitModel$durations$durations,
           adjDur = fitModel$durations$adjDur,
           dayTime = fitModel$durations$time$min / 60 + fitModel$durations$time$hour,
           time = {if(contTime) fitModel$durations$time
             else fitModel$durations$time$yday * (60*8 + 25) + fitModel$durations$time$min + fitModel$durations$time$hour * 60},
           index = 1:fitModel$N)
  }
  xData <- extractSeries(x)
  yData <- extractSeries(y)
  if(length(colour) != 0){
    colourData <- extractSeries(colour)
    colourData <- colourData[(1+ylag):length(colourData)]
  }
  # Align the two series when one of them is lagged.
  yData <- yData[(1+xlag):(length(yData)-ylag)]
  xData <- xData[(1+ylag):(length(xData)-xlag)]
  if(ylag != 0) y <- paste("lagged ", y, " (i-", ylag, ")", sep = "")
  if(xlag != 0) x <- paste("lagged ", x, " (i-", xlag, ")", sep = "")
  if(length(colour) == 0){
    g <- ggplot(data.frame(x = xData, y = yData), aes(x = x, y = y))
  } else{
    g <- ggplot(data.frame(x = xData, y = yData, colour = colourData), aes(x = x, y = y, colour = colour)) + scale_colour_continuous(name = colour)
  }
  g <- g + geom_point(alpha = alpha) + geom_smooth(colour="red", size=1.5, fill = "blue", alpha = .2, method = smoothMethod)
  # Only for the default (unlagged) muHats-vs-residuals plot: integer y breaks.
  if(x == "muHats" && y == "residuals") g <- g + scale_y_continuous(breaks = seq(1, max(yData), 1)) #+ geom_hline(yintercept = 1, colour = "red")
  if(length(xlim) != 0) g <- g + xlim(xlim)
  if(length(ylim) != 0 ) g <- g + ylim(ylim)
  g + ylab(y) + xlab(x)
}
|
e829c2b1a45e1f0afbd86babb005321ea460e3ce
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mvmeta/examples/mvmetaSim.Rd.R
|
ef7c3d16a68d4060ee2a05b5dde4ceccc000ba6a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 710
|
r
|
mvmetaSim.Rd.R
|
library(mvmeta)
### Name: mvmetaSim
### Title: Simulating Responses for mvmeta Models
### Aliases: mvmetaSim simulate.mvmeta
### Keywords: models regression multivariate methods
### ** Examples
# RUN A MODEL (bivariate meta-regression on the berkey98 dataset)
model <- mvmeta(cbind(PD,AL)~pubyear,S=berkey98[5:7],data=berkey98)
# SIMULATE A NEW SET OF OUTCOMES
simulate(model)
# SIMULATE FROM SCRATCH: 3 OUTCOMES, 8 STUDIES
(y <- matrix(0,8,3))
(S <- inputcov(matrix(runif(8*3,0.1,2),8,3,dimnames=list(NULL,
  c("V1","V2","V3"))),cor=c(0,0.5,0.7)))
(Psi <- inputcov(1:3,cor=0.3))
mvmetaSim(y,S,Psi)
# ALTERNATIVELY, DEFINE Psi THROUGH STANDARD DEVIATIONS AND CORRELATION 0.3
mvmetaSim(y,S,sd=1:3,cor=0.3)
# 2 SIMULATION SETS
mvmetaSim(y,S,Psi,nsim=2)
|
7b06ac8436c597e026d6b7e5064af1d64c4ccc75
|
05de00b8c0512fe56bb0727b1210e4c6755f5fc4
|
/tests/testthat/test-step_utils.R
|
e0a4d0a3ea09ac8ff3e0102b5ec309424025a59d
|
[
"Apache-2.0"
] |
permissive
|
DyfanJones/aws-step-functions-data-science-sdk-r
|
8b4a221d6b18d9015a002c50d3063f6e0e405e16
|
ab2eb28780791f8e8bb50227b844fa240b1252da
|
refs/heads/main
| 2023-04-28T21:24:34.920961
| 2021-05-30T21:58:04
| 2021-05-30T21:58:04
| 362,764,798
| 1
| 0
|
NOASSERTION
| 2021-05-30T21:58:05
| 2021-04-29T09:37:47
|
R
|
UTF-8
|
R
| false
| false
| 1,231
|
r
|
test-step_utils.R
|
# AWS region identifiers grouped by partition; used below to verify that
# get_aws_partition() maps each region to its partition name.
# Standard commercial partition ("aws"):
aws = c('af-south-1',
        'ap-east-1',
        'ap-northeast-1',
        'ap-northeast-2',
        'ap-northeast-3',
        'ap-south-1',
        'ap-southeast-1',
        'ap-southeast-2',
        'ca-central-1',
        'eu-central-1',
        'eu-north-1',
        'eu-south-1',
        'eu-west-1',
        'eu-west-2',
        'eu-west-3',
        'me-south-1',
        'sa-east-1',
        'us-east-1',
        'us-east-2',
        'us-west-1',
        'us-west-2')
# China partition ("aws-cn"):
aws_cn = c('cn-north-1', 'cn-northwest-1')
# GovCloud partition ("aws-us-gov"):
aws_us_gov = c('us-gov-east-1', 'us-gov-west-1')
# Isolated partitions ("aws-iso", "aws-iso-b"):
aws_iso = 'us-iso-east-1'
aws_iso_b = 'us-isob-east-1'
test_that("check if correct partition is returned",{
  # vapply() instead of sapply(): guarantees a named character vector of the
  # declared shape; sapply() would silently change its return type if an
  # input vector were ever empty.
  Aws <- vapply(aws, get_aws_partition, character(1))
  AwsCn <- vapply(aws_cn, get_aws_partition, character(1))
  AwsUsGov <- vapply(aws_us_gov, get_aws_partition, character(1))
  AwsIso <- vapply(aws_iso, get_aws_partition, character(1))
  AwsIsoB <- vapply(aws_iso_b, get_aws_partition, character(1))
  # unname(): vapply keeps the region names; we only compare values here.
  expect_equal(unname(Aws), rep("aws", length(aws)))
  expect_equal(unname(AwsCn), rep("aws-cn", length(aws_cn)))
  expect_equal(unname(AwsUsGov), rep("aws-us-gov", length(aws_us_gov)))
  expect_equal(unname(AwsIso), rep("aws-iso", length(aws_iso)))
  expect_equal(unname(AwsIsoB), rep("aws-iso-b", length(aws_iso_b)))
})
|
13f64544db9357d10636edeca6cc6f0bacfca711
|
93f615199df1fc8b2817676c91c820704f8e7983
|
/man/LownerJohnEllipse.Rd
|
0d73486d828b9babf91dc4bb13efd5db61eccc42
|
[] |
no_license
|
cran/PlaneGeometry
|
d2f0242a3d655a7375a6fb8b33cee156d9a60ac1
|
91a2bba25a18b897bb069511bf2a3ab303355e71
|
refs/heads/master
| 2023-08-17T14:14:25.865334
| 2023-08-09T21:40:02
| 2023-08-10T01:45:56
| 239,984,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 747
|
rd
|
LownerJohnEllipse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Ellipse.R
\name{LownerJohnEllipse}
\alias{LownerJohnEllipse}
\title{Löwner-John ellipse (ellipse hull)}
\usage{
LownerJohnEllipse(pts)
}
\arguments{
\item{pts}{the points in a two-columns matrix (one point per row); at least
three distinct points}
}
\value{
An \code{Ellipse} object.
}
\description{
Minimum area ellipse containing a set of points.
}
\examples{
\donttest{pts <- cbind(rnorm(30, sd=2), rnorm(30))
ell <- LownerJohnEllipse(pts)
box <- ell$boundingbox()
plot(NULL, asp = 1, xlim = box$x, ylim = box$y, xlab = NA, ylab = NA)
draw(ell, col = "seaShell")
points(pts, pch = 19)
all(apply(pts, 1, ell$contains)) # should be TRUE}
}
|
909d48ed26fc45b313cd20a9be09d3206d8b5609
|
427951ea581f9b4d5df15edbc1cca2c0cb7122ee
|
/man/request_body.azureml_request_response.Rd
|
d874f02b12b43762daed6885eabb5be26e76aabd
|
[
"MIT"
] |
permissive
|
ijlyttle/AzureMLPlus
|
cde1de18a65c544ddbfc3441754dfb64b6b04fac
|
cbe46f6339e3ac65425eca394ca6d49e438b5fa0
|
refs/heads/master
| 2021-01-13T15:05:27.681673
| 2016-12-18T23:28:57
| 2016-12-18T23:28:57
| 76,304,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 566
|
rd
|
request_body.azureml_request_response.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/request_response_body.R
\name{request_body.azureml_request_response}
\alias{request_body.azureml_request_response}
\alias{response_body.azureml_request_response}
\title{Gets request/response body}
\usage{
\method{request_body}{azureml_request_response}(x, ...)
\method{response_body}{azureml_request_response}(x, ...)
}
\arguments{
\item{x}{\code{\link{azureml_request_response}} object}
}
\value{
character (JSON), body of request/response
}
\description{
Gets request/response body
}
|
9d9848d7d9429fc063ce2c64b8c2b02b5eea6aaa
|
a9c7f4bd51272fd6146e779d478e51633d40931a
|
/R/ondisc.R
|
7d30f93cf896c33eab8481f877253cc19712f8bb
|
[
"MIT"
] |
permissive
|
scarlettcanny0629/ondisc
|
9d78a0a6551b2b54a6af22fc54d44ba2000c2fbc
|
787dfaff245114f1f5fa9d1fb5595a61f492b199
|
refs/heads/main
| 2023-06-26T17:59:57.541622
| 2021-07-23T00:57:20
| 2021-07-23T00:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 680
|
r
|
ondisc.R
|
# Declare non-standard-evaluation symbols used in dplyr pipelines so
# R CMD check does not flag them as undefined globals.
utils::globalVariables(c("gene_idx", "expression", "."))
#' ondisc: A package for out-of-memory computing on single-cell data
#'
#' Single-cell datasets are large and are growing in size as sequencing costs drop. The ondisc package is designed to facilitate large-scale computing on single-cell expression data by providing access to expression matrices out-of-memory. ondisc is functional (i.e., all objects are persistent) and efficient (i.e., all algorithms are theoretically optimal in time).
#' @useDynLib ondisc, .registration = TRUE
#' @importFrom Rcpp sourceCpp
#' @importFrom magrittr %>%
#' @import methods
#' @import Matrix
#' @docType package
#'
#' @name ondisc
NULL
|
eee72fe3026aeb2f1e38df8ea28a91b614c7fdbd
|
fbdbfdc857cc2a59cb5066d3a0f0c3a2afe3c2f7
|
/scripts/R/2021Spring/topic4_Theory_vs_Application.R
|
39c2ab64974e6bb72f6f265d7533e3fc06151eac
|
[
"MIT"
] |
permissive
|
RitvikaN25/Introduction-to-Machine-Learning-Big-Data-and-Application
|
269848f367926b5f0a5fd6ec3af9d10859252664
|
5efa49cd71f8b80a552e15957af01ac6492925c3
|
refs/heads/main
| 2023-06-05T19:51:17.611492
| 2021-06-28T19:59:23
| 2021-06-28T19:59:23
| 381,170,121
| 0
| 0
|
MIT
| 2021-06-28T21:55:18
| 2021-06-28T21:55:17
| null |
UTF-8
|
R
| false
| false
| 2,900
|
r
|
topic4_Theory_vs_Application.R
|
# I. Application of Bernoulli Random Variable
# Modern Portfolio Theory demo on two real price series.
# Library
library(quantmod)
# Download daily prices from Yahoo Finance (requires internet access).
getSymbols("AAPL")
getSymbols("FB")
head(AAPL); tail(AAPL)
head(FB); tail(FB)
# Daily returns; AAPL is trimmed so both series cover the same dates.
# NOTE(review): the hard-coded 1356 offset depends on the download date of
# the data -- confirm it still aligns AAPL with the start of the FB series.
returnAVec = quantmod::dailyReturn(AAPL$AAPL.Close)[1356:length(quantmod::dailyReturn(AAPL$AAPL.Close))]
returnA = mean(returnAVec)
sdA = sd(returnAVec)
returnBVec = quantmod::dailyReturn(FB$FB.Close)
returnB = mean(returnBVec)
sdB = sd(returnBVec)
# A single two-asset portfolio with fixed weights:
weightA = 0.3
weightB = 1 - weightA
portfolioReturn = weightA * returnA + weightB * returnB # this return (if converted to annual performance) is Forbes100 level even though it looks like a small number using daily unit
# covariance formula: var(X1 + X2) = var(X1) + 2cov(X1, X2) + var(X2)
portfolioVolatility = (weightA^2*sdA^2 + 2*cov(returnAVec, returnBVec)*weightA*weightB + weightB^2*sdB^2)^(.5)
# Sweep weightA over [0, 1] to trace the (return, volatility) frontier.
MPT_data = sapply(
  seq(0, 1, 0.01),
  function(s) {
    weightA = s
    weightB = 1 - s
    portfolioReturn = weightA * returnA + weightB * returnB # this return (if converted to annual performance) is Forbes100 level even though it looks like a small number using daily unit
    portfolioVolatility = (weightA^2*sdA^2 + 2*cov(returnAVec, returnBVec)*weightA*weightB + weightB^2*sdB^2)^(.5)
    return(c(portfolioReturn, portfolioVolatility))
  }
)
MPT_data = data.frame(t(MPT_data))
colnames(MPT_data) = c("Return", "Volatility(SD)")
plot(MPT_data$`Volatility(SD)`, MPT_data$Return, xlab = "Volatility(SD)", ylab = "Return",
     main = "Modern Portfolio Theory", pch = "*")
# Sharpe ratio with risk-free rate assumed 0: return per unit of volatility.
vector_of_sharpe_ratio = MPT_data$Return / MPT_data$`Volatility(SD)`
max(vector_of_sharpe_ratio)
# NOTE(review): which.max() would be the idiomatic one-liner, but
# which(... == max(...)) also returns ties, which the indexing below allows.
bestIndex = which(vector_of_sharpe_ratio == max(vector_of_sharpe_ratio)); bestIndex
bestCombo = MPT_data[bestIndex, ]
# Capital-market line through the origin, slope = best Sharpe ratio.
abline(a = 0, b = max(vector_of_sharpe_ratio))
bestWeightA = seq(0, 1, 0.01)[bestIndex]
bestWeightB = 1 - bestWeightA
# Look for optimal point
# Optimal: the largest return & the smallest risk (SD)
# II. Application of Uniform Random Variable & Law of Large Numbers
# Monte Carlo estimation of pi, animated as a GIF.
# Library
library(animation)
## Plot Monte Carlo Simulation of Pi
saveGIF({
  ## For each sample size N: draw N uniform points in the unit square and
  ## estimate pi as 4 * (fraction of points inside the quarter circle).
  nRange <- seq(1e2, 1e4, 2e2)
  pi_hat_vec <- rep(NA, length(nRange))
  for (N in nRange) {
    x <- runif(N)
    y <- runif(N)
    d <- sqrt(x^2 + y^2)
    label <- ifelse(d < 1, 1, 0)
    # Fix vs. original: 4*mean(label) replaces plyr::count(label)[2,2]/N,
    # which returned NA whenever every point fell inside the circle
    # (count() then has a single row), and drops the plyr dependency.
    pi_hat <- round(4*mean(label),3)
    pi_hat_vec[which(N == nRange)] <- pi_hat
    par(mfrow=c(1,2))
    plot(
      x, y,
      col = label+1,
      main = paste0(
        "Simulation of Pi: N=", N,
        "; \nApprox. Value of Pi=", pi_hat),
      pch = 20, cex = 1)
    # Fix vs. original: plot type must be a single character; "both" only
    # worked via truncation to "b" and emitted a warning on every frame.
    plot(
      nRange, pi_hat_vec, type = "b",
      main = "Path for Simulated Pi");
    lines(nRange, y = rep(pi, length(nRange)))  # reference line at true pi
  }
}, movie.name = "C:/Users/eagle/OneDrive/PalmDrive/Classrooms/StatsProgram/2021Winter/Scripts/mc-sim-pi.gif",
# please change above to your own address
interval = 0.8, nmax = 30, ani.width = 480)
|
3279326e014b07d82552f86948ce38c9a9c0f43f
|
cd796aa36215cf47f0da2b89788ea50872c62c57
|
/man/as.fumeric.Rd
|
29232d7fcee5bf064e9a617dfc62d9584a435fbd
|
[] |
no_license
|
CymGen30/rafalib
|
0d000b70dd19124cabdc983874295f43979070f6
|
2580666c8e7f8baf1d721a43054b33392724703e
|
refs/heads/master
| 2023-04-07T12:06:39.932162
| 2021-04-15T21:35:48
| 2021-04-15T21:35:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 534
|
rd
|
as.fumeric.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.fumeric.R
\name{as.fumeric}
\alias{as.fumeric}
\title{converts to factor and then numeric}
\usage{
as.fumeric(x, levels = unique(x))
}
\arguments{
\item{x}{a character vector}
\item{levels}{the levels to be used in the call to factor}
}
\description{
Converts a vector of characters into factors and then converts these into numeric.
}
\examples{
group = c("a","a","b","b")
plot(seq_along(group),col=as.fumeric(group))
}
\author{
Rafael A. Irizarry
}
|
440c6ebcf1f4360c0cde0cadf9d924f4e029d26c
|
e72b18b49000f170cc708dc4a0e59f3cd9769d84
|
/Analysis and classification of London bike sharing/hclust.R
|
18f3b868c6253d099b27ff41d69864192d3a05e2
|
[] |
no_license
|
Rungerlas/Data-Mining-Algorithm
|
fc136950c97d93be5791484972adcea63424c9ce
|
52a8cccfb4b2e91d6aac03fdf9f5df4dca0daa0c
|
refs/heads/main
| 2023-05-01T16:57:06.215565
| 2021-05-04T02:40:24
| 2021-05-04T02:40:24
| 364,118,111
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,309
|
r
|
hclust.R
|
# Setup: load the preprocessed bike-sharing data and build a train/test split.
rm(list = ls())  # NOTE(review): clears the whole workspace; acceptable for a
                 # standalone script, but avoid if this file is ever sourced
library(factoextra)
library(NbClust)
library("class")
Data <- read.csv("Processed_Data_v2.csv", stringsAsFactors = TRUE)
Data <- Data[-1]            # drop the index column
True_Class <- Data$cnt      # target: demand level (low/normal/high)
Data <- Data[-1]            # drop the target from the feature matrix
d <- dist(Data)             # Euclidean distances for hierarchical clustering
True_Class <- factor(x = True_Class, levels = c("low","normal","high"))
# 70/30 train/test split by row index (sorted so slices stay in row order)
dir <- sort(sample(c(1:nrow(Data)),size = 0.7*nrow(Data)))
# Build a copy of `Data` augmented with one-hot cluster-membership columns
# (ClusterOne/Two/Three) derived from the hclust labels in `tree`, with the
# true class appended as the final `Class` column.
# NOTE(review): the function name has a typo ("creat"); all call sites use
# it as-is, so renaming would require touching every caller.
creat_new_data <- function(Data,tree,TrueClass)
{
  new_data <- Data
  new_data$clusterlabel <- tree
  new_data$ClusterOne <- c(0)     # indicator columns, zero-initialised;
  new_data$ClusterTwo <- c(0)     # change_flag_variable() sets the 1s
  new_data$ClusterThree <- c(0)
  new_data <- change_flag_variable(new_data)
  # Drop the raw clusterlabel column by position.
  # NOTE(review): the hard-coded index 16 assumes the input has exactly 15
  # feature columns -- confirm against Processed_Data_v2.csv before reuse.
  new_data <- new_data[-16]
  new_data$Class <- TrueClass
  return(new_data)
}
# Set the one-hot cluster indicator columns of `x` from its `clusterlabel`
# column: ClusterOne/Two/Three become 1 where the label is 1/2/3 and 0
# otherwise. Returns the modified data frame.
#
# Improvement vs. original: vectorized comparisons replace the row-by-row
# for loop (one pass per column instead of nrow(x) iterations). The caller
# (creat_new_data) zero-initialises these columns, so overwriting them
# wholesale is equivalent to the original's set-only-matching-rows loop.
change_flag_variable <- function(x)
{
  x$ClusterOne   <- as.numeric(x$clusterlabel == 1)
  x$ClusterTwo   <- as.numeric(x$clusterlabel == 2)
  x$ClusterThree <- as.numeric(x$clusterlabel == 3)
  return(x)
}
# Fit k-NN classifiers for k = 1..100, training on rows `dir` and scoring
# on the remaining rows, and return a list of (best model, best accuracy,
# best k). Column 19 of `Data` is assumed to be the Class factor produced
# by creat_new_data().
knn_model_creation <- function(Data,dir)
{
  max_accuracy <- 0
  True_Class <- Data[-dir,19]
  i <- 1   # NOTE(review): redundant -- the for loop reassigns i immediately
  for(i in 1:100)
  {
    model <- knn(train = Data[dir,-19], test = Data[-dir,-19], cl = Data[dir,19], k = i)
    # Confusion matrix with a fixed level order so diag() gives the
    # correctly-classified counts.
    cm <- table(factor(x = True_Class, levels = c("low","normal","high")),model)
    accuracy <- sum(diag(cm))/length(Data[-dir,19])
    if(accuracy > max_accuracy)
    {
      max_model <- model
      max_accuracy <- accuracy
      k <- i
    }
  }
  # NOTE(review): if every accuracy were exactly 0, max_model and k would be
  # undefined here and list() would error -- confirm this cannot occur.
  model_accur_k <- list(max_model,max_accuracy,k)
  return(model_accur_k)
}
# For each hierarchical-clustering linkage method: cluster into k=3, attach
# the cluster labels as one-hot features, tune a k-NN classifier on the
# augmented data, and record its test error rate and best k.
# NOTE(review): the seven blocks below are near-identical copies differing
# only in the linkage method (and hcut+fviz_cluster for complete/average);
# a loop over method names would remove the duplication.
#using ward.D2 method
ward <- hclust(d,method = "ward.D2")
ward_tree <- cutree(ward,k = 3)
ward_data <- creat_new_data(Data,ward_tree,True_Class)
ward_list <- knn_model_creation(ward_data,dir)
error_rate_ward <- 1 - as.numeric(ward_list[2])
ward_k <- as.numeric(ward_list[3])
#using single method
sing <- hclust(d,method = "single")
sing_tree <- cutree(sing,k = 3)
sing_data <- creat_new_data(Data,sing_tree,True_Class)
sing_list <- knn_model_creation(sing_data,dir)
error_rate_single <- 1 - as.numeric(sing_list[2])
single_k <- as.numeric(sing_list[3])
#using complete method (hcut so the clustering can be visualised)
com <- hcut(d,k=3,hc_method = "complete")
fviz_cluster(com,Data)
com_tree <- cutree(com,k = 3)
com_data <- creat_new_data(Data,com_tree,True_Class)
com_list <- knn_model_creation(com_data,dir)
error_rate_complete <- 1 - as.numeric(com_list[2])
complete_k <- as.numeric(com_list[3])
#using average method
ave <- hcut(d,k=3,hc_method = "average")
fviz_cluster(ave,Data)
ave_tree <- cutree(ave,k = 3)
ave_data <- creat_new_data(Data,ave_tree,True_Class)
ave_list <- knn_model_creation(ave_data,dir)
error_rate_average <- 1 - as.numeric(ave_list[2])
average_k <- as.numeric(ave_list[3])
#using mcquitty method
mcq <- hclust(d,method = "mcquitty")
mcq_tree <- cutree(mcq,k = 3)
mcq_data <- creat_new_data(Data,mcq_tree,True_Class)
mcq_list <- knn_model_creation(mcq_data,dir)
error_rate_mcquitty <- 1 - as.numeric(mcq_list[2])
mcquitty_k <- as.numeric(mcq_list[3])
#using median method
med <- hclust(d,method = "median")
med_tree <- cutree(med,k = 3)
med_data <- creat_new_data(Data,med_tree,True_Class)
med_list <- knn_model_creation(med_data,dir)
error_rate_median <- 1 - as.numeric(med_list[2])
median_k <- as.numeric(med_list[3])
#using centroid method
cen <- hclust(d,method = "centroid")
cen_tree <- cutree(cen,k = 3)
cen_data <- creat_new_data(Data,cen_tree,True_Class)
cen_list <- knn_model_creation(cen_data,dir)
error_rate_centroid <- 1 - as.numeric(cen_list[2])
centroid_k <- as.numeric(cen_list[3])
#only use knn (baseline: no cluster features, same k = 1..100 search)
max_accuracy_knn <- 0
i <- 1
for(i in 1:100)
{
  model <- knn(train = Data[dir,], test = Data[-dir,], cl = True_Class[dir], k = i)
  cm <- table(factor(x = True_Class[-dir], levels = c("low","normal","high")),model)
  accuracy <- sum(diag(cm))/nrow(Data[-dir,])
  if(accuracy > max_accuracy_knn)
  {
    max_model_knn <- model
    max_accuracy_knn <- accuracy
    k_knn <- i
  }
}
error_rate_knn <- 1 - max_accuracy_knn
# Collect every method's error rate and best k into one result row.
output <- data.frame(error_rate_ward,ward_k,error_rate_average,average_k,error_rate_centroid,centroid_k,error_rate_complete,complete_k,error_rate_mcquitty,mcquitty_k,error_rate_single,single_k,error_rate_knn,k_knn)
write.csv(output,"hclust_result.csv")
|
fab6b96480b446876eee2093b3cf3c9972c1b2e5
|
af0b8239a7b4577829c38bd993ca5b0da79fe2a7
|
/preprocessing.R
|
fd17a417ee70bcbc5713e53b090471ea9c89e229
|
[] |
no_license
|
TeresaWenhart/ambvoice_analysis
|
d0f7da1cda798867726d59c9352e91cf645614f2
|
c4ae89598138ab54e3c858c1495ddfe178c0fd9a
|
refs/heads/main
| 2022-12-27T19:40:38.441552
| 2020-10-14T11:12:38
| 2020-10-14T11:12:38
| 303,986,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,252
|
r
|
preprocessing.R
|
#preprocessing behavioural data from ambiguous voices fMRI study
#library
library(dplyr)
library(tidyr)
library(stringr)
#directory
setwd("~/projects/ambiguous_voices/FMRI_study/data/behavioral")
path<-paste(getwd())
#read version A files
# Each subject folder matches av<NN><letter>; every per-condition CSV inside
# it is read into a workspace variable named after the file (via assign()).
sub.folders <- list.files(path=getwd(), pattern="av[0-9]{2}[a-z]{1}",include.dirs = TRUE)
for (j in sub.folders) {
  path<-paste(getwd(),"/",j,sep="")
  filenames = list.files(path=path,pattern="AV_[A-Za-z]{3}_av[0-9]{2}[a-z]{1}.csv")
  for (i in filenames) {
    name <- gsub(".csv","",i)
    name <-gsub("/","_",name)
    assign(name,read.csv(paste(path,"/",i,sep=""))) #read in the table and name as "name"
  }
}
#recode button presses for baseline condition
#version a
# NOTE(review): the `i=i+1` at the end of each loop below is redundant --
# `for` reassigns i on every iteration; it has no effect.
# NOTE(review): `1:length(file_list)` iterates c(1,0) if file_list is empty;
# seq_along(file_list) would be safe -- confirm non-empty input is guaranteed.
file_list<-ls(pattern="AV_Bas_av[0-9]{2}a")
for (i in 1:length(file_list)) {
  name=file_list[i]
  thistable<-get(file_list[i])
  # Button-to-number mapping for response-box layout A; unknown keys -> NA
  # (via the 99 sentinel set by .default)
  thistable$key_num<-dplyr::recode(thistable$key, b = 1, z = 2, g = 3, r = 4, .default=99)
  thistable$key_num[thistable$key_num==99]<-NA
  thistable$X<-NULL   # drop the row-number column written by write.csv
  assign(name,thistable)
  i=i+1
}
#version b
# Layout B reverses the baseline mapping.
file_list<-ls(pattern="AV_Bas_av[0-9]{2}b")
for (i in 1:length(file_list)) {
  name=file_list[i]
  print(i)   # progress output
  thistable<-get(file_list[i])
  thistable$key_num<-dplyr::recode(thistable$key, b = 4, z = 3, g = 2, r = 1, .default=99)
  thistable$key_num[thistable$key_num==99]<-NA
  thistable$X<-NULL
  assign(name,thistable)
  i=i+1
}
#recode button presses to numbers 0 and 1 (opposite assignment for versions a and b)
#version a
file_list<-ls(pattern="AV_[A-Za-z]{3}_av[0-9]{2}a")
file_list<-file_list[str_detect(file_list, "Bas")==FALSE]   # exclude baseline files
for (i in 1:length(file_list)) {
  name=file_list[i]
  thistable<-get(file_list[i])
  thistable$key_num<-dplyr::recode(thistable$key, z = 1, b = 0, .default=99)
  thistable$key_num[thistable$key_num==99]<-NA
  thistable$X<-NULL
  assign(name,thistable)
  i=i+1
}
#version b
file_list<-ls(pattern="AV_[A-Za-z]{3}_av[0-9]{2}b")
file_list<-file_list[str_detect(file_list, "Bas")==FALSE]
for (i in 1:length(file_list)) {
  name=file_list[i]
  print(i)
  thistable<-get(file_list[i])
  thistable$key_num<-dplyr::recode(thistable$key, z = 0, b = 1, .default=99)
  thistable$key_num[thistable$key_num==99]<-NA
  thistable$X<-NULL
  assign(name,thistable)
  i=i+1
}
|
60633ff891b371e8e20e18272f8bbd4ef448dc8b
|
deed2e00447a8f039bfaa941b27fb3c2b446ddf3
|
/man/conduct_ri.Rd
|
3ae99324839124615e7d40d67018fcfc2636344a
|
[] |
no_license
|
nfultz/ri2
|
4b450d9a71ad60777988494294d98b1a24610c65
|
a7fa973b5f4ecc76ffb7770f20e89b6973e1ed74
|
refs/heads/master
| 2021-04-12T09:50:44.705998
| 2018-03-26T19:50:32
| 2018-03-28T20:17:38
| 126,432,485
| 3
| 0
| null | 2018-03-28T20:17:39
| 2018-03-23T04:19:14
|
R
|
UTF-8
|
R
| false
| true
| 5,465
|
rd
|
conduct_ri.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conduct_ri.R
\name{conduct_ri}
\alias{conduct_ri}
\title{Conduct Randomization Inference}
\usage{
conduct_ri(formula = NULL, model_1 = NULL, model_2 = NULL,
test_function = NULL, assignment = "Z", outcome = NULL,
declaration = NULL, sharp_hypothesis = 0, studentize = FALSE,
IPW = TRUE, IPW_weights = NULL, sampling_weights = NULL,
permutation_matrix = NULL, data, sims = 1000, progress_bar = FALSE)
}
\arguments{
\item{formula}{an object of class formula, as in \code{\link{lm}}. Use formula when conducting significance tests of an Average Treatment Effect estimate under a sharp null hypothesis. For the difference-in-means estimate, do not include covariates. For the OLS covariate-adjusted estimate, include covariates.}
\item{model_1}{an object of class formula, as in \code{\link{lm}}. Models 1 and 2 must be "nested." model_1 should be the "restricted" model and model_2 should be the "unrestricted" model.}
\item{model_2}{an object of class formula, as in \code{\link{lm}}. Models 1 and 2 must be "nested." model_1 should be the "restricted" model and model_2 should be the "unrestricted" model.}
\item{test_function}{A function that takes data and returns a scalar test statistic.}
\item{assignment}{a character string that indicates which variable is randomly assigned. Defaults to "Z".}
\item{outcome}{a character string that indicates which variable is the outcome variable. Defaults to NULL.}
\item{declaration}{A random assignment declaration, created by \code{\link{declare_ra}}.}
\item{sharp_hypothesis}{either a numeric scalar or a numeric vector of length k - 1, where k is the number of treatment conditions. In a two-arm trial, this number is the *hypothesized* difference between the treated and untreated potential outcomes for each unit. In a multi-arm trial, each number in the vector is the hypothesized difference in potential outcomes between the baseline condition and each successive treatment condition.}
\item{studentize}{logical, defaults to FALSE. Should the test statistic be the t-ratio rather than the estimated ATE? T-ratios will be calculated using HC2 robust standard errors or their clustered equivalent. CLUSTERING NOT YET IMPLEMENTED.}
\item{IPW}{logical, defaults to TRUE. Should inverse probability weights be calculated?}
\item{IPW_weights}{a character string that indicates which variable is the existing inverse probability weights vector. Usually unnecessary, as IPW weights will be incorporated automatically if IPW = TRUE. Defaults to NULL.}
\item{sampling_weights}{a character string that indicates which variable is the sampling weights vector. Optional, defaults to NULL. NOT YET IMPLEMENTED}
\item{permutation_matrix}{An optional matrix of random assignments, typically created by \code{\link{obtain_permutation_matrix}}.}
\item{data}{A data.frame.}
\item{sims}{the number of simulations. Defaults to 1000.}
\item{progress_bar}{logical, defaults to FALSE. Should a progress bar be displayed in the console?}
}
\description{
This function makes it easy to conduct three kinds of randomization inference.
}
\details{
1. Conduct hypothesis tests under the sharp null when the test statistic is the difference-in-means or covariate-adjusted average treatment effect estimate.
2. Conduct "ANOVA" style hypothesis tests, where the f-statistic from two nested models is the test statistic. This procedure is especially helpful when testing interaction terms under null of constant effects.
3. Arbitrary (scalar) test statistics
}
\examples{
# Data from Gerber and Green Table 2.2
# Randomization Inference for the Average Treatment Effect
table_2.2 <-
data.frame(d = c(1, 0, 0, 0, 0, 0, 1),
y = c(15, 15, 20, 20, 10, 15, 30))
## Declare randomization procedure
declaration <- declare_ra(N = 7, m = 2)
## Conduct Randomization Inference
out <- conduct_ri(y ~ d,
declaration = declaration,
assignment = "d",
sharp_hypothesis = 0,
data = table_2.2)
summary(out)
plot(out)
# Randomization Inference for an Interaction
N <- 100
declaration <- randomizr::declare_ra(N = N, m = 50)
Z <- randomizr::conduct_ra(declaration)
X <- rnorm(N)
Y <- .9 * X + .2 * Z + 1 * X * Z + rnorm(N)
dat <- data.frame(Y, X, Z)
ate_obs <- coef(lm(Y ~ Z, data = dat))[2]
out <-
conduct_ri(
model_1 = Y ~ Z + X,
model_2 = Y ~ Z + X + Z * X,
declaration = declaration,
assignment = "Z",
sharp_hypothesis = ate_obs,
data = dat, sims = 100
)
plot(out)
summary(out)
summary(out, p = "two-tailed")
summary(out, p = "upper")
summary(out, p = "lower")
# Randomization Inference for arbitrary test statistics
## In this example we're conducting a randomization check (in this case, a balance test).
N <- 100
declaration <- randomizr::declare_ra(N = N, m = 50)
Z <- randomizr::conduct_ra(declaration)
X <- rnorm(N)
Y <- .9 * X + .2 * Z + rnorm(N)
dat <- data.frame(Y, X, Z)
balance_fun <- function(data) {
f_stat <- summary(lm(Z ~ X, data = data))$f[1]
names(f_stat) <- NULL
return(f_stat)
}
## confirm function works as expected
balance_fun(dat)
## conduct randomization inference
out <-
conduct_ri(
test_function = balance_fun,
declaration = declaration,
assignment = "Z",
sharp_hypothesis = 0,
data = dat, sims = 100
)
plot(out)
summary(out)
}
|
79d00f3c3bd9b5ec4a7e3a28e08ad069ba8c2e3e
|
41e48aee12f8aea01b8d9c15a43f3ec452077e16
|
/man/get_post_pred.Rd
|
affee72841fbf92fa08889d00313f6c0e06e5b24
|
[
"MIT"
] |
permissive
|
anthopolos/EHRMiss
|
f7347a3bc403273bca45f5e67b3f88250491f6b9
|
72bf3593de43643631423f78ea54fd52ba5c1ddf
|
refs/heads/main
| 2022-12-29T06:56:28.518818
| 2020-10-06T10:52:35
| 2020-10-06T11:11:24
| 301,695,997
| 0
| 1
|
MIT
| 2023-09-09T22:50:10
| 2020-10-06T10:54:11
|
R
|
UTF-8
|
R
| false
| true
| 718
|
rd
|
get_post_pred.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_post_pred.R
\name{get_post_pred}
\alias{get_post_pred}
\title{Get posterior predictive draws of the completed data at each iteration.}
\usage{
get_post_pred(J, priorPik, bSub, betaObs, Sigma, XRe, XObs, subjectIDY)
}
\value{
Draws from the posterior predictive distribution at the current iteration.
}
\description{
The reference for the approach is from algorithm 3.7 on page 91 of Fruhwirth-Schnatter, S. (2006) Finite Mixture and Markov Switching Models. Springer Science & Business Media, New York. The latent classes are redrawn using the probabilities of latent class membership obtained from the latent class membership model.
}
|
48488eae61d8862c919fe8a43134a8029c30b489
|
b1c2a03915fb21e4bd3f3bf78c4035b07a9abe1d
|
/Transforming data.R
|
765881eccc2d9df5edda88997f8ac6efa09e3f91
|
[
"MIT"
] |
permissive
|
musawenkosikhulu/Manually-create-a-data-frame
|
ff2630b9465b8a30292d86b2f1410962b5e44ce4
|
b5800b98018ae3b661256aed4cc91b2ffbc14a16
|
refs/heads/main
| 2023-06-07T12:19:42.070549
| 2021-06-25T07:23:58
| 2021-06-25T07:23:58
| 380,158,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,195
|
r
|
Transforming data.R
|
## Build a small employee data frame from scratch.
id <- c(1:10)
name <- c("John Mendes", "Rob Stewart", "Rachel Abrahamson", "Christy Hickman", "Johnson Harper", "Candace Miller", "Carlson Landy", "Pansy Jordan", "Darius Berry", "Claudia Garcia")
job_title <- c("Professional", "Programmer", "Management", "Clerical", "Developer", "Programmer", "Management", "Clerical", "Developer", "Programmer")
employee <- data.frame(id, name, job_title)
## Print the data frame to inspect it.
print(employee)
## The `name` column holds "first last" in one field. tidyr::separate()
## splits it: arguments are the data frame, the column to split, the new
## column names via `into = c(...)`, and the separator (`sep = " "`).
separate(employee, name, into = c("first_name","last_name"), sep = " ")
## unite() is the inverse: data frame, new column name (quoted), the columns
## to combine, and the separator placed between them.
## NOTE(review): `employee` was never reassigned after separate(), so it has
## no last_name/first_name columns here -- this call likely errors; confirm
## whether the separate() result was meant to be saved back to `employee`.
unite(employee, 'name',last_name, first_name, sep = ' ')
|
56a66a61bcd4d04f75a842985195e2e32b5c7701
|
d169d886be8108c4c034c8eff53d87f943594b9e
|
/man/betaFD.Rd
|
eefcdc6b24c146fc1a3f8393628a59ad8cda36fa
|
[] |
no_license
|
ibartomeus/fundiv
|
3bc70e767a100c3e7a8e4f388c78840e905a846c
|
23335e30c771e0fc0b4d27e27a8be01ed2423146
|
refs/heads/master
| 2021-01-21T12:52:59.988727
| 2018-10-28T20:45:44
| 2018-10-28T20:45:44
| 13,769,749
| 13
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,336
|
rd
|
betaFD.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/betaFD.R
\name{betaFD}
\alias{betaFD}
\title{Functional betadiversity}
\usage{
betaFD(c1, c2, S, Tree = NULL)
}
\arguments{
\item{c1}{vector containing the species names in the first community. NA not tolerated. In addition,
the species labels in c1 and S must be identical. \code{}}
\item{c2}{vector containing the species names in the first community. NA not tolerated. In addition,
the species labels in c2 and S must be identical.\code{}}
\item{S}{matrix or data frame of functional traits. Traits can be numeric, ordered,
or factor. NAs are tolerated. All species in c1 and c2 should be present with no extra species.
Species should be ordered alphabetically.\code{}}
}
\value{
Btot Total Betadiversity
B_3 Beta diversity due to replacement
Brich Beta diversity due to richness differences
quality the output will print the quality of the dendrogram representation.
clustering performance is assessed by the correlation with the cophenetic distance
}
\description{
Calculate functional trait beta diversity for two communities using Petchey and Gaston 2002 index
and Carvalho et al. 2012 decomposition.
}
\examples{
ex1 <- betaFD(c1 = c("sp3", "sp2", "sp1", "sp4", "sp5"), c2 = c("sp6", "sp7", "sp8", "sp4", "sp5"), S = dummy$trait)
ex1
}
|
877b599908a94601fd377c23ddede5951cbc5428
|
69fbcf0e3a7c3293e077b654c0438fcaf5c34836
|
/man/gs_data_dir_local.Rd
|
b88328f801e7bf4c2dbbd149e0c2d810c00cedd1
|
[] |
no_license
|
dfalbel/cloudml
|
539c38d4149ac36e87a0071494d24971ef9e11b2
|
70b9449a87dc1ed62885e9eea487a1d8e941f16f
|
refs/heads/master
| 2022-01-11T23:30:01.319252
| 2019-05-13T15:14:59
| 2019-05-13T15:14:59
| 118,812,235
| 1
| 0
| null | 2018-01-24T19:33:09
| 2018-01-24T19:33:08
| null |
UTF-8
|
R
| false
| true
| 1,331
|
rd
|
gs_data_dir_local.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gcloud-storage.R
\name{gs_data_dir_local}
\alias{gs_data_dir_local}
\title{Get a local path to the contents of Google Storage bucket}
\usage{
gs_data_dir_local(url, local_dir = "gs", echo = FALSE)
}
\arguments{
\item{url}{Google Storage bucket URL (e.g. \code{gs://<your-bucket>}).}
\item{local_dir}{Local directory to synchronize Google Storage bucket(s) to.}
\item{echo}{Echo command output to console.}
}
\value{
Local path to contents of bucket.
}
\description{
Provides a local filesystem interface to Google Storage buckets. Many
package functions accept only local filesystem paths as input (rather than
gs:// URLs). For these cases the \code{gcloud_path()} function will synchronize
gs:// buckets to the local filesystem and provide a local path interface
to their contents.
}
\details{
If you pass a local path as the \code{url} it will be returned
unmodified. This allows you to for example use a training flag for the
location of data which points to a local directory during
development and a Google Cloud bucket during cloud training.
}
\note{
For APIs that accept gs:// URLs directly (e.g. TensorFlow datasets)
you should use the \code{\link[=gs_data_dir]{gs_data_dir()}} function.
}
\seealso{
\code{\link[=gs_data_dir]{gs_data_dir()}}
}
|
e223b2ec40bba225427310d944965377d6363fa0
|
2855eef035919e3f2c7d28e1d6d330c49ca5547a
|
/tests/testthat/test_MODIStsp_01.R
|
67a3e243c3f1772b33c9c4cfdbc890ad1695417c
|
[] |
no_license
|
cran/MODIStsp
|
57c72091c8b91d71d08de8186c3ddd0ae55b0cd4
|
2b12d6ee9c9b61f335d49c45bfaf515f2f7ff25d
|
refs/heads/master
| 2023-06-22T23:23:42.374311
| 2023-06-12T08:20:05
| 2023-06-12T08:20:05
| 88,372,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,199
|
r
|
test_MODIStsp_01.R
|
# Regression test for MODIStsp's basic processing pipeline: two bands plus one
# quality indicator from a local MOD11A2 HDF file, output as GeoTiff.
message("MODIStsp Test 1: Basic processing on bands and quality
indicators")
test_that(
  "Tests on MODIStsp", {
    # skip("Skip tests - since they rely on download they are only run locally")
    skip_on_cran()
    # skip_on_travis()
    # MODIS inputs are HDF4; skip if the local GDAL build cannot read them.
    skip_if(!"HDF4" %in% sf::st_drivers("raster")$name)

    ### Test 1: test of the basic operations of MODIStsp. ####
    # The test processes two bands and extracts one quality indicator from a
    # single local hdf file for MOD11A2 product without any
    # additional preprocessing operations. Output files are in GeoTiff format.
    MODIStsp(test = 1)
    out_files <- list.files(
      file.path(tempdir(), "MODIStsp/Surf_Temp_8Days_GridSin_v6"),
      pattern = "\\.tif$", recursive = TRUE, full.names = TRUE)
    file_sizes <- file.info(out_files)$size
    # check that size of files file resulting from test run are equal to those
    # obtained with a "working" MODIStsp version
    # momentarly disable - TODO reenable when TRAVIS gets to GDAL 2.3
    # expect_equal(file_sizes, c(80662, 80662, 40908, 40908))

    # Mean cell values must match those produced by a known-good MODIStsp
    # version. vapply (rather than sapply/unlist(lapply)) guarantees a plain
    # numeric vector even for empty input.
    means <- vapply(
      out_files,
      FUN = function(x) {
        mean(raster::getValues(suppressWarnings(raster::raster(x))),
             na.rm = TRUE)
      },
      FUN.VALUE = numeric(1),
      USE.NAMES = FALSE
    )
    expect_equal(means, c(13341.450786, 13266.374624, 2.843336, 2.824311),
                 tolerance = 0.001, scale = 1)

    # The NoData value written in the GeoTiff metadata must be unchanged (0).
    r <- sf::gdal_utils("info", out_files[1], quiet = TRUE)
    r <- unlist(strsplit(r, "\n"))
    r <- r[grep("NoData", r)]
    r <- as.numeric(strsplit(r, "NoData Value=")[[1]][2])
    expect_equal(r, 0)
    unlink(out_files)

    ### Test 1a: Nodata values are properly changed on full tiles ####
    message("Nodata values are properly changed on full tiles ")
    MODIStsp(test = "01a")
    r <- sf::gdal_utils("info", out_files[1], quiet = TRUE)
    expect_equal(substring(strsplit(r, "NoData Value=")[[1]][2], 1, 5),
                 "65535")
  })
|
d9f73393168bcfbf81df726aa650f58e9cbdc28b
|
9a7714ceea4c5cf1fa7e19023d3fcd8d02f9b6e1
|
/R/yeast.r
|
b914b3db6b73b851db04888d532748a9c74982fe
|
[
"MIT"
] |
permissive
|
Bohdan-Khomtchouk/gtf
|
d9d7b84f670701bf5a05afabb2c75b7628dd88e1
|
b69254e12bafc3599d42fdeaf93b8f546e38908f
|
refs/heads/master
| 2021-01-09T20:52:49.677153
| 2017-07-17T02:13:33
| 2017-07-17T02:13:33
| 58,662,564
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
r
|
yeast.r
|
# NOTE(review): the 9 variables are not itemized in \describe{}; presumably
# they are the standard GTF columns described at the linked URL -- confirm and
# list them explicitly.
#' Saccharomyces cerevisiae (yeast) gene transfer format file data
#'
#' A dataset containing annotations of both coding and non-coding genes
#' of Saccharomyces_cerevisiae.R64-1-1.84.gtf
#'
#' @docType data
#'
#' @usage data(yeast)
#'
#' @format A data frame with 42071 rows and 9 variables:
#' \describe{
#'   \url{http://www.gencodegenes.org/gencodeformat.html}
#' }
#'
#' @keywords datasets
"yeast"
|
0a6480b1adff7970cf3b38aaf3fbc55dbc2d523d
|
186a582414c0ba0119abd77f34fae8708e233d27
|
/05_rshiny_footballkicks/server.R
|
a2ae43947cfe832911fbb2d76daa68820122f604
|
[] |
no_license
|
chencong853/programming_for_analytics_coursework
|
5145b79a700b608e8e87d2e47b0b7bc0c97c5e45
|
2a6e584683790bb90ef32c0f97c362969863090c
|
refs/heads/master
| 2020-04-23T14:42:22.692502
| 2019-12-05T00:06:48
| 2019-12-05T00:06:48
| 171,240,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,002
|
r
|
server.R
|
# Assignment 04
# G43265951
# Cong Chen

library(shiny)
library(ggplot2)

# Server logic: show a table of the uploaded football-kick data filtered by
# practice/match type and goal outcome, and draw a histogram of kick distance.
shinyServer(function(input, output) {

  # Shared reactive: read the uploaded CSV once per change and apply the
  # user-selected filters. Returns NULL until a file has been uploaded,
  # which keeps both outputs blank without erroring.
  filtered_data <- reactive({
    infile <- input$file
    # Make sure that no error happens when infile is empty
    if (is.null(infile))
      return(NULL)
    # Import data file
    df <- read.csv(infile$datapath, header = TRUE, sep = ",")
    # Filter data based on selection
    subset(df, df$practiceormatch == input$PorM & df$goal == input$Goal)
  })

  # Table of the filtered kicks.
  output$table <- renderTable({
    filtered_data()
  })

  # Histogram of kick distance in yards, one bar per yard.
  output$plot <- renderPlot({
    df_filter <- filtered_data()
    if (is.null(df_filter))
      return(NULL)
    # Draw the histogram
    ggplot(df_filter, aes(x = yards)) + geom_histogram(binwidth = 1, color = "white")
  })
})
|
21cc94d352a1913760dedbb193d21a36407b28ee
|
e4b60669e0d0b8e1e945dd53dce706e8657ed23c
|
/Miyao-2002_replication/Policy_Effect.R
|
f3734e11a0d76424742cf6c3e3775be9f35cee44
|
[] |
no_license
|
yoshiki146/Applied_TimeSeries_Analysis
|
ee0b69243f1832b699f2e78cd5e73783700004b4
|
0399cd3d0376c068463989c267de4c0a106cc9ac
|
refs/heads/master
| 2021-04-30T14:26:35.502507
| 2018-09-14T07:42:32
| 2018-09-14T07:42:32
| 121,217,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,517
|
r
|
Policy_Effect.R
|
# In this project I try to reproduce Miyao(2002, Journal of Money) and update it using recent data
# NOTE(review): `rm(list = ls())` wipes the user's workspace; prefer running
# the script in a fresh R session instead.
rm(list = ls())
#### Import & preprocessing ####
library(tidyverse)
library(magrittr)
library(zoo)
# Call-rate series (Shift-JIS / cp932 encoded CSV): date, collateralised rate,
# uncollateralised rate.
call=read.csv('callrate.csv', fileEncoding = 'cp932',
              skip=1, na.strings=c('NA ',''),
              stringsAsFactors = F) %>%
  set_colnames(c('date','collaterised','uncolRate' ))
call$date=as.yearmon(call$date, '%Y/%m')
# Money stock series: M2+CD under two definitions, plus CD and M2 separately.
money=read.csv('money.csv', fileEncoding = 'cp932',
               skip=1, stringsAsFactors = F) %>%
  set_colnames(c('date','m2cd','m2cd.2','cd','m2'))
money$date=as.yearmon(money$date, '%Y/%m')
# Nikkei 225: keep only date (col 1) and the price column (col 6).
stock= read.csv('N225.csv', stringsAsFactors = F) %>%
  extract(c(1,6)) %>%
  set_colnames(c('date','stock'))
stock$date=as.yearmon(stock$date,'%Y-%m-%d')
# Industrial production index: the raw file is transposed (dates along the
# columns), so read, transpose, and coerce.
IP=as.data.frame(t(read.csv('ip.csv', fileEncoding = 'cp932',
                            header=F,skip=2, row.names = 2)[1,-c(1,2)]))
colnames(IP)= IP[2,]
IP=as.data.frame(t(read.csv('ip.csv', skip=2, fileEncoding = 'cp932',
                            header=F,row.names = 2)[1:2,-c(1,2)])) %>%
  set_colnames(c('date','IIP')) %>%
  set_rownames(NULL)
# Dates arrive as numbers like 197801.0; strip the ".0" before parsing.
IP$date= IP$date %>%
  gsub('\\.0', '' ,. ) %>%
  as.yearmon('%Y%m')
IP$IIP=as.numeric(as.character(IP$IIP))
# Older IIP vintage, then stack old + new vintages into one series.
IP2=read.csv('ip_past.csv', fileEncoding = 'cp932', skip=1)[-c(1:2),1:2] %>%
  set_colnames(c('date', 'IIP'))
IP2$date=as.yearmon(IP2$date, '%Y%m')
IIP=IP2 %>%
  full_join(IP)
# Merge the four series on date into a single data frame.
dat=reduce(list(call, money, stock, IIP), full_join)
rm(list = ls()[!ls() %in% c('dat')])
qplot(date,stock, data=dat, geom='line')

## untill 1998:4
# Miyao (2002) sample: rows 37:280 correspond to the original paper's window.
dat_miyao= dat[37:280,c(1:4,8:9)] %>%
  set_rownames(NULL)
# Splice the collateralised and uncollateralised call-rate series: shift the
# collateralised rate by the mean gap over the overlap, then prefer the
# uncollateralised rate where available.
ave_col= mean(dat_miyao$collaterised[!is.na(dat_miyao$uncolRate)])
ave_uncol= mean(dat_miyao$uncolRate, na.rm=T)
diff_coluncol=ave_uncol-ave_col
for (i in 1:nrow(dat_miyao)){
  if (is.na(dat_miyao$uncolRate[i])){
    dat_miyao$call[i]=dat_miyao$collaterised[i]+diff_coluncol
  } else {
    dat_miyao$call[i]=dat_miyao$uncolRate[i]
  }
}
qplot(date,stock,data=dat_miyao,geom='line')
# Log-levels scaled by 100 (standard for VAR in log-percentage terms).
dat_miyao_log=dat_miyao %>%
  mutate(money= log(m2cd)*100,
         stock= log(stock)*100) %>%
  dplyr::select(date,call,money, stock, IIP)
qplot(date,stock,data=dat_miyao_log,geom='line')

## full information
# Updated sample: same start, extended through the end of the data.
dat_update=dat[37:515,] %>%
  set_rownames(NULL)
ave_col_u=mean(dat_update$collaterised[!is.na(dat_update$uncolRate)], na.rm=T)
ave_uncol_u= mean(dat_update$uncolRate[!is.na(dat_update$collaterised)], na.rm=T)
diff_coluncol_u= ave_uncol_u - ave_col_u
for (i in 1:nrow(dat_update)){
  if (is.na(dat_update$uncolRate[i])) {
    dat_update$call[i]=dat_update$collaterised[i]+diff_coluncol_u
  } else{
    dat_update$call[i]=dat_update$uncolRate[i]
  }
}
# Splice the three money-stock vintages the same way: mean-gap adjustment over
# each overlap (m2cd -> m2cd.2 -> m2+cd).
ave_m2cd_11=mean(dat_update$m2cd[!is.na(dat_update$m2cd.2)], na.rm = T)
ave_m2cd_12=mean(dat_update$m2cd.2[!is.na(dat_update$m2cd)], na.rm=T)
diff_m2cd_1=ave_m2cd_12 - ave_m2cd_11
ave_m2cd_21= mean(dat_update$m2cd.2[!is.na(dat_update$m2)], na.rm = T)
ave_m2cd_22= mean((dat_update$m2[!is.na(dat_update$m2cd.2)]+dat_update$cd[!is.na(dat_update$m2cd.2)]), na.rm = T)
diff_m2cd_2=ave_m2cd_22 - ave_m2cd_21
for (i in 1:nrow(dat_update)) {
  if (is.na(dat_update$m2cd.2[i]) & is.na(dat_update$m2[i])) {
    dat_update$m2cd.2[i]=dat_update$m2cd[i]+diff_m2cd_1
  }
}
for (i in 1:nrow(dat_update)) {
  if (is.na(dat_update$m2[i])) {
    dat_update$money[i]=dat_update$m2cd.2[i]+diff_m2cd_2
  } else {
    dat_update$money[i]=dat_update$cd[i]+ dat_update$m2[i]
  }
}
qplot(date, stock, data=dat_update, geom='line')
# NOTE(review): here money is transformed as log(money*100), whereas the Miyao
# sample above uses log(m2cd)*100 -- these differ (log(100x) vs 100*log(x));
# confirm which is intended.
dat_update_log= dat_update %>%
  mutate(money=log(money*100),
         stock=log(stock)*100) %>%
  dplyr::select(date, call, money, stock, IIP)
qplot(date, money, data=dat_update_log, geom='line')
rm(list = ls()[!ls() %in% c('dat', 'dat_miyao', 'dat_miyao_log', 'dat_update', 'dat_update_log')])
#### Unit Root and cointegration ####
library(tseries)
# Toggle between the Miyao sample and the updated sample here.
dat_test=dat_miyao_log
# dat_test=dat_update_log
# adf.test in levels
for (i in 2:5){
  print(adf.test(dat_test[,i]))
}
adf.test(dat_update_log$money, k=12)
# create diff's
# First differences of each variable (d_ = difference, l_ = log return since
# the levels are already in logs); the leading NA row is dropped.
dat_miyao_return= dat_miyao_log %>%
  mutate(d_call=c(NA,diff(call)),
         l_money=c(NA,diff(money)),
         l_stock=c(NA,diff(stock)),
         d_IIP=c(NA,diff(IIP))) %>%
  dplyr::select(date, d_call, l_money, l_stock, d_IIP) %>%
  filter(!is.na(d_call))
qplot(date, l_stock, data=dat_miyao_return, geom='line')
write.csv(dat_miyao_return, 'dat_miyao.csv', row.names = F)
dat_update_return= dat_update_log %>%
  mutate(d_call=c(NA,diff(call)),
         l_money=c(NA,diff(money)),
         l_stock=c(NA,diff(stock)),
         d_IIP=c(NA,diff(IIP))) %>%
  dplyr::select(date, d_call, l_money, l_stock, d_IIP) %>%
  filter(!is.na(d_call))
qplot(date, l_stock, data=dat_update_return, geom='line')
write.csv(dat_update_return, 'dat_update.csv', row.names = F)
# adf.test in diff
dat_test=dat_miyao_return # dat_test= dat_update_return
adf.test(dat_test$call, k=1)
for (i in 2:5){
  print(adf.test(dat_test[-1,i], k=12))
}
# Sensitivity of the money-growth ADF test to the lag order.
for (i in 1:12){
  print(adf.test(dat_test$l_money, k=i))
}
# BIC-selected lag for the stock-return unit-root test.
bic=urca::ur.df(dat_test$l_stock,selectlags = 'BIC')
bic@lags
# Johansen test
library(urca)
summary(ca.jo(dat_test[2:5],type='eigen',K=6 ))
summary(ca.jo(dat_test[2:5],type='eigen',K=10))
summary(ca.jo(dat_test[2:5],type='eigen',K=12))
rm(list = ls()[!ls() %in% c('dat', 'dat_miyao_return', 'dat_update_return')])

### SVAR
# Structural VAR with a recursive (lower-triangular A, diagonal B) scheme,
# ordering: call rate, money, stock, IIP.
library(vars)
var=VAR(dat_miyao_return[2:5], p=12)
amat=matrix(c(1,NA,NA,NA,0,1,NA,NA,0,0,1,NA,0,0,0,1),nrow=4);amat
bmat=matrix(c(NA,0,0,0,0,NA,0,0,0,0,NA,0,0,0,0,NA), nrow=4);bmat
svar=SVAR(var, Amat=amat, Bmat = bmat)
irf=irf(svar, n.ahead=36)
plot(irf)

### VAR and AR forecast
data=dat_update_return
rm(list = ls()[!ls() %in% c('data')])
# Recursive forecast starting from 1998:5 (based on data from 1978:2 till 1998:4)
# VAR forecast
# One-step-ahead recursive forecasts; squared errors are accumulated.
# NOTE(review): vectors grow via append() inside the loop -- preallocating
# with vector("numeric", ...) would be faster but results are identical.
ind=as.integer(rownames(data[data$date==' 4 1998',]))
call_mse_var=c()
money_mse_var=c()
stock_mse_var=c()
iip_mse_var=c()
for (i in ind:(nrow(data)-1)){
  var=VAR(data[1:i,2:5], ic='AIC', lag.max = 12)
  pred=predict(var, n.ahead = 1)$fcst
  err_call=(pred$d_call[1]-data[i+1,2])^2; call_mse_var=append(call_mse_var, err_call)
  err_money=(pred$l_money[1]-data[i+1,3])^2; money_mse_var=append(money_mse_var, err_money)
  err_stock=(pred$l_stock[1]-data[i+1,4])^2; stock_mse_var=append(stock_mse_var, err_stock)
  err_iip=(pred$d_IIP[1]-data[i+1,5])^2; iip_mse_var=append(iip_mse_var, err_iip)
}
rm(list = ls()[!ls() %in% c('data','call_mse_var','stock_mse_var','money_mse_var','iip_mse_var')])
# AR forecast
# NOTE(review): attach() is discouraged -- column names become globals and can
# silently mask other objects; consider with()/data$col instead.
attach(data)
library(forecast)
ind=as.integer(rownames(data[data$date==' 4 1998',]))
call_mse_ar=c()
money_mse_ar=c()
stock_mse_ar=c()
iip_mse_ar=c()
for (i in ind:(nrow(data)-1)){
  ar_call=ar(d_call[1:i], order.max=12) # AIC in default
  fcst_c=forecast(ar_call,1)
  call_mse_ar=append(call_mse_ar, (fcst_c$mean[1]-d_call[i+1])^2)
  ar_money=ar(l_money[1:i], order.max=12)
  fcst_m= forecast(ar_money,1)
  money_mse_ar=append(money_mse_ar, (fcst_m$mean[1]-l_money[i+1])^2)
  ar_stock=ar(l_stock[1:i], order.max=12)
  fcst_s=forecast(ar_stock,1)
  stock_mse_ar=append(stock_mse_ar, (fcst_s$mean[1]-l_stock[i+1])^2)
  ar_iip=ar(d_IIP[1:i], order.max=12)
  # NOTE(review): the horizon here is `i`, whereas the other three series
  # forecast 1 step ahead -- this looks like a typo for forecast(ar_iip, 1);
  # confirm before changing.
  fcst_i=forecast(ar_iip,i)
  iip_mse_ar=append(iip_mse_ar, (fcst_i$mean[1]-d_IIP[i+1])^2)
}
rm(list = ls()[!ls() %in% c('data','call_mse_var','stock_mse_var','money_mse_var','iip_mse_var',
                            'call_mse_ar','stock_mse_ar','money_mse_ar','iip_mse_ar', 'ind')])
sum(iip_mse_ar)
qplot(money_mse_ar, money_mse_var, geom='point')
|
46165f360a5fbda77c0552ac1e07eb71599d44b2
|
7b82068433efacf8840c57e2c05b613dbe13d31c
|
/man/FactorizationMachinesPredictor.Rd
|
b3c07df9367a542124739b3bb6a246bbc37fb3fe
|
[
"Apache-2.0"
] |
permissive
|
OwenGarrity/sagemaker-r-sdk
|
d25f0d264dcddcb6e0fa248af22d47fc22c159ce
|
3598b789af41ed21bb0bf65bd1b4dfe1469673c9
|
refs/heads/master
| 2022-12-09T04:50:07.412057
| 2020-09-19T13:02:38
| 2020-09-19T13:02:38
| 285,834,692
| 0
| 0
|
NOASSERTION
| 2020-09-19T13:02:39
| 2020-08-07T13:23:16
|
R
|
UTF-8
|
R
| false
| true
| 4,622
|
rd
|
FactorizationMachinesPredictor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/amazon_factorization_machines.R
\name{FactorizationMachinesPredictor}
\alias{FactorizationMachinesPredictor}
\title{Performs binary-classification or regression prediction from input
vectors.}
\description{
The implementation of
:meth:`~sagemaker.predictor.Predictor.predict` in this
`Predictor` requires a numpy ``ndarray`` as input. The array should
contain the same number of columns as the feature-dimension of the data used
to fit the model this Predictor performs inference on.
:meth:`predict()` returns a list of
:class:`~sagemaker.amazon.record_pb2.Record` objects, one for each row in
the input ``ndarray``. The prediction is stored in the ``"score"`` key of
the ``Record.label`` field. Please refer to the formats details described:
https://docs.aws.amazon.com/sagemaker/latest/dg/fm-in-formats.html
}
\section{Super class}{
\code{\link[R6sagemaker:Predictor]{R6sagemaker::Predictor}} -> \code{FactorizationMachinesPredictor}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{FactorizationMachinesPredictor$new()}}
\item \href{#method-clone}{\code{FactorizationMachinesPredictor$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="delete_endpoint">}\href{../../R6sagemaker/html/Predictor.html#method-delete_endpoint}{\code{R6sagemaker::Predictor$delete_endpoint()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="delete_model">}\href{../../R6sagemaker/html/Predictor.html#method-delete_model}{\code{R6sagemaker::Predictor$delete_model()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="disable_data_capture">}\href{../../R6sagemaker/html/Predictor.html#method-disable_data_capture}{\code{R6sagemaker::Predictor$disable_data_capture()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="enable_data_capture">}\href{../../R6sagemaker/html/Predictor.html#method-enable_data_capture}{\code{R6sagemaker::Predictor$enable_data_capture()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="list_monitor">}\href{../../R6sagemaker/html/Predictor.html#method-list_monitor}{\code{R6sagemaker::Predictor$list_monitor()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="predict">}\href{../../R6sagemaker/html/Predictor.html#method-predict}{\code{R6sagemaker::Predictor$predict()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="print">}\href{../../R6sagemaker/html/Predictor.html#method-print}{\code{R6sagemaker::Predictor$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="R6sagemaker" data-topic="Predictor" data-id="update_data_capture_config">}\href{../../R6sagemaker/html/Predictor.html#method-update_data_capture_config}{\code{R6sagemaker::Predictor$update_data_capture_config()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Initialize FactorizationMachinesPredictor class
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FactorizationMachinesPredictor$new(endpoint_name, sagemaker_session = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{endpoint_name}}{(str): Name of the Amazon SageMaker endpoint to which
requests are sent.}
\item{\code{sagemaker_session}}{(sagemaker.session.Session): A SageMaker Session
object, used for SageMaker interactions (default: NULL). If not
specified, one is created using the default AWS configuration
chain.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FactorizationMachinesPredictor$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
56f790e401087a9eabac8ae92c065beeeed22abc
|
ef0c2298da42c0c5dd684544cf9eb4ad392d57a5
|
/workflow/scripts/arbigent_utils/mosaiclassifier_scripts/mosaiClassifier/haploAndGenoName.R
|
b37e20bbbda24b29fac41453473a0f21c9a798b2
|
[
"MIT"
] |
permissive
|
friendsofstrandseq/mosaicatcher-pipeline
|
2b4a78cea345c67fbce4b4b5b4d52fe860fe1bab
|
66cceada275347f2411dd1df039c91aad891f3f2
|
refs/heads/master
| 2023-08-31T00:22:05.838662
| 2023-08-22T14:17:41
| 2023-08-22T14:17:41
| 206,941,559
| 18
| 8
|
MIT
| 2023-09-13T21:26:15
| 2019-09-07T08:50:59
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,600
|
r
|
haploAndGenoName.R
|
#' returns the haplotype name
#'
#' Maps a 4-digit haplotype code to its human-readable name; any code not in
#' the lookup table is reported as "complex".
#'
#' @param hap.code The haplotype coding (e.g. "1010")
#' @author Maryam Ghareghani
#' @export
#'
get_hap_name <- function(hap.code)
{
  # Named lookup vector: haplotype code -> haplotype name.
  hap.lookup <- c("1010" = "ref_hom",                                        # ref
                  "0010" = "del_h1", "1000" = "del_h2", "0000" = "del_hom",  # del
                  "0110" = "inv_h1", "1001" = "inv_h2", "0101" = "inv_hom",  # inv
                  "2010" = "dup_h1", "1020" = "dup_h2", "2020" = "dup_hom",  # dup
                  "1110" = "idup_h1", "1011" = "idup_h2")                    # inv-dup
  hap.name <- unname(hap.lookup[hap.code])
  if (is.na(hap.name))
  {
    return("complex")
  }
  hap.name
}
#' returns the genotype code given the genotype name
#'
#' Maps a genotype name to its 4-digit genotype code; names not in the lookup
#' table are reported as "complex". Note that "hom_ref" and "false_del" share
#' the same code ("1010").
#'
#' @param geno.name genotype name (e.g. "het_del")
#' @author Maryam Ghareghani
#' @export
#'
geno_name_to_code <- function(geno.name)
{
  # Named lookup vector: genotype name -> genotype code.
  geno.lookup <- c("hom_ref" = "1010", "false_del" = "1010",
                   "het_del" = "0010", "hom_del" = "0000", # del
                   "het_inv" = "0110", "hom_inv" = "0101", # inv
                   "het_dup" = "2010", "hom_dup" = "2020", # dup
                   "inv_dup" = "1110")                     # inv-dup
  geno.code <- unname(geno.lookup[geno.name])
  if (is.na(geno.code))
  {
    return("complex")
  }
  geno.code
}
#' converts the haplotype name to the genotype name
#'
#' Collapses the per-haplotype suffixes "h1" and "h2" into the heterozygous
#' genotype label "het"; all other parts of the name are left untouched.
#'
#' @param hap.name The haplotype name (e.g. "del_h1")
#' @author Maryam Ghareghani
#' @export
#'
haplo_to_geno_name <- function(hap.name)
{
  # A single regex covers both "h1" and "h2"; vectorised over hap.name.
  gsub("h[12]", "het", hap.name)
}
# NOTE(review): this function is re-defined later in this file under the same
# name (the later version returns copy number), so this definition is shadowed
# when the file is sourced top to bottom -- one of the two should be renamed.
#' translates the haplotype code to the corresponding class of genotypes (normal, inv, CN loss, CN gain)
#'
#' @param hap.code character vector of 4-digit haplotype codes; each digit is
#'   coerced to an integer. Returns character(0) for empty input.
#' @return character vector with one of "ref", "inv", "loss", "gain" per code.
#' @author Maryam Ghareghani
#' @export
#'
haplo_code_to_geno_class <- function(hap.code)
{
  # Empty input -> empty classification.
  if (length(hap.code) < 1) return (character())
  # Split each code into its first four single characters, one column each
  # (V1..V4), then coerce every column to integer.
  dd = as.data.table(str_split(hap.code,"", simplify = T, n = 4))
  dd = dd[, lapply(.SD, as.integer)]
  # Digit sum != 2 means a copy-number change (<2 loss, >2 gain); for a
  # balanced total of 2, a non-zero 2nd or 4th digit marks an inversion.
  dd[, state := ifelse(V1+V2+V3+V4 != 2,
                       ifelse(V1+V2+V3+V4<2, "loss", "gain"),
                       ifelse(V2+V4>0, "inv", "ref") )]
  return(dd$state)
}
# NOTE(review): this redefines `haplo_code_to_geno_class` from above even
# though, per its title, it computes total copy number -- it was probably
# meant to have a different name (e.g. haplo_code_to_CN). As written it
# silently replaces the genotype-class version; confirm before renaming.
#' get CN from haplotype
#'
#' @param hap.code character vector of 4-digit haplotype codes;
#'   returns character(0) for empty input.
#' @return integer vector: the sum of the four digits of each code
#'   (total copy number).
#' @author Maryam Ghareghani
#' @export
#'
haplo_code_to_geno_class <- function(hap.code)
{
  # Empty input -> empty result.
  if (length(hap.code) < 1) return (character())
  # Split each code into its first four characters and coerce to integer.
  dd = as.data.table(str_split(hap.code,"", simplify = T, n = 4))
  dd = dd[, lapply(.SD, as.integer)]
  # Row-wise digit sum = copy number.
  dd[, CN:=V1+V2+V3+V4, by=1:nrow(dd)]
  return(dd$CN)
}
|
f622b0ffe9c55ff3d1b686ead740274ae75718c4
|
a8227fe6164abf0bd786044f4206dd3e2b978af3
|
/Retail Customer Association Analysis/Source Code/apriori.r
|
beb3c00bed8cc562d770173b6bf1fe651dcfa6d8
|
[] |
no_license
|
bhargavchintam/Data-Science-with-R
|
6ba41bb228366b09ef5406aec954658d62565111
|
3621c2957f1702c61c4d5defd957e82787a3c9c4
|
refs/heads/main
| 2023-03-07T14:26:42.039489
| 2021-02-25T05:06:42
| 2021-02-25T05:06:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
r
|
apriori.r
|
# Market-basket analysis of the groceries data set with the Apriori algorithm.
library(arules)

# Read the transactions (one basket per row, items comma-separated).
# NOTE(review): absolute Windows path -- consider a relative path.
transactions <- read.transactions("D:/Projects/Completed/1.5.MBA/groceries.csv", sep = ",")
summary(transactions)
inspect(transactions[1:2])

# Mine rules with at least 2 items, low support and high confidence.
# (The duplicate library(arules) call that was here has been removed.)
rules <- apriori(transactions,
                 parameter = list(support = 0.001,
                                  confidence = 0.8,
                                  minlen = 2))
rules
# Show the three rules with the highest lift.
inspect(head(sort(rules, by = "lift"), n = 3))
|
47eaa8c0fda2aded5cf989b79ba050849ecb6172
|
5919f7ceba992775d6c6df40521a453f978e0c1c
|
/R/acf_tidiers.R
|
21dcb147ca7dee6344f13dcea7a1f31d562e6d44
|
[] |
no_license
|
jayhesselberth/broom
|
2f9e61f356271c99e2acc2354ec862fc30d0cb2d
|
5f43b99ef048a8ddd88054123767d0879c853709
|
refs/heads/master
| 2021-01-13T16:18:01.089515
| 2016-01-05T16:20:49
| 2016-01-05T16:20:49
| 47,700,987
| 0
| 0
| null | 2015-12-09T15:44:48
| 2015-12-09T15:44:47
| null |
UTF-8
|
R
| false
| false
| 1,387
|
r
|
acf_tidiers.R
|
#' Tidying method for the acf function
#'
#' Tidy the output of \code{acf} and the related \code{pacf} and \code{ccf} functions.
#'
#' @name acf_tidiers
#'
#' @param x acf object
#' @param ... (not used)
#'
#' @return \code{data.frame} with columns
#' \item{lag}{lag values}
#'   \item{acf}{calculated correlation}
#'
#' @examples
#'
#' # acf
#' result <- acf(lh, plot=FALSE)
#' tidy(result)
#'
#' # ccf
#' result <- ccf(mdeaths, fdeaths, plot=FALSE)
#' tidy(result)
#'
#' # pacf
#' result <- pacf(lh, plot=FALSE)
#' tidy(result)
#'
#' # lag plot
#' library(ggplot2)
#' result <- tidy(acf(lh, plot=FALSE))
#' p <- ggplot(result, aes(x=lag, y=acf)) +
#' geom_bar(stat='identity', width=0.1) +
#' theme_bw()
#' p
#'
#' # with confidence intervals
#' conf.level <- 0.95
#' # from \code{plot.acf} method
#' len.data <- length(lh) # same as acf$n.used
#' conf.int <- qnorm((1 + conf.level) / 2) / sqrt(len.data)
#' p + geom_hline(yintercept = c(-conf.int, conf.int),
#' color='blue', linetype='dashed')
NULL
# Tidy method for objects of class "acf" (as returned by stats::acf);
# delegates to the shared helper process_cf below.
#' @rdname acf_tidiers
#'
#' @export
tidy.acf <- function(x, ...) {
    process_cf(x)
}

# Alias for partial-autocorrelation results -- the example above calls pacf();
# presumably those objects share the acf structure (TODO confirm class).
#' @rdname acf_tidiers
#'
#' @export
tidy.pcf <- tidy.acf

# Alias for cross-correlation results from ccf().
#' @rdname acf_tidiers
#'
#' @export
tidy.ccf <- tidy.acf
# Convert an acf/pacf/ccf result into a two-column data frame.
#
# Internal helper shared by the tidy.* methods above.
#
# Args:
#   x: an object of class "acf" carrying `lag` and `acf` components.
# Returns:
#   A data.frame with columns `lag` and `acf`.
process_cf <- function(x) {
    # The original wrapped this in with(x, ...), but both expressions already
    # reference x$ explicitly, so the with() was a no-op and has been removed.
    data.frame(lag = x$lag,
               acf = x$acf)
}
|
52768979644f97ce93fb9cb3a6d595b56ab2975d
|
ea025083ec6cd77206b17e24a83a8374d19047d9
|
/man/PlumberStatic.Rd
|
1a6311ea834d927cd8298f5ecec9590b06a5dce4
|
[] |
no_license
|
cran/plumber
|
0d5cad10b2901671fcb48b90c2872317d875fd15
|
f610ca3332c4902e5402544d7702be58b7bb671e
|
refs/heads/master
| 2022-09-14T21:44:41.054635
| 2022-09-06T18:10:02
| 2022-09-06T18:10:02
| 56,244,613
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 8,936
|
rd
|
PlumberStatic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plumber-static.R
\name{PlumberStatic}
\alias{PlumberStatic}
\title{Static file router}
\description{
Static file router
Static file router
}
\details{
Creates a router that is backed by a directory of files on disk.
}
\section{Super classes}{
\code{plumber::Hookable} -> \code{plumber::Plumber} -> \code{PlumberStatic}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-PlumberStatic-new}{\code{PlumberStatic$new()}}
\item \href{#method-PlumberStatic-print}{\code{PlumberStatic$print()}}
\item \href{#method-PlumberStatic-clone}{\code{PlumberStatic$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Hookable" data-id="registerHooks"><a href='../../plumber/html/Hookable.html#method-Hookable-registerHooks'><code>plumber::Hookable$registerHooks()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="addAssets"><a href='../../plumber/html/Plumber.html#method-Plumber-addAssets'><code>plumber::Plumber$addAssets()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="addEndpoint"><a href='../../plumber/html/Plumber.html#method-Plumber-addEndpoint'><code>plumber::Plumber$addEndpoint()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="addFilter"><a href='../../plumber/html/Plumber.html#method-Plumber-addFilter'><code>plumber::Plumber$addFilter()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="addGlobalProcessor"><a href='../../plumber/html/Plumber.html#method-Plumber-addGlobalProcessor'><code>plumber::Plumber$addGlobalProcessor()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="call"><a href='../../plumber/html/Plumber.html#method-Plumber-call'><code>plumber::Plumber$call()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="filter"><a href='../../plumber/html/Plumber.html#method-Plumber-filter'><code>plumber::Plumber$filter()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="getApiSpec"><a href='../../plumber/html/Plumber.html#method-Plumber-getApiSpec'><code>plumber::Plumber$getApiSpec()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="getDebug"><a href='../../plumber/html/Plumber.html#method-Plumber-getDebug'><code>plumber::Plumber$getDebug()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="handle"><a href='../../plumber/html/Plumber.html#method-Plumber-handle'><code>plumber::Plumber$handle()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="mount"><a href='../../plumber/html/Plumber.html#method-Plumber-mount'><code>plumber::Plumber$mount()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="onHeaders"><a href='../../plumber/html/Plumber.html#method-Plumber-onHeaders'><code>plumber::Plumber$onHeaders()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="onWSOpen"><a href='../../plumber/html/Plumber.html#method-Plumber-onWSOpen'><code>plumber::Plumber$onWSOpen()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="openAPIFile"><a href='../../plumber/html/Plumber.html#method-Plumber-openAPIFile'><code>plumber::Plumber$openAPIFile()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="registerHook"><a href='../../plumber/html/Plumber.html#method-Plumber-registerHook'><code>plumber::Plumber$registerHook()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="removeHandle"><a href='../../plumber/html/Plumber.html#method-Plumber-removeHandle'><code>plumber::Plumber$removeHandle()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="route"><a href='../../plumber/html/Plumber.html#method-Plumber-route'><code>plumber::Plumber$route()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="run"><a href='../../plumber/html/Plumber.html#method-Plumber-run'><code>plumber::Plumber$run()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="serve"><a href='../../plumber/html/Plumber.html#method-Plumber-serve'><code>plumber::Plumber$serve()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="set404Handler"><a href='../../plumber/html/Plumber.html#method-Plumber-set404Handler'><code>plumber::Plumber$set404Handler()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="setApiSpec"><a href='../../plumber/html/Plumber.html#method-Plumber-setApiSpec'><code>plumber::Plumber$setApiSpec()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="setDebug"><a href='../../plumber/html/Plumber.html#method-Plumber-setDebug'><code>plumber::Plumber$setDebug()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="setDocs"><a href='../../plumber/html/Plumber.html#method-Plumber-setDocs'><code>plumber::Plumber$setDocs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="setDocsCallback"><a href='../../plumber/html/Plumber.html#method-Plumber-setDocsCallback'><code>plumber::Plumber$setDocsCallback()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="setErrorHandler"><a href='../../plumber/html/Plumber.html#method-Plumber-setErrorHandler'><code>plumber::Plumber$setErrorHandler()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="setParsers"><a href='../../plumber/html/Plumber.html#method-Plumber-setParsers'><code>plumber::Plumber$setParsers()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="setSerializer"><a href='../../plumber/html/Plumber.html#method-Plumber-setSerializer'><code>plumber::Plumber$setSerializer()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="swaggerFile"><a href='../../plumber/html/Plumber.html#method-Plumber-swaggerFile'><code>plumber::Plumber$swaggerFile()</code></a></span></li>
<li><span class="pkg-link" data-pkg="plumber" data-topic="Plumber" data-id="unmount"><a href='../../plumber/html/Plumber.html#method-Plumber-unmount'><code>plumber::Plumber$unmount()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PlumberStatic-new"></a>}}
\if{latex}{\out{\hypertarget{method-PlumberStatic-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{PlumberStatic} router
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PlumberStatic$new(direc, options)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{direc}}{a path to an asset directory.}
\item{\code{options}}{options to be evaluated in the \code{PlumberStatic} router environment}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new \code{PlumberStatic} router
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PlumberStatic-print"></a>}}
\if{latex}{\out{\hypertarget{method-PlumberStatic-print}{}}}
\subsection{Method \code{print()}}{
Print representation of \code{PlumberStatic()} router.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PlumberStatic$print(prefix = "", topLevel = TRUE, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{prefix}}{a character string. Prefix to append to representation.}
\item{\code{topLevel}}{a logical value. When method executed on top level
router, set to \code{TRUE}.}
\item{\code{...}}{additional arguments for recursive calls}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A terminal friendly representation of a \code{PlumberStatic()} router.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PlumberStatic-clone"></a>}}
\if{latex}{\out{\hypertarget{method-PlumberStatic-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PlumberStatic$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
b0fe0e44dbb321e934174cf12409aa596abe8f34
|
c8483aa2b20715ede1ac67f978926a893364c1a4
|
/MakeBarGraph.R
|
2735456c8f3ffcd29037f6e6de2eb903f39c0ea6
|
[] |
no_license
|
rkchudha/BioHacks
|
19e138700c6107ae1fc8321f31f93e10ca40a725
|
2ca5c87b81e3d406f51343ae46e767e27579fccc
|
refs/heads/master
| 2021-09-09T17:37:10.078629
| 2018-03-18T16:03:13
| 2018-03-18T16:03:13
| 125,648,012
| 0
| 2
| null | 2018-03-17T23:37:11
| 2018-03-17T16:24:22
| null |
UTF-8
|
R
| false
| false
| 466
|
r
|
MakeBarGraph.R
|
# Draw a horizontal stacked bar chart of the number of genes associated with
# each cancer site, split into unfavourable vs favourable prognosis genes.
nums <- read.table("CancerCount.csv", sep = ",", header = TRUE)

# Two-row matrix (unfavourable stacked over favourable), one column per site.
counts <- rbind(nums$Unfavourable, nums$Favourable)

barplot(
  counts,
  horiz = TRUE,
  names.arg = nums$BodyPart,
  las = 2,
  main = "Cancer And Related Genes",
  xlab = "Number Of Related Genes",
  col = c("cornflowerblue", "cadetblue3"),
  legend = c("Unfavourable", "Favourable")
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.