blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
85d0d85610f25c8374f8b10204ee616956ccb312
60aee3139de2c74fee465fd27c5ae52355b90306
/code/deseq_to_function.R
9e5df8bf1fe581391936e8fdc16abeae43cf6fe1
[]
no_license
jflater/incubation_manuscript
2f9fc4a35a52f4537a60ffae73383e9b9fb40c87
950128b4e1e1d88c9733a260102882378007ae9e
refs/heads/master
2020-04-11T04:20:00.155974
2020-01-13T21:09:07
2020-01-13T21:09:07
161,508,762
1
1
null
2018-12-12T16:49:34
2018-12-12T15:37:50
R
UTF-8
R
false
false
7,571
r
deseq_to_function.R
# deseq_to_function.R
# Differential-abundance analysis of incubation 16S data with DESeq2:
# for each amendment (Alfalfa / Compost / Mix) vs. the Reference soil, in
# "early" (days 7-21) and "late" (days 35-97) response groups, find OTUs
# whose abundance changes significantly and plot their log2 fold changes.

library(phyloseq)
library(tidyverse)
library(DESeq2)

inc.physeq <- readRDS("data/RDS/incubation_physeq_Aug18.RDS")
# NOTE(review): read.tree() comes from the ape package; confirm ape is
# attached in this session (it is a dependency of phyloseq).
tree <- read.tree("data/tree.nwk")
inc.physeq <- merge_phyloseq(inc.physeq, tree)

# Rename treatments to more informative titles and derive C:N and inorganic N.
data <- data.frame(sample_data(inc.physeq)) %>%
  mutate(treatment = recode(treatment,
                            'Control' = 'Reference',
                            'CompAlfa' = 'Mix')) %>%
  mutate(C_N = C_flash / N_flash, Inorganic_N = NH3 + NO3) %>%
  mutate(TreatmentAndDay = paste(treatment, day))
rownames(data) <- data$i_id
sample_data(inc.physeq) <- data
sample_data(inc.physeq)$day <- as.factor(sample_data(inc.physeq)$day)
sample_data(inc.physeq)$treatment <- as.character(sample_data(inc.physeq)$treatment)

# Collapse sampling days into baseline / early / late response groups.
inc.physeq.data <- data.frame(sample_data(inc.physeq))
inc.physeq.data$response.group[inc.physeq.data$day == "0"] <- "baseline"
inc.physeq.data$response.group[inc.physeq.data$day %in% c("7", "14", "21")] <- "early"
inc.physeq.data$response.group[inc.physeq.data$day %in% c("35", "49", "97")] <- "late"
inc.physeq.data <- inc.physeq.data %>%
  mutate(Treatment_Response = paste(treatment, response.group, sep = '_'))
rownames(inc.physeq.data) <- data$i_id
sample_data(inc.physeq) <- inc.physeq.data

# Drop unclassified taxa that would clutter the fold-change plots.
no.unclass <- subset_taxa(inc.physeq, !Phylum == "Bacteria_unclassified")
no.unclass <- subset_taxa(no.unclass, !Genus == "Gp6_unclassified")
inc.physeq <- no.unclass

# Contrast two response groups in a fitted DESeq2 model and return the table
# of OTUs significant at padj < 0.01, annotated with their taxonomy.
#   DDS        - DESeqDataSet already run through DESeq()
#   choice1/2  - levels of "response.group" to contrast (choice1 vs choice2)
#   phy.object - phyloseq object supplying tax_table() for annotation
who_diff_day <- function(DDS, choice1, choice2, phy.object) {
  res = results(DDS, contrast = c("response.group", choice1, choice2),
                cooksCutoff = FALSE)
  # plotCounts(AlfalfaDDS, gene="OTU_311", intgroup="day")
  # Use the line above to check whether an OTU is increasing or decreasing
  # depending on the order of the contrast.
  alpha = 0.01
  # alpha = 0.1
  sigtab = res[which(res$padj < alpha), ]
  sigtab = cbind(as(sigtab, "data.frame"),
                 as(tax_table(phy.object)[rownames(sigtab), ], "matrix"))
  theme_set(theme_bw())
  scale_fill_discrete <- function(palname = "Set1", ...) {
    scale_fill_brewer(palette = palname, ...)
  }
  # Order Phylum and Genus factor levels by their maximum fold change so the
  # downstream plots sort from most- to least-responsive taxa.
  x = tapply(sigtab$log2FoldChange, sigtab$Phylum, function(x) max(x))
  x = sort(x, TRUE)
  sigtab$Phylum = factor(as.character(sigtab$Phylum), levels = names(x))
  x = tapply(sigtab$log2FoldChange, sigtab$Genus, function(x) max(x))
  x = sort(x, TRUE)
  sigtab$Genus = factor(as.character(sigtab$Genus), levels = names(x))
  return(sigtab)
}

# Dot plot of log2 fold change by genus (colored by phylum) for OTUs whose
# fold change is at least 2; t1 is the plot title.
log_plot <- function(sigtab, t1) {
  sigtab <- sigtab %>%
    rownames_to_column(var = "OTU") %>%
    filter(log2FoldChange >= 2)
  ggplot(sigtab, aes(x = Genus, y = log2FoldChange, color = Phylum)) +
    geom_point(size = 2) +
    coord_flip() +
    ggtitle(t1)
}

################# Alfalfa vs Reference #################
alf.physeq <- subset_samples(inc.physeq,
                             Treatment_Response %in% c("Alfalfa_early", "Reference_early")) %>%
  filter_taxa(function(x) sum(x) >= 3, TRUE)
log.plot.early.alf <- alf.physeq %>%
  phyloseq_to_deseq2( ~ treatment) %>%
  DESeq(test = "Wald", fitType = "local") %>%
  who_diff_day("Alfalfa", "Reference", alf.physeq) %>%
  log_plot("Alfalfa OTUS in early group that are significantly changing compared to reference early")
log.plot.early.alf
# BUG FIX: the original called plot(plot) on an undefined object; print the
# ggplot that was just built. (Output path kept byte-identical, even though
# it lacks a .png extension.)
png("Figures/log.plot.earlyvref.alf", height = 5, width = 6, units = 'in', res = 300)
print(log.plot.early.alf)
dev.off()

alf.late.physeq <- subset_samples(inc.physeq,
                                  Treatment_Response %in% c("Alfalfa_late", "Reference_late")) %>%
  filter_taxa(function(x) sum(x) >= 3, TRUE)
log.plot.late.alf <- alf.late.physeq %>%
  phyloseq_to_deseq2( ~ treatment) %>%
  DESeq(test = "Wald", fitType = "local") %>%
  who_diff_day("Alfalfa", "Reference", alf.late.physeq) %>%
  log_plot("Alfalfa OTUS in late group that are significantly changing compared to reference late group")
log.plot.late.alf

early.alf.otus <- log.plot.early.alf$data %>%
  rownames_to_column() %>%
  select(OTU, Phylum, Genus, log2FoldChange) %>%
  filter(log2FoldChange >= 2) %>%
  arrange(desc(log2FoldChange))
early.alf.otus
saveRDS(early.alf.otus, "data/early.alfvref.otus.rds")

late.alf.otus <- log.plot.late.alf$data %>%
  rownames_to_column() %>%
  select(OTU, Phylum, Genus, log2FoldChange) %>%
  filter(log2FoldChange >= 2)
late.alf.otus
saveRDS(late.alf.otus, "data/late.alfvref.otus.rds")

################# Compost vs Reference #################
comp.physeq <- subset_samples(inc.physeq,
                              Treatment_Response %in% c("Compost_early", "Reference_early")) %>%
  filter_taxa(function(x) sum(x) >= 3, TRUE) %>%
  tax_glom(taxrank = "Genus")
log.plot.early.comp <- comp.physeq %>%
  phyloseq_to_deseq2( ~ treatment) %>%
  DESeq(test = "Wald", fitType = "local") %>%
  who_diff_day("Compost", "Reference", comp.physeq) %>%
  log_plot("Compost OTUS in early group that are significantly changing compared to reference early")
log.plot.early.comp
# BUG FIX: same undefined-object plot(plot) call as above.
png("Figures/log.plot.earlyvref.comp", height = 5, width = 6, units = 'in', res = 300)
print(log.plot.early.comp)
dev.off()

comp.late.physeq <- subset_samples(inc.physeq,
                                   Treatment_Response %in% c("Compost_late", "Reference_late")) %>%
  filter_taxa(function(x) sum(x) >= 3, TRUE) %>%
  tax_glom(taxrank = "Genus")
log.plot.late.comp <- comp.late.physeq %>%
  phyloseq_to_deseq2( ~ treatment) %>%
  DESeq(test = "Wald", fitType = "local") %>%
  who_diff_day("Compost", "Reference", comp.late.physeq) %>%
  log_plot("Compost OTUS in late group that are significantly changing compared to late reference group")
log.plot.late.comp

early.comp.otus <- log.plot.early.comp$data %>%
  rownames_to_column() %>%
  select(OTU, Phylum, Genus, log2FoldChange) %>%
  filter(log2FoldChange >= 2) %>%
  arrange(desc(log2FoldChange))
early.comp.otus
saveRDS(early.comp.otus, "data/early.compvref.otus.rds")

late.comp.otus <- log.plot.late.comp$data %>%
  rownames_to_column() %>%
  select(OTU, Phylum, Genus, log2FoldChange) %>%
  filter(log2FoldChange >= 2)
late.comp.otus
saveRDS(late.comp.otus, "data/late.compvref.otus.rds")

################# Mix vs Reference #################
mix.physeq <- subset_samples(inc.physeq,
                             Treatment_Response %in% c("Mix_early", "Reference_early")) %>%
  filter_taxa(function(x) sum(x) >= 3, TRUE) %>%
  tax_glom(taxrank = "Genus")
log.plot.early.mix <- mix.physeq %>%
  phyloseq_to_deseq2( ~ treatment) %>%
  DESeq(test = "Wald", fitType = "local") %>%
  who_diff_day("Mix", "Reference", mix.physeq) %>%
  log_plot("Mix OTUS in early group that are significantly changing compared to early reference")
log.plot.early.mix
# BUG FIX: same undefined-object plot(plot) call as above.
png("Figures/log.plot.earlyvref.mix", height = 5, width = 6, units = 'in', res = 300)
print(log.plot.early.mix)
dev.off()

mix.late.physeq <- subset_samples(inc.physeq,
                                  Treatment_Response %in% c("Mix_late", "Reference_late")) %>%
  filter_taxa(function(x) sum(x) >= 3, TRUE) %>%
  tax_glom(taxrank = "Genus")
log.plot.late.mix <- mix.late.physeq %>%
  phyloseq_to_deseq2( ~ treatment) %>%
  DESeq(test = "Wald", fitType = "local") %>%
  who_diff_day("Mix", "Reference", mix.late.physeq) %>%
  log_plot("Mix OTUS in late group that are significantly changing compared to late reference")
log.plot.late.mix

early.mix.otus <- log.plot.early.mix$data %>%
  rownames_to_column() %>%
  select(OTU, Phylum, Genus, log2FoldChange) %>%
  filter(log2FoldChange >= 2) %>%
  arrange(desc(log2FoldChange))
early.mix.otus
saveRDS(early.mix.otus, "data/early.mixvref.otus.rds")

late.mix.otus <- log.plot.late.mix$data %>%
  rownames_to_column() %>%
  select(OTU, Phylum, Genus, log2FoldChange) %>%
  filter(log2FoldChange >= 2)
late.mix.otus
saveRDS(late.mix.otus, "data/late.mixvref.otus.rds")
a978df24cea4bc9efaaa332dd92e95f2518e09cf
92e828aeed0eb4203cd1890fbdcee430d6cc9773
/src/linear_regression.R
3202120702f6cfab9bcb5c83c5f6f1c76fb3d8fb
[ "MIT" ]
permissive
dy-lin/stat540-project
46d9ee4f4638dbe933bdb607f3f8232e872a6847
20f040d9399ab5b9df0341b317e0f22eefafe5e8
refs/heads/master
2022-11-18T03:27:53.049420
2020-07-19T22:47:39
2020-07-19T22:47:39
259,114,940
0
1
null
null
null
null
UTF-8
R
false
false
2,467
r
linear_regression.R
# linear_regression.R
# author: Almas K.
# date:   2020-04-02
# Linear regression (limma) on methylation M-values: primary vs metastatic
# cancer and age (with interaction), plus CpG probe annotation from the
# EPIC manifest and the 450K nearest-gene annotation.

## Libraries
library(tidyverse)
library(reshape2)
library(stringr)
library(here)
library(lumi)
library(limma)
library(FDb.InfiniumMethylation.hg19)

## Import data
male_metadata <- read_csv(here("data", "processed_data", "male_metadata.csv"))
data_limma <- readRDS(here("data", "raw_data", "data_for_limma.rds"))
tsv_file <- read_tsv(here("data", "raw_data", "EPIC.hg38.manifest.tsv.gz"))  # annotation file

## Convert betas to M-values
data_mval <- beta2m(data_limma)

## Edit metadata
male_metadata$cancer <- as.factor(male_metadata$cancer)
male_metadata$cancer_type <- as.factor(male_metadata$cancer_type)

## Custom annotation function for limma: join chromosome and gene names from
## the EPIC manifest onto a topTable() result, matching by probe ID (the
## topTable row names).
genes_annotator <- function(t_tab) {
  annotated <- tsv_file %>%
    filter(probeID %in% rownames(t_tab)) %>%
    dplyr::select(CpG_chrm, probeID, gene, gene_HGNC)
  t_table_joined <- t_tab %>%
    mutate(probeID = rownames(t_tab)) %>%
    left_join(., annotated, by = "probeID")
  t_table_joined
}

## Limma using primary vs metastatic cancer and age
d_matrix2 <- model.matrix(~cancer_type * age, male_metadata)
lm_fit2 <- lmFit(data_mval, d_matrix2)
eBayes2 <- eBayes(lm_fit2)

## Annotate limma results
v2_t_table <- topTable(eBayes2, coef = "cancer_typeprimary")  ## primary genes coefficient
v2_t_table <- genes_annotator(v2_t_table)
knitr::kable(v2_t_table)
v2_t_table2 <- topTable(eBayes2, coef = "cancer_typeprimary:age")
v2_t_table2 <- genes_annotator(v2_t_table2)
knitr::kable(v2_t_table2)
v2_t_table3 <- topTable(eBayes2, coef = "age")
v2_t_table3 <- genes_annotator(v2_t_table3)
knitr::kable(v2_t_table3)

## Look for missing gene names in v2_t_table using the 450K annotation
hm450 <- getPlatform(platform = 'HM450', genome = 'hg19')
annot450 <- FDb.InfiniumMethylation.hg19::getNearestGene(hm450)
annot450 <- annot450 %>% mutate(probeID = rownames(annot450))
NA_genes <- v2_t_table %>% filter(is.na(gene))
found_gene <- annot450 %>% filter(probeID %in% (NA_genes$probeID))  ## only found label for one of them
# NOTE(review): if_else() recycles found_gene$nearestGeneSymbol across rows;
# this is only safe while found_gene has exactly one row — confirm before
# rerunning on new data.
v2_t_table <- v2_t_table %>%
  mutate(gene = (if_else(probeID %in% found_gene$probeID,
                         found_gene$nearestGeneSymbol, gene)))

## Save the CSV results for the annotated top-table genes
write_csv(v2_t_table, here("results", "final", "primary_topGenes_limma2.csv"))
write_csv(v2_t_table3, here("results", "revised", "age_top_genes_limma2.csv"))
write_csv(v2_t_table2, here("results", "revised", "ageandprimary_top_genes_limma2.csv"))
c9cf5cff4048d919256b4a0e40508cc4ecc0052e
b4fa3f92a666972e8147eaccc268e775c983df70
/run_analysis.R
e81bded4ca70c9ca75c3c3c6fa0a252509cf9450
[]
no_license
mdon89/Cleaning_Data_Quiz_4
b9bc58deb7301476aed3bc739f041c70b6c708cd
a93831c6ea938f823b0d5084144fec282e7af352
refs/heads/master
2021-04-15T17:13:12.353786
2018-03-24T00:10:33
2018-03-24T00:10:33
126,549,024
0
0
null
null
null
null
UTF-8
R
false
false
2,345
r
run_analysis.R
# run_analysis.R
# Tidy the UCI HAR dataset: merge the test and train sets, keep the mean and
# standard-deviation measurements, label activities, and produce one average
# per subject/activity for each variable.

# BUG FIX: %>% / group_by() / summarise_each() were used without loading dplyr.
library(dplyr)

# Set working directory
# BUG FIX: the original path literal was missing its closing quote.
setwd("C:/Users/mdon/datasciencecoursera/Cleaning data quiz 4/UCI HAR Dataset")

# List file names (subject/X/y files for test and train, plus features)
filenames <- list.files()[c(5, 6, 9, 10, 11, 12)]

# Read in all documents and collect them in a named list
List <- lapply(filenames, read.table, fill = TRUE)

# Assign names to data frames (file name minus the .txt suffix)
newnames <- gsub(".txt", "", filenames)
names(List) <- newnames

# Unlist List into the global environment (creates subject_test, X_test, ...)
list2env(List, .GlobalEnv)

# Merge datasets with cbind for data frames with the same number of rows
Mergeddata <- cbind(subject_test, y_test, X_test)
Mergeddatatrain <- cbind(subject_train, y_train, X_train)

# rbind the two data frames above to give one data source
# BUG FIX: "Meregeddatatrain" was a typo for the Mergeddatatrain object.
df1 <- rbind(Mergeddata, Mergeddatatrain)

# Change the first 2 column names
colnames(df1)[1] <- "Subject"
colnames(df1)[2] <- "Y"

# Add column headings to the rest of df1 from the features file
df1columnheadings <- features$V2
# Convert column headings to character
df1columnheadings <- as.character(df1columnheadings)
colnames(df1)[3:563] <- df1columnheadings

# Find which columns are mean or standard deviation, i.e. whose heading
# contains the literal text mean() or std()
std <- grep("std()", colnames(df1), fixed = TRUE)
mean <- grep("mean()", colnames(df1), fixed = TRUE)

# Subset df1 on the mean and std columns (plus Subject and Y)
meanstddf1 <- df1[, c(1, 2, mean, std)]

# Read in the activity labels document
activity_labels <- read.table("activity_labels.txt")

# Look up Y values and match them to activities to create a vector of
# activity names, then substitute the numbers with the names
newlabel <- as.character(sapply(meanstddf1$Y,
                                function(x) activity_labels$V2[match(x, activity_labels$V1)]))
meanstddf1$Y <- newlabel

# Replace column name Y with "Activity_Names"
colnames(meanstddf1)[2] <- "Activity_Names"

# Rename column variables so they are more understandable:
# replace leading t with time and leading f with freq
renamet <- gsub("^t", "time", colnames(meanstddf1)[3:68])
renametf <- gsub("^f", "freq", renamet)
colnames(meanstddf1)[3:68] <- renametf

# Create a 2nd, tidy data frame: one mean per subject and activity for each
# variable.
# BUG FIX: removed a stray console-continuation "+" that made this line a
# syntax error when the script was sourced.
# NOTE(review): summarise_each()/funs() are deprecated in current dplyr;
# migrate to summarise(across(everything(), mean)) when upgrading.
res <- meanstddf1 %>%
  group_by(Subject, Activity_Names) %>%
  summarise_each(funs(mean(.)))
593ab22daa252c044f13958653419661c3ebd112
fbff64c4ca06554868cbd7f74fa2c56e9884896b
/cachematrix.R
05487019eb2724a6d18accaacd5244de35df4541
[]
no_license
ssuzuki323/ProgrammingAssignment2
be683ef02ab9454f74427f36368a54e3ec785743
a80a63079ad0304ab6c3643b49424e30ca58abee
refs/heads/master
2021-01-22T17:15:06.286648
2015-04-12T22:16:38
2015-04-12T22:16:38
33,834,894
0
0
null
2015-04-12T21:38:00
2015-04-12T21:38:00
null
UTF-8
R
false
false
1,407
r
cachematrix.R
## makeCacheMatrix creates a special "matrix" that can cache its inverse.
## cacheSolve computes the inverse of the special "matrix" but first checks
## whether the inverse has already been cached, in which case it skips the
## computation and returns the cached inverse matrix.

## Create a special "matrix" object that can cache its inverse.
## The returned object is a list containing functions to set the value of the
## matrix, get the value of the matrix, set the value of the inverse matrix,
## and get the value of the inverse matrix.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; reset whenever the matrix is replaced
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## Compute the inverse of the special "matrix" x. The function first checks
## whether the inverse has already been cached, in which case it skips the
## computation and returns the cached inverse (with a message). Otherwise it
## calculates the inverse with solve(), stores it via setinverse, and
## returns it. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
954491424980e0b24397c07566cd89cd3c36a183
6b78fcf74c68d33be7b52414e273eeeb8eeb2e7d
/01_split_data.R
81d19ba3238bae1e386671ab7f5a31bd62ed4e90
[]
no_license
zpeng1989/TCGA_CNV_sumamry_diff
51189a7427c714c5f22fd696e15a69d714e1c300
6a70724709395a4458689669bff90d026778e1a0
refs/heads/master
2020-03-25T09:59:19.295740
2018-08-14T05:07:18
2018-08-14T05:07:18
143,681,178
0
0
null
null
null
null
UTF-8
R
false
false
704
r
01_split_data.R
# 01_split_data.R
# Main goal: split the per-cancer CNV table into one file per sample
# (comment translated from Chinese). Usage: Rscript 01_split_data.R <cancer>
args <- commandArgs(trailingOnly = TRUE)
cancer <- args[1]

input_data_path <- paste0('/work/home/zhangp/mission/09_deep_sumamry/data_used/CNV/', cancer, '.txt')
input_data <- read.delim(input_data_path, header = FALSE)

# Unique sample IDs taken from the first column of the CNV table.
sample_id <- names(table(input_data[, 1]))

for (one_sample_id in sample_id) {
  # print(one_sample_id)
  one_data_table <- input_data[which(input_data[, 1] == one_sample_id), ]
  print(dim(one_data_table))
  print(head(one_data_table))
  write_table_path <- paste0('/work/home/zhangp/mission/12_CNV_deep_sumamry/split_data/all_data_test/', cancer, '/', one_sample_id, '.txt')
  # print(write_table_path)
  write.table(one_data_table, write_table_path, sep = '\t',
              quote = FALSE, row.names = FALSE, col.names = FALSE)
}
182b9c6221bd71e6d9caa672e37c5e2ce84a5fac
f422c73b787ae891bcc155e3f0867973ef3dc949
/shiny_weather/server.R
3e6d43263e6423fd217e0f528d911761d736bd5f
[]
no_license
karafede/WeatherMaps_R
209510016e88cda99d400543ac50ac06903d4905
2d391645d4d0a34ffe20f1ba36c6ff5994956a00
refs/heads/master
2016-09-14T01:31:35.262014
2016-06-06T16:13:29
2016-06-06T16:13:29
59,492,926
0
0
null
null
null
null
UTF-8
R
false
false
2,436
r
server.R
# server.R — Shiny server that renders a set of leaflet weather maps, one per
# tile provider layer (rain, temperature, clouds, wind, pressure, aerosol
# optical depth, chlorophyll). Each reactive builds a base map centered at
# lon 18, lat 38, zoom 4 with a semi-transparent overlay.

library(shiny)
library(dplyr)
library(leaflet)

###############################################################

shinyServer(function(input, output) {

  finalMap_rain <- reactive({
    leaflet() %>%
      addTiles() %>%
      setView(18, 38, 4) %>%
      # addProviderTiles("Stamen.TonerBackground", options = providerTileOptions(opacity = 1)) %>%
      addProviderTiles("OpenWeatherMap.Rain",
                       options = providerTileOptions(opacity = 0.35))
  })

  finalMap_temperature <- reactive({
    leaflet() %>%
      addTiles() %>%
      setView(18, 38, 4) %>%
      addProviderTiles("OpenWeatherMap.Temperature",
                       options = providerTileOptions(opacity = 0.35))
  })

  # Clouds layer uses a dark transport basemap instead of the default tiles.
  finalMap_clouds <- reactive({
    leaflet() %>%
      # addTiles() %>%
      setView(18, 38, 4) %>%
      addProviderTiles("Thunderforest.TransportDark",
                       options = providerTileOptions(opacity = 1)) %>%
      addProviderTiles("OpenWeatherMap.Clouds",
                       options = providerTileOptions(opacity = 0.35))
  })

  finalMap_wind <- reactive({
    leaflet() %>%
      addTiles() %>%
      setView(18, 38, 4) %>%
      addProviderTiles("OpenWeatherMap.Wind",
                       options = providerTileOptions(opacity = 0.35))
  })

  finalMap_pressure <- reactive({
    leaflet() %>%
      addTiles() %>%
      setView(18, 38, 4) %>%
      addProviderTiles("OpenWeatherMap.PressureContour",
                       options = providerTileOptions(opacity = 0.35))
  })

  finalMap_AOD <- reactive({
    leaflet() %>%
      addTiles() %>%
      setView(18, 38, 4) %>%
      addProviderTiles("NASAGIBS.ModisTerraAOD",
                       options = providerTileOptions(opacity = 0.35))
  })

  finalMap_Chlorophyll <- reactive({
    leaflet() %>%
      addTiles() %>%
      setView(18, 38, 4) %>%
      addProviderTiles("NASAGIBS.ModisTerraChlorophyll",
                       options = providerTileOptions(opacity = 0.35))
  })

  # Return to client
  output$myMap_rain        <- renderLeaflet(finalMap_rain())
  output$myMap_temperature <- renderLeaflet(finalMap_temperature())
  output$myMap_clouds      <- renderLeaflet(finalMap_clouds())
  output$myMap_wind        <- renderLeaflet(finalMap_wind())
  output$myMap_pressure    <- renderLeaflet(finalMap_pressure())
  output$myMap_AOD         <- renderLeaflet(finalMap_AOD())
  output$myMap_Chlorophyll <- renderLeaflet(finalMap_Chlorophyll())
})
aeda9596c197c39e640ed0c481fad2605a3c05fb
c1958ff3ca2b10a041fadaaa3503724f194631ae
/plot3.R
e93268d69f2431b6e4a1574dcf82e2b13dfb8e52
[]
no_license
pbrogan12/ExData_Plotting1
63e9c48be8a7937a3b7f33e6a48bc431d1b83f80
795a4781855ca302e85774d4a903d8f46cc85181
refs/heads/master
2021-01-24T03:11:40.013244
2014-06-08T23:23:16
2014-06-08T23:23:16
null
0
0
null
null
null
null
UTF-8
R
false
false
1,029
r
plot3.R
# plot3.R — Energy sub-metering plot for 1-2 Feb 2007 from the UCI household
# power consumption dataset.

data <- read.table("~/Downloads/household_power_consumption.txt",
                   header = TRUE, sep = ";", na.strings = "?",
                   stringsAsFactors = FALSE)
data <- data[which(data$Date == "1/2/2007" | data$Date == "2/2/2007"), ]

# Hoisted: the original recomputed this strptime() conversion six times.
datetime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")

png("plot3.png", width = 480, height = 480)
# type = 'n' sets up axes/range without drawing. The original also issued two
# points(..., type = 'n') calls, which draw nothing at all, so they are
# removed here.
plot(datetime, data$Sub_metering_1, type = 'n',
     ylab = "Energy sub metering",  # BUG FIX: "sub meeting" typo in the label
     xlab = "")
lines(datetime, data$Sub_metering_1, type = 'l')
lines(datetime, data$Sub_metering_2, type = 'l', col = "red")
lines(datetime, data$Sub_metering_3, type = 'l', col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 1, col = c("black", "red", "blue"))
dev.off()
88abfc3a0a0d5a3e141269027e5c0eae8f1db059
bfec4871cd31a5d5b52bbd1dc22f977724952bbc
/tests/testthat/testMMM.R
edea33435ab092cec653e2bb8ee804c09b1f17ae
[]
no_license
ekstroem/mommix
a51cd361d973616d8b4faef453d92e20f09607d4
4b9601ee6e31b97ee4d5a0da274c647b6e4187e2
refs/heads/master
2021-05-05T00:57:00.022881
2019-01-06T21:16:45
2019-01-06T21:16:45
119,524,727
0
0
null
null
null
null
UTF-8
R
false
false
1,325
r
testMMM.R
context("test results for MMM")

# Simulate a two-component mixture: the first 70% of observations are tight
# noise around 0 (sd 0.25), the rest follow y ~ N(x, 1). Fit moment mixture
# models with the default and the squared weighting, then pin the fitted
# coefficients and mixing proportion (alpha) against known-good values.
set.seed(1)
p <- 0.7
N <- 100
x <- rnorm(N)
x2 <- rnorm(N)
y <- rnorm(N, mean = x)
y[1:(N * p)] <- rnorm(N * p, mean = 0, sd = .25)
DF <- data.frame(x, x2, y)

res <- moment_mixture_model(DF, y ~ x + x2)
res2 <- moment_mixture_model(DF, y ~ x + x2, weight = "square")

# BUG FIX: "consistant" -> "consistent" in the test description.
test_that("computations are consistent", {
  expect_equivalent(coef(res), c(0.1654199, 0.8233733, -0.2667224),
                    tolerance = 1e-4)
  expect_equivalent(coef(res2), c(0.1495042, 0.8259178, -0.2469152),
                    tolerance = 1e-4)
  expect_equivalent(res$alpha, 0.3041905, tolerance = 1e-4)
  expect_equivalent(res2$alpha, 0.3162248, tolerance = 1e-4)
})
41858f9e70de4799c9f92f4fb9bd0e03c1886d52
b2360322fc78847770c990a55a2e0859f328c520
/projects/MachineLearning/code/LinearRegressionForPrediction.R
942d506b75f56d0c6141e6df2165a7b503077969
[]
no_license
senthil-lab/RWork
6739546116a67cd8a6e95d5760d230f3f7432643
3754a2c4f50c39b9be189a48c7ee2087ce2e248c
refs/heads/master
2020-12-18T18:19:09.299030
2020-10-04T18:28:28
2020-10-04T18:28:28
235,481,973
0
0
null
null
null
null
UTF-8
R
false
false
5,064
r
LinearRegressionForPrediction.R
# LinearRegressionForPrediction.R
# Course exercises on linear regression for prediction: Galton father/son
# heights, then simulated bivariate/trivariate normal data, measuring RMSE
# over repeated train/test partitions.

library(tidyverse)
library(HistData)

galton_heights <- GaltonFamilies %>%
  filter(childNum == 1 & gender == "male") %>%
  select(father, childHeight) %>%
  rename(son = childHeight)
head(galton_heights)

library(caret)

# Baseline: predict every son with the training-set average.
y <- galton_heights$son
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
train_set <- galton_heights %>% slice(-test_index)
test_set <- galton_heights %>% slice(test_index)
avg <- mean(train_set$son)
avg
mean((avg - test_set$son)^2)

# Fit linear regression model
fit <- lm(son ~ father, data = train_set)
fit$coef

# Manual prediction with the fitted coefficients (hard-coded from one run).
son_height_pred <- 35.4251130 + 0.5080324 * test_set$father
mean((son_height_pred - test_set$son)^2)

y_hat <- fit$coef[1] + fit$coef[2] * test_set$father
mean((y_hat - test_set$son)^2)

# Using the predict function
y_hat <- predict(fit, test_set)
mean((y_hat - test_set$son)^2)

# Read help files
?predict.lm
?predict.glm

# Exercise: simulated bivariate normal, correlation 0.5.
# set.seed(1) # if using R 3.5 or earlier
set.seed(1, sample.kind = "Rounding")  # if using R 3.6 or later
n <- 100
Sigma <- 9 * matrix(c(1.0, 0.5, 0.5, 1.0), 2, 2)
dat <- MASS::mvrnorm(n = 100, c(69, 69), Sigma) %>%
  data.frame() %>%
  setNames(c("x", "y"))
nrow(dat)

# 100 repeated partitions via sapply, collecting RMSE each time.
set.seed(1, sample.kind = "Rounding")
res <- sapply(seq(1:100), function(x1) {
  test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
  train_set <- dat %>% slice(-test_index)
  test_set <- dat %>% slice(test_index)
  fit <- lm(y ~ x, data = train_set)
  y_hat <- predict(fit, test_set)
  RMSE(y_hat, test_set$y)
})
mean(res)
sd(res)

# Same experiment written with replicate().
# set.seed(1) # if using R 3.5 or earlier
set.seed(1, sample.kind = "Rounding")  # if using R 3.6 or later
rmse <- replicate(100, {
  test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
  train_set <- dat %>% slice(-test_index)
  test_set <- dat %>% slice(test_index)
  fit <- lm(y ~ x, data = train_set)
  y_hat <- predict(fit, newdata = test_set)
  sqrt(mean((y_hat - test_set$y)^2))
})
mean(rmse)
sd(rmse)

# Repeat for several sample sizes; each sapply iteration returns c(mean, sd)
# of 100 RMSEs, so res is a 2-row matrix with one column per n.
set.seed(1, sample.kind = "Rounding")  # if using R 3.6 or later
res <- sapply(c(100, 500, 1000, 5000, 10000), function(x1) {
  n <- x1
  Sigma <- 9 * matrix(c(1.0, 0.5, 0.5, 1.0), 2, 2)
  dat <- MASS::mvrnorm(n = x1, c(69, 69), Sigma) %>%
    data.frame() %>%
    setNames(c("x", "y"))
  rmse <- replicate(100, {
    test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
    train_set <- dat %>% slice(-test_index)
    test_set <- dat %>% slice(test_index)
    fit <- lm(y ~ x, data = train_set)
    y_hat <- predict(fit, newdata = test_set)
    sqrt(mean((y_hat - test_set$y)^2))
  })
  c(mean(rmse), sd(rmse))
})
res
# NOTE(review): res[, 1] is c(mean, sd) for n = 100, so mean(res[, 1])
# averages a mean with a standard deviation — res[1, ] was probably
# intended. Kept as written to preserve the original output.
mean(res[, 1])
sd(res[, 1])

# Higher correlation (0.95) between predictor and outcome.
# set.seed(1) # if using R 3.5 or earlier
set.seed(1, sample.kind = "Rounding")  # if using R 3.6 or later
n <- 100
Sigma <- 9 * matrix(c(1.0, 0.95, 0.95, 1.0), 2, 2)
dat <- MASS::mvrnorm(n = 100, c(69, 69), Sigma) %>%
  data.frame() %>%
  setNames(c("x", "y"))
head(dat)

set.seed(1, sample.kind = "Rounding")  # if using R 3.6 or later
rmse <- replicate(100, {
  test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
  train_set <- dat %>% slice(-test_index)
  test_set <- dat %>% slice(test_index)
  fit <- lm(y ~ x, data = train_set)
  y_hat <- predict(fit, newdata = test_set)
  sqrt(mean((y_hat - test_set$y)^2))
})
mean(rmse)
sd(rmse)

# Two predictors: x_1 and x_2 each correlated 0.75 with y, 0.25 with each
# other. Compare single-predictor and joint models.
# set.seed(1) # if using R 3.5 or earlier
set.seed(1, sample.kind = "Rounding")  # if using R 3.6 or later
Sigma <- matrix(c(1.0, 0.75, 0.75,
                  0.75, 1.0, 0.25,
                  0.75, 0.25, 1.0), 3, 3)
dat <- MASS::mvrnorm(n = 100, c(0, 0, 0), Sigma) %>%
  data.frame() %>%
  setNames(c("y", "x_1", "x_2"))
cor(dat)
head(dat)

set.seed(1, sample.kind = "Rounding")
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)

fit <- lm(y ~ x_1, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat - test_set$y)^2))

set.seed(1, sample.kind = "Rounding")
fit <- lm(y ~ x_2, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat - test_set$y)^2))

set.seed(1, sample.kind = "Rounding")
fit <- lm(y ~ x_1 + x_2, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat - test_set$y)^2))

# Same comparison with x_1 and x_2 highly correlated (0.95) with each other.
set.seed(1, sample.kind = "Rounding")  # if using R 3.6 or later
Sigma <- matrix(c(1.0, 0.75, 0.75,
                  0.75, 1.0, 0.95,
                  0.75, 0.95, 1.0), 3, 3)
dat <- MASS::mvrnorm(n = 100, c(0, 0, 0), Sigma) %>%
  data.frame() %>%
  setNames(c("y", "x_1", "x_2"))

set.seed(1, sample.kind = "Rounding")
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)

fit <- lm(y ~ x_1, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat - test_set$y)^2))

set.seed(1, sample.kind = "Rounding")
fit <- lm(y ~ x_2, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat - test_set$y)^2))

set.seed(1, sample.kind = "Rounding")
fit <- lm(y ~ x_1 + x_2, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat - test_set$y)^2))
09cb736a6407f943b21116021b2e1c57fda5312f
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/EMMIXskew/examples/ddmvt.Rd.R
d41a7d1cc276b95110c6645ad8da5076510bb063
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
311
r
ddmvt.Rd.R
library(EMMIXskew)

### Name: ddmvt
### Title: The Multivariate t-Distribution
### Aliases: ddmvt rdmvt
### Keywords: cluster datasets

### ** Examples

# BUG FIX: in the flattened one-line form, the first "###" comment swallowed
# all of the example code; the original line structure is restored here.
# Draw n points from a p-dimensional t distribution with nu degrees of
# freedom, then evaluate their density.
n <- 100
p <- 2
mean <- rep(0, p)
cov <- diag(p)
nu <- 3
set.seed(3214)
x <- rdmvt(n, p, mean, cov, nu)
den <- ddmvt(x, n, p, mean, cov, nu)
1059ed7919435c56c471f7790539417fa71493cd
7efa216193f994c69c6566eb6f23418ec3b03706
/rejection_sampling.R
430c9d2a4341d12ee2c754a40b533aca5d2ffa5e
[]
no_license
Sirius081/MCMC
bd4952077572de7806c0a0819ea2f2b457f95e36
742a96b116c38f8ff7551ec980d6638329bce440
refs/heads/master
2021-01-20T02:37:37.086023
2017-04-26T02:49:04
2017-04-26T02:49:04
89,430,292
0
0
null
null
null
null
UTF-8
R
false
false
607
r
rejection_sampling.R
#p is gaussian(0,1) #q is uniform(min,max) rejection_sampling_gau=function(mean,variance){ N=100000 x=vector(length=N) min_v=mean-3*variance max_v=mean+3*variance k=max-min print(min_v) print(max_v) i=1 count=0 while(i<=N){ z=runif(1,min_v,max_v) #sample from q p_z=dnorm(z,mean,variance) #p(z) q_z=dunif(z,min_v,max_v) #q(z) u=runif(1,min=0,max=k*q_z)#sample from uniform(0,k*q(z)) if(p_z>=u){ #accept x[i]=z i=i+1 } } hist(x,breaks = 50,freq = F) lines(density(rnorm(N*10,0,1))) } rejection_sampling_gau(0,1)
9a925cc50bb6a3c2520ab0f06a6aa35a7379b9ac
854175b2bd9411866784aba87b959dad603cf6e9
/R/capture_ping.R
7bd790a8d6e31d301d2869148571acc78a67ef0f
[ "MIT" ]
permissive
JesseVent/pingers
e43ddf27c8f8012d3e245982e5771091a3b411be
1fc9ddaa201de08c1187fa68ee3a3b2f0de04c70
refs/heads/master
2021-08-15T20:28:42.676962
2018-10-31T12:05:01
2018-10-31T12:05:01
145,440,455
6
0
null
null
null
null
UTF-8
R
false
false
2,515
r
capture_ping.R
#' Ping Server #' #' Ping a server to capture response details #' @name ping_capture #' #' @param server IP address or URL of server #' @param count Number of times to ping server #' #' @return dataframe with ping results #' @export #' #' @importFrom dplyr "%>%" #' @importFrom stringr str_replace_all #' @importFrom tibble tibble #' #' @examples #' \dontrun{ #' dest <- get_destinations(top_n = 1) #' ping_res <- ping_capture(dest$ip[1], 10) #' print(ping_res) #' } ping_capture <- function(server, count) { sys_os <- .Platform$OS.type if (sys_os == "unix") { ping_query <- paste("ping", server, "-c", count) } else { ping_query <- paste("ping", server, "-n", count) } d <- system(ping_query, intern = TRUE) n <- length(d) %>% as.numeric() ping_list <- list(d[2:(n - 4)]) packet_loss <- d[n - 1] ping_stats <- d[n] timestamp <- Sys.time() ## Strip out ping statistics stats <- stringr::str_replace_all(ping_stats, "[^0-9|./]", "") %>% strsplit("/") ping_min <- stats[[1]][4] %>% as.numeric() ping_avg <- stats[[1]][5] %>% as.numeric() ping_max <- stats[[1]][6] %>% as.numeric() ping_stddev <- stats[[1]][7] %>% as.numeric() ## Strip out packet loss pkt <- stringr::str_replace_all(packet_loss, "[^0-9|,.]", "") %>% strsplit(",") packets_sent <- pkt[[1]][1] %>% as.numeric() packets_back <- pkt[[1]][2] %>% as.numeric() packet_loss <- pkt[[1]][3] %>% as.numeric() packets_lost <- packets_sent - packets_back loss_rate <- ((packets_sent - packets_back) / packets_sent) * 100 pres <- tibble::tibble( timestamp, server, packets_sent, packets_back, packet_loss, packets_lost, loss_rate, ping_min, ping_avg, ping_max, ping_stddev, ping_list ) pres$packets_sent[is.na(pres$packets_sent)] <- 100 pres$packets_back[is.na(pres$packets_back)] <- 0 pres$loss_rate[is.na(pres$loss_rate)] <- 100 pres$packets_lost[is.na(pres$packets_lost)] <- 100 pres$ping_stddev[is.na(pres$ping_stddev)] <- 0 pres$ping_min[is.na(pres$ping_min)] <- 0 pres$ping_max[is.na(pres$ping_max)] <- 0 pres$ping_avg[is.na(pres$ping_avg)] 
<- 0 pres$loss_rate <- ((pres$packets_sent - pres$packets_back) / pres$packets_sent * 100) pres$packet_loss[is.na(pres$packet_loss)] <- pres$loss_rate pres$packets_lost <- pres$packets_sent - pres$packets_back return(pres) }
d9b42af3f4bb3d9aefd4c56d161ba03d8b0bb532
737d67e2884b33324003a7303351f5abf2c419e6
/OutlierAnalysis/outlierMatrixProtein_gluC.R
793e59f63d6bd1c526b810f501e19eb8dda014f8
[ "Apache-2.0" ]
permissive
bwbai/ATLANTiC
ad576ec305120a8b5012d08cad7f11a0c4ab217e
c831ac3e0c6ae0ed9be130a1959b50a6bc41ce2f
refs/heads/master
2022-04-19T18:42:15.671658
2020-04-21T11:41:52
2020-04-21T11:41:52
null
0
0
null
null
null
null
UTF-8
R
false
false
1,257
r
outlierMatrixProtein_gluC.R
source("https://raw.githubusercontent.com/mengchen18/RFunctionCollection/master/outliers.R") library(openxlsx) library(matrixStats) if (!dir.exists("Res/20180328_outlierAnalysis")) dir.create("Res/20180328_outlierAnalysis") t1 <- read.xlsx("../Manuscript/current/supplementTables/table_S3_Intensity_of_proteins.xlsx", sheet = 7) an <- read.xlsx("../Manuscript/current/supplementTables/table_S3_Intensity_of_proteins.xlsx", sheet = 6) nam <- t1$ID t1$ID <- NULL t1 <- apply(t1, 2, as.numeric) rownames(t1) <- nam i <- findOutlier(t1, foldthresh = 5, pvalue = 0.1, reachLowBound = FALSE, window = 0.5) getab <- function(m, a, col = make.names(c("Gene.names", "iBAQ", "Majority protein IDs", "Protein names"))) { i <- findOutlier(m, foldthresh = 5, pvalue = 0.1, reachLowBound = FALSE, window = 0.5) ls <- lapply(names(i$outlierIndexColumns), function(ii) { ir <- i$outlierIndexColumns[[ii]] d <- a[ir, col] if (nrow(d) == 0) return(d) d$cellline <- ii d }) do.call(rbind, ls) } tab <- getab(t1, a = an) tab$cellline <- paste0(tab$cellline, "_NCI60_GluC") write.table(tab, file = "Res/20180328_outlierAnalysis/outlierTableProtein_GluC.txt", col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
a9dab08b26c070fb959183717c747c0812d4bccd
d64d88d896260aac75f0ed98a01c7e8e02005063
/server.R
c0b4322809062589dfdf9499da89314910334e63
[]
no_license
neeagle/CourseraDDP
025aa557875e6fa5fb7a865b396cabf0ae93708a
1f8dd2c9a6f981736f7516c5d2350c38c69537cf
refs/heads/master
2020-12-25T08:59:40.106663
2016-08-15T04:04:27
2016-08-15T04:04:27
65,699,493
0
0
null
null
null
null
UTF-8
R
false
false
982
r
server.R
library(shiny) library(zipcode) library(ggmap) data(zipcode) zip_price_rent <- read.csv(file="Zip_PriceToRentRatio_AllHomes052016.csv") map<-get_map(location='united states', zoom=4, maptype = "toner", source='google',color='color') zipmerge <- merge(zip_price_rent, zipcode, by.x="RegionName", by.y="zip") g <- ggmap(map) + geom_point(aes(x=longitude, y=latitude, show_guide = TRUE, colour=log(as.numeric(1/X2016.05))), data=zipmerge, na.rm=TRUE) + scale_color_gradient(low="yellow", high="purple") shinyServer( function(input, output) { dataInput <- reactive({zipcode[zipcode$zip==input$select,] }) lon <- reactive({zipcode[zipcode$zip==input$select,]$longitude[1]}) lat <- reactive({zipcode[zipcode$zip==input$select,]$latitude[1]}) output$oid1 <- renderPrint({dataInput()}) output$gmap <- renderPlot( { #zcor <- dataInput() #p <- g + geom_point(aes(x=zcor$longitude[1], y=zcor$latitude[1], size=15), colour=20) print(g) }) } )
08ada3e6f8fe9a9bf6dab1dc608b6114197f2175
ba094e0fc15739a634ecb3fb7703ab52d0ee1925
/Mario.R
7842f2369cd6f9bf8b37c68cd0c27cc97b899f5b
[]
no_license
vina1998/TidyTuesday05-25
0951463fe004b3c010211e3c8443b0da6059439a
098eca6451c87526ac79f6e640abfe09aadcd6a1
refs/heads/master
2023-06-19T22:48:38.494303
2021-07-19T08:39:16
2021-07-19T08:39:16
371,938,912
0
0
null
null
null
null
UTF-8
R
false
false
6,672
r
Mario.R
records <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-05-25/records.csv') drivers <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-05-25/drivers.csv') library(tidyverse) library(ggimage) library(viridis) library(extrafont) loadfonts() #where is the line between analysis and visualizations lol drivers <- drivers %>% drop_na() #prep data df <- drivers %>% count(year) %>% rowwise() %>% mutate( w = round(runif(1, 0, 3)), h = runif(1, 0, 3)) %>% ungroup() #starting line starting_line <- data.frame( fyear = df$year ) %>% mutate( r = row_number(), c = list(c(1:1)) ) %>% unnest(c(c, r)) %>% rowwise %>% mutate( x = list(c + c(0, 0.9, 0.9, 0)), y = list(c * 0.5 + c(fyear - 0.45, fyear, fyear + 0.9, fyear + 0.45)) ) %>% unnest(c(x, y)) #track: ggplot(df) + annotate("polygon", x = c(-20, 15, 10, -20), y = c(1997, 1997, 2021, 2021), fill = "grey30", color = "grey30") + annotate("polygon", x = c(15, 22.5, 22.5, 10), y = c(1997, 1997, 2021, 2021), fill = "grey30", color = "grey30") + annotate("polygon", x = c(-20, 15, 10, -20), y = c(2020.3, 2020.3, 2030, 2030), fill = "palegreen3", color = "palegreen3") + annotate("polygon", x = c(15, 22.5, 22.5, 10), y = c(2020.3, 2020.3, 2030, 2030), fill = "palegreen3", color = "palegreen3") + annotate("line", x = c(-15,-17), y=2010, colour="white", size=2) + annotate("line", x = c(-10,-12), y=2010, colour="white", size=2) + annotate("line", x = c(-8,-6), y=2010, colour="white", size=2) + annotate("line", x = c(-5,-3), y=2010, colour="white", size=2) + annotate("line", x = c(-2,0), y=2010, colour="white", size=2) + annotate("line", x = c(2,4), y=2010, colour="white", size=2) + annotate("line", x = c(6,8), y=2010, colour="white", size=2) + annotate("line", x = c(10,12), y=2010, colour="white", size=2) + annotate("line", x = c(14,16), y=2010, colour="white", size=2) + annotate("line", x = c(18,20), y=2010, colour="white", size=2) + 
geom_polygon(data = starting_line, aes(x = x - 21, y = y - 1.9, group = interaction(c, r)), fill = "white", color = "black", size= 1.5) + annotate("polygon", x = c(-20, 15, 10, -20), y = c(1987.3, 1987.3, 1997, 1997), fill = "palegreen3", color = "palegreen3") + annotate("polygon", x = c(15, 22.5, 22.5, 10), y = c(1987.3, 1987.3, 1997, 1997), fill = "palegreen3", color = "palegreen3") + geom_curve(aes(x = -15 - w, y = year - w * 0.4, xend = n, yend = 2001 + h, color = year, size = n), curvature = 0) + scale_size_continuous(range = c(0.1, 3)) + scale_color_viridis_c(option = "inferno") + annotate("polygon", x = c(-19, -15, -12, -16), y = c(2028, 2028, 2029, 2029), fill = "yellow1", color = "grey20") + annotate("polygon", x = c(-19, -15, -15, -19), y = c(2022, 2022, 2028, 2028), fill = "yellow3", color = "grey20") + annotate("polygon", x = c(-15, -12, -12, -15), y = c(2022, 2024, 2029, 2028), fill = "yellow4", color = "grey20") + annotate("text", x = -16, y = 2026, hjust = 1, vjust = 1, label = "?", size = 15, color = "white", family= "Impact") + annotate("text", x = -13, y = 2027, hjust = 1, vjust = 1, label = "?", size = 15, color = "seashell3", family= "Impact") + annotate("polygon", x = c(13, 17, 20, 16), y = c(2028, 2028, 2029, 2029), fill = "blue1", color = "grey20") + annotate("polygon", x = c(13, 17, 17, 13), y = c(2022, 2022, 2028, 2028), fill = "blue3", color = "grey20") + annotate("polygon", x = c(17, 20, 20, 17), y = c(2022, 2024, 2029, 2028), fill = "blue4", color = "grey20") + annotate("polygon", x = c(-19, -15, -12, -16), y = c(1995, 1995, 1996, 1996), fill = "red1", color = "grey20") + annotate("polygon", x = c(-19, -15, -15, -19), y = c(1989, 1989, 1995, 1995), fill = "red3", color = "grey20") + annotate("polygon", x = c(-15, -12, -12, -15), y = c(1989, 1991, 1996, 1995), fill = "red4", color = "grey20") + annotate("polygon", x = c(13, 17, 20, 16), y = c(1995, 1995, 1996, 1996), fill = "green1", color = "grey20") + annotate("polygon", x = c(13, 17, 
17, 13), y = c(1989, 1989, 1995, 1995), fill = "green3", color = "grey20") + annotate("polygon", x = c(17, 20, 20, 17), y = c(1989, 1991, 1996, 1995), fill = "green4", color = "grey20") + annotate("text", x = -16, y = 1993, hjust = 1, vjust = 1, label = "?", size = 15, color = "white", family= "Impact") + annotate("text", x = -13, y = 1994, hjust = 1, vjust = 1, label = "?", size = 15, color = "seashell3", family= "Impact") + annotate("text", x = 16, y = 1993, hjust = 1, vjust = 1, label = "?", size = 15, color = "white", family= "Impact") + annotate("text", x = 19, y = 1994, hjust = 1, vjust = 1, label = "?", size = 15, color = "seashell3", family= "Impact") + annotate("text", x = 16, y = 2026, hjust = 1, vjust = 1, label = "?", size = 15, color = "white", family= "Impact") + annotate("text", x = 19, y = 2027, hjust = 1, vjust = 1, label = "?", size = 15, color = "seashell3", family= "Impact") + annotate("text", x = 9.5, y = 2025, hjust = 1, vjust = 1, label = "Number of records by year", size = 10, family= "Comic Sans MS",color = "white") + annotate("text", x = 17.5, y = 2022, hjust = 1, vjust = 1, label = "Interesting observation:overlap in number of records obtained between earliest and later years", size= 5, family= "Comic Sans MS",color = "black") + annotate("text", x = 17.5, y = 2019, hjust = 1, vjust = 1, label = "Source: Mario Kart World Records, Graphics: Thivina Thanabalan", size= 5, family= "Comic Sans MS",color = "grey48") + scale_x_continuous(breaks = 0:20, labels = ifelse(0:20 %% 5 == 0, 0:20, ""), limits = c(-20, 23)) + theme_void() + theme(legend.position = "none",axis.ticks.y = element_line(color = "yellow"), axis.ticks.length.y = unit(0.5, "line", "lemonchiffon4"), axis.text.y= element_text(colour="lemonchiffon4"), axis.ticks.x = element_line(color = "lemonchiffon4"), axis.ticks.length.x = unit(0.5, "line", "lemonchiffon4"), axis.text.x = element_text(colour="lemonchiffon4")) + theme(plot.background = element_rect(fill = "lemonchiffon", 
colour=NA)) + geom_image(aes(x = n, y = 2002 + h - 0.5, image = "https://www.pngkey.com/png/full/1007-10074038_bananas-transparent-mario-kart.png"), size = 0.035, by = "height") + geom_image(aes(x = 1, y = 2027, image = "https://www.pngkey.com/png/full/140-1403727_super-mario-kart-png-file-mario-kart-super.png"), size = 0.1, by = "height") + ggsave(path = "png", filename = "track.png", dpi = 128, width = 15, height =8.4 )
7d838c2a24fed29632bda716dcb0a45fff268204
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/seasonal/examples/outlier.Rd.R
6cf16a1805cf69e819406060aa4234966653809b
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
184
r
outlier.Rd.R
library(seasonal) ### Name: outlier ### Title: Outlier Time series ### Aliases: outlier ### ** Examples ## Not run: ##D x <- seas(AirPassengers) ##D outlier(x) ## End(Not run)
cf71f8773ff285b889f877978b110e5a9ec46e0d
9992af6db68a9d3a92844b83cf992210da05cc32
/AnalisisRendimientoRiesgo.R
c193d8fd2e09062bbf20ec6dcfcf1bbce89fbc6d
[]
no_license
cristinacambronero/CovarianzaR
bcaa06f763ef13220e1090995f2e989663ebf19a
e899f3234ca19ccff410d838e6ecff81d17bf5bf
refs/heads/master
2020-04-13T12:53:38.498218
2015-07-26T09:42:44
2015-07-26T09:42:44
39,720,639
0
0
null
null
null
null
UTF-8
R
false
false
1,317
r
AnalisisRendimientoRiesgo.R
numAleatorios<-c(round(abs(rnorm(10)*100))) ##10 número aleatorios CIAleatoria<-CI_Empresas[,numAleatorios] ##Obtenemos los valores close de 10 empresas CIRend<-(CIAleatoria[2,1:length(CIAleatoria)]-CIAleatoria[1,1:length(CIAleatoria)])/CIAleatoria[1,1:length(CIAleatoria)] ##Calculamos la rentabilidad de cada serie temporal for(i in 3:length(CIAleatoria[,1])){ aux<-(CIAleatoria[1,1:length(CIAleatoria)]-CIAleatoria[i-1,1:length(CIAleatoria)])/CIAleatoria[i-1,1:length(CIAleatoria)] CIRend = rbind(CIRend,aux) } ##Calculamos la media de los rendimientos mu = colMeans(CIRend[,-1]) ##Y la matriz de covarianza de los rendimientos bigsig = cov(CIRend[,-1]) m = nrow(bigsig)-1 ##Asignamos aleatoriamente mesos entre 0 y 1 con suma total 1 para las 10 empresas w = diff(c(0,sort(runif(m)), 1)); ##Multiplicamos los pesos por la media rb = sum(w*mu); ##Y por la matriz de covarianza sb = sum(w*bigsig*w); ##Definimos un número de iteraciones N = 2000 ##E iteramos para calcular la curva de la cartera eficiente for (j in 2:N) { w = diff(c(0,sort(runif(m)), 1)); r = sum(w*mu); rb = rbind(rb,r); s = sum(w*bigsig*w); sb = rbind(sb,s); ##Obtenemos la desviació tipica. sb = sqrt(sb) ##Y dibujamos la gráfica plot(sb,rb,col="blue",ylab="E(r)", xlab="Sigma",main="Cartera Eficiente")
696880d105a43c61d27d2b92111eaf40c6f39df2
0d93a95face0469ca653511e689738794eed05d8
/script/ResampledWells.R
e5520cb749dc8b6d8eea69abb64dd7c205a1fd5c
[]
no_license
xxmissingnoxx/As-measurement-code
fefa5b1ad176f1958a7d08686019935a9a760567
a5325fc5ba5ae2433e27d1e74d88003b5117c593
refs/heads/master
2023-02-15T18:07:30.470404
2021-01-17T06:47:18
2021-01-17T06:47:18
null
0
0
null
null
null
null
UTF-8
R
false
false
5,527
r
ResampledWells.R
# model for the 271 resampled wells ###################################### library(rstan) rstan_options(auto_write = TRUE) options(mc.cores = parallel::detectCores()) setwd("/Users/YulingYao/Documents/Research/arsenic") data_repeated308=read.csv("./data/4. repeated308.csv") data_repeated14_15=read.csv("./data/2014vs15.csv",sep=',') head(data_repeated308) head(data_repeated14_15) y1= data_repeated308$Arsenic_lab2000 y2= data_repeated308$Arsenic_lab2014 y3=data_repeated14_15$As2015 depth=data_repeated14_15$Depth.2000 loc_x=data_repeated14_15$X2000 loc_y=data_repeated14_15$Y2000 scale_map=max(loc_x)-min(loc_x) loc_x=(loc_x-min(loc_x))/scale_map loc_y=(loc_y-min(loc_y))/scale_map loc_x=loc_x[complete.cases(y3)] loc_y=loc_y[complete.cases(y3)] y1=log(y1[complete.cases(y3)]) y2=log(y2[complete.cases(y3)]) y3=log(y3[complete.cases(y3)]) # hyper-parameter is from optimization *(Type-2 MAP) stan_fit_mu=stan(file="gp_map_mu.stan", iter=2000, data = list(N=length(y1),y1=y1, y2=y2, y3=y3, loc_X=cbind(loc_x,loc_y), rho= 0.04169228, alpha=1.06221590, sigma=1.16859570)) # hyper-parameter is from optimization *(Type-2 MAP) stan_fit=stan(file="gp_resampled.stan", iter=2000, init=ttt, data = list(N=length(y1),y1=y1, y2=y2, y3=y3, loc_X=cbind(loc_x,loc_y), spline_degree=3, rho= 0.04169228, alpha=1.06221590, sigma=1.16, num_knots=10, knots= unname( quantile(c(y1,y2,y3) ,probs=seq(from=0, to=1, length.out = 10))) , N_grid=24, f_grid= sort( c( seq(-0.8, 6.6, length.out = 20) , log (c(40,50,60,80)))) )) sss=extract(stan_fit, pars=c("mu", "mu0")) sss2=extract(stan_fit_mu, pars=c("mu", "mu0")) f_grid= (sort( c( seq(-0.8, 6.6, length.out = 20) , log (c(40,50,60,80))))) f12= sss$change_grid ppb50=c() ppb100=c() ppb10=c() for( i in 1:length(f_grid)){ ppb10[i]= mean(f_grid[i]+ f12[,i] + rnorm(dim(f12)[1], 0, 1.16859570) > log(10)) ppb50[i]= mean(f_grid[i]+ f12[,i] + rnorm(dim(f12)[1], 0, 1.16859570) > log(50)) ppb100[i]= mean(f_grid[i]+ f12[,i] + rnorm(dim(f12)[1], 0, 1.16859570) > log(100)) } 
for( i in 1:24){ f12[,i]=f12[,i]+sss$sigma_change*rnorm(4000,0,1) } library(RColorBrewer) red_c=brewer.pal(n = 4, name = "YlOrRd")[2:4] # graphing pdf("~/Desktop/spline.pdf", height=2.6, width=7.5) layout(matrix(c(1:3),nrow=1), width = c(1.2,1,0.6),height = c(1)) f_grid= (sort( c( seq(-0.8, 6.6, length.out = 20) , log (c(40,50,60,80))))) par(oma=c(1.5,2.7,2,0), pty='m',mar=c(1,1.5,1,1) ,mgp=c(1.5,0.25,0), lwd=0.5,tck=-0.01, cex.axis=1, cex.lab=0.6, cex.main=0.7) plot(f_grid, colMeans(sss$change_grid), axes=F,ylab="",xlab="",type='l',col="#8F2727",lwd=1.5, ylim=c(-1.39,1.39 ) , xlim= log(c(0.9,1000)) , xaxs='i' ) axis(2, las=2,lwd=0.5,at=log(c(1/4, 1/2, 1,2, 4)), labels = c("1/4" , "1/2" ,"1\n no change" ,2, 4 ) ) abline(h=0,col= 'darkgreen', lwd=0.8 ) zz=apply(sss$change_grid, 2, quantile, c(0.975, 0.75, 0.25, 0.025)) polygon(x=c(f_grid,rev(f_grid)), y=c(zz[1,],rev(zz[4,] ) ), border=NA,col= adjustcolor( "#B97C7C", alpha.f = 0.45), xpd=F) polygon(x=c(f_grid,rev(f_grid)), y=c(zz[2,],rev(zz[3,] ) ), border=NA,col= adjustcolor( "#B97C7C", alpha.f = 0.45), xpd=F) axis(1, padj=-0.1,lwd=0.5,at=log(c(0.1,1,10,50,100,1000)), labels = c(expression(10^-1), 1 , 10, 50,expression(10^2),expression(10^3) ) ) lines(f_grid, colMeans(sss$change_grid),col="#8F2727",lwd=1.5, xpd=F) box(lwd=0.5,bty='l') abline(h=log(c(1/4,1/2,2 ,4)), lty=2, col='grey40') abline(v= log( c(10,50, 100)), lty=2, col='grey40') mtext(1, line=1.3,text="baseline As in 2000 (ppb)",cex=0.8) mtext(3, text="expected multiplicative change in 2014/15",cex=0.8,line=1) abline(v= ( c(100, 200,300, 400)), lty=2, col='grey40') plot(f_grid, ppb10, col=red_c[1], lwd=1.5, type='l' , axes=F,ylab="",xlab="",yaxs='i', xpd=T, ylim=c(0,1), xlim=log( c(1,800))) lines(f_grid, ppb50, col=red_c[2], xpd=T,lwd=1.5) lines(f_grid, ppb100, col=red_c[3], xpd=T,lwd=1.5) axis(1, padj=-0.1,lwd=0.5,at=log(c(0.1,1,10,50,100,1000)), labels = c(expression(10^-1), 1 , 10, 50,expression(10^2),expression(10^3) ) ) axis(2, 
las=2,lwd=0.5,at=c(0,0.5,1)) abline(h=c(0.25,0.5,0.75), lty=2, col='grey40') #text(x= c(1.8, 3.5, 5.5), y=c(0.45,0.54,0.55), col= red_c, labels = c(">10", ">50", ">100") , cex=1) text(x= (c(1.8, 3.6, 5.8)), y=c(0.45,0.54,0.55), col= red_c, labels = c(">10", ">50", ">100 ppb") , cex=1) mtext(1, line=1.3,text="baseline As in 2000 (ppb)",cex=0.8) mtext(3, text="probablity of excessing safe levels \n in 15 years",cex=0.8, line=0.5) abline(v= log( c(10,50, 100)), lty=2, col='grey40') abline(v= ( c(100,200,300,400)), lty=2, col='grey40') plot(c(0,0), axes=F,ylab="",xlab="",xaxs='i', xpd=T, xlim=c(0.5, 2.2), ylim=c(30,75)) points(c(1,2), y= c(mean( exp(sss2$mu0)) , mean( exp(sss2$mu) ) ), pch=18, col="#8F2727", cex=2) abline(h=c(30,40,50,60,70), col='gray40', lty=2) lines(x=c(1,1), y= as.vector( quantile( exp(sss2$mu0), c(0.975, 0.025) )), lwd=1, col="#B97C7C" ) lines(x=c(1,1), y= as.vector( quantile(exp(sss2$mu0), c(0.75, 0.25) )), lwd=3, col="#8F2727" ) lines(x=c(2,2), y= as.vector( quantile(exp(sss2$mu), c(0.975, 0.025) )), lwd=1, col="#B97C7C" ) lines(x=c(2,2), y= as.vector( quantile( exp(sss2$mu), c(0.75, 0.25) )), lwd=3, col="#8F2727" ) axis(2, las=2, at=c(30,50,70)) mtext(3, text="spatial mean",cex=0.8, line=0.5) text(x= c(1.3, 1.7), y=c(53,41), col= 1, labels = c(" 2000", "14-15 ") , cex=1) dev.off()
086c05399ac8cbd5398a1f7e7251b9db0ac33e05
3a5086aa8460c8253cf45a49be933be875bca99f
/cachematrix.R
5e436983545c201c81758278eae58c34e104af00
[]
no_license
MarinePGT/ProgrammingAssignment2
d6d7d882a58b26f92924cf5c7bc5de4a40ecd935
659b94f96ee05b69f7afea23a429b2ddac020594
refs/heads/master
2020-04-06T04:11:20.435325
2015-10-21T17:10:22
2015-10-21T17:10:22
44,611,405
0
0
null
2015-10-20T14:20:52
2015-10-20T14:20:52
null
UTF-8
R
false
false
1,574
r
cachematrix.R
### Coursera R programming ##Programming Assignment 2: Caching the Inverse of a Matrix # ======================================================== # (21st October 2015) # Author: M Pouget ## functions: # 1.makeCacheMatrix: fct creates "matrix" object that cache its inverse # 2.cacheSolve: fct computes inverse of the "matrix" returned by makeCacheMatrix # If inverse already calculated (and matrix not changed), # then cachesolve retrieves inverse from cache # set working directory setwd("C://Users/mpouget/Desktop/Coursera/assigment2") ### 1 makeCacheMatrix <- function(x = matrix()) { ## return: a list containing functions to ## 1. set the matrix ## 2. get the matrix ## 3. set the inverse ## 4. get the inverse inv <- NULL setmatrix <- function(y) { x <<- y inv <<- NULL } getmatrix = function() x setinverse = function(inverse) inv <<- inverse getinverse = function() inv list(setmatrix = setmatrix, getmatrix = getmatrix, setinverse = setinverse, getinverse = getinverse) } dump("makeCacheMatrix", file="makeCacheMatrix.R") ### 2 cacheSolve <- function (x=matrix(), ...) { ## return: inverse of the original matrix input to makeCacheMatrix() inv <- x$getinverse() if(!is.null(inv)){ message("getting cached data") return(inv) } y <- x$getmatrix() inv <- solve(y, ...) x$setinverse(inv) inv } dump("cacheSolve",file="cacheSolve.R") source("cachematrix.R") source("cacheSolve.R")
19e6a33dc6254bab3907f5b92c6605311a07574e
a1d2e832de47d6c00ff49ba47f02a61e9d157210
/RandomUserTest.R
0d6a3dff0590a9bc6015d9b1dbac11a295f48d08
[]
no_license
KelseyChetnik/MigrateTest
25ee73301ff9aab19f10661683f02ad65701ac87
823caa2a0460e4984a775ef5b6a38e0ace6f1295
refs/heads/master
2023-01-28T08:33:51.891568
2020-12-08T15:24:17
2020-12-08T15:24:17
318,604,223
0
1
null
2020-12-08T15:13:35
2020-12-04T18:32:53
R
UTF-8
R
false
false
78
r
RandomUserTest.R
# random user test print("I'm a random user and I want to change your code!")
f8d81f316a29cdabbb3a24f82c65aaf10a368781
72119643f835f5d00531048b647fe79a581748b3
/R/summary_stats.R
2f602694685114c602697a3ff0eb9d890c172020
[ "MIT" ]
permissive
Diaz-Oscar/tidycyte
e8dd78031569c8451b04bf1bc3fa3c4103654ba7
94a4933fefb57846eacc4e8ee0f9541461f5cd46
refs/heads/master
2023-08-20T22:38:54.789143
2021-10-08T04:29:38
2021-10-08T04:29:38
null
0
0
null
null
null
null
UTF-8
R
false
false
1,383
r
summary_stats.R
#' Calculate summary statistics for tidycyte data #' #' Gives n, mean, standard deviation, standard error of the mean, and confidence interval. #' #' @param .data A data frame containing tidycyte data. #' @param measurevar <[`data-masking`][dplyr::dplyr_data_masking]> The masked name of the column containing tidycyte values. Typically this will be named \code{value}. #' @param ... <[`data-masking`][dplyr::dplyr_data_masking]> The masked names of the grouping variables for summary statistics. Values sharing each of these parameters will be grouped together for calculating statistics. Typically will be \code{elapsed,treatment,cell,metric}. #' @param .ci Numeric value specifying the confidence interval range as a fraction. Defaults to 0.95. #' @param .na.rm A boolean that indicates whether to ignore \code{NA} values. Defaults to FALSE. #' @keywords tidycyte summary standard error statistics #' @export #' @examples #' df %>% summary_stats(value,elapsed,treatment,cell,metric,.ci = 0.95) summary_stats <- function(.data, measurevar, ..., .ci = 0.95, .na.rm = FALSE) { options(dplyr.summarise.inform = F) .data %>% group_by(...) %>% summarise(n = n(), mean = mean({{measurevar}}, na.rm = .na.rm), sd = sd({{measurevar}}, na.rm = .na.rm), se = sd/sqrt(n), ci = se * qt(.ci/2 + 0.5, n-1)) %>% ungroup() }
1620d330fc2ece4d3e1d3b84ebd7b46d6d1894a1
0d7a326479fbddc1f8405a49fb6dcdd7690e6165
/file23.R
09406f069005ff0b081ba9375fb4b5213e1281f3
[]
no_license
jinishashah1998/R-practice
0019ba6787f3ebf84a7d9594b7e7cd66ac062145
94d726a171c00e1b0c7b2334593883c36cd38528
refs/heads/master
2022-10-19T18:52:05.728503
2020-06-10T21:28:04
2020-06-10T21:28:04
null
0
0
null
null
null
null
UTF-8
R
false
false
1,676
r
file23.R
library(dslabs) library(tidyverse) set.seed(1986) #for R 3.5 or earlier #if using R 3.6 or later, use `set.seed(1986, sample.kind="Rounding")` instead n <- round(2^rnorm(1000, 8, 1)) set.seed(1) #for R 3.5 or earlier #if using R 3.6 or later, use `set.seed(1, sample.kind="Rounding")` instead mu <- round(80 + 2*rt(1000, 5)) range(mu) schools <- data.frame(id = paste("PS",1:1000), size = n, quality = mu, rank = rank(-mu)) schools %>% top_n(10, quality) %>% arrange(desc(quality)) set.seed(1) #for R 3.5 or earlier #if using R 3.6 or later, use `set.seed(1, sample.kind="Rounding")` instead mu <- round(80 + 2*rt(1000, 5)) scores <- sapply(1:nrow(schools), function(i){ scores <- rnorm(schools$size[i], schools$quality[i], 30) scores }) schools <- schools %>% mutate(score = sapply(scores, mean)) schools %>% top_n(10, score) %>% arrange(desc(score)) %>% select(id, size, score) schools %>% summarize(median(size)) schools %>% top_n(10, score) %>% summarize(median(size)) schools %>% top_n(-10, score) %>% summarize(median(size)) overall <- mean(sapply(scores, mean)) alpha <- 25 schools5 <- schools %>% mutate(score_dev = overall + (score - overall) * size / (size + alpha)) %>% arrange(desc(score_dev)) # mutate(quality_new = score_dev-80) schools5 %>% top_n(10, score_dev) overall <- mean(sapply(scores, mean)) alpha <- 135 schools5 <- schools %>% mutate(score_dev = overall + (score - overall) * size / (size + alpha)) %>% arrange(desc(score_dev)) # mutate(quality_new = score_dev-80) schools5 %>% top_n(10, score_dev)
8509fefe79a61d54ff58df4ea1b57ed98fcc8bac
3d328d07eac93537021760bfdf9d5afc1943fdfd
/ui.R
188aa4b04fe9ef1fdb8cfdb135ba032ebb9246c2
[]
no_license
fss14142/DevelopingDataProductsProject
b360de61fc65c2c4058aa3b468a44bf15fbfd9ae
fd62654c7ba6b71761a5553f0e818c53c3612eae
refs/heads/master
2021-01-18T13:58:26.744358
2014-11-23T10:51:38
2014-11-23T10:51:38
null
0
0
null
null
null
null
UTF-8
R
false
false
1,750
r
ui.R
require(shiny) require(rCharts) require(httr) source("global.R") shinyUI( #bootstrapPage( fluidPage( title = "Bird distribution data from eBird.", titlePanel("Migrating birds observations across the US, using data from Ebird."), sidebarLayout( sidebarPanel( #h5('See the App Documentation tab in the main panel.'), includeHTML("abouDocumentation.html"), h4('State selection:'), selectInput(inputId = 'stateSel', label = '', choices = sort(states$name), selected = 'Arkansas'), h4('Bird species selection:'), selectInput(inputId = 'speciesSel', label = '', choices = birdSpecies$comName, selected = birdSpecies$comName[1]), h4('Bird species selection:'), p('Only the observations from the past 30 days are available from Ebird. Use the slider to choose how many days you want.'), sliderInput(inputId = 'daysSel', label = 'Select a number of days', min = 1, max = 30, value = 7, step = 1), h4(''), imageOutput('birdPic', height=200) ), mainPanel( tabsetPanel( tabPanel("State summary and Map", textOutput('stateSummary'), mapOutput('map_container') ), tabPanel("App Documentation", includeMarkdown("documentation.Rmd")) #tabPanel("Table", tableOutput("table")) ) ) # mainPanel( # textOutput('stateSummary'), # mapOutput('map_container') # ) # ) ) ) )
4eb322a6a592457d9234c0ec5a65d3b73c223dd9
7172e02a32a1046ec1a2ec0a8631f0ae3d2dd761
/R/data_functions.R
1751a6690df5bce0682e4ec3e78c855a4e0f61d0
[ "MIT" ]
permissive
nps-jeff/OZAB
24358d5fbb17b0c50778b66f1fd2c9c2986441ad
f98d494630e4fb38903a09e32fd2aec6bcfb0441
refs/heads/master
2023-03-03T20:52:48.088345
2021-02-11T19:14:52
2021-02-11T19:14:52
337,487,343
0
0
NOASSERTION
2021-02-11T19:14:53
2021-02-09T17:46:01
null
UTF-8
R
false
false
2,686
r
data_functions.R
#' NPS Data Format Transformation #' #' @param nps_data Tibble containing NPS Formatted Data #' @param covariate_cols Columns which refer to covariates such as Year, Location, Grade, etc. #' #' @return Long format tibble #' @export #' #' @examples #' \dontrun{ #' pivot_nps_data(nps_data_ex, c(Year, Loc_Name, SCOSA)) #' } #' @importFrom rlang .data pivot_nps_data <- function(nps_data, covariate_cols = NULL){ tidyr::pivot_longer( data = nps_data, cols = -c({{ covariate_cols }}), names_to = 'Species', values_to = 'Cover Class' ) %>% dplyr::mutate( `Cover Class` = forcats::fct_rev(forcats::as_factor(.data$`Cover Class`)) ) } #' Add Presence Column #' #' @param df Tibble containing Cover Class Data #' @param cover_class_col Column Name for Cover Class Data #' @param absence_value Value for Absence -- Assumed to be Zero #' #' @return Provided tibble with column of logicals indicating presence / absence appended #' @export #' #' @examples #' @importFrom rlang .data add_presence <- function(df, cover_class_col = .data$`Cover Class`, absence_value = 0){ df %>% dplyr::mutate(Presence = ifelse({{ cover_class_col }} == absence_value, FALSE, TRUE)) } compose_ozab_data <- function(df, presence_formula, abundance_formula, cutpoint_scheme){ ## Check if response of presence_formula is in dataframe presence_response_col <- all.vars(presence_formula)[1] if(!(presence_response_col %in% names(df))){ stop(glue::glue('Response of presence-absence formula, { presence_response_col }, not found in provided data')) } ## Check if response of abundance_formula is in dataframe abundance_response_col <- all.vars(abundance_formula)[1] if(!(abundance_response_col %in% names(df))){ stop(glue::glue('Response of abundance formula, { abundance_response_col }, not found in provided data')) } ## Make sure a single column is not response for both abundance and presence formula if(presence_response_col == abundance_response_col){ stop('Response columns of abundance and presence-absence cannot be 
identical') } ## Make sure presence_column only has two levels ## TODO ## Data Composition y <- as.numeric(df[[presence_response_col]]) * as.numeric(df[[abundance_response_col]]) N <- length(y) presence_matrix <- as.matrix(modelr::model_matrix(df, presence_formula)) Kp <- ncol(presence_matrix) abundance_matrix <- as.matrix(modelr::model_matrix(df, abundance_formula)) Ka <- ncol(abundance_matrix) c <- cutpoint_scheme K <- length(c) + 1 list( N = N, K = K, c = c, y = y, Kp = Kp, Xp = presence_matrix, Ka = Ka, Xa = abundance_matrix ) }
43f93eaacc4f4576534cbaa1cab2ef440816fb94
7da7a83763b7c4b4ab9608ffec1599d30f673662
/R/race-wrapper.R
e3a23aa8275abe1ce78db5711663ba6843b808aa
[]
no_license
abeham/irace
57dfaf83b8c6891e01a5bbdcec7e7927fb40c254
90467edf1ac9fc3950b76780c5fba90c62c3d309
refs/heads/master
2020-03-14T03:19:24.959619
2017-08-03T16:34:51
2017-08-03T16:34:51
131,417,353
0
0
null
2018-04-28T14:39:37
2018-04-28T14:39:37
null
UTF-8
R
false
false
15,111
r
race-wrapper.R
# FIXME: This is needed because race.R is not divided in two-stages
# run/evaluate like irace is, so there is no way to communicate data
# from the first stage to the second.
#
# FIXME: In fact, we should use this trick also in irace.R to avoid
# pass-by-copy-on-write of huge matrices and data.frames and instead
# pass-by-reference an environment containing those.
.irace <- new.env()

# Build the command-line argument string for one configuration by pairing each
# parameter value with its switch. NA values (unset parameters) are skipped.
buildCommandLine <- function(values, switches)
{
  irace.assert(length(values) == length(switches))
  command <- ""
  # FIXME: This probably can be implemented faster with apply() and
  # paste(collapse=" "). But how to get the index i in that case?
  for (i in seq_along(values)) {
    value <- values[i]
    if (!is.na(value)) {
      command <- paste0(command, " ", switches[i], value)
    }
  }
  return(command)
}

# This function tries to convert a, possibly empty, character vector into a
# numeric vector.
parse.output <- function(outputRaw, verbose)
{
  if (verbose) {
    cat (outputRaw, sep = "\n")
  }
  # Initialize output as raw. If it is empty stays like this.
  output <- outputRaw
  # strsplit crashes if outputRaw == character(0)
  if (length(outputRaw) > 0) {
    output <- strsplit(trim(outputRaw), "[[:space:]]+")[[1]]
  }
  # suppressWarnings to avoid messages about NAs introduced by coercion
  output <- suppressWarnings (as.numeric (output))
  return (output)
}

# Abort irace with a detailed diagnostic: the error message, the offending
# targetRunner/targetEvaluator calls (when known), the raw output, and advice
# for the user. Never returns (irace.error stops execution).
target.error <- function(err.msg, output, scenario, target.runner.call,
                         target.evaluator.call = NULL)
{
  if (!is.null(target.evaluator.call)) {
    err.msg <- paste0(err.msg, "\n", .irace.prefix,
                      "The call to targetEvaluator was:\n", target.evaluator.call)
  }
  if (!is.null(target.runner.call)) {
    err.msg <- paste0(err.msg, "\n", .irace.prefix,
                      "The call to targetRunner was:\n", target.runner.call)
  }
  if (is.null(output$outputRaw)) {
    # Message for a function call.
    # FIXME: Ideally, we should print the list as R would print it.
    output$outputRaw <- toString(output)
    advice.txt <- paste0(
      "This is not a bug in irace, but means that something failed in ",
      "a call to the targetRunner or targetEvaluator functions provided by the user.",
      " Please check those functions carefully.")
  } else {
    # Message for an external script.
    advice.txt <- paste0(
      "This is not a bug in irace, but means that something failed when",
      " running the command(s) above or they were terminated before completion.",
      " Try to run the command(s) above from the execution directory '",
      scenario$execDir,
      "' to investigate the issue.")
  }
  irace.error(err.msg, "\n",
              .irace.prefix, "The output was:\n",
              paste(output$outputRaw, collapse = "\n"),
              "\n",
              .irace.prefix, advice.txt)
}

# Validate the list returned by targetEvaluator: it must carry a finite
# numeric 'cost' (unless it already carries an 'error'). Aborts via
# target.error() on any violation.
check.output.target.evaluator <- function (output, scenario, target.runner.call = NULL)
{
  if (!is.list(output)) {
    target.error ("The output of targetEvaluator must be a list",
                  list(), scenario, target.runner.call = target.runner.call)
    return(NULL)
  }
  err.msg <- output$error
  if (is.null(err.msg)) {
    if (is.null(output$cost)) {
      err.msg <- "The output of targetEvaluator must be one number 'cost'!"
    } else if (is.na (output$cost)) {
      err.msg <- "The output of targetEvaluator is not numeric!"
    } else if (is.infinite(output$cost)) {
      err.msg <- "The output of targetEvaluator is not finite!"
    }
  }
  if (!is.null(err.msg)) {
    target.error (err.msg, output, scenario,
                  target.runner.call = target.runner.call,
                  target.evaluator.call = output$call)
  }
}

# Run the user-provided targetEvaluator stored in .irace and validate its
# output before returning it.
exec.target.evaluator <- function (experiment, num.configurations, all.conf.id,
                                   scenario, target.runner.call)
{
  output <- .irace$target.evaluator(experiment, num.configurations, all.conf.id,
                                    scenario, target.runner.call)
  check.output.target.evaluator (output, scenario,
                                 target.runner.call = target.runner.call)
  return (output)
}

# Default targetEvaluator: invokes the external scenario$targetEvaluator script
# from scenario$execDir and parses a single numeric 'cost' from its stdout.
target.evaluator.default <- function(experiment, num.configurations, all.conf.id,
                                     scenario, target.runner.call)
{
  configuration.id <- experiment$id.configuration
  instance.id <- experiment$id.instance
  seed <- experiment$seed
  instance <- experiment$instance
  execDir <- scenario$execDir
  debugLevel <- scenario$debugLevel
  targetEvaluator <- scenario$targetEvaluator
  if (as.logical(file.access(targetEvaluator, mode = 1))) {
    irace.error ("targetEvaluator", shQuote(targetEvaluator),
                 "cannot be found or is not executable!\n")
  }
  cwd <- setwd (execDir)
  # FIXME: I think we don't even need to paste the args, since system2 handles this by itself.'
  args <- paste(configuration.id, instance.id, seed, instance,
                num.configurations, all.conf.id)
  output <- runcommand(targetEvaluator, args, configuration.id, debugLevel)
  setwd (cwd)

  cost <- time <- NULL
  err.msg <- output$error
  if (is.null(err.msg)) {
    v.output <- parse.output(output$output, verbose = (scenario$debugLevel >= 2))
    if (length(v.output) != 1) {
      err.msg <- paste0("The output of targetEvaluator must be one number 'cost'!")
    } else {
      cost <- v.output[1]
    }
  }
  return(list(cost = cost, error = err.msg,
              outputRaw = output$output,
              call = paste(targetEvaluator, args)))
}

# Validate (and normalize) the list returned by targetRunner. Which fields are
# required depends on the scenario: with a separate targetEvaluator only 'time'
# is expected; with a time budget both 'cost' and 'time'; with an experiment
# budget just 'cost'. Aborts via target.error() on violations.
check.output.target.runner <- function (output, scenario)
{
  if (!is.list(output)) {
    output <- list()
    err.msg <- paste0("The output of targetRunner must be a list")
    target.error (err.msg, output, scenario, target.runner.call = NULL)
    return(output)
  }
  err.msg <- output$error
  if (is.null(err.msg)) {
    # Treat NA cost/time as absent.
    if (is.null.or.na(output$cost)) output$cost <- NULL
    if (is.null.or.na(output$time)) output$time <- NULL
    # When targetEvaluator is provided targetRunner must return only the time.
    if (!is.null(.irace$target.evaluator)) {
      if (scenario$maxTime > 0 && is.null(output$time)) {
        err.msg <- paste0("The output of targetRunner must be one number 'time'!")
      } else if (!is.null(output$cost)) {
        err.msg <- paste0("The output of targetRunner must be empty or just one number 'time'!")
      }
    } else if (scenario$maxTime > 0 && (is.null (output$cost) || is.null(output$time))) {
      err.msg <- paste0("The output of targetRunner must be two numbers 'cost time'!")
    } else if (scenario$maxExperiments > 0 && is.null (output$cost)) {
      err.msg <- paste0("The output of targetRunner must be one number 'cost'!")
    } else if (!is.null(output$time) && output$time < 0) {
      err.msg <- paste0("The value of time returned by targetRunner cannot be negative (",
                        output$time, ")!")
    }
    if (!is.null (output$cost)) {
      if (is.na(output$cost)) {
        err.msg <- paste0("The cost returned by targetRunner is not numeric!")
      } else if (is.infinite(output$cost)) {
        err.msg <- paste0("The cost returned by targetRunner is not finite!")
      }
    }
    if (!is.null (output$time)) {
      if (is.na(output$time)) {
        err.msg <- paste0("The time returned by targetRunner is not numeric!")
      } else if (is.infinite(output$time)) {
        err.msg <- paste0("The time returned by targetRunner is not finite!")
      }
    }
    # Fix too small time.
    output$time <- if (is.null(output$time)) NA else max(output$time, 0.01)
  }
  if (!is.null(err.msg)) {
    target.error (err.msg, output, scenario, target.runner.call = output$call)
  }
  return (output)
}

# This function invokes target.runner. When used on a remote node by Rmpi,
# environments do not seem to be shared and the default value is evaluated too
# late, thus we have to pass .irace$target.runner explicitly.
# Retries up to scenario$targetRunnerRetries times before letting the final
# attempt's error propagate.
exec.target.runner <- function(experiment, scenario, target.runner = .irace$target.runner)
{
  doit <- function(experiment, scenario) {
    x <- target.runner(experiment, scenario)
    return (check.output.target.runner (x, scenario))
  }
  retries <- scenario$targetRunnerRetries
  while (retries > 0) {
    output <- try (doit(experiment, scenario))
    if (!inherits(output, "try-error") && is.null(output$error)) {
      return (output)
    }
    irace.note("Retrying (", retries, " left).\n")
    retries <- retries - 1
  }
  # Last attempt: errors here are not caught and abort irace.
  output <- doit(experiment, scenario)
  return (output)
}

# Default targetRunner: invokes the external scenario$targetRunner script with
# the configuration's command line and parses 'cost' and/or 'time' from stdout.
target.runner.default <- function(experiment, scenario)
{
  debugLevel <- scenario$debugLevel
  configuration.id <- experiment$id.configuration
  instance.id <- experiment$id.instance
  seed <- experiment$seed
  configuration <- experiment$configuration
  instance <- experiment$instance
  switches <- experiment$switches
  targetRunner <- scenario$targetRunner
  if (as.logical(file.access(targetRunner, mode = 1))) {
    irace.error ("targetRunner ", shQuote(targetRunner),
                 " cannot be found or is not executable!\n")
  }
  args <- paste(configuration.id, instance.id, seed, instance,
                buildCommandLine(configuration, switches))
  output <- runcommand(targetRunner, args, configuration.id, debugLevel)

  cost <- time <- NULL
  err.msg <- output$error
  if (is.null(err.msg)) {
    v.output <- parse.output(output$output, verbose = (scenario$debugLevel >= 2))
    if (length(v.output) > 2) {
      err.msg <- paste0("The output of targetRunner should not be more than two numbers!")
    } else if (length(v.output) == 1) {
      # One number: 'time' when an external evaluator computes cost, else 'cost'.
      if (!is.null(scenario$targetEvaluator)) {
        time <- v.output[1]
      } else {
        cost <- v.output[1]
      }
    } else if (length(v.output) == 2) {
      cost <- v.output[1]
      time <- v.output[2]
    }
  }
  return(list(cost = cost, time = time, error = err.msg,
              outputRaw = output$output,
              call = paste(targetRunner, args)))
}

# Run one batch of experiments, choosing the execution backend from the
# scenario: user-supplied parallel runner, batch/cluster mode, MPI,
# parallel/mclapply, or plain sequential execution. Returns a list with one
# targetRunner output per experiment.
execute.experiments <- function(experiments, scenario)
{
  parallel <- scenario$parallel
  mpi <- scenario$mpi
  execDir <- scenario$execDir

  if (!isTRUE (file.info(execDir)$isdir)) {
    irace.error ("Execution directory '", execDir,
                 "' is not found or not a directory\n")
  }
  cwd <- setwd (execDir)
  on.exit(setwd(cwd), add = TRUE)

  target.output <- vector("list", length(experiments))

  if (!is.null(scenario$targetRunnerParallel)) {
    # User-defined parallelization
    target.output <- scenario$targetRunnerParallel(experiments,
                                                   exec.target.runner,
                                                   scenario = scenario)
  } else if (scenario$batchmode != 0) {
    target.output <- cluster.lapply (experiments, scenario = scenario)
  } else if (parallel > 1) {
    if (mpi) {
      if (scenario$loadBalancing) {
        target.output <- Rmpi::mpi.applyLB(experiments, exec.target.runner,
                                           scenario = scenario,
                                           target.runner = .irace$target.runner)
      } else {
        # Without load-balancing, we need to split the experiments into chunks
        # of size parallel.
        target.output <-
          unlist(use.names = FALSE,
                 tapply(experiments, ceiling(1:length(experiments) / parallel),
                        Rmpi::mpi.apply, exec.target.runner,
                        scenario = scenario,
                        target.runner = .irace$target.runner))
      }
      # FIXME: if stop() is called from mpi.applyLB, it does not
      # terminate the execution of the parent process, so it will
      # continue and give more errors later. We have to terminate
      # here, but is there a nicer way to detect this and terminate?
      if (any(sapply(target.output, inherits, "try-error"))) {
        # FIXME: mclapply has some bugs in case of error. In that
        # case, each element of the list does not keep the output of
        # each configuration and repetitions may occur.
        cat(unique(unlist(target.output[sapply(
          target.output, inherits, "try-error")])), file = stderr(), sep = "")
        irace.error("A slave process terminated with a fatal error")
      }
    } else {
      if (.Platform$OS.type == 'windows') {
        irace.assert(!is.null(.irace$cluster))
        if (scenario$loadBalancing) {
          target.output <- parallel::parLapplyLB(.irace$cluster, experiments,
                                                 exec.target.runner,
                                                 scenario = scenario)
        } else {
          target.output <- parallel::parLapply(.irace$cluster, experiments,
                                               exec.target.runner,
                                               scenario = scenario)
        }
        # FIXME: if stop() is called from parLapply, then the parent
        # process also terminates, and we cannot give further errors.
      } else {
        target.output <- parallel::mclapply(experiments, exec.target.runner,
                                            # FALSE means load-balancing.
                                            mc.preschedule = !scenario$loadBalancing,
                                            mc.cores = parallel,
                                            scenario = scenario)
        # FIXME: if stop() is called from mclapply, it does not
        # terminate the execution of the parent process, so it will
        # continue and give more errors later. We have to terminate
        # here, but is there a nicer way to detect this and terminate?
        if (any(sapply(target.output, inherits, "try-error"))
            || any(sapply(target.output, is.null))) {
          # FIXME: mclapply has some bugs in case of error. In that
          # case, each element of the list does not keep the output of
          # each configuration and repetitions may occur.
          cat(unique(unlist(
            target.output[sapply(
              target.output, inherits, "try-error")])), file = stderr())
          irace.error("A child process triggered a fatal error")
        }
      }
    }
  } else {
    # One process, all sequential
    for (k in seq_along(experiments)) {
      target.output[[k]] <- exec.target.runner(experiments[[k]], scenario = scenario)
    }
  }
  return(target.output)
}

# Second stage: run targetEvaluator sequentially for each experiment and fill
# in the 'cost' (and, if missing, 'call'/'time') fields of target.output.
execute.evaluator <- function(experiments, scenario, target.output, configurations.id)
{
  ## FIXME: We do not need the configurations.id argument:
  # configurations.id <- sapply(experiments, function(x) x[["id.configuration"]])
  all.conf.id <- paste(configurations.id, collapse = " ")
  ## Evaluate configurations sequentially
  for (k in seq_along(experiments)) {
    output <- exec.target.evaluator(experiment = experiments[[k]],
                                    num.configurations = length(configurations.id),
                                    all.conf.id, scenario = scenario,
                                    target.runner.call = target.output[[k]]$call)
    target.output[[k]]$cost <- output$cost
    if (is.null(target.output[[k]]$call))
      target.output[[k]]$call <- output$call
    if (is.null(target.output[[k]]$time))
      target.output[[k]]$time <- output$time
  }
  return(target.output)
}
72e60a756b692289faaabd33d356a39ec26d7d5b
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/naniar/examples/miss_scan_count.Rd.R
66a23ae926977e401c8e1e6ef158c0f32d7439d4
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
564
r
miss_scan_count.Rd.R
library(naniar) ### Name: miss_scan_count ### Title: Search and present different kinds of missing values ### Aliases: miss_scan_count ### ** Examples dat_ms <- tibble::tribble(~x, ~y, ~z, 1, "A", -100, 3, "N/A", -99, NA, NA, -98, -99, "E", -101, -98, "F", -1) miss_scan_count(dat_ms,-99) miss_scan_count(dat_ms,c(-99,-98)) miss_scan_count(dat_ms,c("-99","-98","N/A")) miss_scan_count(dat_ms,common_na_strings)
1f500f6d35237a9aff0f851e8cacf51af2261228
c77069c2dc6dbf3f9449a44e06d70b540a1912b5
/man/fitRMU_MHmcmc.Rd
aa93adcdbd305dca7baf378e841639fe90549668
[]
no_license
cran/phenology
62b323a9231c3701568de58c57a804e043abe6a2
991d2c35dcbcf1fcff23cbcc0c2f82b74a868dfb
refs/heads/master
2023-04-15T03:37:51.464388
2023-04-01T09:10:02
2023-04-01T09:10:02
17,698,504
1
0
null
null
null
null
UTF-8
R
false
true
4,452
rd
fitRMU_MHmcmc.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitRMU_MHmcmc.R
\name{fitRMU_MHmcmc}
\alias{fitRMU_MHmcmc}
\title{Run the Metropolis-Hastings algorithm for RMU.data}
\usage{
fitRMU_MHmcmc(
  result = stop("An output from fitRMU() must be provided"),
  n.iter = 10000,
  parametersMCMC = stop("A parameter set from fitRMU_MHmcmc_p() must be provided"),
  n.chains = 1,
  n.adapt = 0,
  thin = 1,
  adaptive = FALSE,
  adaptive.lag = 500,
  adaptive.fun = function(x) {
    ifelse(x > 0.234, 1.3, 0.7)
  },
  trace = FALSE,
  traceML = FALSE,
  intermediate = NULL,
  filename = "intermediate.Rdata",
  previous = NULL
)
}
\arguments{
\item{result}{An object obtained after a SearchR fit}

\item{n.iter}{Number of iterations for each step}

\item{parametersMCMC}{A set of parameters used as initial point for searching with information on priors}

\item{n.chains}{Number of replicates}

\item{n.adapt}{Number of iterations before outputs are stored}

\item{thin}{Number of iterations between each stored output}

\item{adaptive}{Should an adaptive process for SDProp be used}

\item{adaptive.lag}{Lag used to analyze the SDProp value in an adaptive context}

\item{adaptive.fun}{Function used to change the SDProp}

\item{trace}{TRUE or FALSE or period, shows progress}

\item{traceML}{TRUE or FALSE to show ML}

\item{intermediate}{Period for saving intermediate result, NULL for no save}

\item{filename}{If intermediate is not NULL, save intermediate results in this file}

\item{previous}{Previous result to be continued.
Can be the filename in which intermediate results are saved.} } \value{ A list with resultMCMC being mcmc.list object, resultLnL being likelihoods and parametersMCMC being the parameters used } \description{ Run the Metropolis-Hastings algorithm for RMU.data.\cr The number of iterations is n.iter+n.adapt+1 because the initial likelihood is also displayed.\cr I recommend thin=1 because the method to estimate SE uses resampling.\cr As initial point is maximum likelihood, n.adapt = 0 is a good solution.\cr The parameters intermediate and filename are used to save intermediate results every 'intermediate' iterations (for example 1000). Results are saved in a file of name filename.\cr The parameter previous is used to indicate the list that has been save using the parameters intermediate and filename. It permits to continue a mcmc search.\cr These options are used to prevent the consequences of computer crash or if the run is very very long and computer processes at time limited.\cr } \details{ fitRMU_MHmcmc runs the Metropolis-Hastings algorithm for RMU.data (Bayesian MCMC) } \examples{ \dontrun{ library("phenology") RMU.names.AtlanticW <- data.frame(mean=c("Yalimapo.French.Guiana", "Galibi.Suriname", "Irakumpapy.French.Guiana"), se=c("se_Yalimapo.French.Guiana", "se_Galibi.Suriname", "se_Irakumpapy.French.Guiana")) data.AtlanticW <- data.frame(Year=c(1990:2000), Yalimapo.French.Guiana=c(2076, 2765, 2890, 2678, NA, 6542, 5678, 1243, NA, 1566, 1566), se_Yalimapo.French.Guiana=c(123.2, 27.7, 62.5, 126, NA, 230, 129, 167, NA, 145, 20), Galibi.Suriname=c(276, 275, 290, NA, 267, 542, 678, NA, 243, 156, 123), se_Galibi.Suriname=c(22.3, 34.2, 23.2, NA, 23.2, 4.3, 2.3, NA, 10.3, 10.1, 8.9), Irakumpapy.French.Guiana=c(1076, 1765, 1390, 1678, NA, 3542, 2678, 243, NA, 566, 566), se_Irakumpapy.French.Guiana=c(23.2, 29.7, 22.5, 226, NA, 130, 29, 67, NA, 15, 20)) cst <- fitRMU(RMU.data=data.AtlanticW, RMU.names=RMU.names.AtlanticW, colname.year="Year", model.trend="Constant", 
model.SD="Zero") pMCMC <- fitRMU_MHmcmc_p(result=cst, accept=TRUE) fitRMU_MCMC <- fitRMU_MHmcmc(result = cst, n.iter = 10000, parametersMCMC = pMCMC, n.chains = 1, n.adapt = 0, thin = 1, trace = FALSE) } } \seealso{ Other Fill gaps in RMU: \code{\link{CI.RMU}()}, \code{\link{fitRMU_MHmcmc_p}()}, \code{\link{fitRMU}()}, \code{\link{logLik.fitRMU}()}, \code{\link{plot.fitRMU}()} } \author{ Marc Girondot } \concept{Fill gaps in RMU}
98c5a17ad3db89f02efe36fb614f78c601da3d9b
a32d706b41bdce3e777a157d4aafeefbf4f01237
/GeneraAnalysis.R
dc25d64a84c5f57099ad8b6f73f6ec6c44294063
[]
no_license
levisimons/SCCWRP
8873f3cca4b780f4ca8dcde52442c03e817323dd
240a162517ed743842a0ad327132aeec87434700
refs/heads/master
2022-12-15T21:03:21.785069
2022-12-05T20:45:16
2022-12-05T20:45:16
110,313,993
3
1
null
null
null
null
UTF-8
R
false
false
8,287
r
GeneraAnalysis.R
library("plyr") library(dplyr) library("ggplot2") library(lubridate) library("ape") library("vegan") library("microbiome") library(data.table) library(tidyr) library(ggplot2) library(vegan) library(scales) library(grid) library(reshape2) library(phyloseq) library(reshape2) setwd("~/Desktop/SCCWRP") #Read in site data containing biological counts, water chemistry, and land usage values. GISBioData <- read.table("CAGISBioData.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE) #Filter out to taxonomic groups of interest. GISBioData <- subset(GISBioData, MeasurementType == "benthic macroinvertebrate relative abundance") #Remove duplicate measures. GISBioData <- GISBioData[!duplicated(GISBioData[,c("UniqueID","FinalID","Count")]),] #Read in sample metadata. SCCWRP <- read.table("CSCI.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE) #Merge in altitude. GISBioData <- join(GISBioData,SCCWRP[,c("UniqueID","altitude")],by=c("UniqueID")) #Add the number of genera per sample taxaBySample <- count(GISBioData,UniqueID) colnames(taxaBySample) <- c("UniqueID","nTaxa") GISBioData <- join(GISBioData,taxaBySample,by=c("UniqueID")) #Add the number of samples per watershed WSBySample <- as.data.frame(table(SCCWRP$Watershed)) colnames(WSBySample) <- c("Watershed","NSamples") GISBioData <- join(GISBioData,WSBySample,by=c("Watershed")) #Read in watershed metadata #HUC2 = State level, HUC4 = super-regional level, HUC6 = regional level, HUC8 = local level Watersheds <- read.table("SCCWRPWatersheds.tsv", header=TRUE, sep="\t",as.is=T,skip=0,fill=TRUE,check.names=FALSE) Watersheds$Watershed <- gsub('\\s+', '', Watersheds$Watershed) #Merge in watershed area GISBioData <- join(GISBioData,Watersheds,by=c("Watershed")) #Read in functional feeding group for each taxon. 
#Abbreviations used in denoting functional feeding groups are as follows ( http://www.safit.org/Docs/CABW_std_taxonomic_effort.pdf ): #P= predator MH= macrophyte herbivore OM= omnivore #PA= parasite PH= piercer herbivore XY= xylophage (wood eater) #CG= collector-gatherer SC= scraper #CF= collector filterer SH= shredder FFG <- read.table("metadata.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE) # Filter data so only known functional feeding groups are kept. FFG <- subset(FFG, FunctionalFeedingGroup != "") # Generate functional feeding group data frame. FFG <- FFG[,c("FinalID","LifeStageCode","FunctionalFeedingGroup")] FFG <- subset(FFG,LifeStageCode=="L" | LifeStageCode=="X" | FinalID=="Hydrophilidae" | FinalID=="Hydraenidae") FFG <- FFG[!duplicated(FFG$FinalID),] #Merge in functional feeding groups into sample data. GISBioData <- join(GISBioData,FFG[,c("FinalID","FunctionalFeedingGroup")],by=c("FinalID")) FFGCounts <- na.omit(as.data.frame(unique(GISBioData$FunctionalFeedingGroup))) colnames(FFGCounts) <- c("FunctionalFeedingGroup") FFGCounts$FunctionalFeedingGroup <- as.character(as.factor(FFGCounts$FunctionalFeedingGroup)) FFGCounts <- arrange(FFGCounts,FunctionalFeedingGroup) #Add column containing the sum of taxa, by functional feeding groups, within each sample. tmp <- GISBioData[,c("UniqueID","FunctionalFeedingGroup","Count")] colnames(tmp) <- c("UniqueID","FunctionalFeedingGroup","FFGCount") tmp <- aggregate(tmp$FFGCount, by=list(Category=tmp$UniqueID,tmp$FunctionalFeedingGroup), FUN=sum) colnames(tmp) <- c("UniqueID","FunctionalFeedingGroup","FFGCount") tmp <- arrange(tmp,UniqueID,FunctionalFeedingGroup) GISBioData <- join(GISBioData,tmp,by=c("UniqueID","FunctionalFeedingGroup")) #Find watersheds with a larger enough set of samples for downstream analysis. sampleMin <- 25 GISBioDataLWS <- subset(GISBioData,NSamples>=sampleMin) selected <- GISBioDataLWS #Get unique genera for the heavily sampled statewide data set. 
GeneraCounts <- na.omit(as.data.frame(unique(selected$FinalID))) colnames(GeneraCounts) <- c("FinalID") tmp <- selected[,c("UniqueID","FinalID","Count")] #Construct a table of counts by genera in a Phyloseq otu table format otudata <- arrange(GeneraCounts,FinalID) for(ID in unique(selected$UniqueID)){ tmp2 <- subset(tmp,UniqueID==ID) tmp2 <- tmp2[,c("FinalID","Count")] colnames(tmp2) <- c("FinalID",ID) tmp2 <- merge(tmp2,GeneraCounts,by=c("FinalID"),all=TRUE) otudata <- cbind(otudata,tmp2) otudata <- otudata[,!duplicated(colnames(otudata))] } #Create Phyloseq object with the OTU table, sample factors, and taxonomic data. otumat <- t(otudata) colnames(otumat) <- otumat[c(1),] otumat <- otumat[-c(1),] otumat[is.na(otumat)] <- 0 otumat <- as.data.frame(otumat) sampleNames <- rownames(otumat) otumat <- sapply(otumat,as.numeric) rownames(otumat) <- sampleNames OTU = otu_table(otumat,taxa_are_rows = FALSE) taxmat <- as.matrix(GeneraCounts) rownames(taxmat) <- as.data.frame(taxmat)$FinalID TAX = tax_table(taxmat) samplemat <- selected[,c("UniqueID","HUC2","HUC4","HUC6","HUC8","LU_2000_5K","Year","altitude")] samplemat <- samplemat[!duplicated(samplemat),] samplemat$HUC2 <- as.factor(samplemat$HUC2) samplemat$HUC4 <- as.factor(samplemat$HUC4) samplemat$HUC6 <- as.factor(samplemat$HUC6) samplemat$HUC8 <- as.factor(samplemat$HUC8) samplemat$LU_2000_5K <- as.numeric(as.character(samplemat$LU_2000_5K)) samplemat$Year <- as.factor(samplemat$Year) samplemat$altitude <- as.numeric(as.character(samplemat$altitude)) row.names(samplemat) <- samplemat$UniqueID sampledata <- sample_data(samplemat) physeq <- phyloseq(OTU,TAX,sampledata) #What factors are driving variations in alpha and beta diversity? #Samples are comprised of the relative abundances of taxa by genera. 
set.seed(1) test <- subset_samples(physeq) #test <- transform_sample_counts(test, function(x) x/sum(x)) testDF <- as(sample_data(test), "data.frame") testDF$HUC2 <- as.factor(testDF$HUC2) testDF$HUC4 <- as.factor(testDF$HUC4) testDF$HUC6 <- as.factor(testDF$HUC6) testDF$HUC8 <- as.factor(testDF$HUC8) levels(alphaDF$HUC8) <- unique(alphaDF$HUC8) alphaDF <- GISBioData[,c("UniqueID","HUC2","HUC4","HUC6","HUC8","altitude","LU_2000_5K","Year","nTaxa","CSCI")] alphaDF <- alphaDF[!duplicated(alphaDF),] alphaDF$HUC2 <- as.factor(alphaDF$HUC2) alphaDF$HUC4 <- as.factor(alphaDF$HUC4) alphaDF$HUC6 <- as.factor(alphaDF$HUC6) alphaDF$HUC8 <- as.factor(alphaDF$HUC8) levels(alphaDF$HUC8) <- unique(alphaDF$HUC8) summary(aov(nTaxa~HUC2+HUC4+HUC6+HUC8+altitude+LU_2000_5K+Year,data=alphaDF)) testBeta <- adonis(distance(test,method="jaccard") ~ HUC2+HUC4+HUC6+HUC8+altitude+LU_2000_5K+Year, data=testDF, permutations=1000) testBeta #Determine land use deciles for the full state data set. LUdf <- GISBioData[,c("UniqueID","LU_2000_5K")] LUdf <- LUdf[!duplicated(LUdf),] LUquantile <- quantile(LUdf$LU_2000_5K,probs=seq(0,1,0.1)) #Determine altitude deciles for the full state data set. Altitudedf <- GISBioData[,c("UniqueID","altitude")] Altitudedf <- Altitudedf[!duplicated(Altitudedf),] Altitudequantile <- quantile(Altitudedf$altitude,probs=seq(0,1,0.1)) #Compute trends in beta diversity groups of samples set by #altitude, year, and land use. 
GeneraTrends <- data.frame() for(i in 1:length(LUquantile)){ if(i>1){ LULow <- LUquantile[i-1] LUHigh <- LUquantile[i] MidLU <- 0.5*(LULow+LUHigh) for(j in 1:length(Altitudequantile)){ if(j>1){ AltitudeLow <- Altitudequantile[j-1] AltitudeHigh <- Altitudequantile[j] MidAltitude <- 0.5*(AltitudeLow+AltitudeHigh) sampleDF <- subset(GISBioData,LU_2000_5K >= LULow & LU_2000_5K <= LUHigh & altitude >= AltitudeLow & altitude <= AltitudeHigh) if(length(unique(sampleDF$UniqueID))>2){ physeqLUYear <- subset_samples(physeq,LU_2000_5K >= LULow & LU_2000_5K <= LUHigh & altitude >= AltitudeLow & altitude <= AltitudeHigh) if(nrow(otu_table(physeqLUYear)) > 2){ sampleBeta <- distance(physeqLUYear,method="bray") print(paste(MidLU,MidAltitude,length(unique(sampleDF$UniqueID)),mean(sampleBeta),sd(sampleBeta))) row <- t(as.data.frame(c(MidLU,MidAltitude,length(unique(sampleDF$UniqueID)),mean(sampleBeta),sd(sampleBeta)))) GeneraTrends <- rbind(row,GeneraTrends) } } } } } } colnames(GeneraTrends) <- c("MidLU","MidAltitude","NSamples","MeanBeta","SDBeta") rownames(GeneraTrends) <- 1:nrow(GeneraTrends)
f10905bc35c1f8b60eb00e2628b3c853ad9be799
b51a4d510afd3aa852e446290d7bba9695ae8e72
/products/ui.R
36cbcafa875999ec710551bf3dd947fd8aca183b
[ "MIT" ]
permissive
sharksmhi/SHARK4R
8ed9e733b3cab91862401cc233d1fb503187d26d
acb28d26660f7daa45c7f2395329a67e038534dd
refs/heads/master
2023-08-29T23:07:56.765132
2021-10-25T06:25:23
2021-10-25T06:25:23
270,549,422
0
0
null
null
null
null
UTF-8
R
false
false
508
r
ui.R
library(SHARK4R) library(shiny) library(htmltools) library(leaflet) library(DT) library(rmarkdown) shinyUI(pageWithSidebar( headerPanel("SHARK4R QC"), sidebarPanel( helpText(), fileInput('file1', 'Choose ZIP File', accept=c('.zip')), tags$hr(), leafletOutput("mymap"), tags$hr(), downloadButton('report', 'Generate report') ), mainPanel( DT::DTOutput(outputId = "table") ) ))
369e0605cb9a5238633818b75548f72ca99b9ec2
da44d6b825853e76fa449ff1d477c8c4b4748f84
/man/vizectionExampleGenes.Rd
7ecf668059b7199fb3c38374635c51935048a131
[]
no_license
shamansim/Vizection
fde7f22552cf1bcb487dcb2a50d72d2c0064fb59
37246952e43d2a1a9132087ba8b92718ddf625e8
refs/heads/master
2020-12-25T14:38:25.793023
2017-10-11T14:43:51
2017-10-11T14:43:51
64,266,012
0
2
null
2017-10-11T14:44:05
2016-07-27T01:12:26
R
UTF-8
R
false
true
521
rd
vizectionExampleGenes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vizectionServerFunctions.R \name{vizectionExampleGenes} \alias{vizectionExampleGenes} \title{vizection example \dQuote{genes} table} \usage{ vizectionExampleGenes() } \description{ Toy example for the \dQuote{genes} table to be used in vizection. } \details{ This is for debugging purposes; the example does not reflect well the kind of real data that is expected. } \examples{ summary(vizectionExampleGenes()) head(vizectionExampleGenes()) }
7c965ccc9e7c7780c676dcb12953fe40417d76b1
644c06fefbf06d8b6b96e7ca4e9f39607b9dc83a
/plot1.R
a6538f2707002c1a1b31b9c258a8ada7a3277d64
[]
no_license
Wil1999/ExData_Plotting1
162984a609e1f235fbcdd13f348573253edc1abd
73c9c77ada0f227daecf052857e4e40db44f0598
refs/heads/master
2022-12-02T15:12:22.522399
2020-08-17T00:10:30
2020-08-17T00:10:30
287,833,174
0
0
null
2020-08-15T22:17:19
2020-08-15T22:17:18
null
UTF-8
R
false
false
642
r
plot1.R
# Unzip the data fileurl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(fileurl,destfile = "data.zip",method = "curl") unzip("data.zip") # Loading and tyding data power <- read.table("household_power_consumption.txt",sep = ";", header = TRUE, stringsAsFactors = FALSE, dec = ".") # Loading of the subset subpower <- power[power$Date %in% c("1/2/2007","2/2/2007"),] gactivep <- as.numeric(subpower$Global_active_power) # Plots the data "Global Active Power" png("plot1.png") hist(gactivep,col = "red",xlab = "Global Active Power (kilowatts)",main = "Global Active Power" ) dev.off()
e46a67c7cfa92518d775eee777affa301bfc8a5a
1a19c6916b7171f8e893d89bded8766d10936719
/man/fars_summarize_years.Rd
4239d891e61f98fd9a2c054a7e2bb8004f4593d9
[]
no_license
richcmwang/Fars
b7604e3626dde8555c57e84ad8c45d69fada5b4d
40912d938e13b1b5060701286fe62e33cb8cec2d
refs/heads/master
2021-01-22T11:20:51.788174
2017-05-29T21:57:38
2017-05-29T21:57:38
92,684,096
0
0
null
null
null
null
UTF-8
R
false
true
708
rd
fars_summarize_years.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Show a summary of accidents during given years}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{a vector of integers}
}
\value{
a tibble showing a summary of the number of accidents by month for each input year
}
\description{
Show a summary of accidents during given years
}
\note{
The "dplyr" package is required. If the input years contain a year for which no
data exists, the function returns a summary for the valid years together with a
warning about the invalid year. An error and a warning message are returned when
the list of years contains no valid year.
}
79cacd0351bd26bb47c4422fe136b446a2887dd2
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/solvebio/examples/DatasetExport.delete.Rd.R
3516f347ba9fd4916b5e8f8bd5bc259ea15a23fe
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
199
r
DatasetExport.delete.Rd.R
library(solvebio) ### Name: DatasetExport.delete ### Title: DatasetExport.delete ### Aliases: DatasetExport.delete ### ** Examples ## Not run: ##D DatasetExport.delete(<ID>) ## End(Not run)
047e069ab7e2067520ce062f12657e35659d0ee5
12eab6d1208f29f6a4210cee0a712e8304240814
/mrds/R/is.linear.logistic.R
003e503dd87ce1649806402b6c773c4262c090c1
[]
no_license
jlaake/mrds
c8920ffdd91c411f2bfb91a15abd786f8b0b1d0e
64bb11a5682a2fd56611cef9860be476ea5f14be
refs/heads/master
2021-01-18T06:25:57.563386
2013-12-18T19:13:48
2013-12-18T19:13:48
2,009,699
1
2
null
null
null
null
UTF-8
R
false
false
1,705
r
is.linear.logistic.R
#' Collection of functions for logistic detection functions #' #' These functions are used to test whether a logistic detection function is a #' linear function of distance (\code{is.linear.logistic}) or is constant #' (varies by distance but no other covariates) \code{is.logistic.constant}). #' Based on these tests, the most appropriate manner for integrating the #' detection function with respect to distance is chosen. The integrals are #' needed to estimate the average detection probability for a given set of #' covariates. #' #' If the logit is linear in distance then the integral can be computed #' analytically. If the logit is constant or only varies by distance then only #' one integral needs to be computed rather than an integral for each #' observation. #' #' @param xmat data matrix #' @param g0model logit model #' @param zdim number of columns in design matrix #' @param width transect width #' @return Logical TRUE if condition holds and FALSE otherwise #' @author Jeff Laake #' @keywords utility is.linear.logistic <- function(xmat,g0model,zdim,width){ xmat$distance <- rep(width/2, nrow(xmat)) beta <- rep(1,zdim) logit1 <- mean(beta %*% t(setcov(xmat, g0model)$cov)) xmat$distance <- rep(width, nrow(xmat)) logit2 <- mean(beta %*% t(setcov(xmat, g0model)$cov)) xmat$distance <- rep(0, nrow(xmat)) logit0 <- mean( beta %*% t(setcov(xmat, g0model)$cov)) if(logit1-logit0==0){ integral.numeric <- FALSE }else if((logit2-logit0)/(logit1-logit0) <= 2.00001 & (logit2-logit0)/(logit1-logit0) >= 1.99999){ integral.numeric <- FALSE }else{ integral.numeric <- TRUE } return(integral.numeric) }
5044d1b51604e7e5dc8c2aa2318880755e589a59
8466ef1c1a8697e0409c8d2894ef74644d00f155
/packages.R
a852f8270792e52ddcfa315cec6c977baf9eee4d
[]
no_license
MilesMcBain/carbon_au
1b17df25dbb44ea5fbc91268f1007630cb806850
c03044830818a1551f395e19be27f7e17482590d
refs/heads/master
2020-12-14T07:59:09.785595
2020-03-06T10:12:55
2020-03-06T10:12:55
234,681,059
0
0
null
null
null
null
UTF-8
R
false
false
264
r
packages.R
## library() calls go here library(conflicted) library(dotenv) library(drake) library(rvest) library(tidyverse) library(stringr) library(curl) library(glue) library(pdftools) conflict_prefer("pluck", "purrr") conflict_prefer("filter", "dplyr") library(rmarkdown)
058c856942ff19555071ea6dfa8eb5bb269dbec1
fb0f8dacbfc6f7afb90d5f1a5da9626119104697
/man/scmap.Rd
c2cbd439edcf7e0d81519840d38ece26575d3487
[]
no_license
dewittpe/sccm
1777cebddb4042843db85a8a10eea099bddb24a4
dfbd83862461fd59116ee3b6a05d75319c0fd9a8
refs/heads/master
2021-06-18T01:27:49.423285
2021-02-18T19:11:12
2021-02-18T19:11:12
67,158,240
2
0
null
null
null
null
UTF-8
R
false
true
1,060
rd
scmap.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scmap.R \name{scmap} \alias{scmap} \title{Schwarz-Christoffel Mapping (Disk to Polygon)} \usage{ scmap(x, wc, nptsq = 12) } \arguments{ \item{x}{\code{sccm_ch} or \code{sccm_pg}, object.} \item{wc}{the center of the polygon, i.e., where the origin will be mapped to. The default is c(mean(x), mean(y))} \item{nptsq}{the number of points per subinterval used in Gauss-Jacobi quadrature.} } \value{ an object } \description{ Generates the Schwarz-Christoffel mapping from the unit disk to the provided 2D polygon. } \details{ These functions might be used later to allow end users to have more control over the mmappings. Until then, it would be best for end users to use \code{\link{d2p}} for disk to polygon, \code{\link{p2d}} for polygon to disk, and \code{\link{p2p}} for mapping one polygon to another via a disk. } \seealso{ \code{\link{d2p}} for disk to polygon, \code{\link{p2d}} for polygon to disk, and \code{\link{p2p}} for mapping one polygon to another via a disk. }
82af3b78e696aedca5ee8ace08cdf7bee20220e2
6d734eeb3982656c23de22c7a0d2d8f9902417bb
/analysis/crop_field_extent.R
e1f6373a27dfc579e654aa413b1ca6b34be1e9d2
[]
no_license
weecology/TreeSegmentation
cadc8fa79dd5e5b24a1f4b898ce709134c5461af
cf10386eaf0d5da286a855d7484d57918d607d29
refs/heads/master
2022-10-21T07:49:52.103033
2022-10-07T03:41:03
2022-10-07T03:41:03
130,730,841
8
3
null
2019-03-15T18:09:21
2018-04-23T17:15:37
HTML
UTF-8
R
false
false
959
r
crop_field_extent.R
#Find crops library(raster) library(TreeSegmentation) rgb_filename<-"/home/b.weinstein/TreeSegmentation/data/field_crowns/2018_HARV_5_733000_4698000_image_crop.tif" basename <- stringr::str_match(rgb_filename,"/(\\w+).tif")[,2] #CROP CHM crop_target_CHM(siteID="MLBS",rgb_filename,year="2018",tif_base_dir="/orange/ewhite/NeonData",save_base_dir="/orange/ewhite/b.weinstein/NEON") #crop Hyperspectral 3 band ext <- raster::extent(raster::raster(rgb_filename)) easting <- as.integer(ext@xmin/1000)*1000 northing <- as.integer(ext@ymin/1000)*1000 geo_index <- paste(easting,northing,sep="_") crop_target_hyperspectral(siteID="HARV", rgb_filename=rgb_filename, geo_index=geo_index, false_color=TRUE, year="2018", h5_base_dir="/orange/ewhite/NeonData/", save_base_dir="/orange/ewhite/b.weinstein/NEON/")
3d545783244fc8047f4d6c264c782864540510e8
0a906cf8b1b7da2aea87de958e3662870df49727
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131239-test.R
78d7e5ef0e4de0c2efaf9f582f43fae6fafa7a9e
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
202
r
1610131239-test.R
testlist <- list(a = 0L, b = 0L, x = c(-488505344L, -488505344L, 2748927L, -1L, -256L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L )) result <- do.call(grattan:::anyOutside,testlist) str(result)
3e6021a8909ecad9a44bbb3fcfd252c5680b8674
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/CLVTools/R/f_s3generics_clvfittedtransactions_plot.R
4ec06c09f75c3765b916790c041bed2dd7a59198
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
false
13,810
r
f_s3generics_clvfittedtransactions_plot.R
#' @title Plot expected and actual repeat transactions #' @param x The fitted clv model to plot #' @param newdata An object of class clv.data for which the plotting should be made with the fitted model. If none or NULL is given, the plot is made for the data on which the model was fit. #' @param transactions Whether the actual observed repeat transactions should be plotted. #' @param cumulative Whether the cumulative expected (and actual) transactions should be plotted. #' @param plot Whether a plot should be created or only the assembled data is returned. #' @param label Character string to label the model in the legend #' @template template_param_predictionend #' @template template_param_verbose #' @template template_param_dots #' #' @description #' Plot the actual repeat transactions and overlay it with the repeat transaction as predicted #' by the fitted model. Currently, following previous literature, the in-sample unconditional #' expectation is plotted in the holdout period. In the future, we might add the option to also #' plot the summed CET for the holdout period as an alternative evaluation metric. #' #' @template template_details_predictionend #' #' @details Note that only whole periods can be plotted and that the prediction end might not exactly match \code{prediction.end}. #' See the Note section for more details. #' #' @template template_details_newdata #' #' #' @note Because the unconditional expectation for a period is derived as the difference of #' the cumulative expectations calculated at the beginning and at end of the period, #' all timepoints for which the expectation is calculated are required to be spaced exactly 1 time unit apart. #' #' If \code{prediction.end} does not coincide with the start of a time unit, the last timepoint #' for which the expectation is calculated and plotted therefore is not \code{prediction.end} #' but the start of the first time unit after \code{prediction.end}. 
#' #' #' @seealso \code{\link[CLVTools:plot.clv.fitted.spending]{plot}} for spending models #' #' @return #' An object of class \code{ggplot} from package \code{ggplot2} is returned by default. #' If the parameter \code{plot} is \code{FALSE}, the data that would have been melted and used to #' create the plot is returned. It is a \code{data.table} which contains the following columns: #' \item{period.until}{The timepoint that marks the end (up until and including) of the period to which the data in this row refers.} #' \item{Number of Repeat Transactions}{The number of actual repeat transactions in #' the period that ends at \code{period.until}. Only if \code{transactions} is \code{TRUE}.} #' \item{"Name of Model" or "label"}{The value of the unconditional expectation for the period that ends on \code{period.until}.} #' #' @examples #' \donttest{ #' #' data("cdnow") #' #' # Fit ParetoNBD model on the CDnow data #' pnbd.cdnow <- pnbd(clvdata(cdnow, time.unit="w", #' estimation.split=37, #' date.format="ymd")) #' #' # Plot actual repeat transaction, overlayed with the #' # expected repeat transactions as by the fitted model #' plot(pnbd.cdnow) #' #' # Plot cumulative expected transactions of only the model #' plot(pnbd.cdnow, cumulative=TRUE, transactions=FALSE) #' #' # Plot forecast until 2001-10-21 #' plot(pnbd.cdnow, prediction.end = "2001-10-21") #' #' # Plot until 2001-10-21, as date #' plot(pnbd.cdnow, #' prediction.end = lubridate::dym("21-2001-10")) #' #' # Plot 15 time units after end of estimation period #' plot(pnbd.cdnow, prediction.end = 15) #' #' # Save the data generated for plotting #' # (period, actual transactions, expected transactions) #' plot.out <- plot(pnbd.cdnow, prediction.end = 15) #' #' # A ggplot object is returned that can be further tweaked #' library("ggplot2") #' gg.pnbd.cdnow <- plot(pnbd.cdnow) #' gg.pnbd.cdnow + ggtitle("PNBD on CDnow") #' #' } #' # # Compose plot from separate model plots # # pnbd vs bgnbd # p.m1 <- 
plot(pnbd.cdnow, transactions = TRUE) # # # static cov model # p.m2 <- plot(pnbd.cdnow.cov, transactions = FALSE) # p.m1 + geom_line(mapping=p.m2$mapping, data=p.m2$data, # color="blue") #' @importFrom graphics plot #' @include class_clv_fitted.R #' @method plot clv.fitted.transactions #' @aliases plot #' @export plot.clv.fitted.transactions <- function (x, prediction.end=NULL, newdata=NULL, cumulative=FALSE, transactions=TRUE, label=NULL, plot=TRUE, verbose=TRUE,...) { period.until <- period.num <- NULL # Check if can plot ----------------------------------------------------------------------------------------- # Cannot plot if there are any NAs in any of the prediction.params clv.controlflow.check.prediction.params(clv.fitted = x) # Newdata ------------------------------------------------------------------------------------------------ # Because many of the following steps refer to the data stored in the fitted model, # it first is replaced with newdata before any other steps are done if(!is.null(newdata)){ # check newdata clv.controlflow.check.newdata(clv.fitted = x, user.newdata = newdata, prediction.end=prediction.end) # Replace data in model with newdata # Deep copy to not change user input x@clv.data <- copy(newdata) # Do model dependent steps of adding newdata x <- clv.model.process.newdata(clv.model = x@clv.model, clv.fitted=x, verbose=verbose) } # Check inputs ------------------------------------------------------------------------------------------------------ err.msg <- c() err.msg <- c(err.msg, .check_user_data_single_boolean(b=cumulative, var.name="cumulative")) err.msg <- c(err.msg, .check_user_data_single_boolean(b=plot, var.name="plot")) err.msg <- c(err.msg, .check_user_data_single_boolean(b=verbose, var.name="verbose")) err.msg <- c(err.msg, .check_user_data_single_boolean(b=transactions, var.name="transactions")) err.msg <- c(err.msg, check_user_data_predictionend(clv.fitted=x, prediction.end=prediction.end)) err.msg <- c(err.msg, 
check_user_data_emptyellipsis(...)) if(!is.null(label)) # null is allowed = std. model name err.msg <- c(err.msg, .check_userinput_single_character(char=label, var.name="label")) check_err_msg(err.msg) # do fitted object specific checks (ie dyncov checks cov data length) clv.controlflow.plot.check.inputs(obj=x, prediction.end=prediction.end, cumulative=cumulative, plot=plot, label.line=label, verbose=verbose) # Define time period to plot ----------------------------------------------------------------------------------------- # Use table with exactly defined periods as reference and to save all generated data # End date: # Use same prediction.end date for clv.data (actual transactions) and clv.fitted (unconditional expectation) # If there are not enough transactions for all dates, they are set to NA (= not plotted) dt.dates.expectation <- clv.time.expectation.periods(clv.time = x@clv.data@clv.time, user.tp.end = prediction.end) tp.data.start <- dt.dates.expectation[, min(period.until)] tp.data.end <- dt.dates.expectation[, max(period.until)] if(verbose) message("Plotting from ", tp.data.start, " until ", tp.data.end, ".") if(clv.data.has.holdout(x@clv.data)){ if(tp.data.end < x@clv.data@clv.time@timepoint.holdout.end){ warning("Not plotting full holdout period.", call. = FALSE, immediate. = TRUE) } }else{ if(tp.data.end < x@clv.data@clv.time@timepoint.estimation.end){ warning("Not plotting full estimation period.", call. = FALSE, immediate. 
= TRUE) } } # Get expectation values ----------------------------------------------------------------------------------------- dt.expectation <- clv.controlflow.plot.get.data(obj=x, dt.expectation.seq=dt.dates.expectation, cumulative=cumulative, verbose=verbose) if(length(label)==0) label.model.expectation <- x@clv.model@name.model else label.model.expectation <- label setnames(dt.expectation,old = "expectation", new = label.model.expectation) # Get repeat transactions ---------------------------------------------------------------------------------------- if(transactions){ label.transactions <- "Actual Number of Repeat Transactions" dt.repeat.trans <- clv.controlflow.plot.get.data(obj=x@clv.data, dt.expectation.seq=dt.dates.expectation, cumulative=cumulative, verbose=verbose) setnames(dt.repeat.trans, old = "num.repeat.trans", new = label.transactions) } # Plot data, if needed -------------------------------------------------------------------------------------------- # Merge data for plotting # To be sure to have all dates, merge data on original dates dt.dates.expectation[, period.num := NULL] if(transactions){ dt.dates.expectation[dt.expectation, (label.model.expectation) := get(label.model.expectation), on="period.until"] dt.dates.expectation[dt.repeat.trans, (label.transactions) := get(label.transactions), on="period.until"] dt.plot <- dt.dates.expectation }else{ dt.dates.expectation[dt.expectation, (label.model.expectation) := get(label.model.expectation), on="period.until"] dt.plot <- dt.dates.expectation } # data.table does not print when returned because it is returned directly after last [:=] # " if a := is used inside a function with no DT[] before the end of the function, then the next # time DT or print(DT) is typed at the prompt, nothing will be printed. A repeated DT or print(DT) # will print. To avoid this: include a DT[] after the last := in your function." 
dt.plot[] # Only plot if needed if(!plot) return(dt.plot) if(transactions) line.colors <- setNames(object = c("black", "red"), nm = c(label.transactions, label.model.expectation)) else line.colors <- setNames(object = "red", nm = label.model.expectation) # Plot table with formatting, label etc return(clv.controlflow.plot.make.plot(dt.data = dt.plot, clv.data = x@clv.data, line.colors = line.colors)) } #' @importFrom ggplot2 ggplot aes geom_line geom_vline labs theme scale_fill_manual guide_legend element_text element_rect element_blank element_line rel clv.controlflow.plot.make.plot <- function(dt.data, clv.data, line.colors){ # cran silence period.until <- value <- variable <- NULL # Melt everything except what comes from the standard expectation table meas.vars <- setdiff(colnames(dt.data), c("period.num", "period.until")) data.melted <- melt(data=dt.data, id.vars = c("period.until"), variable.factor = FALSE, na.rm = TRUE, measure.vars = meas.vars) p <- ggplot(data = data.melted, aes(x=period.until, y=value, colour=variable)) + geom_line() # Add holdout line if there is a holdout period if(clv.data.has.holdout(clv.data)){ p <- p + geom_vline(xintercept = as.numeric(clv.data@clv.time@timepoint.holdout.start), linetype="dashed", show.legend = FALSE) } # Variable color and name p <- p + scale_fill_manual(values = line.colors, aesthetics = c("color", "fill"), guide = guide_legend(title="Legend")) # Axis and title p <- p + labs(x = "Date", y= "Number of Repeat Transactions", title= paste0(clv.time.tu.to.ly(clv.time=clv.data@clv.time), " tracking plot"), subtitle = paste0("Estimation end: ", clv.time.format.timepoint(clv.time=clv.data@clv.time, timepoint=clv.data@clv.time@timepoint.estimation.end))) p <- p + theme( plot.title = element_text(face = "bold", size = rel(1.5)), text = element_text(), panel.background = element_blank(), panel.border = element_blank(), plot.background = element_rect(colour = NA), axis.title = element_text(face = "bold",size = rel(1)), 
axis.title.y = element_text(angle=90,vjust =2), axis.title.x = element_text(vjust = -0.2), axis.text = element_text(), axis.line = element_line(colour="black"), axis.ticks = element_line(), panel.grid.major = element_line(colour="#d2d2d2"), panel.grid.minor = element_blank(), legend.key = element_blank(), legend.position = "bottom", legend.direction = "horizontal", legend.title = element_text(face="italic"), strip.background=element_rect(colour="#d2d2d2",fill="#d2d2d2"), strip.text = element_text(face="bold", size = rel(0.8))) return(p) } # . clv.controlflow.plot.get.data --------------------------------------------------------------- setMethod(f="clv.controlflow.plot.get.data", signature = signature(obj="clv.fitted.transactions"), definition = function(obj, dt.expectation.seq, cumulative, verbose){ expectation <- i.expectation <- NULL # Pass copy of expectation table file because will be modified and contain column named expecation dt.model.expectation <- clv.model.expectation(clv.model=obj@clv.model, clv.fitted=obj, dt.expectation.seq=copy(dt.expectation.seq), verbose = verbose) # Only the expectation data dt.model.expectation <- dt.model.expectation[, c("period.until", "expectation")] if(cumulative) dt.model.expectation[, expectation := cumsum(expectation)] # add expectation to plot data # name columns by model dt.expectation.seq[dt.model.expectation, expectation := i.expectation, on = "period.until"] return(dt.expectation.seq) }) #' @exportMethod plot #' @include class_clv_fitted.R #' @rdname plot.clv.fitted.transactions setMethod("plot", signature(x="clv.fitted.transactions"), definition = plot.clv.fitted.transactions)
b51ab8fd93124ca6e032258df7db6602e2f37230
84dcc770d7766a3171efe7aa46e50dbcb496c9b5
/CUSUM/version4/cut_grid.R
4c3a83f2aced5bd74452e7ea4ce2db41ddd153e7
[]
no_license
xinzhang-nac/Spatial_CUSUM
0b36e1a04526ea26578d86c41a2eaf1ffa980023
677eedf773af01525d788f2d2ee9932a1d8f4541
refs/heads/master
2020-04-09T03:58:59.994822
2019-05-15T14:31:29
2019-05-15T14:31:29
null
0
0
null
null
null
null
UTF-8
R
false
false
233
r
cut_grid.R
grid.point <- within(grid.point, { grp.x = cut(x, (0:10)/10, labels = FALSE) grp.y = cut(y, (0:10)/10, labels = FALSE) }) plot(y ~ x, data = grid.point, pch = (15:25)[grp.x], col = grp.y) abline(v = (1:9)/10) abline(h = (1:9)/10)
935dc4d5e117a44dad7d8832722f73e42e7d0fdf
79457aaae83a0b3914a38874c10907440e0dfc61
/man/dimension_values.Rd
e8b3918e176c4de1ea08037841ae1cee2fdc1fd4
[]
permissive
appelmar/gdalcubes
be9786b36fbe4e25a5c0245968634f57a40752ad
2134f769454e147660e7a73c61afa14219de20b4
refs/heads/master
2023-08-07T20:56:02.442579
2023-07-25T06:36:46
2023-07-25T06:36:46
148,130,790
74
7
MIT
2023-03-23T19:56:08
2018-09-10T09:25:01
C++
UTF-8
R
false
true
1,315
rd
dimension_values.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cube.R \name{dimension_values} \alias{dimension_values} \title{Query coordinate values for all dimensions of a data cube} \usage{ dimension_values(obj, datetime_unit = NULL) } \arguments{ \item{obj}{a data cube proxy (class cube), or a data cube view object} \item{datetime_unit}{unit used to format values in the datetime dimension, one of "Y", "m", "d", "H", "M", "S", defaults to the unit of the cube.} } \value{ list with elements t,y,x } \description{ Dimension values give the coordinates along the spatial and temporal axes of a data cube. } \examples{ # create image collection from example Landsat data only # if not already done in other examples if (!file.exists(file.path(tempdir(), "L8.db"))) { L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"), ".TIF", recursive = TRUE, full.names = TRUE) create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"), quiet = TRUE) } L8.col = image_collection(file.path(tempdir(), "L8.db")) v = cube_view(extent=list(left=388941.2, right=766552.4, bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"), srs="EPSG:32618", nx = 497, ny=526, dt="P1M") dimension_values(raster_cube(L8.col, v)) }
326d570cb7510eb82c1f56d46593051f4839f964
1087dc5af15ff0944cf595d877f5ef6d2d951936
/tests/testthat/test-opart-gaussian.R
8969ce628d0740413508ef000be19cd5d0e055cc
[]
no_license
cran/opart
3777c97e8995ffef3a5bef6f53e7bc2b048689f2
3868f22cf67ab10c6051f92e6dd89dfd2e76ffc2
refs/heads/master
2020-12-22T01:48:33.382168
2019-09-04T12:40:02
2019-09-04T12:40:02
236,634,128
0
0
null
null
null
null
UTF-8
R
false
false
2,313
r
test-opart-gaussian.R
library(testthat) context("opart-gaussian") library(opart) data(neuroblastoma, package="neuroblastoma") #tests for invalid scenarios test_that("opart gives error for negative penalty", { expect_error({ opart_gaussian(c(1,2,3), -2) }, "penalty value must be greater than 0", fixed=TRUE) }) test_that("opart gives error when data points are less than 1", { expect_error({ opart_gaussian(c(), 1) }, "data vector must have atleast one element", fixed=TRUE) }) test_that("opart gives error when data vector has missing(NA) values", { expect_error({ opart_gaussian(c(1,NA,2,3), 1) }, "data vector has missing(NA) values", fixed=TRUE) }) test_that("opart gives error when data vector contains non-numeric values", { expect_error({ opart_gaussian(c("a", "b", 1, 3), 1) }, "data vector must contains finite numeric values", fixed=TRUE) }) test_that("opart gives error when data vector contains infinity", { expect_error({ opart_gaussian(c(Inf, 1,2), 1) }, "data vector must contains finite numeric values", fixed=TRUE) }) #test for zero penalty res <- opart_gaussian(c(1,2,3,4,5), 0) test_that("all the data points are segment ends", { expect_equal(res$end.vec, c(1,2,3,4,5)) }) #test for large penalty res <- opart_gaussian(c(1,2,3,4,5), 1000000) test_that("all the data points in one segment", { expect_equal(res$end.vec, 5) }) #test for large data res <- opart_gaussian(c(1:1000), 0) test_that("all the data points as segment ends", { expect_equal(res$end.vec, c(1:1000)) }) #test for correctness res <- opart_gaussian(c(1,2,3,4), 1) test_that("cost vector should match manual calculation", { expect_equal(res$cost.vec, c(-1, -4.5, -12.5, -28)) }) #test for neuroblastoma data set with profile.id = 1 and chromosome = 1 selProfile <- subset(neuroblastoma$profiles, profile.id=="1" & chromosome=="1") nrows <- nrow(selProfile) #test for large penalty res <- opart_gaussian(selProfile$logratio, 10000000) test_that("all the data points in one segment", { expect_equal(res$end.vec, nrows) }) #test for small 
penalty res <- opart_gaussian(selProfile$logratio, 1) test_that("all the data points are segment ends", { expect_equal(length(res$end.vec), 4) })
653233b02be0ceffe044e4d8dabd79c3fb6cfb35
e2a88b360595b345d64f1ae2ded271ac0f39203b
/cachematrix.R
52cf7004ff4d90c438f7642a42ec3cd273c7c97f
[]
no_license
arkim42/ProgrammingAssignment2
958cf4c368df9919a2f8fbf30b6bbc7d89fb88de
80961bdc7a0858673fe052295433ebc04c55da76
refs/heads/master
2022-11-13T11:02:27.069156
2020-06-19T21:20:54
2020-06-19T21:20:54
273,445,411
0
0
null
2020-06-19T08:35:17
2020-06-19T08:35:16
null
UTF-8
R
false
false
1,194
r
cachematrix.R
## The first function, makeCacheMatrix creates a special "matrix", ## which is reall a list containing a function to ## 1. set : set the value of the matrix ## 2. get: get the value of the matrix ## 3. setinverse : set the value of the inverse of the matrix ## 4. getinverse : get the value of the inverse of the matrix makeCacheMatrix <- function(x = matrix()) { iv <- NULL set <- function(y) { x <<- y iv <<- NULL } get <- function() x setinverse <- function(inverse) iv <<- inverse getinverse <- function() iv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## The following function calculates the inverse of the special "matrix" ## created with the above function. However, it first checks to see if ## the inverse has already been calculated. If so, it gets the inverse ## from the cache and skips the computation. Otherwise, it calculates ## the inverse of the data and sets the value of the inverse in the cache ## via the setinverse function. cacheSolve <- function(x, ...) { iv <- x$getinverse() if (!is.null(iv)) { message("getting cached data") return (iv) } data <- x$get() iv <- solve(data, ...) x$setinverse(iv) iv }
a5d01f1c07df08f3eca5c210ec3bf9ddf498d658
ae4a257cc62e8ee4b90fda7e877750e59c0d8ba3
/man/plotSingleTS.Rd
17378a3b9ab57a653359e102909aea858c1c4c25
[]
no_license
davidlamcm/Rtoolbox
824a5f189c5e3ec89f841fe4e73f85605d894d6a
238a6881f4df4a47365aeb0a2786fed62899e537
refs/heads/master
2021-01-19T14:09:35.775475
2018-08-22T15:16:44
2018-08-22T15:16:44
88,128,106
0
0
null
null
null
null
UTF-8
R
false
true
300
rd
plotSingleTS.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fun_plotSingleTS.R \name{plotSingleTS} \alias{plotSingleTS} \title{this function will be deprecated wrapper for ggplot} \usage{ plotSingleTS(ts, title) } \description{ this function will be deprecated wrapper for ggplot }
85fd0a52ca45b212e75a9c840865d0034c3e3c9d
9ff5451498def880e07a1f568cf58a116a1d6abb
/cachematrix.R
145356cc4296b90f237f6c3a615ce665bd57241b
[]
no_license
daniellins/ProgrammingAssignment2
f570690716696640c4b8812546008acff4177eff
970d4e44091605b87344e37856536489b0d517b6
refs/heads/master
2021-01-16T21:49:13.900652
2015-02-22T06:34:32
2015-02-22T06:34:32
31,153,720
0
0
null
2015-02-22T05:26:49
2015-02-22T05:26:48
null
UTF-8
R
false
false
1,685
r
cachematrix.R
## makeCacheMatrix: Store and retrieve a object (matrix) in the cache. ## Params: a matrix ## Result: a special object with the functions below: ## 1- set the value of the matrix (set) ## 2- get the value of the matrix (get) ## 3- set the value of the solve (setInverse) ## 4- get the value of the solve (getInverse) ## Author: Daniel Lins ## Date:02/21/2015 ## Version:1.0 makeCacheMatrix <- function(x = matrix()) { cachedInverseMatrix <- NULL set <- function(y){ x <<- y cachedInverseMatrix <<- NULL } get <- function() x setInverse <- function(inverseMatrix) cachedInverseMatrix <<- inverseMatrix getInverse <- function() cachedInverseMatrix list(set=set, get=get, setInverse=setInverse, getInverse=getInverse) } ## cacheSolve: Calculates the inverse (solve) of the matrix. However, it first ## checks if the result has already been calculated and stored in the cache. ## If yes, it gets the result from the cache. ## Params: Special object (output of MakeCacheMatrix()) ## Return: a matrix that is the inverse of the original matrix. ## Author: Daniel Lins ## Date:02/21/2015 ## Version:1.0 cacheSolve <- function(x, ...) { inverseMatrix <- x$getInverse() if(!is.null(inverseMatrix)){ message("Object already in Cache. Retrieving cached data...") return (inverseMatrix) } originalMatrix <-x$get() inverseMatrix <- solve(originalMatrix) x$setInverse(inverseMatrix) return(inverseMatrix) }
f161750daa45a085349f201e11657affcb24c468
8f6927b75d22a4f24958477c0bb23e3962126ae1
/Workbook_6/template_function_macrophage_filtered.R
1141cd25eee05b124d062c3750ee6db184ce38d2
[]
no_license
NCI-VB/pavlakis_TNBC_hetIL-15
9f160ab9e11291922515d99f4c3600fdd648870a
efbb02c019ef16079b278e8be628abd176508f42
refs/heads/main
2023-04-15T12:42:41.160189
2023-03-14T15:36:26
2023-03-14T15:36:26
606,222,869
0
0
null
null
null
null
UTF-8
R
false
false
12,656
r
template_function_macrophage_filtered.R
# [scRNA-Seq][CCBR] Filter Seurat Object by Metadata (ec3f23f9-bcba-4f3a-8a08-8ba611fbb6c7): v114 macrophage_filtered <- function(Annotate_Cell_Types, Metadata_Table, Sample_Names) { ## Libraries suppressMessages(library(ggplot2)) suppressMessages(library(Seurat)) suppressWarnings(library(gridExtra)) suppressMessages(library(grid)) suppressMessages(library(gridBase)) suppressMessages(library(cowplot)) suppressMessages(library(RColorBrewer)) suppressMessages(library(colorspace)) suppressMessages(library(tidyverse)) ## Inputs. so <- Annotate_Cell_Types$value reduction = "tsne" doCiteSeq <- FALSE #image: png imageType = "png" Keep <- FALSE plotinteractive = FALSE seed = 10 metacols = "SCT_snn_res_0_6" cols1 <- c("aquamarine3","salmon1","lightskyblue3","plum3","darkolivegreen3","goldenrod1","burlywood2","gray70","firebrick2","steelblue","palegreen4","orchid4","darkorange1","yellow","sienna","palevioletred1","gray60","cyan4","darkorange3","mediumpurple3","violetred2","olivedrab","darkgoldenrod2","darkgoldenrod","gray40","palegreen3","thistle3","khaki1","deeppink2","chocolate3","paleturquoise3","wheat1","lightsteelblue","salmon","sandybrown","darkolivegreen2","thistle2","gray85","orchid3","darkseagreen1","lightgoldenrod1","lightskyblue2","dodgerblue3","darkseagreen3","forestgreen","lightpink2","mediumpurple4","lightpink1","thistle","navajowhite","lemonchiffon","bisque2","mistyrose","gray95","lightcyan3","peachpuff2","lightsteelblue2","lightyellow2","moccasin","gray80","antiquewhite2","lightgrey") samples = eval(parse(text=gsub('\\[\\]','c()','c("Control","Treated")'))) if (length(samples) == 0) { samples = unique(SO@meta.data$sample_name) } ## Replace dots in metadata column names with underscores. colnames(so@meta.data) = gsub("\\.", "_", colnames(so@meta.data)) ## If you have protien data, then ... if (doCiteSeq) { reduction =paste("protein_", reduction,sep='') } ## Set image dimensions within Vector. 
imageWidth = 2000 * 2 imageHeight = 2000 dpi = 300 ## Set image format (png or svg) based on user input. if (imageType == 'png') { png( filename = "macrophage_filtered.png", width = imageWidth, height = imageHeight, units = "px", pointsize = 4, bg = "white", res = dpi, type = "cairo") } else { library(svglite) svglite::svglite( file = "macrophage_filtered.png", width = round(imageWidth/dpi,digits=2), height = round(imageHeight/dpi,digits=2), pointsize = 1, bg = "white") } ## Original color-picking code. n <- 2e3 set.seed(seed) ourColorSpace <- colorspace::RGB(runif(n), runif(n), runif(n)) ourColorSpace <- as(ourColorSpace, "LAB") distinctColorPalette <-function(k=1,seed) { currentColorSpace <- ourColorSpace@coords # Set iter.max to 20 to avoid convergence warnings. set.seed(seed) km <- kmeans(currentColorSpace, k, iter.max=20) colors <- unname(hex(LAB(km$centers))) return(colors) } ## User-selected metadata column is used to set idents. Filter.orig = so@meta.data[[metacols[1]]] colname <- metacols[1] ident_of_interest = as.factor(so@meta.data[[colname]]) names(ident_of_interest)=names(so@active.ident) so@active.ident <- as.factor(vector()) so@active.ident <- ident_of_interest ## Get colors from user parameter and add more if the default list is too short. if(class(so@meta.data[[metacols[1]]]) != "numeric"){ q = length(levels(as.factor(Filter.orig))) if(length(cols1) < q) { r = q - length(cols1) more_cols = distinctColorPalette(r,10) cols1 <- c(cols1, more_cols) } names(cols1) <- levels(as.factor(Filter.orig)) ## Keep or remove cells based on user input values. if (Keep) { subsetValue <- c("0","1","2","6","8","9","11","12") metaCol <- unique(so@meta.data[[metacols[1]]]) print("Missing values:") print(setdiff(subsetValue,metaCol)) subsetValue <- intersect(metaCol,subsetValue) } else { metaCol <- unique(so@meta.data[[colname]]) valsToRemove <- c("0","1","2","6","8","9","11","12") subsetValue <- setdiff(metaCol, valsToRemove) } ## Subset Seurat object. 
#SO.sub <-SubsetData(so, ident.use=subsetValue) SO.sub <- subset(so, idents = subsetValue) ## Log output of tables of cell types by samples before and after filtes. print("Breakdown of filtered data:") print(table(so@meta.data[[metacols[1]]],so@meta.data$orig_ident)) cat("\n") print("After Filtering:") print(table(SO.sub@meta.data[[metacols[1]]],SO.sub@meta.data$orig_ident)) ## Set filter for the subsetted SO. SO.sub@meta.data[[colname]] <- as.factor(as.character(SO.sub@meta.data[[colname]])) #Relevel Factors Filter.sub = SO.sub@meta.data[[colname]] ## More color stuff. #Set colors for unfiltered and filtered data by sample name: n = length(levels(as.factor(Filter.sub))) idx = vector("list", n) names(idx) <- levels(as.factor(Filter.sub)) for (i in 1:n) { id = Filter.orig %in% levels(as.factor(Filter.sub))[i] idx[[i]] <- rownames(so@meta.data)[id] } cols2 <- cols1[levels(as.factor(Filter.sub))] ## Make before and after plots. title <- paste0("filtered by ", metacols[1], " and split by ", metacols[2]) p1 = DimPlot(so, reduction=reduction, group.by=colname, pt.size=0.1) + theme_classic() + scale_color_manual(values=cols1) + theme(legend.position="right") + guides(colour=guide_legend(ncol=1,override.aes = list(size = 2))) + ggtitle(colname) p2 = DimPlot(so, reduction=reduction, cells.highlight = idx, cols.highlight= rev(cols2[1:n]), sizes.highlight = 0.5) + theme_classic() + theme(legend.position="right")+ guides(colour=guide_legend(ncol=1,reverse=TRUE,override.aes = list(size = 2))) + ggtitle(title) ## Else, filter on numeric data with a user defined threshold and direction. 
} else { filterDirection <-"greater than" metaCol <- unique(so@meta.data[["SCT_snn_res_0_6"]]) value <- 0.5 if (filterDirection =="greater than") { SO.sub <- subset(so, subset = SCT_snn_res_0_6 > 0.5) } else { SO.sub <- subset(so, subset = SCT_snn_res_0_6 < 0.5) } drawtsne <- function(SO,reduction,scalecol,colgrad){ SO.clus <- SO@meta.data[[m]] p1 <- DimPlot(SO, reduction = reduction, group.by = "ident") class(p1$data$ident) <- "numeric" if(reduction=="tsne"){ clusmat=data.frame(umap1=p1$data$tSNE_1,umap2=p1$data$tSNE_2, clusid=as.numeric(SO@meta.data[[m]])) } else if(reduction=="umap"){ clusmat=data.frame(umap1=p1$data$UMAP_1,umap2=p1$data$UMAP_2, clusid=as.numeric(SO@meta.data[[m]])) } else if (reduction=="pca"){ clusmat=data.frame(umap1=p1$data$PC_1,umap2=p1$data$PC_2, clusid=as.numeric(SO@meta.data[[m]])) } else if (reduction=="protein_tsne"){ clusmat=data.frame(umap1=p1$data$protein_tsne_1,umap2=p1$data$protein_tsne_2, clusid=as.numeric(SO@meta.data[[m]])) } else if (reduction=="protein_umap"){ clusmat=data.frame(umap1=p1$data$protein_umap_1,umap2=p1$data$protein_umap_2, clusid=as.numeric(SO@meta.data[[m]])) } else { clusmat=data.frame(umap1=p1$data$protein_pca_1,umap2=p1$data$protein_pca_2, clusid=as.numeric(SO@meta.data[[m]])) } clusmat %>% group_by(clusid) %>% summarise(umap1.mean=mean(umap1), umap2.mean=mean(umap2)) -> umap.pos title=as.character(m) clusmat %>% dplyr::arrange(clusid) -> clusmat p2 <- ggplot(clusmat, aes(x=umap1, y=umap2)) + theme_bw() + theme(legend.title=element_blank()) + geom_point(aes(colour=clusid),alpha=0.5,shape = 20,size=0.1) + #scale_color_gradientn(colours = c("blue4", "lightgrey", "red"), values = scales::rescale(c(min, midpt2,midpt,midpt3, max), limits = c(0, 1))) + #scale_color_gradientn(colours = c("blue4", "lightgrey", "red"), values = c(min, midpt2,midpt,midpt3, max)) + #scale_color_gradientn(colors=brewer.pal(n = 5, name = "RdBu"), values = scales::rescale(c(min, midpt2,midpt,midpt3, max))) + 
scale_color_gradientn(colors=brewer.pal(n = 5, name = colgrad), values = scalecol) + guides(colour = guide_legend(override.aes = list(size=5, alpha = 1))) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) + ggtitle(title) + xlab("umap-1") + ylab("umap-2") return(p2) } m = metacols clusid = so@meta.data[[m]] maxclus = max(clusid) clusmid = 0.01/maxclus min = min(clusid) midpt1 = 0.99*value midpt = value midpt2 = 1.01*value max = max(clusid) colpoints <- c(min,midpt1,midpt,midpt2,max) colpoints <- scales::rescale(colpoints,c(0,1)) p1 <- drawtsne(so,reduction,colpoints,"RdBu") clusid = scales::rescale(SO.sub@meta.data[[m]], to=c(0,1)) clus.quant=quantile(clusid[clusid>0],probs=c(0,.25,.5,.75,1)) min = clus.quant[1] midpt = clus.quant[3] midpt3 = clus.quant[2] midpt4 = clus.quant[4] max = clus.quant[5] colpoints2 <- c(min,midpt3,midpt,midpt4,max) p2 <- drawtsne(SO.sub,reduction,colpoints2,"Blues") } ## If interactive plot requested, then ... if (plotinteractive == TRUE) { gp1 <- ggplotly(p1) gp2 <- ggplotly(p2) p <- subplot(gp1, gp2, nrows=2) print(p) } ## Else, print non-interactive plot. else { print(plot_grid(p1,p2,nrow=1)) } ## Return the subsetted Seurat object. return(list(value=SO.sub)) } ## Commented out below on 7/28/21 because it had started failing in training. It is believed to be vestigial. -- Josh M. ################################################# ## Global imports and functions included below ## ################################################# # # } # # # Functions defined here will be available to call in # the code for any table. 
print("template_function_macrophage_filtered.R #########################################################################") library(plotly);library(ggplot2);library(jsonlite); currentdir <- getwd() rds_output <- paste0(currentdir,'/rds_output') var_Annotate_Cell_Types<-readRDS(paste0(rds_output,"/var_Annotate_Cell_Types.rds")) Input_is_Seurat_count <- 0 for(item in var_Annotate_Cell_Types){ if (class(item)=="Seurat"){Input_is_Seurat_count = Input_is_Seurat_count + 1}} if(Input_is_Seurat_count == 0 ){ var_Annotate_Cell_Types<-as.data.frame(var_Annotate_Cell_Types)}else{var_Annotate_Cell_Types <- var_Annotate_Cell_Types} currentdir <- getwd() rds_output <- paste0(currentdir,'/rds_output') var_Metadata_Table<-readRDS(paste0(rds_output,"/var_Metadata_Table.rds")) Input_is_Seurat_count <- 0 for(item in var_Metadata_Table){ if (class(item)=="Seurat"){Input_is_Seurat_count = Input_is_Seurat_count + 1}} if(Input_is_Seurat_count == 0 ){ var_Metadata_Table<-as.data.frame(var_Metadata_Table)}else{var_Metadata_Table <- var_Metadata_Table} currentdir <- getwd() rds_output <- paste0(currentdir,'/rds_output') var_Sample_Names<-readRDS(paste0(rds_output,"/var_Sample_Names.rds")) Input_is_Seurat_count <- 0 for(item in var_Sample_Names){ if (class(item)=="Seurat"){Input_is_Seurat_count = Input_is_Seurat_count + 1}} if(Input_is_Seurat_count == 0 ){ var_Sample_Names<-as.data.frame(var_Sample_Names)}else{var_Sample_Names <- var_Sample_Names} invisible(graphics.off()) var_macrophage_filtered<-macrophage_filtered(var_Annotate_Cell_Types,var_Metadata_Table,var_Sample_Names) invisible(graphics.off()) saveRDS(var_macrophage_filtered, paste0(rds_output,"/var_macrophage_filtered.rds"))
f58221a0a3dba8bf2e184566efc46fabb316c40f
c8879eea856503907a134000df4ef1acf1aa5751
/cachematrix.R
eee08040943da2b90dd083084687cb764ace4c64
[]
no_license
mlg3672/ProgrammingAssignment2
3922ba5ac4eb1f63c5ac33f6db5bd973cdbf3645
4ae59d5b257801f0c93013e356e8e709b0687a23
refs/heads/master
2020-04-04T23:55:15.852721
2015-04-20T06:05:19
2015-04-20T06:05:19
34,235,305
0
0
null
2015-04-20T03:14:55
2015-04-20T03:14:55
null
UTF-8
R
false
false
919
r
cachematrix.R
## the functions below makeCacheMatrix and cacheSolve # executes 4 actions ## (1) get the value of matrix x ## (2) set the value of the matrix ## (3) get the value of the inverse matrix ## (4) set the value of the inverse matrix makeCacheMatrix <- function(x = matrix()) { ## this function returns a list m<- NULL set <-function(y) { x<<-y m<<-NULL } get <- function() x getInverse <- solve(x) setInverse <- function(inverse) m<<-inverse list(set = set, get = get, setInverse = setInverse,getInverse = getInverse) } ## the cacheSolve function calculates matrix inverse # created with makeCacheMatrix, first checks to see if # matrix has already been created cacheSolve <- function(x,...) { m<-makeCacheMatrix(x)$getInverse m<-NULL if(!is.null(m)) { message("getting cached data") return(m) } data<-makeCacheMatrix(x)$get() m <-solve(data) makeCacheMatrix(x)$setInverse(m) m }
7c7b47fdd7405aa7c014c38a3bf2d77bbb6b3685
8c126aade4c65ce684ee5eeeae10d6e6efa2fbf5
/cachematrix.R
a4a99b909ce48764677c0c3c92c09bfd056398f2
[]
no_license
ccali930/ProgrammingAssignment2
afb1c670ffba9eb9b15105865b3ec6efdefc951d
8dfe6e71f39e04ee93760fbc8b4f6e9db8fda8cd
refs/heads/master
2020-04-05T23:24:57.111579
2014-08-21T03:16:57
2014-08-21T03:16:57
null
0
0
null
null
null
null
UTF-8
R
false
false
1,100
r
cachematrix.R
## makeCacheMatrix and cacheSolve are two functions that can be used together to create a matrix, ## perform a calculation of the inverse of the matrix, then cache the value of the inverse so it can be ## easily called ## Creats a matrix object and functions for getting and setting the inverse of the ## matrix object makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinv <- function(solve) m <<- solve getinv <- function() m list(set = set, get = get, setinv = setinv, getinv = getinv) } ## Takes inverse of the matrix and caches it if the inverse has not yet been taken cacheSolve <- function(x, ...) { m <- x$getinv() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data) x$setinv(m) m ## Return a matrix that is the inverse of 'x' }
f780daf541116d6350eb39f81a1c165542bfcbb0
ebeaf5e8dd0de49dd34d7a49b4a168d322c92716
/man/select.k.func.Rd
c235e52d8516fbba88cd4e8e56ba7863245871d9
[]
no_license
cran/EXRQ
d82b4c185019aae8767aba404f969faef244dbd6
cfaff6808a8effda090131b4c6868e27e73555fe
refs/heads/master
2021-01-20T18:58:30.027434
2016-07-06T23:48:41
2016-07-06T23:48:41
62,774,040
0
0
null
null
null
null
UTF-8
R
false
false
1,413
rd
select.k.func.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/EXRQ.R \name{select.k.func} \alias{select.k.func} \title{Selection of the Tuning Parameter k} \usage{ select.k.func(y, x, Lam.y, lam, a, max.tau, grid.k, n) } \arguments{ \item{y}{a vector of n untransformed responses} \item{x}{a n x p matrix of n observations and p predictors} \item{Lam.y}{a vector of n power-transformed responses} \item{lam}{the power-transformation parameter} \item{a}{location shift parameter in the power transformation (introduced to avoid negative y values)} \item{max.tau}{the upper bound of the intermediate quantile levels} \item{grid.k}{the grid for the number of upper order statistics involved in Hill estimator} \item{n}{the number of observations} } \value{ the selected k is returned } \description{ This function selects the tuning parameter k, the number of upper order statistics involved in Hill estimator of EVI among a grid of points following the method described in Section 3.3 of Wang and Li (2013). The method selects k as the value that minimizes the discrepancy between the estimated x-dependent EVI on the transformed scale and lam times the estimated x-dependent EVI on the original scale } \references{ Wang, H. and Li, D. (2013). Estimation of conditional high quantiles through power transformation. Journal of the American Statistical Association, 108, 1062-1074. }
f79d68f383f8ecfac83facd0647e26ba2e788fa2
007c850db5e6cf12cba2007526002ecc897e2f27
/man/TS.Rd
bbc7ec78d783daa19bc0a63f959bfbb73662f970
[]
no_license
rdshankar/RImmPort
12cf59a049f434fd7d4fb5735d507b17b52b28b0
5b74cc64d969c353cf7abd2f33fc1dd09b2e4c4a
refs/heads/master
2021-01-17T13:34:11.016039
2016-03-13T03:20:33
2016-03-13T03:20:33
42,664,085
5
2
null
2015-10-13T19:38:57
2015-09-17T15:21:27
R
UTF-8
R
false
true
1,347
rd
TS.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/TrialSummary.R \name{TS} \alias{TS} \title{Trial Summary Domain Variables} \description{ { \tabular{ll}{ \strong{Variable Name} \tab \strong{Variable Label} \cr STUDYID \tab Study Identifier \cr DOMAIN \tab Domain Abbreviation \cr TSSEQ \tab Sequence Number \cr TSPARMCD \tab Trial Summary Parameter Short Name \cr TSPARM \tab Trial Summary Parameter \cr TSVAL \tab Parameter Value } } } \note{ The following table enumerates the values in TSPARMCD and TSPARM variables { \tabular{ll}{ \strong{TSPARMCD} \tab \strong{TSPARM} \cr TITLE \tab Trial Title \cr DESCR \tab Trial Description \cr INDIC \tab Trial Indication \cr TRT \tab Investigational Therapy or Treatment \cr HYPOTHS \tab Trial Hypotheses \cr SSTDTC \tab Study Start Date \cr SENDTC \tab Study End Date \cr PLANSUB \tab Planned Number of Subjects \cr ACTSUB \tab Actual Number of Subjects \cr AGEMAX \tab Planned Maximum Age of Subjects \cr AGEMIN \tab Planned Minimum Age of Subjects \cr AGEU \tab Age Units \cr SEXPOP \tab Sex of Participants \cr SPONSOR \tab Clinical Study Sponsor \cr PUBRLDAT \tab Public Release Date \cr ISTRIAL \tab Study Type \cr RESFOCUS \tab Trial Research Focus } } }
f7ec22944141962da7bd8a08496c1469766a6a5c
abfdbe31cc8413c0b3af32e7fa2f04966a933b84
/Exploration_Stuff.R
087480caff6f4ead95e3a779aac58dc3ddcdaa46
[]
no_license
ZTerechshenko/Event-Data
0ceeada80cfa771f173dc8cb820e0e075ee053dc
23f84a06e3846b1497746fb259fec82c095ddc63
refs/heads/master
2021-09-15T04:11:06.256587
2017-12-14T07:03:39
2018-05-25T14:41:56
106,856,280
0
0
null
null
null
null
UTF-8
R
false
false
7,009
r
Exploration_Stuff.R
#### Script for processing Phoenix event data and creating summary graphics #### load packages #### #install.packages("ggplot2") #install.packages("dplyr") library(ggplot2) library(dplyr) #### Read NYT Phoenix #### NYT <- read.csv(file ="Phoenix/PhoenixNYT_1945-2005.csv") #head(NYT) #nrow(NYT) # Convert to proper date format NYT <- NYT %>% mutate(date = as.Date(story_date, format="%m/%d/%Y") ) #str(NYT$date) ##### NYT Phoenix counts over time ##### # Create NYT Phoenix Geolocated Subset NYT_Geo <- NYT %>% filter(!is.na(lat)) # Summarize count by day, all NYT NYT_count_all <- count(NYT, story_date) # Summarize count by day, just Geo NYT_Geo_count <- count(NYT_Geo, story_date) # Join the Geo count to get both in same tibble NYT_count <- NYT_count_all %>% full_join(NYT_Geo_count, by = "story_date") %>% mutate(story_date = as.Date(story_date, format="%m/%d/%Y") ) %>% rename(all = n.x) %>% rename(geo = n.y) #remove used datasets rm(NYT_count_all) rm(NYT_Geo_count) head(NYT_count) #### Read FBIS Phoenix #### SWB <- read.csv(file ="Phoenix/PhoenixSWB_1979-2015.csv") #head(SWB) #nrow(SWB) # Convert to proper date format SWB <- SWB %>% mutate(date = as.Date(story_date, format="%m/%d/%Y") ) # Create SWB Phoenix Geolocated Subset SWB_Geo <- SWB %>% filter(!is.na(lat)) # Summarize count by day, all SWB SWB_count_all <- count(SWB, story_date) # Summarize count by day, just Geo SWB_Geo_count <- count(SWB_Geo, story_date) # Join the Geo count to get both in same tibble SWB_count <- SWB_count_all %>% full_join(SWB_Geo_count, by = "story_date") %>% mutate(story_date = as.Date(story_date, format="%m/%d/%Y") ) %>% rename(all = n.x) %>% rename(geo = n.y) #remove used datasets rm(SWB_count_all) rm(SWB_Geo_count) head(SWB_count) #### Read FBIS Phoenix #### SWB <- read.csv(file ="Phoenix/PhoenixSWB_1979-2015.csv") #head(SWB) #nrow(SWB) # Convert to proper date format SWB <- SWB %>% mutate(date = as.Date(story_date, format="%m/%d/%Y") ) #### SWB count, time series #### # line chart 
ggplot(NYT_count, aes(x = story_date, y = all)) + geom_line(alpha = .5 ) + scale_x_date(breaks = seq( as.Date("1945-01-01"), as.Date("2006-12-31"), by = "5 years"), date_minor_breaks = "1 year", date_labels = "%Y") + labs( title ="Phoenix NYT Events", x = "Time", y = "Daily Event Count") + theme( title = element_text(size = 14, face = "bold"), axis.text = element_text(size = 12) ) # Bar chart ggplot(NYT_count, aes(x = story_date, y = all)) + geom_bar(stat = "identity", alpha = .6, fill = "blue") + scale_y_continuous(limits = c(0, 150)) + scale_x_date(breaks = seq( as.Date("1945-01-01"), as.Date("2006-12-31"), by = "5 years"), date_minor_breaks = "1 year", date_labels = "%Y") + labs( title ="Phoenix NYT Events", x = "Time", y = "Daily Event Count") + theme( title = element_text(size = 14, face = "bold"), axis.text = element_text(size = 12) ) # Histogram showing missing dates ggplot(NYT_count, aes(x = story_date)) + geom_histogram(binwidth = 10, alpha = .6, fill = "blue") + scale_x_date(breaks = seq( as.Date("1945-01-01"), as.Date("2006-12-31"), by = "5 years"), date_minor_breaks = "1 year", date_labels = "%Y") + labs( title ="Phoenix NYT Events - Missing Records", x = "Time") + theme( title = element_text(size = 14, face = "bold"), axis.text = element_text(size = 12) ) # Events per day histogram ggplot(NYT_count) + geom_histogram(aes(x = all), binwidth = 5) + labs( title ="Histogram of Events per Day") + theme( title = element_text(size = 14, face = "bold"), axis.text = element_text(size = 12) ) #### NYT 1961 anomaly #### ggplot(NYT_count, aes(x = story_date, y = all)) + geom_bar(stat = "identity", alpha = .6, fill = "blue") + scale_x_date( limits = c( as.Date("1960-06-01"), as.Date("1962-01-01")), date_breaks = "2 month", date_minor_breaks = "1 month", date_labels = "%b-%Y") + scale_y_continuous(limits = c(0, 200)) + annotate("rect", xmin = as.Date("1961-01-01"), xmax = as.Date("1961-11-16"), ymin = 152, ymax = 165, alpha = .8, fill = "firebrick1")+ 
annotate("text", x = as.Date("1961-06-15"), y = 160, label= "January 1 to November 16", color = "white") + labs( title ="1961 Anamoly", x = "Time", y = "Daily Event Count") + theme( title = element_text(size = 14, face = "bold"), axis.text = element_text(size = 12) ) #### NYT 1978 anomaly #### ggplot(NYT_count, aes(x = story_date, y = all)) + geom_bar(stat = "identity", alpha = .6, fill = "blue") + scale_x_date( limits = c( as.Date("1978-06-01"), as.Date("1978-12-31")), date_breaks = "1 month", date_minor_breaks = "1 month", date_labels = "%b-%Y") + scale_y_continuous(limits = c(0, 200)) + annotate("rect", xmin = as.Date("1978-08-09"), xmax = as.Date("1978-11-06"), ymin = 152, ymax = 165, alpha = .8, fill = "firebrick1")+ annotate("text", x = as.Date("1978-10-01"), y = 160, label= "August 9 to November 11", color = "white") + labs( title ="1978 Anamoly", x = "Time", y = "Daily Event Count") + theme( title = element_text(size = 14, face = "bold"), axis.text = element_text(size = 12) ) #### NYT Zoom in on random sample #### ggplot(NYT_count, aes(x = story_date, y = all)) + geom_bar(stat = "identity", fill = "blue") + scale_x_date( limits = c( as.Date("1985-01-01"), as.Date("1987-12-31")), date_breaks = "3 months", date_minor_breaks = "1 month", date_labels = "%b-%Y") + scale_y_continuous(limits = c(0, 200)) + labs( title ="Random Area", x = "Time", y = "Daily Event Count") + theme( title = element_text(size = 14, face = "bold"), axis.text = element_text(size = 10) ) # annotate("segment", # x = as.Date("1961-01-01"), # xend = as.Date("1961-01-01"), # y = 0, # yend = 165, # alpha = .8, # colour = "firebrick1", # linetype = 2)+ ggplot(NYT, aes(x=date)) + geom_histogram(bins = 56) ggplot(NYT, aes(x=date)) + geom_freqpoly( bins = 56) ggplot(NYT_Geo, aes(x=date)) + geom_freqpoly( bins = 56)
850ac2696aef128fcf530b531f4692e32d9fe590
a5b8244731689344004c67af107b1a531f7e9e2f
/src/00_data_wrangle/03_identify_drops.R
f40b9059f0efa0a4833f65ef897bcbb834301c5a
[]
no_license
jvenzor23/DefensiveCoverageNet
4efcb0f36d6806c71a1750fa9b58ba63c55e3929
85eef09aeede123aa32cb8ad3a8075cd7b7f3e43
refs/heads/master
2023-02-13T22:14:23.396421
2021-01-07T22:52:32
2021-01-07T22:52:32
317,361,746
4
0
null
null
null
null
UTF-8
R
false
false
4,339
r
03_identify_drops.R
# This code animates a given play in the player tracking data, while # also displaying the epa values for all receivers (targeted receiver # shown in red) # Clean workspace rm(list=ls()) # Setting Working Directory setwd("~/Desktop/NFL_BIG_DATA_BOWL_2021/inputs/") # Calling Necessary Libraries library(tidyverse) library(dplyr) library(ggplot2) library(lubridate) library(reticulate) library(rootSolve) library(modeest) library(gganimate) library(magick) # Reading in The Data ----------------------------------------------------- players = read.csv("~/Desktop/CoverageNet/inputs/players.csv") games = read.csv("~/Desktop/CoverageNet/inputs/games.csv") plays = read.csv("~/Desktop/CoverageNet/inputs/plays.csv") targeted_receiver = read.csv("~/Desktop/CoverageNet/inputs/targetedReceiver.csv") # Finding Drops ----------------------------------------------------------- setwd("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/") files = dir()[startsWith(dir(), "week")] drops_tot = data.frame() for(file in files){ pbp_data = read.csv(paste0("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/", file)) pbp_data_incompletions = pbp_data %>% inner_join(plays %>% filter(passResult == "I") %>% distinct(gameId, playId)) %>% filter(event == "pass_arrived") incompletions_off_target = plays %>% inner_join(pbp_data %>% distinct(gameId, playId)) %>% filter(passResult == "I") %>% distinct(gameId, playId) %>% anti_join(pbp_data_incompletions %>% distinct(gameId, playId)) pbp_data_targeted_receiver = pbp_data_incompletions %>% inner_join(targeted_receiver, by = c("gameId", "playId", "nflId" = "targetNflId")) pbp_data_football = pbp_data_incompletions %>% filter(is.na(nflId)) %>% dplyr::select(gameId, playId, frameId, x, y) %>% rename(x_football = x, y_football = y) pbp_data_defense = pbp_data_incompletions %>% filter(!IsOnOffense, !is.na(nflId)) %>% dplyr::select(gameId, playId, frameId, x, y) receiver_close_enough = pbp_data_targeted_receiver %>% inner_join(pbp_data_football) %>% 
mutate(receiver_football_dist = sqrt((x - x_football)^2 + (y - y_football)^2)) %>% filter(receiver_football_dist <= 1.5, y > 0, y < 53 + 1/3, x > 0, x < 120) defenders_close_enough = pbp_data_defense %>% inner_join(pbp_data_football) %>% mutate(receiver_football_dist = sqrt((x - x_football)^2 + (y - y_football)^2)) %>% filter(receiver_football_dist <= 1.5) %>% distinct(gameId, playId) defenders_close_enough_to_receiver = pbp_data_targeted_receiver %>% inner_join(pbp_data_defense %>% rename(x_def = x, y_def = y)) %>% mutate(receiver_defender_dist = sqrt((x - x_def)^2 + (y - y_def)^2)) %>% filter(receiver_defender_dist <= 1.5) %>% distinct(gameId, playId) drops_plays = receiver_close_enough %>% anti_join(defenders_close_enough) %>% anti_join(defenders_close_enough_to_receiver) %>% distinct(gameId, playId, nflId, displayName) drops_tot = rbind(drops_plays, drops_tot) } # QA ---------------------------------------------------------------------- player_drops = drops_tot %>% group_by(displayName, nflId) %>% summarize(drops = n()) %>% arrange(desc(drops)) setwd("~/Desktop/NFL_PBP_DATA/") pbp_data_2018 = read_csv("reg_pbp_2018.csv", col_types = cols()) %>% dplyr::select(play_id, game_id, interception_player_name, pass_defense_1_player_name, pass_defense_2_player_name) %>% rename(gameId = game_id, playId = play_id) pbus = pbp_data_2018 %>% filter(!is.na(pass_defense_1_player_name)) ints = pbp_data_2018 %>% filter(!is.na(interception_player_name)) # should be 0! drops_and_pbu = drops_tot %>% inner_join(pbus) drops_and_ints = drops_tot %>% inner_join(ints) drops_clean = drops_tot %>% anti_join(drops_and_pbu) %>% distinct(gameId, playId) write.csv(drops_clean, "~/Desktop/CoverageNet/src/00_data_wrangle/helper_tables/drops.csv", row.names = FALSE)
c371c509788868a706291d345227b0d627752249
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/yum/examples/load_and_simplify.Rd.R
25bfda535555708dfa814edaa8d880cc1799aca0
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
437
r
load_and_simplify.Rd.R
library(yum) ### Name: load_and_simplify ### Title: Load YAML fragments in one or multiple files and simplify them ### Aliases: load_and_simplify load_and_simplify_dir ### ** Examples yum::load_and_simplify(text=" --- firstObject: id: firstFragment --- Outside of YAML --- otherObjectType: - id: secondFragment parentId: firstFragment - id: thirdFragment parentId: firstFragment --- Also outside of YAML");
56b3e0729b873babea32c465d1a769e6b6198b45
659d3ecae15a0673b0fb5ca2298209071e226a4b
/man/bounds.r.Rd
fb1ae216e4d9b502b6f3721943fe9ef592723135
[]
no_license
dstanley4/intervalTraining
c1c431c5114c4d4dafd7874f32ea7ee2630edd2a
adb3c31d04e1ca55ec8700e6b1c8d55be9f3dc2a
refs/heads/master
2020-05-19T22:20:13.489805
2019-05-31T13:42:53
2019-05-31T13:42:53
185,244,262
0
0
null
null
null
null
UTF-8
R
false
true
502
rd
bounds.r.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_functions.R \name{bounds.r} \alias{bounds.r} \title{Obtain the range of sample correlations for a given population correlation and sample size} \usage{ bounds.r(samples, level = 0.95) } \arguments{ \item{level}{Confidence level (0 to 1), default .95} \item{pop_r}{Population correlation} \item{n}{Sample size} } \description{ Obtain the range of sample correlations for a given population correlation and sample size }
9c487573fd095a48385de6916778a10f17217f1d
d4cd3909b5c5ff996e405a9dbcdb830a9f18599f
/networkSetosa.R
f9dc6658df014eabde11a52085151d81fc0697e0
[]
no_license
jevzimite/Projects.compiled
6bb39df27ed44871c240fea4408967248f76293d
df1fdcaa12bf8d339a2ca782e28c425a44c12409
refs/heads/main
2023-05-01T13:30:24.549058
2021-05-25T22:11:07
2021-05-25T22:11:07
332,641,146
0
0
null
null
null
null
UTF-8
R
false
false
1,015
r
networkSetosa.R
# spc = readline(prompt = "What species would you like to build a network for?") # spc = as.character(spc) spc = "setosa" df = iris df = iris %>% filter(Species == spc) df$Species = NULL round(cor(df),2) cmb = combn(colnames(df), 2) from = cmb[1,] to = cmb[2,] netdf = data.frame(from, to) weights = c(.74, .27, .28, .18, .23, .33) netdf = cbind(netdf, weights) routes_igraph <- graph_from_data_frame(d = netdf, vertices = colnames(df), directed = FALSE) plot(routes_igraph) routes_igraph_tidy <- as_tbl_graph(routes_igraph) ggraph(routes_igraph_tidy, layout = "linear" ) + # geom_edge_arc(aes(colour = 2))+ geom_node_point(size = 33, color = 2) + geom_edge_arc(aes(width = netdf$weights), alpha = 0.75, linetype = 7, color = 2) + scale_edge_width(range = c(.1, 5)) + geom_node_text(aes(label = colnames(df)), repel = 0, color = "black", fontface = 2) + labs(edge_width = "Correlation") + ylim(c(-.1,1.2))+xlim(c(-.2,5))+ theme_graph()
71d4397a345b9eb90a85e99313a536837a64623d
8edf0521ebc0ca53ec618d6d220c47c851caaa71
/man/SSplotMovementRates.Rd
edc834dc053cc8ed922b19fc79f872d12018c1b6
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
amart/r4ss
9b730038ee4c4b6d38aaabe81b6ad9fddf0eb4f3
fbccbace9a70e846401d32577aeab9f25cb31ba5
refs/heads/master
2021-01-17T06:03:03.172272
2020-10-04T01:38:14
2020-10-04T01:38:14
24,735,775
1
1
null
null
null
null
UTF-8
R
false
true
2,005
rd
SSplotMovementRates.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SSplotMovementRates.R \name{SSplotMovementRates} \alias{SSplotMovementRates} \title{Plot movement rates from model output} \usage{ SSplotMovementRates( replist, plot = TRUE, print = FALSE, subplots = 1:2, plotdir = "default", colvec = "default", ylim = "default", legend = TRUE, legendloc = "topleft", moveseas = "all", min.move.age = 0.5, pwidth = 6.5, pheight = 5, punits = "in", res = 300, ptsize = 10, cex.main = 1, verbose = TRUE ) } \arguments{ \item{replist}{list created by \code{\link{SS_output}}} \item{plot}{plot to active plot device?} \item{print}{print to PNG files?} \item{subplots}{which subplots to create} \item{plotdir}{where to put the plots (uses model directory by default)} \item{colvec}{vector of colors for each movement rate in the plot} \item{ylim}{optional input for y range of the plot. By default plot ranges from 0 to 10\% above highest movement rate (not including fish staying in an area).} \item{legend}{add a legend designating which color goes with which pair of areas?} \item{legendloc}{location passed to legend function (if used)} \item{moveseas}{choice of season for which movement rates are shown} \item{min.move.age}{Minimum age of movement (in future will come from Report file)} \item{pwidth}{width of plot} \item{pheight}{height of plot} \item{punits}{units for PNG file} \item{res}{resolution for PNG file} \item{ptsize}{point size for PNG file} \item{cex.main}{Character expansion parameter for plot titles} \item{verbose}{Print information on function progress.} } \description{ Plots estimated movement rates in final year for each area/season with movement as reported in Report.sso. If movement is time-varying, an additional figure shows pattern across years. } \examples{ \dontrun{ SSplotMovementRates(myreplist) } } \seealso{ \code{\link{SS_output}}, \code{\link{SSplotMovementRates}}, } \author{ Ian Taylor }
c5f410d52fc5bf285cc1b3bcb1bbf2f4c3e89ca9
2b81de418ef2616e2f8be9dfa8be9ff785d28431
/other_analyses_not_in_paper/round_2/round_2_scoring.R
641f90211d3af800a5137bed19ac5ad6c2208108
[ "Apache-2.0" ]
permissive
Sage-Bionetworks/IDG-DREAM-Challenge-Analysis
006538ac946e0fc13b03c9c48be111b03efdf2c8
19710fcd1714c938312048f78b768ebbe91f6cd9
refs/heads/master
2023-03-28T02:11:24.834047
2021-03-31T00:14:33
2021-03-31T00:14:33
157,908,491
2
1
null
null
null
null
UTF-8
R
false
false
6,513
r
round_2_scoring.R
source('R/bootstrap.R') library(reticulate) library(tidyverse) library(doMC) doMC::registerDoMC(cores = detectCores()) use_python("/usr/local/bin/python2") synapse <- import("synapseclient") syn <- synapse$Synapse() synutils <- synapse$utils syn$login() source_python('https://raw.githubusercontent.com/Sage-Bionetworks/IDG-DREAM-Challenge-Analysis/master/round_1b/evaluation_metrics_python2.py?token=AE3WNSGB6AYUBWIOM75LP4S4ZB2WK') spearman_py <- function(gold, pred){ gold_py <- gold %>% np_array() pred_py <- pred %>% np_array() spearman(gold_py, pred_py) } rmse_py <- function(gold, pred){ gold_py <- gold %>% np_array() pred_py <- pred %>% np_array() rmse(gold_py, pred_py) } fv <- syn$tableQuery("select id, submissionId AS objectId, teamId, userId from syn18513076")$filepath %>% read_csv() leaderboard <- read_csv(syn$get("syn18520916")$path) %>% full_join(fv) get_path <- function(id){ syn$get(id)$path } gold <- read_csv(syn$get("syn18421225")$path) disqualified_submissions <- c("9686285") ####SPEARMAN ANALYSIS best_spearman_by_team <- leaderboard %>% group_by(submitterId) %>% top_n(1, spearman) %>% ungroup() %>% arrange(-spearman) %>% filter(!objectId %in% disqualified_submissions) paths <- map_chr(best_spearman_by_team$id, get_path) best_path <- paths[1] other_paths <- paths[-1] results_spearman <- finalScoring(predictions = other_paths, predictionColname = 'pKd_[M]_pred', predictionIds = best_spearman_by_team$objectId[-1], goldStandard = gold, goldStandardColname = 'pKd_[M]', bestPrediction = best_path, bestPredictionId = best_spearman_by_team$objectId[1], keyColumns = colnames(gold)[1:6], doParallel = TRUE, scoreFun = spearman_py) tidy_bayes <- tibble('objectId' = colnames(results_spearman$bootstrappedScores), 'bayes' = results_spearman$bayes) # tidy_bayes$bayes[tidy_bayes$objectId == 9686285] <- 0 tidy_res <- results_spearman$bootstrappedScores %>% as.data.frame %>% tidyr::gather(objectId, bootstrappedScore) %>% left_join(tidy_bayes) %>% left_join(leaderboard %>% 
mutate(objectId = as.character(objectId))) ggplot(data = tidy_res) + geom_boxplot(aes(x = reorder(objectId, bootstrappedScore, fun = median), y = bootstrappedScore, color = cut(bayes, c(-Inf, 3, 5,Inf))), outlier.shape = NA)+ coord_flip() + scale_color_manual(values = c("#30C31E","#FFCA3E","#FF3E3E"), name = "Bayes Factor", labels = c("<3","3-5",">5")) + labs(x = "Submission", y = "Bootstrapped Spearman") + theme_minimal() + theme(axis.text.y = element_text(size = 10)) ggplot(data = tidy_res %>% filter(bayes <20)) + geom_boxplot(aes(x = reorder(objectId, bootstrappedScore, fun = median), y = bootstrappedScore, color = cut(bayes, c(-Inf, 3, 5,Inf))), outlier.shape = NA)+ coord_flip() + geom_label(data = tidy_res %>% select(objectId, bayes, average_auc) %>% distinct %>% filter(bayes <5), aes(x = objectId, label = paste0("AUC: ", round(average_auc, 3))), y = 0.35) + scale_color_manual(values = c("#30C31E","#FFCA3E","#FF3E3E"), name = "Bayes Factor", labels = c("<3","3-5",">5")) + labs(x = "Submission", y = "Bootstrapped Spearman") + theme_minimal() + theme(axis.text.y = element_text(size = 10)) ####RMSE ANALYSIS best_rmse_by_team <- leaderboard %>% group_by(submitterId) %>% top_n(1, -rmse) %>% ungroup() %>% arrange(rmse) paths <- map_chr(best_rmse_by_team$id, get_path) best_path <- paths[1] other_paths <- paths[-1] results_rmse <- finalScoring(predictions = other_paths, predictionColname = 'pKd_[M]_pred', predictionIds = best_rmse_by_team$objectId[-1], goldStandard = gold, goldStandardColname = 'pKd_[M]', bestPrediction = best_path, bestPredictionId = best_rmse_by_team$objectId[1], keyColumns = colnames(gold)[1:6], doParallel = TRUE, scoreFun = rmse_py, largerIsBetter = F) tidy_bayes <- tibble('objectId' = colnames(results_rmse$bootstrappedScores), 'bayes' = results_rmse$bayes) tidy_res_rmse <- results_rmse$bootstrappedScores %>% as.data.frame %>% tidyr::gather(objectId, bootstrappedScore) %>% left_join(tidy_bayes) %>% left_join(leaderboard %>% mutate(objectId = 
as.character(objectId))) ggplot(data = tidy_res_rmse) + geom_boxplot(aes(x = reorder(objectId, -bootstrappedScore, fun = median), y = log10(bootstrappedScore), color = cut(bayes, c(-Inf, 3, 5, 20, Inf))), outlier.shape = NA)+ coord_flip() + scale_color_manual(values = c("#30C31E","#FFCA3E","#FF3E3E",'#000000'), name = "Bayes Factor", labels = c("<3","3-5",">5")) + labs(x = "Submission", y = "log10(Bootstrapped RMSE)") + theme_minimal() + theme(axis.text.y = element_text(size = 10)) ggplot(data = tidy_res_rmse %>% filter(bayes <20)) + geom_boxplot(aes(x = reorder(objectId, -bootstrappedScore, fun = median), y = log10(bootstrappedScore), color = cut(bayes, c(-Inf, 3, 5, 20, Inf))), outlier.shape = NA)+ coord_flip() + geom_label(data = tidy_res_rmse %>% select(objectId, bayes, average_auc) %>% distinct %>% filter(bayes <5), aes(x = objectId, label = paste0("AUC: ", round(average_auc, 3))), y = 0.0050) + scale_color_manual(values = c("#30C31E","#FFCA3E","#FF3E3E",'#000000'), name = "Bayes Factor", labels = c("<3","3-5",">5")) + labs(x = "Submission", y = "log10(Bootstrapped RMSE)") + theme_minimal() + theme(axis.text.y = element_text(size = 10)) ###comparison of rounds r1b <- syn$tableQuery('select * from syn18487972')$asDataFrame() r1 <- syn$tableQuery('SELECT * FROM syn17054253')$asDataFrame() length(unique(leaderboard$submitterId[leaderboard$submitterId %in% c(r1b$submitterId)])) length(unique(leaderboard$submitterId[!leaderboard$submitterId %in% c(r1b$submitterId)])) length(unique(leaderboard$submitterId[leaderboard$userId %in% c(r1$userId)])) length(unique(leaderboard$submitterId[!leaderboard$userId %in% c(r1$userId)]))
3818cbe7a82281444a613070d933f49a8456e9be
50e4e1244a771566be1a3cb3c20c735338a33e13
/run-cdn-ccc-experiments.R
e2a887879b10593c86230c1c40a2b7a9dd1eb275
[]
no_license
llorenc/cdn-ccc
023a075afa522d81ee22c1466684fbed923caae5
ab0b6110250d45bfab8ce093cb917120b674c69d
refs/heads/master
2021-01-01T05:27:43.463093
2016-05-23T18:05:23
2016-05-23T18:05:23
57,230,255
0
0
null
null
null
null
UTF-8
R
false
false
4,991
r
run-cdn-ccc-experiments.R
################################################################################# ## Simulation of CDN CCC. ## (c) Llorenç Cerdà, 2016 ################################################################################# source("r6-cdn-ccc.R") source("run-simulation.R") library('ggplot2') ## library('utils') # Rprof() ## Rprof() ## ## Simulation parameters ## debug.events <- F # TRUE number.of.proxies <- 10 simu.time <- 300 # seconds ## Proxies configuration bw1 <- 10e6/8 # proxies with lower Bw bw2 <- 20e6/8 # proxies with medium Bw bw3 <- 100e6/8 # proxies with higher Bw bw1.num <- round(number.of.proxies/3) bw2.num <- round(number.of.proxies/3) bw3.num <- number.of.proxies-2*round(number.of.proxies/3) proxy.access.bw <- # Bw in Bps (Bytes per second) of each proxy c(rep(bw1, len=bw1.num), rep(bw2, len=bw2.num), rep(bw3, len=bw3.num)) ##fname <- paste0(format(Sys.time(), "%Y-%m-%d_%H-%M-%S"), ".trace") sla.bw <- 1 # in Mbps alpha <- 1 ## ## testing ## debug.events <- F simu.time <- 20 # seconds zrr <- run.simulation( proxy.access.bw=proxy.access.bw, sla.bw=sla.bw/alpha, # SLA Bw in Mbps cdn.load=0.5, background.load=0, #0.05, Rweight=1e10, # gamma in equation (3) zrr.update.time=1, owd.mean=0, # owd.mean=20, owd.coef.var=0.1, choose.proxy.type='roundrobin', max.queue.len=300, seed=0 # simulation seed ) zrr$Qreq zrr$Rqueue zrr$Penalty ## print the queue of one proxy zrr$proxies[[1]]$print.queues() zrr$proxies[[2]]$print.queues() zrr$proxies[[3]]$print.queues() zrr$proxies[[4]]$print.queues() zrr$proxies[[6]]$print.queues() ## ## run simulations varying weights (w) and update interval (ut) ## simu.res <- list() for(ut in c(0.1, 0.5, 1)) { for(w in c(1, 10, 100)) { ## for(ut in c(1)) { ## for(w in c(10, 100)) { fname <- paste0(format(Sys.time(), "%Y-%m-%d-t"), ut, "w", w, "s", simu.time, ".trace") message(fname) ## zrr <- run.simulation( fname=fname, sla.bw=sla.bw*1.5, # SLA Bw in Bps (Bytes per second) cdn.load=0.5, background.load=0.01, Rweight=w*1000*sla.bw, # alpha 
in equation (3) Pweight=w*10*sla.bw, # Penalty weight zrr.update.time=ut, seed=0 # simulation seed ) simu.res <- c(simu.res, list(list(zrr=zrr, ut=ut, w=w))) } } ## ## check one trace ## fname <- '2016-04-25-t0.1w100s600.trace' ## trace.ta <- read.table(header=T, gzfile(paste0(fname, '.gz'))) trace.ta <- cbind(trace.ta, Bw=NA) trace.ta$Bw[trace.ta$proxy %in% 1:bw1.num] <- as.character(bw1*8e-6) trace.ta$Bw[trace.ta$proxy %in% (bw1.num+1):(bw1.num+bw2.num)] <- as.character(bw2*8e-6) trace.ta$Bw[trace.ta$proxy %in% (bw1.num+bw2.num+1):number.of.proxies] <- as.character(bw3*8e-6) ggplot(data=trace.ta, aes(QLen, color=Bw)) + stat_ecdf() + xlab("Queue Length") + ylab("ECDF") ## trace.ta.cdn$proxy <- as.factor(trace.ta.cdn$proxy) trace.ta.cdn <- trace.ta[trace.ta$type=='cdn',] ggplot(data=trace.ta.cdn, aes(throughput*8e-6, color=Bw)) + stat_ecdf() + scale_x_log10() + geom_vline(xintercept = sla.bw*8e-6) + annotation_logticks(sides="b") + xlab("throughput [Mbps] (log scale)") + ylab("ECDF") ## print Q and R queues of the ZRR zrr$Qreq zrr$Rqueue zrr$Penalty ## print the queue of one proxy zrr$proxies[[1]]$print.queues() zrr$proxies[[2]]$print.queues() zrr$proxies[[3]]$print.queues() zrr$proxies[[4]]$print.queues() zrr$proxies[[6]]$print.queues() ## ## facet_grid of all traces ## trace.ta.all <- data.frame() for(ut in c(0.1, 0.5, 1)) { for(w in c(1, 10, 100)) { ## for(ut in c(1)) { ## for(w in c(10, 100)) { fname <- paste0("2016-04-25-t", ut, "w", w, "s", simu.time, ".trace") trace.ta.all <- rbind(trace.ta.all, cbind(read.table(header=T, gzfile(paste0(fname, '.gz'))), ut=ut, w=w, Link=NA)) } } trace.ta.all$Link[trace.ta.all$proxy %in% 1:bw1.num] <- as.character(bw1*8e-6) trace.ta.all$Link[trace.ta.all$proxy %in% (bw1.num+1):(bw1.num+bw2.num)] <- as.character(bw2*8e-6) trace.ta.all$Link[trace.ta.all$proxy %in% (bw1.num+bw2.num+1):number.of.proxies] <- as.character(bw3*8e-6) trace.ta.all.cdn <- trace.ta.all[trace.ta.all$type=='cdn',] ggplot(data=trace.ta.all.cdn, 
aes(throughput*8e-6, color=Link)) + facet_grid(ut ~ w) + stat_ecdf() + scale_x_log10() + geom_vline(xintercept = sla.bw*8e-6) + annotation_logticks(sides="b") + xlab("throughput [Mbps] (log scale)") + ylab("ECDF") ggsave(file="throughput-ecdf.pdf") ggplot(data=trace.ta.all, aes(QLen, color=Link)) + facet_grid(ut ~ w) + stat_ecdf() + xlab("Queue Length") + ylab("ECDF") ggsave(file="queue-length-ecdf.pdf") ## summaryRprof(filename = "Rprof.out")
73b738db3cca33ee53fdffffacafb55766befb20
7244d6ecc30aeda541713f7310a28382645a5999
/R/ag.ds.histogram.R
6fbc35109cab9babdf16a6c8c16ab6dd5a59b950
[]
no_license
datashield/ag.dev.cl
52cab8e410c1b67a98b55222ad7edf9cb7a53423
717a75ad29bac2ad3848770a76e5390186b0fbdd
refs/heads/master
2021-01-19T08:24:05.981000
2013-09-01T01:05:59
2013-09-01T01:05:59
null
0
0
null
null
null
null
UTF-8
R
false
false
5,969
r
ag.ds.histogram.R
#'
#' @title Plots a histogram
#' @description This function plots a histogram of the given data values.
#' It calls a DataSHIELD server-side function that produces the
#' histogram objects to plot. The objects to plot do not contain bins with
#' counts < 5. The function allows the user to plot distinct histograms
#' (one for each study) or a combined histogram that merges the single plots.
#' @param opals a list of opal object(s) obtained after logging in to opal servers;
#' these objects also hold the data assigned to R, as \code{dataframe}, from opal
#' datasources.
#' @param xvect vector of values for which the histogram is desired.
#' @param type a character which represents the type of graph to display.
#' If \code{type} is set to 'combine', a histogram that merges the single
#' plots is displayed. Each histogram is plotted separately if \code{type}
#' is set to 'split'.
#' @return one or more histogram plots depending on the argument \code{type}
#' @author Gaye, A.
#' @export
#' @examples {
#'
#' # load that contains the login details
#' data(logindata)
#'
#' # login and assign specific variable(s)
#' myvar <- list("LAB_TSC")
#' opals <- ag.ds.login(logins=logindata,assign=TRUE,variables=myvar)
#'
#' # Example 1: plot a combined histogram of the variable 'LAB_TSC' - default behaviour
#' ag.ds.histogram(opals=opals, xvect=quote(D$LAB_TSC))
#'
#' # Example 2: Plot the histograms separately (one per study)
#' ag.ds.histogram(opals=opals, xvect=quote(D$LAB_TSC), type="split")
#' }
#'
ag.ds.histogram <- function(opals=opals, xvect=NULL, type="combine"){

  # Guard: a valid list of opal connections is required.
  if(is.null(opals)){
    cat("\n\n ALERT!\n")
    cat(" No valid opal object(s) provided.\n")
    cat(" Make sure you are logged in to valid opal server(s).\n")
    stop(" End of process!\n\n", call.=FALSE)
  }

  # Guard: a variable expression (e.g. quote(D$LAB_TSC)) is required.
  if(is.null(xvect)){
    cat("\n\n ALERT!\n")
    cat(" Please provide a valid numeric vector\n")
    stop(" End of process!\n\n", call.=FALSE)
  }

  # Get the name of the variable used for the histogram (the text after '$').
  variable <- strsplit(deparse(xvect), "\\$", perl=TRUE)[[1]][2]

  # Check the variable is available and not empty on every server.
  vars2check <- list(xvect)
  opals <- ag.ds.checkvar(opals, vars2check)

  # Get the range from each study and produce the 'global' range so that all
  # studies use identical histogram breaks.
  cally1 <- call("ag.range.ds", xvect)
  ranges <- datashield.aggregate(opals, cally1)
  minrs <- vapply(ranges, function(r) r[1], numeric(1))
  maxrs <- vapply(ranges, function(r) r[2], numeric(1))
  range.arg <- c(min(minrs), max(maxrs))

  # Call the server-side function that produces the histogram object to plot.
  cally2 <- call("ag.histogram.ds", xvect, range.arg[1], range.arg[2])
  hist.objs <- vector("list", length(opals))
  asterix2plot <- vector("list", length(opals))
  for(i in seq_along(opals)){
    output <- datashield.aggregate(opals[i], cally2)
    hist.objs[[i]] <- output[[1]]$histobject
    # Bin positions whose counts were suppressed (0 < count < 5).
    # NOTE(review): 'aterix2plot' matches the server-side element name; do not "fix".
    asterix2plot[[i]] <- output[[1]]$aterix2plot
  }

  # Combine the histogram objects.
  # 'breaks' and 'mids' are the same for all studies, so only the counts,
  # densities and intensities need to be accumulated.
  global.counts <- rep(0, length(hist.objs[[1]]$counts))
  global.density <- rep(0, length(hist.objs[[1]]$density))
  global.intensities <- rep(0, length(hist.objs[[1]]$intensities))
  for(i in seq_along(opals)){
    global.counts <- global.counts + hist.objs[[i]]$counts
    global.density <- global.density + hist.objs[[i]]$density
    global.intensities <- global.intensities + hist.objs[[i]]$intensities
  }
  # BUG FIX: the summed densities/intensities were previously divided by the
  # literal 3 (the number of studies in the original example), which is wrong
  # whenever a different number of studies is analysed. Divide by the actual
  # number of studies instead.
  global.density <- global.density/length(opals)
  global.intensities <- global.intensities/length(opals)

  # Generate the combined histogram object to plot from the first study's
  # template (breaks/mids are shared) with the pooled components swapped in.
  combined.histobject <- hist.objs[[1]]
  combined.histobject$counts <- global.counts
  combined.histobject$density <- global.density
  combined.histobject$intensities <- global.intensities

  # Plot: if 'type'="combine" plot a combined histogram, if 'type'="split"
  # plot single histograms separately (one panel per study).
  if(type=="combine"){
    colour <- "red"
    par(mfrow=c(1,1))
    plot(combined.histobject, col=colour, xlab=variable, main='Histogram of the pooled data')
  }else{
    if(type=="split"){
      ll <- length(opals)
      colour <- rainbow(ll)
      if(ll > 1){
        # Lay panels out in two columns, rounding rows up for odd counts.
        if((ll %% 2) == 0){ numr <- ll/2 }else{ numr <- (ll+1)/2 }
        numc <- 2
        par(mfrow=c(numr,numc))
        for(i in 1:ll){
          plot(hist.objs[[i]], col=colour[i], xlab=variable,
               main=paste("Histogram of ", names(opals)[i], sep=""))
          # If there are cells with 0 < count < 5, mark them with '*' on the graph.
          if(length(asterix2plot[[i]]) > 0){
            text(asterix2plot[[i]], rep(10, length(asterix2plot[[i]])), "*", pos=3, cex=1.2)
            xpos <- min(hist.objs[[i]]$breaks, na.rm=TRUE)
            ypos <- max(hist.objs[[i]]$counts, na.rm=TRUE)
            text(xpos, ypos, "'*' Cells were 0 < count < 5", pos=3, cex=1.2)
          }
        }
      }else{
        par(mfrow=c(1,1))
        plot(hist.objs[[1]], col=colour[1], xlab=variable,
             main=paste("Histogram of ", names(opals)[1], sep=""))
        # If there are cells with 0 < count < 5, mark them with '*' on the graph.
        if(length(asterix2plot[[1]]) > 0){
          text(asterix2plot[[1]], rep(10, length(asterix2plot[[1]])), "*", pos=3, cex=1.2)
          xpos <- min(hist.objs[[1]]$breaks, na.rm=TRUE)
          ypos <- max(hist.objs[[1]]$counts, na.rm=TRUE)
          text(xpos, ypos, "'*' Cells were 0 < count < 5", pos=3, cex=1.2)
        }
      }
    }else{
      stop('Function argument "type" has to be either "combine" or "split"')
    }
  }
}
a09c71e864db6acd6bd9c042293e0e7f402eac66
cad6f58627cdf4ccbb1363ae81bde60e4aeb65fb
/man/plot.param.Rd
6fc67c7af598bbaa61909e5eaa05eeb789655386
[]
no_license
WangTJ/glidars
d011a7f94778e6d62f6243e963d702a3b0857878
bcee3000b1afdcff501ea218f7d72dcca829bc1e
refs/heads/master
2021-08-06T20:43:15.353332
2021-01-19T01:37:49
2021-01-19T01:37:49
239,854,611
2
0
null
null
null
null
UTF-8
R
false
true
654
rd
plot.param.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func_visual.R
\name{plot.param}
\alias{plot.param}
\title{Plot function for class "param"}
\usage{
\method{plot}{param}(beta, lambda.index = NULL)
}
\arguments{
\item{beta}{beta is of class "param"}

\item{lambda.index}{when the param beta contains multiple values of the tuning parameter, the user needs to specify the index of the tuning parameter to be plotted}
}
\description{
This function plots the coefficients of an object of class "param" (see \code{\link{as.param}} for details) on a 2-way T/N table, using the darkness of the color to represent the values of coefficients.
}
43401dea30849edf827a45a67b7aac7f00ae3331
fb37979cd9880d94815d60a76caeb2d51b90585d
/R/describe.R
0f8af8302a65145ac2817c6876fd0880441c6763
[]
no_license
cran/onewaytests
6ffcb87235f3b6557c96544a2f5c127d6eacf404
00eb041971b92468ff56a01dd84796832e5bb46b
refs/heads/master
2023-01-29T09:10:25.715174
2023-01-18T14:00:02
2023-01-18T14:00:02
41,308,667
0
0
null
null
null
null
UTF-8
R
false
false
1,923
r
describe.R
# Descriptive statistics of a numeric response by group.
#
# Args:
#   formula: a two-sided formula of the form response ~ group.
#   data:    a data frame containing the variables used in the formula.
#
# Returns: a data frame with one row per group level and the columns
#   n, Mean, Std.Dev, Median, Min, Max, 25th, 75th, Skewness, Kurtosis, NA
#   (the final column is the number of missing responses in that group,
#   counted before incomplete cases are dropped).
#
# NOTE(review): skewness() and kurtosis() are not base R; they are assumed to
# be supplied by the package this file belongs to (e.g. the 'moments' package).
describe <- function(formula, data){

  data <- model.frame(formula, data)
  dp <- as.character(formula)  # c("~", <response name>, <group name>)

  # Validate that both sides of the formula name columns of the data.
  if (!any(colnames(data) == dp[[3L]])) stop("The name of group variable does not match the variable names in the data. The group variable must be one factor.")
  if (!any(colnames(data) == dp[[2L]])) stop("The name of response variable does not match the variable names in the data.")

  y <- data[[dp[[2L]]]]
  group <- data[[dp[[3L]]]]

  if (!(is.factor(group) | is.character(group))) stop("The group variable must be a factor or a character.")
  if (is.character(group)) group <- as.factor(group)
  if (!is.numeric(y)) stop("The response must be a numeric variable.")
  if (length(y) != length(group)) stop("Lengths of y and group variables are not equal.")

  x.levels <- levels(factor(group))

  # Missing-value count per group, taken before dropping incomplete cases.
  y.NAs <- vapply(x.levels, function(g) sum(is.na(y[group == g])), numeric(1))

  completeObs <- complete.cases(y)
  y <- y[completeObs]
  group <- group[completeObs]

  # One pass per group. Both quartiles come from a single quantile() call
  # (the original called quantile() twice per group and grew each of the
  # eleven result vectors element by element).
  stats <- vapply(x.levels, function(g) {
    yg <- y[group == g]
    qs <- quantile(yg)
    c(length(yg), mean(yg), sd(yg), median(yg), min(yg), max(yg),
      as.numeric(qs[2]), as.numeric(qs[4]), skewness(yg), kurtosis(yg))
  }, numeric(10L))

  # t(stats) has one row per level (rownames = x.levels), matching the
  # original output's row order and rownames exactly.
  out <- as.data.frame(cbind(t(stats), y.NAs))
  colnames(out) <- c("n", "Mean", "Std.Dev", "Median", "Min", "Max",
                     "25th", "75th", "Skewness", "Kurtosis", "NA")
  return(out)
}
6c213353bf3d5bcae9d28dc5aa5e5d7e8a9cce7a
31d82d53c384486bd569f7f11f37308eba9f7774
/tests/testthat/testCreate.R
0505ecc6c759261646c08617a5615695059de3a5
[]
no_license
mirsinarm/big.data.frame
228fc762990e1d966e9f90074f319530fc3e5291
2b71c2e3efb80ea8d8fe29ea16349c26af734e6b
refs/heads/master
2020-05-30T11:02:46.212306
2014-12-15T15:37:08
2014-12-15T15:37:08
null
0
0
null
null
null
null
UTF-8
R
false
false
6,165
r
testCreate.R
context("Creation and characteristics") test_that("Creating and characteristics 1", { x <- big.data.frame(10, c("double", "character"), names=c("first", "second"), init=list(-1.23, "A"), maxchar=c(NA, 10)) y <- data.frame(first=rep(-1.23, 10), second=rep("A", 10), stringsAsFactors=FALSE) expect_that(x[], equals(y)) expect_that(x[,1:2], equals(y[,1:2])) expect_that(x[,1], equals(y[,1])) expect_that(x$first, equals(y$first)) expect_that(x[,1,drop=FALSE], equals(y[,1,drop=FALSE])) expect_that(x[,2], equals(y[,2])) expect_that(x$second, equals(y$second)) expect_that(x[,2,drop=FALSE], equals(y[,2,drop=FALSE])) expect_that(nrow(x), equals(nrow(y))) expect_that(ncol(x), equals(ncol(y))) expect_that(length(x), equals(length(y))) expect_that(dim(x), equals(dim(y))) }) test_that("Creating and characteristics 2", { z <- big.data.frame(10, c("double", "character"), names=c("first", "second"), init=list(-1.23, "A"), maxchar=c(NA, 10), location="testdir") y <- data.frame(first=rep(-1.23, 10), second=rep("A", 10), stringsAsFactors=FALSE) x <- attach.big.data.frame("testdir") expect_that(x[], equals(y)) expect_that(x[,1:2], equals(y[,1:2])) expect_that(x[,1], equals(y[,1])) expect_that(x$first, equals(y$first)) expect_that(x[,1,drop=FALSE], equals(y[,1,drop=FALSE])) expect_that(x[,2], equals(y[,2])) expect_that(x$second, equals(y$second)) expect_that(x[,2,drop=FALSE], equals(y[,2,drop=FALSE])) expect_that(nrow(x), equals(nrow(y))) expect_that(ncol(x), equals(ncol(y))) expect_that(length(x), equals(length(y))) expect_that(dim(x), equals(dim(y))) expect_that(dim(x), equals(dim(y))) }) test_that("Creating and characteristics 3", { x <- big.data.frame(10, c("double", "character"), names=c("first", "second"), init=list(-1.23, "A"), maxchar=c(NA, 10)) y <- data.frame(first=rep(-1.23, 10), second=rep("A", 10), stringsAsFactors=FALSE) expect_that(x[], equals(y)) expect_that(x[,1:2], equals(y[,1:2])) expect_that(x$first, equals(y$first)) expect_that(x[,1,drop=FALSE], 
equals(y[,1,drop=FALSE])) expect_that(x$second, equals(y$second)) expect_that(x[,2,drop=FALSE], equals(y[,2,drop=FALSE])) expect_that(nrow(x), equals(nrow(y))) expect_that(ncol(x), equals(ncol(y))) expect_that(length(x), equals(length(y))) expect_that(dim(x), equals(dim(y))) }) test_that("Extractions 1", { x <- big.data.frame(10, c("double", "character"), names=c("first", "second"), init=list(-1.23, "A"), maxchar=c(NA, 10)) y <- data.frame(first=rep(-1.23, 10), second=rep("A", 10), stringsAsFactors=FALSE) expect_that(x[], equals(y)) expect_that(x[1,], equals(y[1,])) # this fails ... no idea why ... print, class, typeof all look identical #expect_that(x[1,], equals(y[1,])) # not working; need to fix expect_that(x[1:2,], equals(y[1:2,])) expect_that(x[-c(1:2),], equals(y[-c(1:2),])) # removes the last two rows instead of the first two rows expect_that(x[1,1], equals(y[1,1])) expect_that(x[1:2,1], equals(y[1:2,1])) expect_that(x[-c(1:2),1], equals(y[-c(1:2),1])) expect_that(x[1,2], equals(y[1,2])) expect_that(x[1:2,2], equals(y[1:2,2])) expect_that(x[-c(1:2),2], equals(y[-c(1:2),2])) }) ########################### # big.read.table tests # Make the data frame wh <- data.frame(rep(c(1L:26L), 20), rep(letters, 20), rep(rnorm(26), 20)) names(wh) <- c("int", "factor", "num") write.csv(wh, "withheader.csv", row.names=F) write.table(wh, "withoutheader.csv", sep=",", row.names=FALSE, col.names=FALSE) # Reading CSV test_that("CSV with header, reading in in 1 chunk", { x <- big.read.table(file="withheader.csv", header=TRUE) y <- read.csv("withheader.csv", header=TRUE, as.is=TRUE) expect_that(x[], equals(y)) # check everything expect_that(x[,1:2], equals(y[,1:2])) # check columns expect_that(nrow(x), equals(nrow(y))) # check nrow: count number of rows expect_that(ncol(x), equals(ncol(y))) # check ncol: count number of cols expect_that(length(x), equals(length(y))) # check length expect_that(dim(x), equals(dim(y))) # check dim }) test_that("CSV without header, reading in in 1 
chunk", { x <- big.read.table(file="withoutheader.csv", header=FALSE) y <- read.csv("withoutheader.csv", header=FALSE, as.is=TRUE) expect_that(x[], equals(y)) # check everything expect_that(x[,1:2], equals(y[,1:2])) # check columns expect_that(nrow(x), equals(nrow(y))) # check nrow: count number of rows expect_that(ncol(x), equals(ncol(y))) # check ncol: count number of cols expect_that(length(x), equals(length(y))) # check length expect_that(dim(x), equals(dim(y))) # check dim }) test_that("CSV with header, reading in in multiple chunks", { # chunks are in size of 100 rows x <- big.read.table(file="withheader.csv", header=TRUE, nrows=100) y <- read.csv("withheader.csv", header=TRUE, as.is=TRUE) expect_that(x[], equals(y)) # check everything expect_that(x[,1:2], equals(y[,1:2])) # check columns expect_that(nrow(x), equals(nrow(y))) # check nrow: count number of rows expect_that(ncol(x), equals(ncol(y))) # check ncol: count number of cols expect_that(length(x), equals(length(y))) # check length expect_that(dim(x), equals(dim(y))) # check dim }) test_that("CSV without header, reading in in multiple chunks", { # chunks are in size of 100 rows x <- big.read.table(file="withoutheader.csv", header=FALSE, nrows=100) y <- read.csv("withoutheader.csv", header=FALSE, as.is=TRUE) expect_that(x[], equals(y)) # check everything expect_that(x[,1:2], equals(y[,1:2])) # check columns expect_that(nrow(x), equals(nrow(y))) # check nrow: count number of rows expect_that(ncol(x), equals(ncol(y))) # check ncol: count number of cols expect_that(length(x), equals(length(y))) # check length expect_that(dim(x), equals(dim(y))) # check dim })
ba6d1f6d6ccc20054598ab24406d9d04d92fba8c
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.security.identity/man/iam_delete_ssh_public_key.Rd
3e5039fd98a2dbc2b5fddf2da6ddcf7e80a02c62
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
1,053
rd
iam_delete_ssh_public_key.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iam_operations.R \name{iam_delete_ssh_public_key} \alias{iam_delete_ssh_public_key} \title{Deletes the specified SSH public key} \usage{ iam_delete_ssh_public_key(UserName, SSHPublicKeyId) } \arguments{ \item{UserName}{[required] The name of the IAM user associated with the SSH public key. This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@-} \item{SSHPublicKeyId}{[required] The unique identifier for the SSH public key. This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters that can consist of any upper or lowercased letter or digit.} } \description{ Deletes the specified SSH public key. See \url{https://www.paws-r-sdk.com/docs/iam_delete_ssh_public_key/} for full documentation. } \keyword{internal}
e2f3a6577be120f0a2f8c090a6baa63e278d11b8
96ec24aba788f6cb1ce17c619809715515db7788
/network/run_genLevel_prep.R
5c5d02511f334461d0f473690a4e0737c39cac0d
[]
no_license
hawaiiDimensions/geb_paper
96fdca932be53a6daeac6a34c9cf0cb5ac31222a
031ba3d23e7db7bf0f874d1b7cd852005f34b0a8
refs/heads/master
2021-01-10T11:36:05.975874
2015-12-04T17:11:49
2015-12-04T17:11:49
47,417,181
0
0
null
null
null
null
UTF-8
R
false
false
806
r
run_genLevel_prep.R
# Preparation script: build genus-level bipartite interaction matrices for the
# selected sites before running network analyses.
library(bipartite)
library(vegan)
library(igraph)
source('netFuns.R')

## get site info and specify which sites to analyze
site.info <- read.csv('data/siteInfo.csv', header=TRUE, as.is=TRUE)
good.sites <- c('Kil', 'Koh', 'Hal', 'KoK')

## load data
# One interaction matrix per site, read from data/<site>_unconmat.csv with the
# first CSV column used as row names.
bimat <- lapply(good.sites, function(i) {
    as.matrix(read.csv(sprintf('data/%s_unconmat.csv', i), row.names=1,
                       header=TRUE, as.is=TRUE))
})

## aggregate by genus
# Column names are truncated at the first '.' (gsub('\\..*', '') -- presumably
# "Genus.species" style names; TODO confirm) and columns sharing the same
# prefix are summed, then the matrix is transposed back to its original
# orientation.
bimat <- lapply(bimat, function(x) {
    x <- aggregate(as.data.frame(t(x)), list(gsub('\\..*', '', colnames(x))), sum)
    rownames(x) <- x[, 1]
    x <- x[, -1]
    as.matrix(t(x))
})

## run parameters
# Number of simulations used by downstream null-model / bootstrap routines.
nsim <- 999
445e8f5b5049707450735ebd8f19b9a9c7a8a8be
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/tea/examples/avhill.Rd.R
325d44ee6e743ad871d7726ed8d2c0ede7c944b0
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
135
r
avhill.Rd.R
# Auto-extracted example code from the 'avhill' help page of the 'tea' package
# (averaged Hill plot of the 'danish' fire-insurance claims data set).
library(tea)

### Name: avhill
### Title: Averaged Hill Plot
### Aliases: avhill

### ** Examples

data(danish)
avhill(danish)
e8506b5a67544707095a1d5998c2ae1d798cc37b
c452a79f3cc26d1d154292706c12d7d174767511
/1.Pre-process_microarry_data/1.ArrayExpress_multiple_tissue_multiple_chip.R
f50025cea5076872606a4ffc2f40645f3e7f3f12
[]
no_license
hamelpatel/brain_expression
af1fea5e3776082221a3cd20779442615342fd3a
cb25bb42e432c44add9b89cf2b5247b1f776c0e3
refs/heads/master
2020-04-06T06:50:20.525551
2017-07-06T08:16:28
2017-07-06T08:16:28
58,650,399
0
0
null
null
null
null
UTF-8
R
false
false
4,132
r
1.ArrayExpress_multiple_tissue_multiple_chip.R
##########################################################################################################################################
# DOWNLOAD E-GEOD-3790
##########################################################################################################################################
# QC PIPELINE VERSION: 1.0
# DATE: 30/01/2017
# ARRAY EXPRESS NUMBER: E-GEOD-3790
# DISORDER: Huntingdon's Disease
# MICROARRAY PLATFORM: Affymetrix
# EXPRESSION CHIP: HG-U133A + HG-U133B
# NUMBER OF SAMPLES:
# TISSUE: Cerebellum, Frontal Cortex, Caudate nucleus
#
# NOTES -
# ONLY READING RAW DATA AND CREATING R OBJECTS: downloads the raw CEL data
# from ArrayExpress, splits it by chip, MAS5 background-corrects each chip,
# and saves one .Rdata object per chip.
#

##### SET PARAMETERS #####

rm(list=ls())
# NOTE(review): the next line does NOT call options(); it parenthesised-assigns
# FALSE to variables 'stringAsFactors' and 'options'. The intent was presumably
# options(stringsAsFactors=FALSE) -- typo in both the call syntax and the
# option name. Left untouched here; confirm before relying on it.
options=(stringAsFactors=FALSE)

##### SET DIRECTORIES ####

work_dir="/media/hamel/Workspace/Dropbox/Projects/Brain_expression/1.Data/1.Re-process_with_new_pipeline/NON-AD-NEUROLOGICAL/Huntingdons_Disease/E-GEOD-3790"
setwd(work_dir)

# create directory for raw data
dir.create(paste(work_dir,"Raw_Data", sep="/"))
raw_dir=paste(work_dir,"Raw_Data", sep="/")

##### LOAD LIBRARIES ####

library(ArrayExpress)
library(affy)

##### DOWNLOAD RAW DATA #####

# Download the full MAGE-TAB submission into the raw-data directory.
# The 'raw' and 'processed' variants were tried and left commented out.
setwd(raw_dir)
#data_raw=getAE("E-GEOD-3790", type = "raw")
#data_raw=getAE("E-GEOD-3790", type = "processed")
data_raw=getAE("E-GEOD-3790", type = "full")

##### CREATE R EXPRESSION OBJECT FROM RAW DATA #####

# METHOD 1 - convert MAGE-TAB files into an expression set - USING RAW DATA
expression_data = ae2bioc(mageFiles = data_raw)
expression_data

# # METHOD 2 - convert MAGE-TAB files into an expression set - USING RAW DATA
#
# expression_data<-ReadAffy()
#
# # METHOD 3 - convert MAGE-TAB files into an expression set - USING PROCESSED DATA
#
# cnames=getcolproc(data_raw)
# cnames
# expression_data=procset(data_raw, cnames[2])
# expression_data

##### SPLIT DATA INTO CHIPS ######

# Quick sanity tables of disease state and tissue for each chip
# (element 1 = HG-U133A, element 2 = HG-U133B; TODO confirm ordering).
table((pData(expression_data[[1]]))$Factor.Value..DiseaseState.)
table((pData(expression_data[[1]]))$Characteristics..OrganismPart.)
table((pData(expression_data[[2]]))$Factor.Value..DiseaseState.)
table((pData(expression_data[[2]]))$Characteristics..OrganismPart.)

#create objects
Affy_U133A<-expression_data[[1]]
Affy_U133B<-expression_data[[2]]

##### BACKGROUND CORRECT DATA #####

# MAS5 background correction / normalisation per chip.
Affy_U133A_bc<-mas5(Affy_U133A)
Affy_U133B_bc<-mas5(Affy_U133B)

##### CREATE SUB DIRECTORIES FOR EACH CHIP AN TISSUE AND SAVE #####

# create directory for each chip and write expression table
setwd(work_dir)
dir.create(paste(work_dir,"Affy_U133A", sep="/"))
Affy_U133A_dir=paste(work_dir,"Affy_U133A", sep="/")
dir.create(paste(work_dir,"Affy_U133B", sep="/"))
Affy_U133B_dir=paste(work_dir,"Affy_U133B", sep="/")

setwd(Affy_U133A_dir)
save(Affy_U133A_bc, file="E-GEOD-3790_Affy_U133A_bc.Rdata")
setwd(Affy_U133B_dir)
save(Affy_U133B_bc, file="E-GEOD-3790_Affy_U133B_bc.Rdata")

# create directory for cerebellum + frontal lobe in each chip
setwd(Affy_U133A_dir)
dir.create(paste(Affy_U133A_dir,"Cerebellum", sep="/"))
dir.create(paste(Affy_U133A_dir,"Frontal_Lobe", sep="/"))
setwd(Affy_U133B_dir)
dir.create(paste(Affy_U133B_dir,"Cerebellum", sep="/"))
dir.create(paste(Affy_U133B_dir,"Frontal_Lobe", sep="/"))
d2062a890943f00bf5d3a9935180788cf7aa7356
35fac53e593ad39a3c092f84fa0d88fa11f46fa7
/man/add_quotes.Rd
acd91cee09fc1d078ca704110686d4a3dd65aa3b
[]
no_license
gastonstat/cranium
a916a61595e7432385a410e5a90fe21e6a41cae4
1ba7a60fd7b3ef62c2d2f38d66151494f382f837
refs/heads/master
2020-05-24T13:24:13.206253
2015-08-26T00:03:01
2015-08-26T00:03:01
23,559,996
0
0
null
null
null
null
UTF-8
R
false
false
429
rd
add_quotes.Rd
\name{add_quotes} \alias{add_quotes} \title{Add Quotes} \usage{ add_quotes(str) } \arguments{ \item{str}{character string} } \value{ quoted character string } \description{ Adds double quotation marks around a character string } \details{ This function is used to create the JSON file for package dependencies and reverse dependencies } \examples{ \dontrun{ s = 'quotations' add_quotes(s) } } \keyword{internal}
6b0ceb3fda39dd0dad84345f76312833267014e4
91903e21fa8472ae02cc94485ec3c0a52d725b68
/Course Project 1/plot4.R
56b9ec272b4b9b6c04be26959b4926f24c2a734e
[]
no_license
lycansoul/ExData_Plotting1
9ca9d70d73c1928b081c8d3ef220477aefa35a15
1326f2b73bba3daf31d275579a896f3c4aab0583
refs/heads/master
2021-01-18T10:26:31.821832
2014-07-11T14:00:15
2014-07-11T14:00:15
null
0
0
null
null
null
null
UTF-8
R
false
false
1,066
r
plot4.R
# plot4.R — Coursera Exploratory Data Analysis, Course Project 1.
# Reads household power consumption data, keeps the two target days
# (1-2 Feb 2007), and writes a 2x2 panel of time-series plots to plot4.png.

# Reading data: ';'-separated file, '?' marks missing values.
data <- read.table("./eda/household_power_consumption.txt",header = TRUE,sep = ";",na.strings= "?")
# NOTE(review): `datatest` is assigned but never used afterwards in this script.
datatest <- data
# Columns 1-2 (Date, Time) arrive as factors/strings; force character for pasting.
data[,1] <- as.character(data[,1])
data[,2] <- as.character(data[,2])
# Cleaning data: keep only the two days of interest and drop incomplete rows.
targetdate <- c("1/2/2007","2/2/2007")
ndata <- subset(data, data[,1] %in% targetdate)
ndata <- na.omit(ndata)
# Column 10: combined "date time" string used to build POSIXlt timestamps.
ndata[,10] <- paste(ndata[,1],ndata[,2])
names(ndata)[10] <- "Paste"
DT <- strptime(ndata[,10], "%d/%m/%Y %H:%M:%S")
# Presumably set so axis weekday labels render in English — note this runs
# after strptime (harmless here since the format is purely numeric).
Sys.setlocale("LC_TIME","C")
# plot4: 480x480 PNG with a 2x2 grid of plots.
png(filename = "plot4.png",width = 480, height = 480)
par(mfrow = c(2,2))
# Top-left: Global Active Power (column 3) over time.
plot(DT, ndata[,3], xlab="",ylab= "Global Active Power (kilowatts)",type= "l")
# Top-right: Voltage (column 5) over time.
plot(DT, ndata[,5], xlab="datetime",ylab= "Voltage",type= "l")
# Bottom-left: the three sub-metering series (columns 7-9) overlaid.
plot(DT, ndata[,7], xlab="",ylab= "Energy sub metering",type= "l")
lines(DT, ndata[,8], col = "red")
lines(DT,ndata[,9],col = "blue")
legend("topright", col = c("black", "red", "blue"), lty= "solid", bty="n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3") )
# Bottom-right: Global Reactive Power (column 4) over time.
plot(DT, ndata[,4], xlab="datetime",ylab= "Global_reactive_power",type= "l")
dev.off()
aa1d537d644c0b7a8b79a474521fff2558f1623c
339eaa009c2f9f3870c8ef2bdb1bf708e0cfcf09
/dlmSmc/FuncPoisson.R
c14a783ac28a882e308a3ed6f76461aaa2aee9a9
[]
no_license
miltonluaces/data_science_in_R
ac9ea2b78b817ec01c0383afd8ef9ca4e4c84316
872c3ce1ab2497d57febaeb24590b561260f3067
refs/heads/master
2021-06-30T20:30:16.997520
2021-02-27T16:03:41
2021-02-27T16:03:41
226,901,456
0
0
null
null
null
null
UTF-8
R
false
false
4,409
r
FuncPoisson.R
# FUNCTIONS DLM-SMC WITH POISSON DISTRIBUTION
# ==========================================================================================================
library(dlm)
library(mvtnorm)

# Poisson DLM-SMC. Estimate Poisson parameter lambda for each time t of a time series
# using a Dynamic Linear Model with a Sequential Monte Carlo (particle filter) algorithm.
#-----------------------------------------------------------------------------------------------------------------
# Params  : the time series Data, F & G DLM matrices, m0 & C0 initial distribution, V & W error terms
#           and nW (number of particles / importance weights)
# Returns : an array with an estimate of lambda for each time t
# NOTE(review): the parameter name F shadows base R's F (alias of FALSE) inside
# this function; it is always used as a vector/matrix here, so t(F) etc. work.
DlmSmcPoisson = function(Data, F, G, m0, C0, V, W, nW) {

  # Declarations: per-time, per-particle storage for the filter.
  nD = length(Data)
  nF = length(F)
  A = array(0, dim=c(nF,nW,nD+1))
  InvArav = matrix(0, nD+1,nW)
  m = array(0, dim=c(nF,nW,nD+1))
  C = array(0, dim=c(nF,nF,nW,nD+1))
  w = matrix(0,nD+1,nW)
  theta = array(0, dim=c(nF,nW,nD+1))
  thetaT = array(0, dim=c(nF,nW,nD))
  thetaHat = matrix(0,nF,nD)
  LambdaHat = numeric(0);

  # Initial values: uniform weights, prior mean/covariance, and an initial
  # draw of each particle's state from N(m0, C0).
  w[1,] = 1/nW
  m[,,1] = m0
  C[,,,1] = C0
  y = c(0, Data)
  for(i in 1:nW) {
    v = rmvnorm(1, m[,i,1], C[,,i,1])
    theta[,i,1] = t(v)
  }

  # DLM steps: one filtering step per observation.
  for( t in 2:(nD+1)) {
    for(i in 1:nW) {
      # Prior states at time t
      R = G %*% C[,,i,t-1] %*% t(G) + W
      a = G %*% m[,i,t-1]
      A[,i,t] = exp(t(F) %*% a)[1,1] * F
      InvArav[t,i] = solve(t(A[,i,t]) %*% R %*% A[,i,t] + V)
      # importance density mean: m_t = E(t_t|t_t-1,y_t) = G * m_t-1 * R_t * A_t * (A'_t * R_t * A_t + V)^-1 * (y_t - mG)
      m[,i,t] = a + R %*% A[,i,t] %*% InvArav[t,i] %*% (y[t] - exp(t(F) %*% a))
      # importance density cov: C_t = Var(t_t|t_t-1,y_t) = R_t - R_t * A_t * (A'_t * R_t * A_t + V)^-1 * A'_t * R_t
      C[,,i,t] = R - (R %*% A[,i,t] %*% InvArav[t,i]) %*% t(A[,i,t]) %*% R
      # draw theta_t = g(theta_t | theta_t-1, y_t)
      theta[,i,t] = rmvnorm(1, m[,i,t], C[,,i,t])
      # importance density function: multivariate normal (Kalman-filter style proposal)
      impDens = dmvnorm(theta[,i,t], m[,i,t], C[,,i,t])
      # Posterior states at time t
      # state transition density: t_t = G(t_t-1) + w_t
      probTt = dmvnorm(theta[,i,t], G %*% m[,i,t-1], R)
      # observation density: y_t|t_t = p(y_t, Poisson(F' t_t))
      probYt = dpois(round(y[t]), exp(t(F) %*% theta[,i,t]))
      # calculate importance weights
      w[t,i] = w[t-1,i] * probYt * probTt / impDens
    }
    # normalize importance weights (sum to 1)
    w[t,] = w[t,]/(sum(w[t,]))
    # resampling step to avoid weight degeneracy: if the effective sample
    # size drops below nW/3, resample particles and reset weights.
    nEff = 1/crossprod(w[t,])
    if(nEff < nW/3) {
      index = sample(nW, nW, replace=TRUE, prob = w[t,])
      theta[,,t] = theta[,index,t]
      w[t,] = 1/nW
    }
    # Weighted average of particles -> point estimate of the state.
    for(i in 1:nW) {
      thetaT[,i,t-1] = w[t,i] * theta[,i,t]
    }
    for(f in 1:nF) {
      thetaHat[f, t-1] = sum(thetaT[f,,t-1])
    }
    # integral : int[ p(y_t| t_t) * p(t_t|D_t-1)] — lambda via the log link.
    lambdaHat = exp(t(F) %*% thetaHat[,t-1]);
    LambdaHat = c(LambdaHat, lambdaHat);
  }
  LambdaHat
}

# One-step forecast samples. Generates Poisson forecast samples for each time t
# given some lambda estimations.
#------------------------------------------------------------------------------------------------------------
# Params  : number of samples nSamples and array of estimations of Lambda (from DlmSmcPoisson)
# Returns : a matrix with a column of nSamples draws for each time t of the time series
PoissonFcstSamples = function(nSamples, LambdaHat) {
  nD = length(LambdaHat);
  yHat = matrix(nrow=nSamples,ncol=nD);
  for(t in 2:(nD+1)) {
    yh = rpois(nSamples, LambdaHat[t-1]);
    yHat[,t-1] = yh;
  }
  yHat
}

# One-step forecast mean for each time t.
#-----------------------------------------------------------------------------------------------------------
# Params  : matrix of forecast samples for each time t (from PoissonFcstSamples)
# Returns : an array with the one-step forecast mean for each point of the history
# NOTE(review): grows `fc` with c() inside the loop; colMeans() would be
# equivalent and cheaper — left unchanged here.
FcstMean = function(fcstSamples) {
  fc = NULL
  for(i in 1:length(fcstSamples[1,])) {
    m = mean(fcstSamples[,i])
    fc = c(fc, m)
  }
  fc
}
886d2923db2c4c422a9dffff54242c28c1cc5fd4
6dabb731d76e567e9a89ec8545006e4d89b54355
/tests/testthat/test-EdgeConditions.R
b51feee0be520a0b63f47783849110aa15d4ff37
[ "MIT" ]
permissive
diego-urgell/BinSeg
d9b93e25ac87782d49bd390a6e26c760bd95f884
a14827f76072f38e75d2e741d4f478ddf9e4a163
refs/heads/main
2023-07-07T16:52:28.760409
2021-08-22T05:23:14
2021-08-22T05:23:14
375,764,096
5
1
MIT
2021-08-25T05:45:40
2021-06-10T16:33:12
R
UTF-8
R
false
false
1,052
r
test-EdgeConditions.R
# Edge-condition tests for the BinSeg package: minimal inputs (one or two
# data points) and inputs where a model's constraints forbid some splits.
# Created by: diego.urgell
# Created on: 02/08/21

library(BinSeg)

test_that("Change in mean + Single data point -> Zero cost and trivial mean", {
  # One observation forms a single segment: zero cost, mean equal to the point.
  ans <- BinSeg::BinSegModel(100, "BS", "mean_norm", 1)
  expect_equal(logLik(ans), 0)
  expect_equal(ans@models_summary[["before_mean"]], 100)
})

test_that("Change in mean and variance + Two data points -> Trivial params", {
  # Two observations in one segment: mean and variance computed directly.
  ans <- BinSeg::BinSegModel(c(100, 50), "BS", "meanvar_norm", 1, 2)
  expect_equal(ans@models_summary[["before_mean"]], 75)
  expect_equal(ans@models_summary[["before_var"]], 625)
})

# NOTE(review): "variace" in the two descriptions below is a typo for
# "variance"; left unchanged because test descriptions are runtime strings.
test_that(desc="Binary Segmentation + Change in mean and variace: Test 4 - No possible segmentation", {
  # No admissible split: the only changepoint reported is the series end.
  data <- c(1.1, 1, 2, 2, 2)
  ans <- BinSeg::BinSegModel(data, "BS", "meanvar_norm", 1, 2)
  expect_equal(sort(cpts(ans)), 5)
})

test_that(desc="Binary Segmentation + Change in variace: Test 2 - Variance constraint", {
  # Variance-only model with a constraint: expect no split before the end.
  data <- c(1.1, 1, 1, -5)
  ans <- BinSeg::BinSegModel(data, "BS", "var_norm", 1, 2)
  expect_equal(sort(cpts(ans)), 4)
})
762e4c6b1debe9946a04f5f10d44e1f93c80daa1
e77609cd32fe8c55002636ba5285edeed6708b20
/006-sensitivity-analysis-one-parameter/R/006-packages.R
30a70024e8daaed4b456249aca63927f433b687b
[ "CC-BY-4.0" ]
permissive
sahirbhatnagar/raqc
6acac11e9a0bc7fd1e270cd3f35ceb646e43c6f5
fe313b2e05fd9e26fbc2e085bc0b93dfbbd32611
refs/heads/master
2020-05-21T11:42:31.324618
2019-05-27T03:22:35
2019-05-27T03:22:35
186,035,633
0
0
null
null
null
null
UTF-8
R
false
false
115
r
006-packages.R
## ---- required-packages ----
# The line above is a knitr chunk label consumed via knitr::read_chunk();
# do not rename it.
# Bootstrap the 'pacman' package manager if it is not already installed.
if (!require("pacman")) install.packages("pacman")
# p_load() installs (if missing) and attaches each listed package.
pacman::p_load(knitr, here)
f9248216c671db8b0d84d4fcd8e65a1c9d364a85
8bb13ff1ae4e4ed5afa98f7590949fb3bea7130a
/Analisis_Todo.R
84becec55d9e4aad7633d9f40bcf89c208d7f51a
[]
no_license
Fedeayl/Libro-Elecciones
97110cc5b41820f0afaa2eedf29910d9bc76ed48
d3829c6b7f70ef8b391f7dc5514b25a163255e03
refs/heads/main
2023-01-30T21:11:35.061504
2020-12-16T23:35:00
2020-12-16T23:35:00
319,621,846
0
0
null
null
null
null
UTF-8
R
false
false
8,798
r
Analisis_Todo.R
# Analisis_Todo.R — builds per-department results tables for the Uruguayan
# 2019 electoral cycle (primaries, general election, referendum, runoff) from
# Excel sources and writes every table to one multi-sheet Resultados.xlsx.
# NOTE(review): this script calls bare bind_rows() once below, so dplyr must
# be attached (library(dplyr)) before sourcing — verify in the session setup.

###################  ELECCIONES INTERNAS ###################
# (Party primaries.)

Base <- rio::import(here::here("Internas hojas_interna2019.xlsx"))
head(Base)

# Keep only ODN (national convention) ballot rows, selected columns.
Datos <- Base[Base$TipoHoja == "ODN", c(1,2,4,6,7)]
Datos <- unique(Datos)

# Ballot counts per sub-lema (ODN) for each party.
sum(Datos$PartidoPolitico == "Frente Amplio")
sum(Datos$PartidoPolitico == "Partido Nacional")
sum(Datos$PartidoPolitico == "Partido Colorado")
sum(Datos$PartidoPolitico == "Cabildo Abierto")

Base2 <- rio::import(here::here("Internas totales-generales-por-circuito.xlsx"))
Datos2 <- Base2

# General base of eligible voters, ballots cast, annulled and blank votes
# by department.
Tabla1 <- cbind(Habilitados = xtabs(Total_Habilitados ~ Departamento, Datos2),
                Emitidos= xtabs(Total_Votos_Emitidos ~ Departamento, Datos2),
                Anulados = xtabs(Total_Anulados ~ Departamento, Datos2),
                Blanco = xtabs(Total_En_Blanco ~ Departamento, Datos2))

Base3 <- rio::import(here::here("Interna desglose-de-votos.xlsx"))
Datos3 <- Base3[,c(1,2,5,6,8)]
Datos3 <- Datos3[Datos3$TIPO_REGISTRO == "HOJA_ODN",]
# Votes per lema (party) per department.
Tabla2 <- xtabs(CANTIDAD_VOTOS ~ DEPARTAMENTO + LEMA, Datos3)

# Melt both tables to long form, align names, stack and cast back to wide:
# one row per department, one column per lema/category.
Tabla1 <- reshape::melt(Tabla1)
names(Tabla1) <- c("DEPARTAMENTO", "LEMA", "value")
Tabla2 <- reshape::melt(Tabla2)
Resultados <- bind_rows(Tabla1, Tabla2)
ResultadosINT <- reshape::cast(Resultados, formula = DEPARTAMENTO~...)

# Frente Amplio primary results by department (broken down by candidate).
DatosFA <- Datos3[Datos3$LEMA == "Frente Amplio",]
TablaFA <- xtabs(CANTIDAD_VOTOS ~ DEPARTAMENTO + DESCRIPCIÓN_1, DatosFA)

# Partido Nacional primary results by department (broken down by candidate).
DatosPN <- Datos3[Datos3$LEMA == "Nacional",]
TablaPN <- xtabs(CANTIDAD_VOTOS ~ DEPARTAMENTO + DESCRIPCIÓN_1, DatosPN)

# Partido Colorado primary results by department (broken down by candidate).
DatosPC <- Datos3[Datos3$LEMA == "Partido Colorado",]
TablaPC <- xtabs(CANTIDAD_VOTOS ~ DEPARTAMENTO + DESCRIPCIÓN_1, DatosPC)

#################### ELECCIONES NACIONALES #####################
# (October general election.)

# Load data: totals (blank/annulled/etc.) and per-lema breakdown.
NacionalesHAB <- rio::import(here::here("totales-generales.xlsx"))
Nacionales <- rio::import(here::here("desglose-de-votos.xlsx"))

# Build the tables. Cross table: department vs lema.
Tabla1 <- xtabs(CantidadVotos ~ Departamento + Lema, data=Nacionales)
# Cross table: blank / annulled / cast / eligible per department.
Tabla1b <- cbind(EnBlanco = xtabs(TotalEnBlanco ~ Departamento, data = NacionalesHAB),
                 Anulados = xtabs(TotalAnulados ~ Departamento, data = NacionalesHAB),
                 Emitidos = xtabs(TotalVotosEmitidos ~ Departamento, data = NacionalesHAB),
                 Habilitados = xtabs(TotalHabilitados ~ Departamento, data = NacionalesHAB)
)

Tabla1 <- as.data.frame(reshape::melt(Tabla1))     # long form, for combining
Tabla1b <- as.data.frame(reshape::melt(Tabla1b))
names(Tabla1b) <- c("Departamento", "Lema", "value")  # same names as Tabla1
Tabla <- rbind(Tabla1, Tabla1b)                    # combine both tables
Tabla <- reshape::cast(Tabla, ...
~ Lema)                                            # back to the desired wide layout
# Overall results, with a country-total row appended.
TablaNAC1 <- dplyr::bind_rows(Tabla, colSums(Tabla), .id=NULL)

### Goal: a table of votes per sub-lema.
# Build a base mapping ballot number -> sub-lema.
Sublemas <- rio::import(here::here("integracion-de-hojas.xlsx"))
Sublemas <- cbind.data.frame(Lema = Sublemas$PartidoPolitico, Hoja=Sublemas$Numero, Sublema=Sublemas$Sublema)
Sublemas <- unique(Sublemas)
Sublemas <- Sublemas[Sublemas$Sublema != "No aplica", ]

## FRENTE AMPLIO - build the votes-per-sub-lema table for the FA.
SubFA <- Sublemas[Sublemas$Lema == "Frente Amplio",]
# Cross table: ballot (Descripcion1) vs lema.
Tabla2 <- as.data.frame(xtabs(CantidadVotos ~ Descripcion1 + Lema, data=Nacionales))
SsFA <- Tabla2[Tabla2$Lema == "Partido Frente Amplio",]  # keep only FA entries
SsFA <- SsFA[SsFA$Freq != 0,]                            # drop zero-count rows
names(SsFA) <- c("Hoja", "Lema","Freq")                  # names needed for the merge below
datos <- merge(SsFA, SubFA, by = "Hoja")
a <- dplyr::group_by(datos, Sublema, Lema.x, Lema.y)
Frente <- unique(dplyr::summarise(a, Sublema, Votos=sum(Freq)))
Frente <- Frente[c(1,4)]
Frente <- as.data.frame(dplyr::arrange(Frente, desc(Frente$Votos)))  # sort descending
Frente2<- cbind(as.data.frame(rep("Frente Amplio", length(Frente$Sublema))), Frente)
names(Frente2) <- c("Lema", "Sublema", "Votos")
Frente2

## PARTIDO NACIONAL — same pipeline as above for the PN.
SubPN <- Sublemas[Sublemas$Lema == "Partido Nacional",]
Tabla2 <- as.data.frame(xtabs(CantidadVotos ~ Descripcion1 + Lema, data=Nacionales))
SsPN <- Tabla2[Tabla2$Lema == "Partido Nacional",]
SsPN <- SsPN[SsPN$Freq != 0,]
names(SsPN) <- c("Hoja", "Lema","Freq")
datos <- merge(SsPN, SubPN, by = "Hoja")
a <- dplyr::group_by(datos, Sublema, Lema.x, Lema.y)
PNacional <- unique(dplyr::summarise(a, Sublema, Votos=sum(Freq)))
PNacional <- PNacional[c(1,4)]
PNacional <- as.data.frame(dplyr::arrange(PNacional, desc(PNacional$Votos)))  # sort descending
PNacional2<- cbind(as.data.frame(rep("P.Nacional", length(PNacional$Sublema))), PNacional)
names(PNacional2) <- c("Lema", "Sublema", "Votos")
PNacional2

## PARTIDO COLORADO — same pipeline as above for the PC.
SubPC <- Sublemas[Sublemas$Lema == "Partido Colorado",]
Tabla2 <- as.data.frame(xtabs(CantidadVotos ~ Descripcion1 + Lema, data=Nacionales))
SsPC <- Tabla2[Tabla2$Lema == "Partido Colorado",]
SsPC <- SsPC[SsPC$Freq != 0,]
names(SsPC) <- c("Hoja", "Lema","Freq")
datos <- merge(SsPC, SubPC, by = "Hoja")
a <- dplyr::group_by(datos, Sublema, Lema.x, Lema.y)
PColorado <- unique(dplyr::summarise(a, Sublema, Votos=sum(Freq)))
PColorado <- PColorado[c(1,4)]
PColorado <- as.data.frame(dplyr::arrange(PColorado, desc(PColorado$Votos)))  # sort descending
PColorado2 <- cbind(as.data.frame(rep("P.Colorado", length(PColorado$Sublema))), PColorado)
names(PColorado2) <- c("Lema", "Sublema", "Votos")
PColorado2

# Stack the three per-party sub-lema tables.
VotosSublemas <- dplyr::bind_rows(Frente2, PNacional2, PColorado2)

######################## REFORMA ##############################
# (Constitutional referendum.)
BaseP <- rio::import(here::here("PLEBISCITO.xlsx"))
DatosP <- BaseP[, c(3,7,10,11,12,13,14)]
names(DatosP)
TablaSI <- cbind("Habilitados" = xtabs(HABILITADO~ DEPTO, DatosP),
                 "Emitidos" = xtabs(T_EMITIDOS~ DEPTO, DatosP),
                 "Blanco" = xtabs(EN_BLANCO~ DEPTO, DatosP),
                 "Anulados" = xtabs(ANULADOS~ DEPTO, DatosP),
                 "Solo SI" = xtabs(SOLO_POR_SI~ DEPTO, DatosP),
                 "Papeleta" = xtabs(`Papeleta por SI`~ DEPTO, DatosP))

######################## BALOTAJE ##############################
# (November runoff.)
BaseB <- rio::import(here::here("balotaje-2019.xlsx"))
DatosB <- BaseB
names(DatosB)
# NOTE(review): "Hablitados" below is a typo for "Habilitados"; it is a
# runtime column-name string, so it is left unchanged here.
TablaBalotaje <- cbind("Martínez-Villar" = xtabs(Total_Martinez_Villar ~ Departamento, DatosB),
                       "Lacalle-Argimón" = xtabs(Total_Lacalle_Pou_Argimon ~ Departamento, DatosB),
                       "Hablitados" = xtabs(Total_Habilitados ~ Departamento, DatosB),
                       "Emitidos" = xtabs(Total_Votos_Emitidos ~ Departamento, DatosB),
                       "Blanco" = xtabs(Total_EN_Blanco ~ Departamento, DatosB),
                       "Anulado" = xtabs(Total_Anulados ~ Departamento, DatosB))

# Write every table to one workbook, one sheet per table.
library(xlsx)
write.xlsx(ResultadosINT, file="Resultados.xlsx", sheetName="Internas-General2",row.names=FALSE)
write.xlsx(TablaFA, file="Resultados.xlsx", sheetName="Internas-FA", append=TRUE, row.names=FALSE)
write.xlsx(TablaPN, file="Resultados.xlsx", sheetName="Internas-PN", append=TRUE, row.names=FALSE)
write.xlsx(TablaPC, file="Resultados.xlsx", sheetName="Internas-PC", append=TRUE, row.names=FALSE)
write.xlsx(TablaNAC1, file="Resultados.xlsx", sheetName="Nacional-General", append=TRUE, row.names=FALSE)
write.xlsx(Frente2, file="Resultados.xlsx", sheetName="Nacional-FA", append=TRUE, row.names=FALSE)
write.xlsx(PNacional2, file="Resultados.xlsx", sheetName="Nacional-PN", append=TRUE, row.names=FALSE)
write.xlsx(PColorado2, file="Resultados.xlsx", sheetName="Nacional-PC", append=TRUE, row.names=FALSE)
write.xlsx(VotosSublemas, file="Resultados.xlsx", sheetName="Nacional-SUBLEMAS", append=TRUE, row.names=FALSE)
write.xlsx(TablaSI, file="Resultados.xlsx", sheetName="Reforma", append=TRUE, row.names=FALSE)
write.xlsx(TablaBalotaje, file="Resultados.xlsx", sheetName="Balotaje", append=TRUE, row.names=FALSE)
1df5a9d252449b9ce4075cc16d885c699d4ee2d1
a1e3f742d80a225e9a2a35e8e88b3054f5408037
/R/big.gomp.R
410cad3178dece34236e2c607d32620ee4b28027
[]
no_license
cran/MXM
7590471ea7ed05944f39bf542c41a07dc831d34f
46a61706172ba81272b80abf25b862c38d580d76
refs/heads/master
2022-09-12T12:14:29.564720
2022-08-25T07:52:40
2022-08-25T07:52:40
19,706,881
0
4
null
null
null
null
UTF-8
R
false
false
17,905
r
big.gomp.R
# big.gomp: generalised Orthogonal Matching Pursuit (gOMP) forward variable
# selection for bigmemory big.matrix datasets (MXM package).
#
# Arguments:
#   target  : the response. If NULL, the response is taken from the first
#             column(s) of `dataset` (first two columns for the censored
#             tests) and the predictors from the remaining columns.
#   dataset : a bigmemory big.matrix holding (response +) predictors.
#   tol     : stopping tolerance on the improvement of the fit criterion.
#             NOTE(review): the default references `x`, which is only created
#             in the body when `target` is NULL; with a non-NULL `target`,
#             `x` is never defined and both the lazily evaluated default and
#             `n <- dim(x)[1]` would fail — verify intended usage.
#   test    : which regression family / conditional-independence test to use
#             (e.g. "testIndFisher", "testIndPois", "censIndCR", ...).
#   method  : for test == "testIndFisher", the criterion: "sse", "loglik",
#             or "ar2" (adjusted R-squared; the default).
#
# Returns a list with:
#   runtime : proc.time() elapsed for the selection loop.
#   phi     : dispersion estimates per step (NULL for families without one).
#   res     : two-column matrix — selected variable index at each step
#             (0 for the null model) and the criterion value.
big.gomp <- function(target = NULL, dataset, tol = qchisq(0.95, 1) + log(dim(x)[1]),
                     test = "testIndFisher", method = "ar2") {
  tic <- proc.time()
  # Silence warnings for the duration of the call; restored on exit.
  oop <- options(warn = -1)
  on.exit( options(oop) )
  # Split dataset into response y and predictor big.matrix x when no
  # explicit target is given.
  if ( is.null(target) ) {
    if (test == "censIndCR" | test == "censIndWR") {
      y <- survival::Surv(dataset[, 1], dataset[, 2])
      x <- bigmemory::sub.big.matrix(dataset, firstCol = 3)
    } else {
      y <- dataset[, 1]
      x <- bigmemory::sub.big.matrix(dataset, firstCol = 2)
    }
  } else  y <- target
  n <- dim(x)[1]
  phi <- NULL

  if ( test == "testIndFisher" ) {
    # Linear regression via .lm.fit; three stopping criteria available.
    tic <- proc.time()
    ######### SSE criterion: relative drop in residual sum of squares.
    if ( method == "sse" ) {
      rho <- Rfast::Var(y) * (n - 1)
      r <- cor(y, x[])
      sel <- which.max(abs(r))
      sela <- sel
      res <- .lm.fit(x[, sel, drop = FALSE], y)$residuals
      rho[2] <- sum(res^2)
      i <- 2
      while ( (rho[i - 1] - rho[i])/(rho[i - 1]) > tol & i < n ) {
        i <- i + 1
        # Pick the predictor most correlated with the current residuals.
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max(abs(r))
        sela <- c(sela, sel)
        res <- .lm.fit(x[, sela], y)$residuals
        rho[i] <- sum(res^2)
        # NOTE(review): `ind` is never defined in this function; this line
        # would error if reached — verify against the in-memory gomp().
        ind[sela] <- 0
      }  ## end while ( (rho[i - 1] - rho[i])/(rho[i - 1]) > tol & i < n )
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Vars", "|sse|")
      ######### loglik criterion: drop in -2 log-likelihood (BIC-style).
    } else if ( method == "loglik" ) {
      rho <- n * log( Rfast::Var(y) )
      r <- cor(y, x[])
      sel <- which.max( abs(r) )
      sela <- sel
      res <- .lm.fit(x[, sel, drop = FALSE], y)$residuals
      # NOTE(review): `i` is used here but only assigned on the next line —
      # verify; at first evaluation `i` is not yet defined in this branch.
      rho[2] <- n * log( sum(res^2)/(n - i) )
      i <- 2
      while ( rho[i - 1] - rho[i] > tol & i < n ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max(abs(r))
        sela <- c(sela, sel)
        res <- .lm.fit(x[, sela], y)$residuals
        rho[i] <- n * log( sum(res^2)/(n - i) )
      }  ## end while ( rho[i - 1] - rho[i] > tol & i < n )
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len] + n * log(2 * pi) + n )
      colnames(res) <- c("Vars", "BIC")
      ######### adjusted R-square criterion: increase in adjusted R^2.
    } else if (method == "ar2") {
      down <- Rfast::Var(y) * (n - 1)
      rho <- 0
      r <- cor(y, x[])
      sel <- which.max( abs(r) )
      sela <- sel
      res <- .lm.fit(x[, sel, drop = FALSE], y)$residuals
      r2 <- 1 - sum(res^2)/down
      rho[2] <- 1 - (1 - r2) * (n - 1)/(n - 2)
      i <- 2
      while ( rho[i] - rho[i - 1] > tol & i < n ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max(abs(r))
        sela <- c(sela, sel)
        res <- .lm.fit(x[, sela], y)$residuals
        r2 <- 1 - sum(res^2)/down
        rho[i] <- 1 - (1 - r2) * (n - 1)/(n - i - 1)
      }  ## end while ( rho[i] - rho[i - 1] > tol & i < n )
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Vars", "adjusted R2")
    }
    runtime <- proc.time() - tic
    result <- list(runtime = runtime, phi = phi, res = res)

  } else {
    # GLM-style families: each branch repeats the same gOMP loop — fit the
    # null model, then greedily add the predictor most correlated with the
    # current residuals while the (dispersion-scaled) deviance drop > tol.
    if (test == "testIndGamma") {
      # Gamma regression with log link; phi tracks the dispersion per step.
      tic <- proc.time()
      mod <- glm( y ~ 1, family = Gamma(log) )
      rho <- mod$deviance
      phi <- summary(mod)[[ 14 ]]
      res <- y - fitted(mod)
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- glm(y ~ x[, sela], family = Gamma(log) )
      res <- mod$residuals
      rho[2] <- mod$deviance
      phi[2] <- summary(mod)[[ 14 ]]
      i <- 2
      while ( (rho[i - 1] - rho[i]) / phi[i] > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- glm(y ~ x[, sela], family = Gamma(log) )
        res <- y - fitted(mod)
        rho[i] <- mod$deviance
        phi[i] <- summary(mod)[[ 14 ]]
      }  ## end while ( (rho[i - 1] - rho[i]) > tol )
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi[1:len], res = res)

    } else if ( test == "testIndNormLog" ) {
      # Gaussian regression with log link (Rfast::normlog.reg).
      tic <- proc.time()
      ini <- Rfast::normlog.mle(y)
      m <- ini$param[1]
      rho <- sum( (y - ini$param[1])^2 )
      # NOTE(review): `mod` is not defined yet at this point — this line
      # would error if this branch runs; verify against MXM sources.
      phi <- mod$devi/(n - 2)
      ela <- cor(y - m, x[])
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- Rfast::normlog.reg(y, x[, sel])
      res <- y - exp( mod$be[1] + x[, sel] * mod$be[2] )
      rho[2] <- mod$deviance
      phi[2] <- mod$devi/(n - 2)
      i <- 2
      while ( (rho[i - 1] - rho[i]) / phi[i] > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- Rfast::normlog.reg(y, x[, sela])
        res <- y - as.vector( exp( mod$be[1] + x[, sela] %*% mod$be[-1] ) )
        rho[i] <- mod$deviance
        phi[i] <- mod$deviance/(n - length(mod$be) )
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi[1:len], res = res)

    } else if ( test == "testIndLogistic" ) {
      # Binary logistic regression (Rfast::glm_logistic).
      tic <- proc.time()
      n <- dim(x)[1]
      p <- sum(y)/n
      rho <- -2 * (n * p * log(p) + (n - n * p) * log(1 - p))
      ela <- as.vector( cor(y - p, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- Rfast::glm_logistic(x[, sel], y)
      est <- exp(-mod$be[1] - x[, sel] * mod$be[2])
      res <- y - 1/(1 + est)
      rho[2] <- mod$devi
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- Rfast::glm_logistic(x[, sela], y)
        est <- as.vector(exp(-mod$be[1] - x[, sela] %*% mod$be[-1]))
        res <- y - 1/(1 + est)
        rho[i] <- mod$devi
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if (test == "testIndQBinom") {
      # Quasi-binomial (proportions) regression; phi is the quasi dispersion.
      tic <- proc.time()
      p <- sum(y)/n
      y0 <- 1 - y
      rho <- 2 * sum(y * log(y/p), na.rm = TRUE) + 2 * sum(y0 * log(y0/(1 - p)), na.rm = TRUE)
      phi <- 1
      ela <- as.vector( cor(y - p, x[]) )
      sel <- which.max(abs(ela))
      sela <- sel
      names(sela) <- NULL
      mod <- Rfast::prop.reg(y, x[, sel], varb = "glm")
      est <- exp(-mod$info[1, 1] - x[, sel] * mod$info[2, 1])
      p <- 1/(1 + est)
      res <- y - p
      rho[2] <- 2 * sum(y * log(y/p), na.rm = TRUE) + 2 * sum(y0 * log(y0/(1 - p)), na.rm = TRUE)
      phi[2] <- mod$phi
      i <- 2
      while ((rho[i - 1] - rho[i])/phi[i] > tol) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max(abs(r))
        sela <- c(sela, sel)
        mod <- Rfast::prop.reg(y, x[, sela], varb = "glm")
        est <- as.vector( exp(-mod$info[1, 1] - x[, sela] %*% mod$info[-1, 1]) )
        p <- 1/(1 + est)
        res <- y - p
        rho[i] <- 2 * sum(y * log(y/p), na.rm = TRUE) + 2 * sum(y0 * log(y0/(1 - p)), na.rm = TRUE)
        phi[i] <- mod$phi
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi[1:len], res = res)

    } else if ( test == "testIndPois" ) {
      # Poisson regression (Rfast::glm_poisson).
      tic <- proc.time()
      m <- sum(y)/n
      rho <- 2 * sum(y * log(y), na.rm = TRUE) - 2 * n * m * log(m)
      ela <- as.vector( cor(y - m, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- Rfast::glm_poisson(x[, sel], y)
      res <- y - exp( mod$be[1] + x[, sel] * mod$be[2] )
      rho[2] <- mod$devi
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- Rfast::glm_poisson(x[, sela], y)
        res <- y - as.vector( exp( mod$be[1] + x[, sela] %*% mod$be[-1] ) )
        rho[i] <- mod$devi
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if ( test == "testIndQPois" ) {
      # Quasi-Poisson regression; deviance drop scaled by dispersion phi.
      tic <- proc.time()
      m <- sum(y)/n
      rho <- 2 * sum(y * log(y), na.rm = TRUE) - 2 * n * m * log(m)
      phi <- 1
      ela <- as.vector( cor(y - m, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- Rfast::qpois.reg(x[, sel], y)
      phi[2] <- mod$phi
      res <- y - exp( mod$be[1, 1] + x[, sel] * mod$be[2, 1] )
      rho[2] <- mod$devi
      i <- 2
      while ( (rho[i - 1] - rho[i]) / phi[i] > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- Rfast::qpois.reg(x[, sela], y)
        res <- y - as.vector( exp( mod$be[1, 1] + x[, sela] %*% mod$be[-1, 1] ) )
        rho[i] <- mod$devi
        phi[i] <- mod$phi
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi[1:len], res = res)

    } else if (test == "testIndNB") {
      # Negative binomial regression; criterion is -2 log-likelihood.
      tic <- proc.time()
      mod <- MASS::glm.nb(y ~ 1)
      rho <- - 2 * as.numeric( logLik(mod) )
      res <- y - fitted(mod)
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- MASS::glm.nb(y ~ x[, sela], control = list(epsilon = 1e-08, maxit = 100, trace = FALSE) )
      res <- mod$residuals
      rho[2] <- - 2 * as.numeric( logLik(mod) )
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- MASS::glm.nb( y ~ x[, sela], control = list(epsilon = 1e-08, maxit = 100, trace = FALSE) )
        res <- y - fitted(mod)
        rho[i] <- - 2 * as.numeric( logLik(mod) )
      }  ## end while ( (rho[i - 1] - rho[i]) > tol )
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if ( test == "testIndMMReg") {
      # MM-type robust regression; failed fits keep the previous criterion
      # value so the loop terminates.
      tic <- proc.time()
      mod <- MASS::rlm(y ~ 1, method = "MM", maxit = 2000)
      rho <- - 2 * as.numeric( logLik(mod) )
      res <- mod$residuals
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- MASS::rlm(y ~ x[, sela], method = "MM", maxit = 2000 )
      res <- mod$residuals
      rho[2] <- - 2 * as.numeric( logLik(mod) )
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- try( MASS::rlm(y ~ x[, sela], method = "MM", maxit = 2000 ), silent = TRUE )
        if ( identical( class(mod), "try-error" ) ) {
          rho[i] <- rho[i - 1]
        } else {
          res <- mod$residuals
          rho[i] <- - 2 * as.numeric( logLik(mod) )
        }
      }  ## end while ( (rho[i - 1] - rho[i]) > tol )
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if ( test == "testIndRQ") {
      # Median (quantile) regression via quantreg::rq.
      tic <- proc.time()
      mod <- quantreg::rq(y ~ 1)
      rho <- - 2 * as.numeric( logLik(mod) )
      res <- mod$residuals
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- quantreg::rq(y ~ x[, sela])
      res <- mod$residuals
      rho[2] <- - 2 * as.numeric( logLik(mod) )
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- try( quantreg::rq(y ~ x[, sela]), silent = TRUE )
        if ( identical( class(mod), "try-error" ) ) {
          rho[i] <- rho[i - 1]
        } else {
          res <- mod$residuals
          rho[i] <- - 2 * as.numeric( logLik(mod) )
        }
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if (test == "testIndOrdinal") {
      # Ordinal (proportional-odds) regression; ord.resid is an MXM helper.
      tic <- proc.time()
      mod <- MASS::polr(y ~ 1)
      rho <- - 2 * as.numeric( logLik(mod) )
      res <- ord.resid(y, mod$fitted.values)
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- MASS::polr(y ~ x[, sela])
      res <- ord.resid(y, mod$fitted.values)
      rho[2] <- - 2 * as.numeric( logLik(mod) )
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- MASS::polr(y ~ x[, sela])
        res <- ord.resid(y, mod$fitted.values)
        rho[i] <- - 2 * as.numeric( logLik(mod) )
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if ( test == "testIndTobit") {
      # Tobit (censored Gaussian) regression via survreg.
      tic <- proc.time()
      mod <- survival::survreg(y ~ 1, dist = "gaussian")
      rho <- - 2 * as.numeric( logLik(mod) )
      res <- resid(mod)
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- survival::survreg(y ~ x[, sela], dist = "gaussian" )
      res <- resid(mod)
      rho[2] <- - 2 * as.numeric( logLik(mod) )
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- try( survival::survreg(y ~ x[, sela], dist = "gaussian" ), silent = TRUE )
        if ( identical( class(mod), "try-error" ) ) {
          rho[i] <- rho[i - 1]
        } else {
          res <- resid(mod)
          rho[i] <- - 2 * as.numeric( logLik(mod) )
          # NOTE(review): this extra masking looks redundant — `r` is
          # recomputed at the top of the loop; verify intent.
          r[sela] <- NA
        }
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if ( test == "censIndCR") {
      # Cox proportional hazards; residuals are martingale residuals.
      tic <- proc.time()
      mod <- survival::coxph(y ~ 1)
      rho <- - 2 * summary( mod)[[1]]
      res <- mod$residuals   ## martingale residuals
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- survival::coxph(y ~ x[, sela] )
      res <- mod$residuals
      rho[2] <- - 2 * as.numeric( logLik(mod) )
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- try( survival::coxph(y ~ x[, sela] ), silent = TRUE )
        if ( identical( class(mod), "try-error" ) ) {
          rho[i] <- rho[i - 1]
        } else {
          res <- mod$residuals   ## martingale residuals
          rho[i] <- - 2 * as.numeric( logLik(mod) )
        }
      }
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)

    } else if ( test == "censIndWR") {
      # Weibull accelerated-failure-time regression via survreg.
      tic <- proc.time()
      mod <- survival::survreg(y ~ 1)
      rho <- - 2 * as.numeric( logLik(mod) )
      res <- resid(mod)
      ela <- as.vector( cor(res, x[]) )
      sel <- which.max( abs(ela) )
      sela <- sel
      names(sela) <- NULL
      mod <- survival::survreg(y ~ x[, sela], control = list(iter.max = 10000) )
      res <- resid(mod)
      rho[2] <- - 2 * as.numeric( logLik(mod) )
      # Guard against a non-converged first fit producing NA.
      if ( is.na(rho[2]) )   rho[2] <- rho[1]
      i <- 2
      while ( (rho[i - 1] - rho[i]) > tol ) {
        i <- i + 1
        r <- cor(res, x[])
        r[sela] <- NA
        sel <- which.max( abs(r) )
        sela <- c(sela, sel)
        mod <- try( survival::survreg(y ~ x[, sela], control = list(iter.max = 10000) ), silent = TRUE )
        if ( identical( class(mod), "try-error" ) ) {
          rho[i] <- rho[i - 1]
        } else {
          res <- resid(mod)
          rho[i] <- - 2 * as.numeric( logLik(mod) )
        }
      }  ## end while ( (rho[i - 1] - rho[i]) > tol )
      len <- length(sela)
      res <- cbind(c(0, sela[-len]), rho[1:len])
      colnames(res) <- c("Selected Vars", "Deviance")
      runtime <- proc.time() - tic
      result <- list(runtime = runtime, phi = phi, res = res)
    }  ## end if (test == "testIndNB")
  }  ## end if else (test == "testIndFisher")

  result
}
286912104797eea66d53533f613cb031b0cb6b64
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/fungible/examples/seBetaCor.Rd.R
f24ce0b7b08478035f68a340d5b8f3675fe8d3b7
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
603
r
seBetaCor.Rd.R
# Example script extracted from the fungible package's seBetaCor.Rd help page.
library(fungible)

### Name: seBetaCor
### Title: Standard Errors and CIs for Standardized Regression Coefficients
###   from Correlations
### Aliases: seBetaCor
### Keywords: Statistics

### ** Examples

# 3x3 predictor intercorrelation matrix.
R <- matrix(c(1.0000, 0.3511, 0.3661,
              0.3511, 1.0000, 0.4359,
              0.3661, 0.4359, 1.0000), 3, 3)

# Predictor-criterion correlations.
rxy <- c(0.5820, 0.6997, 0.7621)

# Sample size on which the correlations are based.
Nobs <- 46

out <- seBetaCor(R = R, rxy = rxy, Nobs = Nobs)

# Expected printed output:
# 95% CIs for Standardized Regression Coefficients:
#
#        lbound estimate ubound
# beta_1  0.107    0.263  0.419
# beta_2  0.231    0.391  0.552
# beta_3  0.337    0.495  0.653
8dcec43341b0d19eea3180c38053023163fc78ac
be86308f9cd95be51b4f83ec06895fa1bd4c49e7
/R/separateBlocks.R
bd245821abe71e0bc4b9d002dbf2218d3b05ce9d
[]
no_license
duncantl/CodeDepends
7f60ad0b4e3513dd4e2e723778f44bce39e47fd6
878fd24850d28037640cb21fbbf3922b51f996df
refs/heads/master
2022-05-16T00:25:38.573235
2022-04-25T22:11:20
2022-04-25T22:11:20
3,998,946
75
16
null
2020-01-14T19:30:08
2012-04-11T22:17:31
R
UTF-8
R
false
false
1,082
r
separateBlocks.R
separateExpressionBlocks = # # This allows us to take a script that is made up of # blocks and unravel them into a list of individual top-level # calls. It does not unravel the individual calls, just the # top-level blocks. And it leaves top-level calls outside of {} # alone. # # This allows us to start with a block script and then to move # to individual calls. In this way, we can work at higher resolution # and process/evaluate individual, top-level expressions rather than entire blocks. # We can easily compute the dependencies for either the blocks or the calls # and so by converting the blocks to calls, we work with not aggregated groups of calls # but individual ones directly. function(blocks) { tmp = lapply(blocks, function(x) { if(is(x, "{")) as.list(x[-1]) # unlist(x, recursive = FALSE) else x }) if(all(is.na(names(tmp)))) names(tmp) = NULL unlist(tmp, recursive = FALSE) }
cd6636849043776aa1715a38da0c310a9b95d5d2
41fb06c29999266568b0084075d33b2370f8de4a
/man/removeComments.Rd
59d94beb9f140a3dd6cecf6579ce53567dfcb88d
[]
no_license
eloytf/caeasciiR
e3813dd0a3831bd76dee7000fbf0fae1d7652cf3
80b1f2f67f5e9a22c81b9f5ecb30fe78d57c4d46
refs/heads/master
2021-09-13T03:59:27.905751
2018-04-24T18:56:12
2018-04-24T18:56:12
106,620,964
0
0
null
null
null
null
UTF-8
R
false
true
518
rd
removeComments.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{removeComments} \alias{removeComments} \title{A Function for removing commented lines from a character vector of expanded bulk lines.} \usage{ removeComments(lines) } \arguments{ \item{lines}{character vector with the bulk lines} } \value{ a character vector of non-commented bulk lines. } \description{ This function allows you remove commented lines, returning a character vector of the non-commented lines } \examples{ }
e52ead0529268a26907a8b6a4e41565a9f60bba0
9571c840a5ea7ad52e68e87b928be9eba864317d
/plot3.R
e779fa6f08f7f01cbc02ccddcda772a2c4c1ba62
[]
no_license
fabioks/ExData_Plotting1
6907fa0ab24b95f9cb137ddd6d9a45604131044c
8f5d4c69f582710bc41a9bdffc9b0bf1cc8ebc83
refs/heads/master
2021-01-21T09:18:10.106728
2015-12-13T20:36:12
2015-12-13T20:36:12
47,931,351
0
0
null
2015-12-13T18:35:28
2015-12-13T18:35:28
null
UTF-8
R
false
false
1,565
r
plot3.R
#Setting file name file <- "household_power_consumption.zip" #Download file if it was not already downloaded if(!file.exists(file)) { URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(URL, file) } ##Checking if file exists, if not, unzip the data if(!file.exists("household_power_consumption")) { unzip(file) } #Reading the required data only data <- read.table("household_power_consumption.txt", header = FALSE, skip = 66637, nrows = 2880, sep = ";", na.strings = "?", stringsAsFactors = FALSE, dec = ".") #Configuring column names names <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3") #Setting column names colnames(data) <- names #Converting date column do Date data$Date <- as.Date(data$Date, format = "%d/%m/%Y") #Creating new column with date and time DateTime <- paste(data$Date, data$Time) data$DateTime <- as.POSIXct(DateTime) #Plotting plot(data$Sub_metering_1 ~ data$DateTime, type = "l", ylab = "Energy sub metering", xlab = "") lines(data$Sub_metering_2 ~ data$DateTime, type = "l", col = "red") lines(data$Sub_metering_3 ~ data$DateTime, type = "l", col = "blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), cex = .75) #Saving to file png dev.copy(png, filename = "plot3.png", height = 480, width = 480) #Shutting off device dev.off()
0b518cb62a55ab88a7d70e8daf8e95c060e6d2e0
fbb2d5e4032e46e82a4f5d7a061654a0deb371d1
/man/dot-estimateZcutoff.Rd
2d8478c235669b574e86eeb784bf74373795c97b
[ "MIT" ]
permissive
quevedor2/CCLid
23a59d973db2d4cdb6884fecea85b00099fbccc9
460744aafc2127251a8d2b943d0475cbf15cb0d4
refs/heads/master
2022-11-13T17:33:51.166853
2020-07-09T19:48:56
2020-07-09T19:48:56
232,857,878
1
0
null
null
null
null
UTF-8
R
false
true
642
rd
dot-estimateZcutoff.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils_drift.R \name{.estimateZcutoff} \alias{.estimateZcutoff} \title{.estimateZcutoff} \usage{ .estimateZcutoff(seg, data.type = "baf", verbose = FALSE) } \arguments{ \item{seg}{an output dataframe from a CNA object} \item{data.type}{Data type, supports only 'baf' at this time} \item{verbose}{Verbose, default = FALSE} } \value{ Returned seg with $t and $seg.diff } \description{ Creates a theoreticla framework for difference between BAFs, and then compares the observed z-differences against the theoretical diff to look for differences } \keyword{internal}
ffe35201a6939b599763c1fb6aeb09874e8a51d3
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
/codeml_files/newick_trees_processed/7702_0/rinput.R
5c754709e8fcab3315e4d307ab2f7cc77d40a79b
[]
no_license
DaniBoo/cyanobacteria_project
6a816bb0ccf285842b61bfd3612c176f5877a1fb
be08ff723284b0c38f9c758d3e250c664bbfbf3b
refs/heads/master
2021-01-25T05:28:00.686474
2013-03-23T15:09:39
2013-03-23T15:09:39
null
0
0
null
null
null
null
UTF-8
R
false
false
135
r
rinput.R
library(ape) testtree <- read.tree("7702_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="7702_0_unrooted.txt")
56160204dd8ed422272d979df0efd9b385c6a609
e7f2c1765eaf6f2da87693ad1a0198a976320a1a
/ui.R
e0f73adbbfe2698e4bee859f6db32d9f8c427222
[]
no_license
angeliflavio/FAOStatistics
2610966fc71bad39b80fdd5a20a383136be389cc
2193c606916dfdf890695f4326bee2473a6a937d
refs/heads/master
2021-05-16T04:30:49.191559
2017-10-14T21:15:25
2017-10-14T21:15:25
106,080,625
1
1
null
null
null
null
UTF-8
R
false
false
8,219
r
ui.R
library(shiny) library(shinyBS) library(markdown) apple <- read.csv('apple.csv',sep = ',') wine <- read.csv('wine.csv',sep = ',') shinyUI( navbarPage(theme=shinythemes::shinytheme('cerulean'),title='Statistiche FAO', tabPanel('Mele', tabsetPanel( tabPanel('Mappa', br(), sidebarLayout( sidebarPanel( selectInput('measuremap','Misura', choices = c('Produzione (ton)'= 'Production', 'Superficie (ettari)'='Area harvested', 'Resa (hg/ettaro)'='Yield'), selected = 'Produzione'), selectInput('yearmap','Anno', choices = seq(1961,2014),selected = 2014), br(), tags$a(href='http://www.fao.org','Fonte FAO') ), mainPanel( br(), htmlOutput('worldmap')) ) ), tabPanel('Grafico', br(), sidebarLayout( sidebarPanel( selectInput('countryapplemotion','Paesi', choices = c('Tutti',as.vector(unique(apple$Area))), selected = 'Tutti', multiple = T), bsTooltip('countryapplemotion','Selezione singola o multipla', options = list(container = "body")), br(), tags$a(href='http://www.fao.org','Fonte FAO') ), mainPanel( br(), htmlOutput('motionapple') ) ) ), tabPanel('Grafico storico', br(), sidebarLayout( sidebarPanel( selectInput('measuretimeline','Misura', choices = c('Produzione (ton)'= 'Production', 'Superficie (ettari)'='Area harvested', 'Resa (hg/ettaro)'='Yield'), selected = 'Produzione (ton)'), selectInput('countrytimeline','Paesi', choices = as.vector(unique(apple$Area)), selected = 'Italy', multiple = T), bsTooltip('countrytimeline','Selezione singola o multipla', options = list(container = "body")), br(), tags$a(href='http://www.fao.org','Fonte FAO') ), mainPanel( br(), htmlOutput('timeline')) ) ), tabPanel('Tabella', br(), dataTableOutput('table')) )), tabPanel('Vino', tabsetPanel( tabPanel('Mappa', br(), sidebarLayout( sidebarPanel( selectInput('yearmapwine','Anno', choices = seq(1961,2014),selected = 2014), br(), tags$a(href='http://www.fao.org','Fonte FAO') ), mainPanel( br(), htmlOutput('worldmapwine')) )), tabPanel('Grafico', br(), sidebarLayout( sidebarPanel( 
selectInput('countrywinemotion','Paesi', choices = c('Tutti',as.vector(unique(wine$Area))), selected = 'Tutti', multiple = T), bsTooltip('countrywinemotion','Selezione singola o multipla', options = list(container = "body")), br(), tags$a(href='http://www.fao.org','Fonte FAO') ), mainPanel( br(), htmlOutput('motionwine') ) )), tabPanel('Grafico storico', br(), sidebarLayout( sidebarPanel( selectInput('countrytimelinewine','Paesi', choices = as.vector(unique(wine$Area)), selected = 'Italy', multiple = T), bsTooltip('countrytimelinewine','Selezione singola o multipla', options = list(container = "body")), br(), tags$a(href='http://www.fao.org','Fonte FAO') ), mainPanel( br(), htmlOutput('timelinewine')) )), tabPanel('Tabella', br(), dataTableOutput('tablewine')) )), br(), br(), tags$a(img(src='github.png'),href='https://github.com/angeliflavio/FAOStatistics')) ) #add year selection #add value type (produciton, area harvested, yield )
d3651e0c7137f62ce8f727880745a2250a5f743e
3da6a7aa6f30ebe1dc9f2a3479da0e52b74c7a0b
/demographicDetection/man/gender_detect.Rd
4d5f4c485e11a46da60761acb55e5142abc2d5d2
[]
no_license
ensoesie/digitalpophealth
eed712a4fd20403f568678f83181362cd0d8c4a8
58cf871a29ccc6fb4fbb509428dfe08a61c08652
refs/heads/master
2020-03-20T16:15:56.189783
2018-09-18T19:05:34
2018-09-18T19:05:34
137,534,425
2
1
null
2019-11-12T21:11:19
2018-06-15T21:44:16
R
UTF-8
R
false
true
568
rd
gender_detect.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gender_detect.R \name{gender_detect} \alias{gender_detect} \title{Return the gender estimate for a name} \usage{ gender_detect(name, method = "ssa") } \arguments{ \item{name}{A character value containing a name} } \value{ gend_value A string value containing the estimated gender of a name } \description{ Uses the gender package, with US Social Security Administration Data, to estimate gender based on a first name } \examples{ gender_detect("Jessica") } \keyword{gender,} \keyword{text}
2ee97400bd794d978ad5500a2e9c1f1a1492c9ac
787231fb66ebaaad9b71268b4c1a76e90fd460d8
/software/step5.1_create_alpha_diveristy.R
f59838f15aba2d40f3591eb6ac727f1c300b9ff2
[]
no_license
wyc9559/miQTL_cookbook
e2bf28b6c6c089d75c813cb926dbd9c29a26d7e3
5ef8396ea46760179d892caef88b1e64d14d242a
refs/heads/master
2022-04-07T05:05:02.452893
2020-02-17T14:46:52
2020-02-17T14:46:52
null
0
0
null
null
null
null
UTF-8
R
false
false
2,403
r
step5.1_create_alpha_diveristy.R
options = commandArgs(trailingOnly = TRUE) taxonomy_file = options[1] phenotype_file = options[2] coupling_file = options[3] if(!require(vegan)){ install.packages("vegan") library(vegan) } tax = read.table(taxonomy_file,header=T,row.names=1,sep="\t") phenos = read.table(phenotype_file,header = T,row.names=1,sep="\t") phenos = phenos[!apply(phenos,1,function(x){any(is.na(x))}),,drop=F] if (exists("coupling_file")&file.exists(coupling_file)){ coupling = read.table(coupling_file,colClasses = "character") has_both = (coupling[,1] %in% rownames(phenos)) & (coupling[,2] %in% rownames(tax)) coupling= coupling[has_both,] tax = tax[coupling[,2],] phenos = phenos[coupling[,1],,drop = FALSE] rownames(tax) = rownames(phenos) tax = tax[rownames(tax)[grep("[.][0-9]+$",rownames(tax),invert=T)],] phenos = phenos[rownames(tax),,drop = FALSE] } else { tax = tax[intersect(rownames(tax),rownames(phenos)),] phenos = phenos[rownames(tax),,drop = FALSE] } tax = tax[,grep("^genus[.]",colnames(tax))] shannon = diversity(tax,index = "shannon") simpson = diversity(tax,index = "simpson") invsimpson = diversity(tax,index = "invsimpson") all_div = data.frame(row.names = rownames(tax),shannon = shannon, simpson = simpson,invsimpson = invsimpson) corrected_data = apply(all_div,2,function(x){ x.subset = x[!is.na(x)] phenos.subset = phenos[!is.na(x),,drop = FALSE] phenos.subset.matrix = data.matrix(phenos.subset) if(ncol(phenos.subset)==ncol(phenos.subset.matrix)){ phenos.subset = phenos.subset[,apply(phenos.subset.matrix,2,sd) !=0,drop = FALSE] } x.resid = resid(lm(x.subset ~ .,data = phenos.subset)) x[!is.na(x)] = x.resid+100 x[is.na(x)] = 0 return(x) }) corrected_data = as.data.frame(t(corrected_data)) corrected_data2 = data.frame(rownames(corrected_data),corrected_data) colnames(corrected_data2)[1] = "-" annot = data.frame(platform = "RDP", HT12v3.ArrayAddress = rownames(corrected_data2), Gene = rownames(corrected_data2), Chr = 4, ChrStart = 1000, ChrEnd = 1000) colnames(annot)[2] = 
"HT12v3-ArrayAddress" write.table(corrected_data2, file = "alpha_div.txt",sep="\t",row.names = F,quote = F) write.table(annot,file = "alpha_div.txt.annot",sep="\t",row.names=F,quote = F)
dd1c38d9307b9b5a87a40f392822f5220788b6ab
f197e1cfd48f7d0b55b79fdc2e084ca9cce85b02
/R/prof.R
9bdfa259229981fe99ba0b76d64a4006fd49327a
[]
no_license
lenamax2355/Rprofiling
61696211ceff4ef7830cae0b47efcd27861f74d5
308e46de533f050e3acaa8156e6321da09f6894f
refs/heads/master
2022-01-06T13:58:43.926179
2018-09-14T15:33:27
2018-09-14T15:33:27
null
0
0
null
null
null
null
UTF-8
R
false
false
5,710
r
prof.R
####--------------------------------## ## Global Function get_data_types <- function(data){ x <- (lapply(data,class)) x_frame <- (data.frame(unlist(x))) names(x_frame) <- c("Data_Type") x_frame %>% dplyr::group_by(Data_Type) %>% dplyr::count() } ###----------------------------------## ## Global Function create_html <- function(html){ template <- "<!DOCTYPE html> <html lang='en'> <head> <title>Rprofiling</title> <meta charset='utf-8'> <meta name='viewport' content='width=device-width, initial-scale=1'> <link rel='stylesheet' href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css'> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js'></script> <script src='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js''></script> <style> .table-hover tbody tr:hover td { background-color: #008080; color : white; } </style> </head> <body> <div class='container'>" end_temp <- "</div> </body> </html>" html <- stringr::str_c(template, html, end_temp) html <- gsub("<table>", '<table class="table table-hover table-bordered">',html) tempDir <- tempfile() dir.create(tempDir) html_file <- file.path(tempDir, "report.html") writeLines(html,html_file) viewer <- getOption("viewer") if (!is.null(viewer)){ viewer(html_file) utils::browseURL(html_file) } else utils::browseURL(html_file) } #' Information about datasets #' #' @param data is the dataset that you want to digest. dataset class must be dataframe or tibble. #' #' @return Returns information on variables, observation, missing values and uniquness. 
#' @export dsetinfo #' #' @examples destinfo(mtcars) dsetinfo <- function(data){ df <- dsetinfo_(data) description <- "Dataset Information" res <- knitr::kable(df, format = "html",caption = description) create_html(res) } dsetinfo_ <- function(data){ if(class(data) != "data.frame"){ return("Sorry this function only works for data frames as of now") }else{ num_var <- as.character(length(names(data))) num_obs <- nrow(data) t_missing <- sum(!complete.cases(data)) t_missing_percentage <- round((sum(!complete.cases(data))/nrow(data) * 100),2) unique_percentage <- format(round(nrow(unique(data))/nrow(data) * 100,2), nsmall=2) df <- data.frame(Info = c("Number of variables","Number of observation","Total Missing","Total Missing(%)","Unique Row(%)"), Value = c(num_var,num_obs,t_missing,paste0(t_missing_percentage,"%"),paste0(unique_percentage, "%"))) } } #' Information on the variables #' #' @param data is the dataset that you want to digest. dataset class must be dataframe or tibble. #' #' @return Returns the count of data_types. #' @export var_types #' #' @examples var_types(mtcars) var_types <- function(data){ if(class(data) != "data.frame"){ return("Sorry this function only works for data frames as of now") }else{ df <- data.frame(get_data_types(data)) description <- "Variable Types" res <- knitr::kable(df, format = "html", caption = description) create_html(res) } } #' Sample of dataset #' #' @param data is the dataset that you want to digest. dataset class must be dataframe or tibble. 
#' #' @return Returns a sample(20 rows if dataset is more than 20 rows) of the dataset #' @export sample_data #' #' @examples sample_data(mtcars) sample_data <- function(data){ if(class(data) != "data.frame"){ return("Sorry this function only works for data frames as of now") }else{ data <- head(data,15) description <- "Data Sample" res <- knitr::kable(data, format = "html",caption = description) create_html(res) } } #' Summary of dataset #' #' @param data is the dataset that you want to digest. dataset class must be dataframe or tibble. #' #' @return Returns a summary of the dataset of each #' @export #' #' @examples data_summary(mtcars) data_summary <- function(data){ if(class(data) != "data.frame"){ return("Sorry this function only works for data frames as of now") }else{ data <- summary(data) description <- "Data Summary" res <- knitr::kable(data, format = "html", caption = description) create_html(res) } } #' Visualization of missing data #' #' @param data is the dataset that you want to digest. dataset class must be dataframe or tibble. #' #' @return Return a graph that sows the variables and rows with missing data #' @export #' #' @examples vis_mdata(mtcars) vis_mdata <- function(data){ if(class(data) != "data.frame"){ return("Sorry this function only works for data frames as of now") }else{ Amelia::missmap(data,main="Visualization of missing Data",y.cex = 0.9, x.cex = 0.9) } } #' Generates full profile of the dataframe passed #' #' @param data is the dataset that you want to digest. dataset class must be dataframe or tibble. 
#' #' @return Returns a full digestion(profile) of the data #' @export #' #' @examples full_profile(mtcars) full_profile <- function(data){ info <- knitr::kable(x = dsetinfo_(data), format = "html", caption = "Dataset Information") x <- get_data_types(data) var <- knitr::kable(x,format = "html", caption = "Variable Types") d_summary <- knitr::kable(summary(data), format = "html", caption = "Data Summary") samp <- knitr::kable(x=head(data,20),format = "html", caption = "Data Sample") #png("missing.png") Amelia::missmap(data,main="Visualization of missing Data",y.cex = 0.9, x.cex = 0.9) # dev.off() img <- "<strong>Check your 'Plots tab for the graph'</strong>" res <- paste(info,var,d_summary,samp,"Graph of missing Values", img,sep="<hr>") create_html(res) }
901912a79cfd37cb9368c2a15df49c64b969c6b8
53acd1eff5a7523085a71c6553e914999b46fb7b
/tests/testthat.R
a0417f9c7ab63b33096c4c90ff714ce64aa750f0
[ "MIT" ]
permissive
Helseatlas/data
33ee21c497c45a2b5be7fae18e535d9e66c01a23
b18a235a580573b71623c91e9c012400290475a5
refs/heads/master
2021-06-24T21:09:54.761949
2020-05-02T13:24:12
2020-05-02T13:24:12
183,443,098
0
0
MIT
2020-05-20T09:11:59
2019-04-25T13:45:25
R
UTF-8
R
false
false
44
r
testthat.R
library(data) testthat::test_check("data")
42346e60802e8019683094b52f5f671d3bd88d6b
4a7718b5618d75bdcfb3fb71324569d0d11ac749
/man/load_multi_data.Rd
69ae14d43e3d2403de40fe3ee4cd92bfad28357f
[]
no_license
EwaMarek/FindReference
859676f1744ea2333714634fd420d6b91b367956
eedc8c80809b6f3e4439999bac4cb09ec2b228f2
refs/heads/master
2018-08-01T08:39:05.326467
2018-06-02T17:05:06
2018-06-02T17:05:06
104,148,930
0
0
null
null
null
null
UTF-8
R
false
true
2,518
rd
load_multi_data.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/load_multi_data.R \name{load_multi_data} \alias{load_multi_data} \title{Loading microarray data from a list of experiments downloaded from ArrayExpress database} \usage{ load_multi_data(dane, ExpInfoTable, sdrfFiles) } \arguments{ \item{dane}{A list of lists from downloadAE function.} \item{platforma}{A character vector indicating platform for each experiment. Supported platforms are 'Affymetrix' and 'Agilent'.} \item{donotread}{A list of character vectors indicating which microarrays should not be loaded for each experiment. Default is NA, which means that all microarrays listed in dane$rawFiles will be loaded. Note that list's length must be the same as the length of list 'dane'.} } \value{ Function returns a list with two elements. The first one is a list of objects specific for array platform with raw expression data. The second element is a list of data frames with loaded .sdrf files for each experiment. } \description{ \code{load_multi_data} function loads data for a list of microarray experiments downloaded from ArrayExpress database. } \details{ This function is designed to read in microarray data from many experiments. For more detail see \code{load_data} function help. 
} \examples{ \dontrun{ ### load all microarrays for two experiments to_download = c("E-GEOD-21066", "E-MTAB-966") platforms = c("Affymetrix", "Agilent") dane = downloadAE(to_download, getwd()) loaded_experiments = load_multi_data(dane, platforms) ### do not load some microarrays from the first experiment to_download = c("E-GEOD-21066", "E-MTAB-966") platforms = c("Affymetrix", "Agilent") dane = downloadAE(to_download, getwd()) # for second experiment all microarrays should be loaded # -> the second element of the list is NA unwanted_arrays = list(c("GSM526680.CEL", "GSM526756.CEL"), NA) # or (which could be useful when there are a lot of experiments) unwanted_arrays = rep(list(list(NA)), length(dane)) unwanted_arrays[[1]] = c("GSM526680.CEL", "GSM526756.CEL") loaded_experiment = load_multi_data(dane, platforms, donotread = unwanted_arrays) ### when you downloaded data with downloadAE ### but didn't assign the output into variable to_download = c("E-GEOD-21066", "E-MTAB-966") platforms = c("Affymetrix", "Agilent") downloadAE(to_download, getwd()) # read in data saved by downloadAE function dane = readRDS('dataAE.rds') loaded_experiment = load__multi_data(dane, platforms) } } \seealso{ \code{\link{load_data}} }
d104423281d81f0f2189c26b188d1cf6974d9f9c
4dd05c5789fbf09aeb6fdc1229ccf9b8b3caf511
/R/downloadSitemap.R
952da39d5342ada926e0e865a0fc09ddb299d6bc
[ "MIT" ]
permissive
dschmeh/seoR
2d553d3a06aea53c572d30cb9cd2c3a91e2eb8b1
9908f41b3f026930bde83028b8669f70d3a1030d
refs/heads/master
2023-01-04T10:07:53.267464
2022-12-21T12:33:59
2022-12-21T12:33:59
114,384,766
39
7
MIT
2018-06-13T12:59:18
2017-12-15T15:29:25
R
UTF-8
R
false
false
846
r
downloadSitemap.R
#' Function to download the Sitemap.xml of a given Webpage #' #' This function allows to Download the Sitemap.xml of a given Webpage #' @param sitemap The Sitemap you want to download. This can also be a Index-Sitemap #' downloadSitemap() #' @examples #' downloadSitemap("http://ohren-reinigen.de/sitemap.xml") downloadSitemap <- function(sitemap) { #Input check if(sitemapxml_check(sitemap) != TRUE) { warning("Please check you provided Sitemap") } #Get the Sitemap s <- XML::xmlToDataFrame(sitemap) #check if Sitemap is Index Sitemap if ((nrow(s) / sum(grepl("xml", s$loc))) > 0.8) { loop <- 0 full_sm <- NULL for (i in 1:nrow(s)) { sm <- XML::xmlToDataFrame(as.character(s$loc[i])) full_sm <- rbind(full_sm, sm) } } else { full_sm <- s } return(full_sm) }
12f86bd624011dc2b74de685443c7aa8fc56c8be
6c4a6643847fab3b6bb38cfa0c914f353c678214
/QRMC-3HS_Fig3B/QRMC-3HS_Fig3B.R
9fcc292825945bfca39d004d13d7cd6909a8e568
[]
no_license
BulgarelliD-Lab/Microbiota_mapping
2693af905cc1ae23c8771c744c990513106043cd
c69cdb61409d7513e8b2e4a7dfcbcf5843caaac6
refs/heads/main
2023-04-08T05:09:09.889259
2022-06-21T15:53:23
2022-06-21T15:53:23
429,454,678
0
0
null
null
null
null
UTF-8
R
false
false
6,883
r
QRMC-3HS_Fig3B.R
##################################################################################### #Ref to the ARTICLE # # Code to compute calculations presented in:https://www.biorxiv.org/content/10.1101/2021.12.20.472907v3 # Figure 3B # # c.m.z.escuderomartinez@dundee.ac.uk # d.bulgarelli@dundee.ac.uk ############################################################# # Clean-up the memory and start a new session ############################################################# rm(list=ls()) dev.off() ############################################################# # Libraries required ############################################################# library("DESeq2") library("ggplot2") library("vegan") library ("ape") library("dplyr") library("scales") library("VennDiagram") library("tidyverse") library("forcats") library("PMCMRplus") library("phyloseq") #retrieve R and package versions and compare to the uploaded file in GitHub for the reproducibility of the code sessionInfo() #set the working directory setwd("") getwd() ################################################# ################################################ ##Fig. 
3B Fungal community ############################################### ############################################## #Import data picked with JH23 DADA2 JH23_data_phyloseq_DADA2 <-readRDS("JH23_dada2.rds") JH23_data_phyloseq_sample <- JH23_data_phyloseq_DADA2 JH23_data_phyloseq_sample sample_names(JH23_data_phyloseq_sample) rank_names(JH23_data_phyloseq_sample) JH23_tax_table<-tax_table(JH23_data_phyloseq_sample) dim(tax_table(JH23_data_phyloseq_sample)) ################################################################## #Pre-processing: remove Chloroplast and Mitochondria but retain NA ################################################################# JH23_taxa_raw<-tax_table(JH23_data_phyloseq_sample) #write.table(JH23_taxa_raw, file="JH23_taxa_raw_taxonomy_info.txt", sep="\t") #Remove chloroplasts but retain"NA" JH23 JH23_no_chlor <-subset_taxa(JH23_data_phyloseq_DADA2, (Order!="Chloroplast") | is.na(Order)) JH23_no_chlor #Remove mitochondria but retains "NA" JH23_no_plants <-subset_taxa(JH23_no_chlor, (Family!="Mitochondria") | is.na(Family)) JH23_no_plants colnames (tax_table(JH23_no_plants)) rownames (tax_table(JH23_no_plants)) dim(tax_table(JH23_no_plants)) ######################################################################### # Remove ASVs assigned to NA at phylum level ######################################################################### JH23_phyloseq_DADA2_no_plants_1 <- subset_taxa (JH23_no_plants, Phylum!= "NA") JH23_phyloseq_DADA2_no_plants_1 ######################################################################### # Abundance threshold: 20 reads, set as 2% the minimum number of samples ######################################################################### JH23_phyloseq_DADA2_no_plants_10K <- prune_samples(sample_sums(JH23_phyloseq_DADA2_no_plants_1)>=10000, JH23_phyloseq_DADA2_no_plants_1) JH23_phyloseq_DADA2_no_plants_10K JH23_phyloseq_DADA2_no_plants_2 = filter_taxa(JH23_phyloseq_DADA2_no_plants_10K, function(x) sum(x > 20) > (0.02 
*length(x)), TRUE) JH23_phyloseq_DADA2_no_plants_2 sort(sample_sums(JH23_phyloseq_DADA2_no_plants_2)) JH23_taxa<-tax_table(JH23_phyloseq_DADA2_no_plants_2 ) #write.table(JH23_taxa, file="JH23_taxa_info.txt", sep="\t") ##ratio filtered reads/total reads ratio <- sum(sample_sums(JH23_phyloseq_DADA2_no_plants_2))/sum(sample_sums(JH23_phyloseq_DADA2_no_plants_1))*100 ratio ######################################################################### # Import a simplified mapping file ######################################################################## #Import the simplified mapping file design <- read.delim("JH23_Map.txt", sep = "\t", header=TRUE, row.names=1) JH23_map <- sample_data(design) sample_data(JH23_phyloseq_DADA2_no_plants_2)<-JH23_map JH23_phyloseq_DADA2_no_plants_2 ######################################################################### # Aggregate samples at genus and family level (Note: NArm set to false to keep NA at Genus level) ######################################################################## JH23_data_phyloseq_genus <- tax_glom(JH23_phyloseq_DADA2_no_plants_2, taxrank= "Genus", NArm=FALSE, bad_empty=c(NA, "", " ", "\t")) JH23_genus_taxa<-tax_table(JH23_data_phyloseq_genus) #write.table(JH23_genus_taxa, file="JH23_genus_taxa_taxonomy_info.txt", sep="\t") JH23_data_phyloseq_family <- tax_glom(JH23_phyloseq_DADA2_no_plants_2, taxrank= "Family", NArm=FALSE, bad_empty=c(NA, "", " ", "\t")) #Compare the two objects #ASVs JH23_phyloseq_DADA2_no_plants_2 sort(sample_sums(JH23_phyloseq_DADA2_no_plants_2)) #Genus JH23_data_phyloseq_genus sort(sample_sums(JH23_data_phyloseq_genus)) #Family JH23_data_phyloseq_family sort(sample_sums(JH23_data_phyloseq_family)) #################################################################################### # Rarefy at an even sequencing depth (10,600) and "freeze" these objects for downstream analyses ##################################################################################### #ASVs : ignore the warnings, the 
object will be saved right after JH23_rare_ASV_10K <- rarefy_even_depth(JH23_phyloseq_DADA2_no_plants_2, 10200) #saveRDS(JH23_rare_ASV_10K, file ="JH23_rare_ASV_10K.rds") JH23_rare_ASV_10K<- readRDS("JH23_rare_ASV_10K.rds") ###################################################################### ###################################################################### ## Figure 3B_Constrained ordination ##################################################################### ##################################################################### JH23_rhizosphere<- subset_samples(JH23_rare_ASV_10K, Genotype == "Barke" |Genotype == "52"|Genotype == "17") JH23_rhizosphere JH23_rare_sqrt<-transform_sample_counts(JH23_rhizosphere, function (x) sqrt(x)) #constrained ordination: constrained for genotype #rarefied data JH23_rhizosphere_CAP <- ordinate(JH23_rare_sqrt, "CAP", "bray", ~ Genotype) plot_ordination(JH23_rare_sqrt, JH23_rhizosphere_CAP, color = "Genotype") #assign shapes to Soil type and color to Sample type p=plot_ordination(JH23_rare_sqrt, JH23_rhizosphere_CAP , color = "Genotype") p = p + geom_point(size = 6, alpha = 0.80, shape =17) p = p + scale_colour_manual(values = c("#56B4E9","#E69F00","#0072B2")) p + ggtitle("CAP 16S data, Bray distance-rarefied samples") #extract the mapping file design_rhizosphere <- design[colnames(otu_table(JH23_rhizosphere)), ] #BC distance BC <- phyloseq::distance(JH23_rhizosphere, "bray") adonis(BC ~ Genotype, data= design_rhizosphere , permutations = 5000) adonis(BC ~ Batch*Genotype , data=design_rhizosphere , permutation =5000) ############################################################################################################################ ##End
851c5fd0414524e13155aa2b6d5b557847015c92
7a35a5103521e220d69036f9a71b60d9055052e7
/man/namespaceImportMethods.Rd
8658fa9aa78a3a8e07bde6f258d6b3e143f3b285
[]
no_license
granatb/RapeR
da7205c6f07442ee03e53511b636858f3d0c8bbf
799b289c09ec024e7186cbf338bfe539c6ed7334
refs/heads/master
2020-05-20T14:36:24.817925
2019-05-15T14:56:26
2019-05-15T14:56:26
185,624,078
1
0
null
null
null
null
UTF-8
R
false
true
303
rd
namespaceImportMethods.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/namespaceImportMethods.R \name{namespaceImportMethods} \alias{namespaceImportMethods} \title{fun_name} \usage{ namespaceImportMethods(params) } \arguments{ \item{param}{fun_name} } \description{ kolejna funkcja podmieniona }
62cd6e61c34d2f12171cf58a17ded351f11f9235
e41e39e46ccaa44f104f8a817932824dbe4fb0c0
/R_script/1/quant_pub.R
d5a293771f1fc01ce76b8d8ff9629e0ff99a642a
[]
no_license
yoshimura19/samples
fd59758e4431565047d306ad865fb000d2f4df4b
fc4cf50a583029016ddcb9eb9476c464c52900f1
refs/heads/master
2021-07-18T00:23:02.252666
2017-10-26T19:21:09
2017-10-26T19:21:09
108,371,681
0
0
null
null
null
null
UTF-8
R
false
false
3,148
r
quant_pub.R
library(quantreg) data(engel) qr <- rq(foodexp~income,data=engel,tau=seq(0,1,0.25)) summary(qr) qr data6 <- read.csv("/Users/serubantesu2/Downloads/675cross.csv",header= T,sep=",",na.strings=".",fileEncoding="cp932") attach(data6) data6 ls(data6) data6 data7 <- read.csv("/Users/serubantesu2/Downloads/try2-2.csv",header= T,sep=",",na.strings=".",fileEncoding="cp932") attach(data7) ls(data7) data7 data8 <- read.csv("/Users/serubantesu2/Downloads/make717public_panel.csv",header= T,sep=",",na.strings=".",fileEncoding="cp932") attach(data8) ls(data8) forPSM <- model.frame(id ~ oldid + year + misOrder + kubun + ken + shi + name + forcheck + med_cost_per1man1day + med_cost + general_beds + use_rate_general_beds + ave_stay_general + ave_impatient_per1day + out_patient + patient + outpatient_rate + assets_building_100beds + doctors_100beds + nurses_100beds + pharmacist_100beds + DPCdummyH20 + fixedeffect , data = data7) for717panel <- model.frame(id ~ year + kubun + ken + shi + name + forcheck + med_cost_per1man1day + medical_cost + general_beds + use_rate_general_beds + ave_stay_general + ave_inpatient_per1day #+out_patient + outpatient_rate + assets_building_100beds #+ patient + doctors_100beds + nurses_100beds + pharmacist_100beds #+ fixedeffect + DPCdummyH20 , data = data8) length(for717panel$id) write.table(forPSM, file="forPSM.txt", quote=F, col.names=T,append=F) editeddata <- model.frame(DPCdummyH20 ~ fe + id + med_cost_per1man1day + general_beds + use_rate_general_beds + ave_stay_general + ave_impatient_per1day + outpatient_rate + assets_building_100beds + doctors_100beds + pharmacist_100beds , data=data6) ed <- editeddata write(ed$id, file="ed.txt") write.table(editeddata, file="ed.txt", quote=F, col.names=T, append=F) reg <- lm(ed$med_cost_per1man1day ~ ed$DPCdummyH20 + ed$general_beds + ed$use_rate_general_beds + ed$ave_stay_general + ed$ave_impatient_per1day + ed$outpatient_rate + ed$assets_building_100beds + ed$doctors_100beds + ed$pharmacist_100beds , 
data=editeddata) summary(reg) qr <- rq(ed$med_cost_per1man1day ~ ed$general_beds + ed$use_rate_general_beds + ed$ave_stay_general + ed$ave_impatient_per1day + ed$outpatient_rate + ed$assets_building_100beds + ed$doctors_100beds + ed$pharmacist_100beds , data=editeddata, tau=seq(0,1,0.25)) summary(qr) summary.rq(qr) plot(qr) qr options(scipen=10) qr <- rq(med_cost_per1man1day ~ general_beds + use_rate_general_beds + ave_stay_general + ave_impatient_per1day + outpatient_rate + assets_building_100beds + doctors_100beds + pharmacist_100beds #+ nurses_100beds , data=data7, tau=seq(0,1,0.25))
d8970e048fcd2706512283e4c1f9b4e4e0b2f002
bdd73b70c77512ad645b546a2f9bdb6c6b96f60f
/analyses/1 simulate_data.R
6dec9d22f06acfa716d2d538edca92ae87591ffd
[ "MIT" ]
permissive
lnsongxf/eta-simulations
1e0e0703e1143c4930627659eb47e44ba503f09c
aee8587d481bf121c6da809ae3161a41529816a1
refs/heads/master
2020-04-25T18:46:24.249519
2018-08-28T19:52:50
2018-08-28T19:52:50
null
0
0
null
null
null
null
UTF-8
R
false
false
9,339
r
1 simulate_data.R
setwd("C:/Users/eri2005/Desktop") ####simulations for data#### library(mvtnorm) library(ez) library(reshape) ##rmvnorm(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), ## method=c("eigen", "svd", "chol"), pre0.9_9994 = FALSE) ####things to simulate#### ##rotate around N values start at 20 in each level, then add 5 as you go ##keep means steady 2.5, 3, 3.5, etc. ##rotate around the number of levels 3-6 ##rotate around SDs making eta small medium large ##rotate around correlated error 0, .1, 3., .5, .7, .9 ####create blank data from for data#### totalsims = 1224*1000 mydata = data.frame(N = 1:totalsims) ####keep track of the simulation rounds#### round = 0 ####loop over N values here#### ##loop a Nloops = seq(20, 100, 6) for (a in 1:length(Nloops)) { simulate = 0 ####loop over M and levels values here#### ##this loop will give you different numbers of means for the different number of levels ##loops b levels = 3:6 for (b in 1:length(levels)) { topmean = c(3.5, 4.0, 4.5, 5) Means = seq(2.5, topmean[b], .5) ####loop over SD values here#### ##loop c SDloops = c(5, 3, 1) for (c in 1:length(SDloops)) { ####loop over correlations here#### ##loop d corloops = c(0, .1, .3, .5, .7, .9) for (d in 1:length(corloops)) { ####simulate 1000 rounds of data#### for (e in 1:1000) { ####make the data here#### ##here we are going to want to make the cor / SD matrix pattern ##but that will depends on the number of levels ##might have to do it by if statements? 
not very elegant if(levels[b] == 3) { sigma = matrix(c(SDloops[c],corloops[d],corloops[d], corloops[d],SDloops[c],corloops[d], corloops[d],corloops[d],SDloops[c]), nrow = 3, ncol = 3) } if(levels[b] == 4) { sigma = matrix(c(SDloops[c],corloops[d],corloops[d],corloops[d], corloops[d],SDloops[c],corloops[d],corloops[d], corloops[d],corloops[d],SDloops[c],corloops[d], corloops[d],corloops[d],corloops[d],SDloops[c]), nrow = 4, ncol = 4) } if(levels[b] == 5) { sigma = matrix(c(SDloops[c],corloops[d],corloops[d],corloops[d],corloops[d], corloops[d],SDloops[c],corloops[d],corloops[d],corloops[d], corloops[d],corloops[d],SDloops[c],corloops[d],corloops[d], corloops[d],corloops[d],corloops[d],SDloops[c],corloops[d], corloops[d],corloops[d],corloops[d],corloops[d],SDloops[c]), nrow = 5, ncol = 5) } if(levels[b] == 6) { sigma = matrix(c(SDloops[c],corloops[d],corloops[d],corloops[d],corloops[d],corloops[d], corloops[d],SDloops[c],corloops[d],corloops[d],corloops[d],corloops[d], corloops[d],corloops[d],SDloops[c],corloops[d],corloops[d],corloops[d], corloops[d],corloops[d],corloops[d],SDloops[c],corloops[d],corloops[d], corloops[d],corloops[d],corloops[d],corloops[d],SDloops[c],corloops[d], corloops[d],corloops[d],corloops[d],corloops[d],corloops[d],SDloops[c]), nrow = 6, ncol = 6) } dataset = rmvnorm(Nloops[a], Means, sigma) ##here we are simulating 1-7 Likert type data ##take off the digits ##take out the out of range values dataset = round(dataset, digits = 0) dataset[ dataset < 1 ] = 1 dataset[ dataset > 7 ] = 7 ####put in the basic statistics here#### round = round + 1 simulate = simulate + 1 mydata$N[round] = Nloops[a] mydata$levels[round] = levels[b] mydata$stdev[round] = SDloops[c] mydata$correl[round] = corloops[d] mydata$simnum[round] = simulate ####begin RM ANOVA one way#### dataset = as.data.frame(dataset) dataset$partno = as.factor(1:nrow(dataset)) longdataset = melt(dataset, id = "partno", measured = colnames(longdataset[1:(ncol(longdataset)-1), ])) rmoutput = 
ezANOVA(data = longdataset, wid = partno, within = variable, dv = value, type = 3, detailed = T) mydata$RM1.dfm[round] = rmoutput$ANOVA$DFn[2] mydata$RM1.dfr[round] = rmoutput$ANOVA$DFd[2] mydata$RM1.ssm.p[round] = rmoutput$ANOVA$SSn[1] mydata$RM1.ssm.main[round] = rmoutput$ANOVA$SSn[2] mydata$RM1.ssr.p[round] = rmoutput$ANOVA$SSd[1] mydata$RM1.ssr.main[round] = rmoutput$ANOVA$SSd[2] mydata$RM1.F[round] = rmoutput$ANOVA$F[2] mydata$RM1.ges[round] = rmoutput$ANOVA$ges[2] ####begin RM ANOVA two way#### tempstuff = longdataset randomvalues = rnorm(Nloops[a], mean(Means), 1) tempstuff$value = tempstuff$value + randomvalues tempstuff$value = round(tempstuff$value, digits = 0) tempstuff$value[ tempstuff$value < 1 ] = 1 tempstuff$value[ tempstuff$value > 7 ] = 7 doublermdata = rbind(longdataset, tempstuff) level1 = as.numeric(gl(2, Nloops[a]/2, nrow(longdataset), labels = c("1", "2"))) level2 = 3 - as.numeric(gl(2, Nloops[a]/2, nrow(longdataset), labels = c("2", "1"))) doublermdata$rmlevel2 = as.factor(c(level1,level2)) rmoutput2 = ezANOVA(data = doublermdata, wid = partno, within = .(variable, rmlevel2), dv = value, type = 3, detailed = T) mydata$RM2.dfm[round] = rmoutput2$ANOVA$DFn[2] mydata$RM2.dfr[round] = rmoutput2$ANOVA$DFd[2] mydata$RM2.ssm.p[round] = rmoutput2$ANOVA$SSn[1] mydata$RM2.ssm.main[round] = rmoutput2$ANOVA$SSn[2] mydata$RM2.ssm.other[round] = rmoutput2$ANOVA$SSn[3] mydata$RM2.ssm.interact[round] = rmoutput2$ANOVA$SSn[4] mydata$RM2.ssr.p[round] = rmoutput2$ANOVA$SSd[1] mydata$RM2.ssr.main[round] = rmoutput2$ANOVA$SSd[2] mydata$RM2.ssr.other[round] = rmoutput2$ANOVA$SSd[3] mydata$RM2.ssr.interact[round] = rmoutput2$ANOVA$SSd[4] mydata$RM2.F[round] = rmoutput2$ANOVA$F[2] mydata$RM2.ges[round] = rmoutput2$ANOVA$ges[2] ####begin MIXED ANOVA two way#### longdataset$level2 = gl(2, Nloops[a]/2, nrow(longdataset), labels = c("level 1", "level 2")) mixedoutput = ezANOVA(data = longdataset, wid = partno, within = variable, between = level2, dv = value, type = 3, 
detailed = T) mydata$MIX.dfm[round] = mixedoutput$ANOVA$DFn[3] mydata$MIX.dfr[round] = mixedoutput$ANOVA$DFd[3] mydata$MIX.ssm.p[round] = mixedoutput$ANOVA$SSn[1] mydata$MIX.ssm.other[round] = mixedoutput$ANOVA$SSn[2] mydata$MIX.ssm.main[round] = mixedoutput$ANOVA$SSn[3] mydata$MIX.ssm.interact[round] = mixedoutput$ANOVA$SSn[4] mydata$MIX.ssr.p[round] = mixedoutput$ANOVA$SSd[1] mydata$MIX.ssr.other[round] = mixedoutput$ANOVA$SSd[2] mydata$MIX.ssr.main[round] = mixedoutput$ANOVA$SSd[3] mydata$MIX.ssr.interact[round] = mixedoutput$ANOVA$SSd[4] mydata$MIX.F[round] = mixedoutput$ANOVA$F[3] mydata$MIX.ges[round] = mixedoutput$ANOVA$ges[3] ####begin BN ANOVA one way#### longdataset$bnpartno = as.factor(1:nrow(longdataset)) bnoutput = ezANOVA(data = longdataset, wid = bnpartno, between = variable, dv = value, type = 3, detailed = T) mydata$BN1.dfm[round] = bnoutput$ANOVA$DFn[2] mydata$BN1.dfr[round] = bnoutput$ANOVA$DFd[2] mydata$BN1.ssm.p[round] = bnoutput$ANOVA$SSn[1] mydata$BN1.ssm.main[round] = bnoutput$ANOVA$SSn[2] mydata$BN1.ssr.p[round] = bnoutput$ANOVA$SSd[1] mydata$BN1.ssr.main[round] = bnoutput$ANOVA$SSd[2] mydata$BN1.F[round] = bnoutput$ANOVA$F[2] mydata$BN1.ges[round] = bnoutput$ANOVA$ges[2] ####begin BN ANOVA two way#### bnoutput2 = ezANOVA(data = longdataset, wid = bnpartno, between = .(variable, level2), dv = value, type = 3, detailed = T) mydata$BN2.dfm[round] = bnoutput2$ANOVA$DFn[2] mydata$BN2.dfr[round] = bnoutput2$ANOVA$DFd[2] mydata$BN2.ssm.p[round] = bnoutput2$ANOVA$SSn[1] mydata$BN2.ssm.main[round] = bnoutput2$ANOVA$SSn[2] mydata$BN2.ssm.other[round] = bnoutput2$ANOVA$SSn[3] mydata$BN2.ssm.interact[round] = bnoutput2$ANOVA$SSn[4] mydata$BN2.ssr.all[round] = bnoutput2$ANOVA$SSd[1] mydata$BN2.F[round] = bnoutput2$ANOVA$F[2] mydata$BN2.ges[round] = bnoutput2$ANOVA$ges[2] } ##end sim loop filename = paste(round, ".csv", sep = "") datalines = (abs(round-999):round) write.csv(mydata[ datalines, ], file = filename) } ##end N loop } ##end levels loop } 
##end SD loop } ##end cor loop
44320d61d1bbabc4f611a541f13cb29e70673228
0c7f7d067297fce49cf8eacd2efbaeb3b441f6dc
/decision_tree/decisionTree.R
eaa367019672eb5e6eb2c15fd1b35413eb28c5ca
[]
no_license
AngeloDamiani/CodingMachineLearning
f7ff418de38c26c1f61845661ce18a2305124d67
a36bdc878bc32dd0a5b182e5216a02fcf4ca3935
refs/heads/master
2020-03-14T02:13:22.494524
2018-05-04T08:43:30
2018-05-04T08:43:30
131,394,986
0
0
null
null
null
null
UTF-8
R
false
false
4,194
r
decisionTree.R
# Simple CART-style decision tree built from scratch (Gini impurity,
# exhaustive threshold search), demonstrated on ./irisdataset.data.

rm(list=ls())
# NOTE(review): parent.frame(2)$ofile is only set when this file is source()d;
# running it line-by-line or via Rscript will fail on the next line.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)

##################### MATH FUNCTIONS ######################################

# Gini impurity of a set of rows. The class label is assumed to sit in the
# LAST column; returns 1 - sum(p_k^2) over the distinct labels present.
gini <- function(rows){
  nrows = nrow(rows)
  already_seen = vector()
  impurity = 1
  label_index = ncol(rows)
  for (label in 1:nrows){
    current_label = rows[label, label_index]
    if(!(current_label %in% already_seen)){
      lbl_count = sum(rows[,label_index] == current_label)
      impurity = impurity - (lbl_count/nrows)^2
      already_seen = c(already_seen, current_label)
    }
  }
  impurity
}

# Information gain of a candidate split: parent uncertainty minus the
# size-weighted Gini impurities of the two child partitions.
information_gain <- function(uncertainty, left_partition, right_partition){
  nright = nrow(right_partition)
  nleft = nrow(left_partition)
  ntotal = nleft + nright
  uncertainty - (nleft/ntotal)*gini(left_partition) - (nright/ntotal)*gini(right_partition)
}

###################### QUESTION DEFINITION ##################################

# A split question: "is column `var` >= `point`" when the threshold is
# numeric, otherwise an equality test.
Question <- setRefClass("Question",
  fields = list(var = "numeric", point = "ANY"),
  methods = list(
    match = function(sample) {
      val = sample[var]
      if(is.numeric(point)){
        result = val >= point
      } else{
        result = val == point
      }
      result
    }
  )
)

####################### TREE ELEMENTS ########################################

# Abstract base node (no fields); QuestionNode and Leaf inherit from it.
Node <- setRefClass("Node", fields = list() )

# Internal node: the splitting question plus the true/false subtrees.
QuestionNode <- setRefClass("QuestionNode",
  fields = list(question = "Question", left_son = "Node", right_son = "Node"),
  contains = c("Node")
)

# Terminal node: stores the predicted class label.
Leaf <- setRefClass("Leaf",
  fields = list(solution = "ANY"),
  contains = c("Node")
)

###################### TREE BUILDING FUNCTIONS ###############################

# Partition `dataset` into (rows matching question, rows not matching).
# NOTE(review): this masks base::split, and grows both partitions with
# rbind() inside a loop, which is O(n^2) in the number of rows.
split <- function(dataset, question){
  true_partition = vector()
  false_partition = vector()
  for (row in 1:nrow(dataset)){
    if (question$match(dataset[row,]) == TRUE){
      true_partition = rbind(true_partition, dataset[row,])
    } else{
      false_partition = rbind(false_partition, dataset[row,])
    }
  }
  partitions = list(true_partition, false_partition)
}

# Exhaustively try every (column, value) pair as a threshold and return the
# Question with the highest information gain (NULL if no split helps).
# The last column is excluded because it holds the labels.
get_best_splitting_question <- function(dataset){
  best_question = NULL
  current_uncertainty = gini(dataset)
  max_gain = 0
  for (clmn in 1:(ncol(dataset)-1)){
    for (row in 1:nrow(dataset)){
      new_quest = Question(var=clmn, point=dataset[row,clmn])
      splitting = split(dataset, new_quest)
      true_rows = splitting[[1]]
      false_rows = splitting[[2]]
      # skip degenerate splits where one side is empty
      if (length(true_rows) == 0 | length(false_rows) == 0) next
      gain = information_gain(current_uncertainty, true_rows, false_rows)
      if (gain >= max_gain){
        max_gain = gain
        best_question = new_quest
      }
    }
  }
  best_question
}

# Recursively grow the tree until every partition is pure (Gini == 0);
# pure partitions become leaves labelled with their (single) class.
decision_tree_building <- function(dataset){
  result = NULL
  impurity = gini(dataset)
  if(impurity == 0){
    result = Leaf(solution=dataset[1,ncol(dataset)])
  } else{
    best_question = get_best_splitting_question(dataset)
    splitting = split(dataset, best_question)
    true_branch = splitting[[1]]
    false_branch = splitting[[2]]
    left_branch = decision_tree_building(true_branch)
    right_branch = decision_tree_building(false_branch)
    result = QuestionNode(question=best_question, left_son=left_branch, right_son=right_branch)
  }
  result
}

# Walk one sample down the tree and return the label stored at the leaf.
classify <- function(node, sample){
  result = NULL
  if (class(node)[[1]] == "Leaf"){
    result = node$solution
  } else{
    if(node$question$match(sample)){
      result = classify(node$left_son, sample)
    } else{
      result = classify(node$right_son, sample)
    }
  }
  result
}

################### MAIN ################################################################

data <- read.csv(file="./irisdataset.data", header=FALSE, sep=",")
# coerce the data frame to a plain matrix, one column at a time
dataset = vector()
for(i in 1:(ncol(data))){
  dataset = cbind(dataset, data[[i]])
}
print(dataset)
tree = decision_tree_building(dataset)
# Three samples has been removed from iris dataset and put here
test1 = c(5.2,3.4,1.4,0.2) # "1" expected (Iris Setosa)
test2 = c(5.7,2.8,4.1,1.3) # "2" expected (Iris Versicolor)
test3 = c(7.9,3.8,6.4,2.0) # "3" expected (Iris Virginica)
print(classify(tree, test1))
print(classify(tree, test2))
print(classify(tree, test3))
3817cb91d99bfd47945349144373d2e67314b713
2dfe5ece80ef0cde27bc3369f08d9798188e69b5
/PL.R
a75e7953b3b41b9736b12597a4e7b04486852752
[]
no_license
iamkk11/EPL-Prediction
c31de33503fb595d658ec032a80f8245ea517375
ade3542cbfc3146072d26d1d4cbd7e107639748c
refs/heads/master
2022-12-10T19:02:13.258482
2020-09-11T14:21:16
2020-09-11T14:21:16
105,903,780
0
0
null
null
null
null
UTF-8
R
false
false
1,767
r
PL.R
# EPL match-outcome prediction for Chelsea (home) vs Man City (away):
# merge the season CSVs in one folder, pull the head-to-head fixtures with
# SQL, fit a naive Bayes model and a conditional-inference tree on the
# bookmaker odds, then predict the outcome for one new set of odds.

set.seed(1)
v='PL2'
# NOTE(review): hard-coded local path; the script only runs on this machine.
p=paste('/home/kevin/Downloads/',v,sep='')
#Combine files from trace folders into 1 data frame
library('dplyr',warn.conflicts=FALSE)
#Defining function to merge traces: read every file in mypath and row-bind them
combine = function(mypath){
  filenames=list.files(path=mypath, full.names=TRUE)
  datalist = lapply(filenames, function(x){read.csv(file=x,sep=',')})
  Reduce(function(x,y) {bind_rows(x,y)}, datalist)}
#Reading each trace file and combining them
a=combine(p)
attach(a)
#teams: head-to-head fixtures, then result counts and percentages
library('sqldf',warn.conflicts=FALSE)
t=sqldf("SELECT Date,B365H,B365D,B365A,LBH,LBD,LBA,FTR FROM a WHERE HomeTeam='Chelsea' AND AwayTeam='Man City'ORDER BY Date")
s=sqldf("SELECT FTR AS 'Result',COUNT(FTR) AS 'Counts' FROM t GROUP BY FTR")
# NOTE(review): `p` is reused here, clobbering the folder path defined above.
p=sqldf("SELECT Result,Counts,ROUND(Counts*100.0/(SELECT SUM(Counts) FROM s),0) AS 'Percentage' FROM s GROUP BY Result")
#b=t[-10,]
#b=t
# NOTE(review): both assignments to `b` above are commented out, so the next
# line references an undefined variable and the script stops here;
# presumably `b=t` was meant to stay active -- confirm before running.
#Factorization
b$FTR=factor(b$FTR)
#Training and test
library('varhandle')
gtrain=b
# NOTE(review): columns 1:6 are Date plus five of the six odds columns
# (Date,B365H,B365D,B365A,LBH,LBD) -- including Date as a predictor looks
# unintended; verify the intended feature set.
gtest<-data.frame(gtrain[,c(1:6)])
gtestlabel<-c(unfactor(gtrain$FTR))
attach(gtrain)
#naive bayes: trained and evaluated on the SAME rows (training accuracy)
library('e1071',warn.conflicts=FALSE)
naive_bayes_model<-naiveBayes(FTR ~ ., data = gtrain)
naive_bayes_predictions<-predict(naive_bayes_model, newdata=gtest)
naive_bayes_accuracy=round(mean(naive_bayes_predictions==gtestlabel),2)*100
#classification tree (conditional inference tree from the party package)
library('party',warn.conflicts=FALSE)
ctree_model<- ctree(FTR ~ ., data = gtrain,controls=ctree_control(minsplit=30,minbucket=10,maxdepth=5))
ctree_predictions <- predict(ctree_model,newdata=gtest,type='response')
ctree_accuracy=round(mean(ctree_predictions==gtestlabel),2)*100
# predict the outcome for one new fixture's bookmaker odds
odds=data.frame(B365H='0.727',B365D='2.75',B365A='4',LBH='0.727',LBD='2.6',LBA='3.75')
predict(naive_bayes_model,newdata=odds,type = 'class',interval=predict)
f457a3c0f65291ab43d1e2cd776dd076da6e031a
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/paws.common/man/new_request.Rd
eaa86e50083d94a86e66d0c6b9fbdbdb876054ea
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
1,251
rd
new_request.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/request.R \name{new_request} \alias{new_request} \title{Return an API request object} \usage{ new_request(client, operation, params, data, dest = NULL) } \arguments{ \item{client}{A service client, e.g. from \code{new_service}.} \item{operation}{An operation, e.g. from \code{new_operation}.} \item{params}{A populated input object.} \item{data}{An empty output object.} \item{dest}{Control where the response body is written} } \description{ Return an API request object with everything needed to make a request. } \examples{ \dontrun{ # Make a request object for the S3 ListBuckets operation. metadata <- list( endpoints = list("*" = list(endpoint = "s3.{region}.amazonaws.com", global = FALSE)), service_name = "s3" ) client <- new_service(metadata, new_handlers("restxml", "s3")) op <- new_operation("ListBuckets", "GET", "/", list()) params <- list() data <- tag_add(list(Buckets = list()), list(type = "structure")) req <- new_request(client, op, params, data) } } \seealso{ Other API request functions: \code{\link{new_handlers}()}, \code{\link{new_operation}()}, \code{\link{new_service}()}, \code{\link{send_request}()} } \concept{API request functions}
bddb86db08d7eea410dbb0e4e69091e001e0fd22
0d4c415c43ef34498dc2e5fbb58080c3401d194b
/R/dropSetup.R
d0704865813f016c253f6b647eddb0a757fd10d5
[]
no_license
OliverDietrich/MasterThesis
4faca4e62f9a36f392cbc61072cab834a09dad47
3167ef11f75744fb9ee6d366ad3c576a0e06de9b
refs/heads/master
2020-05-06T13:43:58.249981
2019-11-26T09:56:36
2019-11-26T09:56:36
180,152,871
0
0
null
null
null
null
UTF-8
R
false
false
9,270
r
dropSetup.R
###################################################################### ######### Setup Seurat object from Cell Ranger output files ########## #--------------------------------------------------------------------# ########### R script by Oliver Dietrich on 17 April 2019 ############# ###################################################################### # read input arguments from the command line args <- commandArgs(trailingOnly = TRUE) DSname <- args[1] arg2 <- args[2] arg3 <- args[3] # load dependencies library(tidyverse) library(Seurat) library(hdf5r) set.seed(154) # check if the input is correct, otherwise abort script if( arg2 %in% c("raw", "filtered")) { type <- arg2 ; print(paste("Type:", type)) } else { print("Error. Please specify data type ('raw' or 'filtered'). Usage: DS type format") ; quit(status = 10) } if( arg3 %in% c("h5", "mtx") ) { format <- arg3 ; print(paste("Format:", format)) } else { print("Error. Please specify data format ('h5' or 'mtx'). Usage: DS type format") ; quit(status = 10) } # check to which project the dataset belongs HOME <- paste0("/home/", Sys.getenv("USER"), "/Data/") if(file.exists(paste0(HOME, "projects")) == TRUE) { projects <- read.table(file = paste0("/home/", Sys.getenv("USER"), "/Data/", "projects"), sep = "\t", header = TRUE) projectName <- as.character(projects$project[str_detect(projects$datasets, DSname)]) } else { print("Error. Projects file not available or path corrupt.") ; quit(status = 12) } dir.create(paste0(HOME, projectName, "/analysis/Input"), recursive = TRUE) # load the data and convert to Seurat object, depends on the file format (mtx or hdf5) if( format %in% "h5") { print("Error. The Hdf5 format cannot be used yet. Please refer to the matrix format instead.") ; quit(status = 13) # IN CONSTRUCTION ... 
# pathIN <- paste0(pathIN, projectName, "/datasets/", DSname, "/outs/", type, "_feature_bc_matrix.h5") # DS_data <- Read10X_h5(pathIN) # DS <- CreateSeuratObject(DS_data) } else { # set path to data pathIN <- paste0(HOME, projectName, "/datasets/", DSname, "/outs/", type, "_feature_bc_matrix/") ### test the input, must lead from different inputs to same output print("Checking and sorting input") inputFolder <- paste0(HOME, projectName, "/analysis/Input/") if(file.exists(paste0(pathIN, "/genes.tsv")) == TRUE) { # load genes table, copy to features table genes <- read.table(file = paste0(pathIN, "genes.tsv"), sep = "\t", header = FALSE) features <- genes # remove "/" and "_" from gene names and replaces with "-", necessary for printing out gene names levels(features$V2) <- gsub("/", "-", levels(features$V2)) ; levels(features$V2) <- gsub("_", "-", levels(features$V2)) # change column names, does not depend on columns (only two) # change column names, depends on the number of columns if (length(colnames(features)) == 2) {colnames(features) <- c("ensID", "feature") } if (length(colnames(features)) == 3) {colnames(features) <- c("ensID", "feature", "type") } # save features table to file write.table(features, file = paste0(inputFolder, "features.tsv"), sep = "\t", row.names = FALSE, col.names = TRUE) } else { # load features table features <- read.table(file = paste0(pathIN, "features.tsv"), sep = "\t", header = FALSE) # remove "/" and "_" from gene names and replaces with "-", necessary for printing out gene names levels(features$V2) <- gsub("/", "-", levels(features$V2)) ; levels(features$V2) <- gsub("_", "-", levels(features$V2)) # change column names, depends on the number of columns if (length(colnames(features)) == 2) {colnames(features) <- c("ensID", "feature") } if (length(colnames(features)) == 3) {colnames(features) <- c("ensID", "feature", "type") } # save features table to input files + copy as genes.tsv write.table(features, file = paste0(inputFolder, 
"features.tsv"), sep = "\t", row.names = FALSE, col.names = TRUE) write.table(features, file = paste0(pathIN, "genes.tsv"), sep = "\t", row.names = FALSE, col.names = FALSE) } print("Converting matrix to Seurat object") # read matrix from file DS <- Read10X(data.dir = pathIN) # change feature names to ensemble ID DS@Dimnames[[1]] <- as.character(features$ensID) # convert to seurat object DS <- CreateSeuratObject(DS, project = DSname) ### add metadata based on datasets file, necessary for distinguishing merged datasets and later visualizations print("Adding metadata") if(file.exists(paste0(HOME, projectName, "/datasets/datasets")) == TRUE) { datasets <- read.table(paste0(HOME, projectName, "/datasets/datasets"), sep = "\t", header = TRUE) } else { print("Datasets file not available, aborting.") ; quit(status = 14)} # add information for single dataset if (levels(DS@meta.data$orig.ident) %in% datasets$dataset) { for (i in colnames(datasets)) { DS@meta.data[i] <- datasets[match(DS@meta.data$orig.ident, datasets$dataset),i] } # end of for loop } # end of if statement # add information for multiple (merged) datasets if (str_detect(head(rownames(DS@meta.data), 1), "-") == TRUE) { DS@meta.data$order <- as.factor(str_extract(string = rownames(DS@meta.data), "\\d")) # detect which datasets are being analyzed and subset the datasets table a <- which(str_detect(levels(DS@meta.data$orig.ident), as.character(datasets$dataset)) == TRUE) b <- datasets[a,] rownames(b) <- seq(1, length(rownames(b))) # add information to DS@meta.data (order is assumed by name, e.g. D6, E2, E12, F5, ...) 
for (i in colnames(datasets)) { DS@meta.data[i] <- b[match(DS@meta.data$order, rownames(b)), i] } # end of for loop } # end of if statement # write R dataset to .Rds file print("Saving to file") pathOUT <- paste0(HOME, projectName, "/analysis/", DSname, "_", Sys.Date(), "/") dir.create(paste0(pathOUT, "RDS"), recursive = TRUE) info <- data.frame("parameter" = c("type", "format"), "value" = c(type, format)) write.table(info, file = paste0(pathOUT, "RDS/setupInfo"), sep = "\t") saveRDS(DS, paste0(pathOUT, "RDS/", DSname, ".Rds")) # produce table to set values for image generation parameters <- c("pointSize","position","binSize","color","fill","shape","alpha","titleSize","axisTitle","axisXsize", "axisXangle","axisXhjust","axisXvjust","axisYsize","axisYangle","axisYhjust","axisYvjust","axisLineSize", "axisTicks","legendPositionX","legendPositionY","legendTitle","legendText","legendKeySize","legendBgrdCol", "guideSize","fileType","dpi","width","height") boxplot <- c(1,"jitter",NA,"grey50","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) smoothDensity <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, 0.7,0.7,0,35,35,"white",5,"png",350,10,10) scatter <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) hvgPlot <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "none","none",0,35,35,"white",5,"png",350,10,10) PCAplot <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) PCAloadings <- c(3,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) tSNE <- c(2,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) UMAP <- 
c(2,"jitter",NA,"dataset","dataset",NA,0.5,35,0,0,0,0.5,0,0,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) DM <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) barplot <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) sankey <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) DEheatmap <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) GO <- c(1,"jitter",NA,"dataset","dataset",NA,0.5,35,35,35,0,0.5,0,35,0,0,0.5,1,1, "right","none",0,35,35,"white",5,"png",350,10,10) imageStats <- data.frame(parameters = parameters, boxplot = boxplot, smoothDensity = smoothDensity, scatter = scatter, hvgPlot=hvgPlot, PCAplot=PCAplot, PCAloadings=PCAloadings, tSNE=tSNE, UMAP=UMAP, DM=DM, barplot=barplot, sankey=sankey, DEheatmap=DEheatmap, GO=GO) dir.create(paste0(pathOUT, "Input")) write.table(imageStats, file = paste0(pathOUT, "Input/imageStats.csv"), sep = ",", row.names = FALSE) print(paste("Finished. Seurat object for", DSname, "has been created and stored in", pathOUT)) } # end of else, mtx format sessionInfo() quit(status = 0)
51c08b5d18e7c8f78891e9e354c9d5c6c4ee2111
3a764372a998e48807ac0fb7c2c2eaa91324ed35
/Plot5.R
10763038795ca5e4568455a3e8d56adf2046363e
[]
no_license
gnupate/EDA-Assignment2
f8bb0c00527948bdf614d2ccd0f1908621652ab7
4748c0c287aa35c46a8f914fd5545db186989064
refs/heads/master
2020-04-19T21:57:36.532058
2015-07-25T23:54:42
2015-07-25T23:54:42
39,645,095
0
0
null
null
null
null
UTF-8
R
false
false
1,252
r
Plot5.R
# Plot total motor-vehicle PM2.5 emissions in Baltimore City (fips 24510)
# for 1999/2002/2005/2008 and save the figure as plot5.png.
# The data file is NOT stored in the repo; it must sit in the working
# directory (unzipped) before this script is run.
library(dplyr)

if ("summarySCC_PM25.rds" %in% dir()) {
  # data file is present -- load it and proceed
  nei <- readRDS("summarySCC_PM25.rds")

  # ON-ROAD sources are taken as "motor vehicles" here (road vehicles only,
  # not boats, trains, planes, etc.)
  baltimore_vehicles <- filter(nei, type == "ON-ROAD", fips == 24510)

  # one total per survey year
  plot_years <- c(1999, 2002, 2005, 2008)
  totals <- vapply(
    plot_years,
    function(yr) colSums(select(filter(baltimore_vehicles, year == yr), Emissions)),
    numeric(1)
  )
  df <- data.frame(year = plot_years, emissions = totals)

  # write the line plot to a png graphics device
  png(file = "plot5.png")
  plot(df, main = "Annual Vehicle Related Emmissions across Baltimore", type = "l")
  dev.off()
} else {
  # data file missing -- tell the user instead of erroring out
  print("Expected datafile not found, please ensure it is in the current directory and retry")
}
2e2f73cb5450a1680db28fbb42452720b8912e03
a2bcf83193959276f7bd8db656db53e58e875a26
/steal-the-moon/stealTheMoonSim/man/stealTheMoonSim.Rd
bfeb2828e28c48bad9c2ed6781c44e4b2435999e
[]
no_license
PieceMaker/presentation-lsdg-docker
2b3c22fafdfe390eb9e60499c9083b4c1d461065
09aa739cfad27583d344cbae6a653dcb5700ce99
refs/heads/master
2020-04-21T06:16:15.830018
2019-02-09T05:03:32
2019-02-09T05:03:32
169,361,528
3
0
null
null
null
null
UTF-8
R
false
true
503
rd
stealTheMoonSim.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stealTheMoonSim.R \name{stealTheMoonSim} \alias{stealTheMoonSim} \title{Simple example simulation for use in demonstrating rminions package.} \usage{ stealTheMoonSim(fuelParams, solarFlareParams, flareSurfaceParams, flareAdditionalFuelParams, surfaceFuelDependenceCopParams, asteroidParams, asteroidAdditionalFuelParams) } \description{ Simple example simulation for use in demonstrating rminions package. }
24dbc2ef30113949e62e3fa595985e261fb3d7b4
dd0969134fdc71e65bcb8380ac03a14cbc680cc2
/R/getFlexFile.R
0d761d91bda332aa43d52806a06aa15f0176b9ae
[]
no_license
cran/icesDatras
ae26fd615ad95aecb37d9f330bbef65d8985cd09
bb39967b7c89a6734ceb314e88248ac230e54235
refs/heads/master
2023-05-26T11:16:22.608725
2023-05-08T08:40:05
2023-05-08T08:40:05
78,541,736
0
0
null
null
null
null
UTF-8
R
false
false
1,644
r
getFlexFile.R
#' Get Flex File
#'
#' Get all information in HH plus estimates of Door Spread, Wing Spread and
#' Swept Area per square km. Only available for NS-IBTS survey.
#'
#' @param survey the survey acronym, e.g. NS-IBTS.
#' @param year the year of the survey, e.g. 2010.
#' @param quarter the quarter of the year the survey took place, i.e. 1, 2, 3 or 4.
#'
#' @return A data frame.
#'
#' @seealso
#' \code{\link{getDATRAS}} supports querying many years and quarters in one function call.
#'
#' \code{\link{getHHdata}} get haul data
#'
#' \code{\link{icesDatras-package}} gives an overview of the package.
#'
#' @author Adriana Villamor.
#'
#' @examples
#' \dontrun{
#' flex <- getFlexFile(survey = "NS-IBTS", year = 2020, quarter = 1)
#' str(flex)
#'
#' # error checking examples:
#' flex <- getFlexFile(survey = "NS_IBTS", year = 2016, quarter = 1)
#' flex <- getFlexFile(survey = "NS-IBTS", year = 2030, quarter = 1)
#' flex <- getFlexFile(survey = "NS-IBTS", year = 2016, quarter = 6)
#' }
#' @export
getFlexFile <- function(survey, year, quarter) {

  # validate survey, year and quarter in turn; return FALSE as soon as
  # any check fails (later checks skip the parts already validated)
  if (!checkSurveyOK(survey)) {
    return(FALSE)
  }
  if (!checkSurveyYearOK(survey, year, checksurvey = FALSE)) {
    return(FALSE)
  }
  if (!checkSurveyYearQuarterOK(survey, year, quarter,
                                checksurvey = FALSE, checkyear = FALSE)) {
    return(FALSE)
  }

  # compose the DATRAS web-service query, fetch the response and parse it
  # into a data frame
  query <- sprintf(
    "https://datras.ices.dk/WebServices/DATRASWebService.asmx/getFlexFile?survey=%s&year=%i&quarter=%i",
    survey, year, quarter)
  parseDatras(readDatras(query))
}
c35d30d451bdfd98a2c152b3ef2bb25dfa726e90
e67c99cc4146142e0b9210a7a908e0b7998f2cef
/man/race.describe.Rd
58d0916f96486ead545a967caa34798218511bd2
[]
no_license
cran/race
888998a1a9ad03c6aca6928f84d2bf8109c8fce6
c1d8cbcfe91ff2e0c34b068f8522da612eea6ec9
refs/heads/master
2016-09-16T10:05:42.835799
2012-04-05T00:00:00
2012-04-05T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
3,508
rd
race.describe.Rd
% ------------------------------------------------------- -*- mode: Rd; -*- % % race.describe.Rd Describe a candidate % % ------------------------------------------------------------------------- % % ========================================================================= % % Racing methods for the selection of the best % % ------------------------------------------------------------------------- % % Copyright (C) 2003 Mauro Birattari % % ========================================================================= % % This program is free software; you can redistribute it and/or modify it % % under the terms of the GNU General Public License as published by the % % Free Software Foundation; either version 2 of the License, or (at your % % option) any later version. % % % % This program is distributed in the hope that it will be useful, but % % WITHOUT ANY WARRANTY; without even the implied warranty of % % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU % % General Public License for more details. % % % % You should have received a copy of the GNU General Public License along % % with this program; if not, write to the Free Software Foundation, Inc., % % 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. % % ========================================================================= % % ========================================================================= % % Mauro BIRATTARI % % IRIDIA - ULB, CP 194/6 % % Av. F. D. Roosevelt 50 mbiro@ulb.ac.be % % 1050 Brussels, Belgium http://iridia.ulb.ac.be/~mbiro % % ========================================================================= % \name{race.describe} \alias{race.describe} \title{Describe a candidate} \description{The function \preformatted{race.describe(candidate,data)} may be provided by the user for giving a description of a candidate. It's definition has to be given in the same file in which the functions \code{race.wrapper} and \code{race.info} are defined. 
The name of such file has to be passed as first argument to the function \code{race}. } \arguments{ \item{candidate}{The candidate for which a description is to be returned.} \item{data}{It is the object of type \code{list} (possibly empty) returned by \code{\link{race.init}}, if the latter is defined by the user.} } \value{The function \code{race.describe} should return an object describing the selected candidate. Such object will be printed by \code{race} through the function \code{print}.} \examples{ # Please have a look at the function `race.describe' # defined in the file `example-wrapper.R': local({ source(file.path(system.file(package="race"), "examples","example-wrapper.R"),local=TRUE); print(race.describe)}) } \author{Mauro Birattari} \seealso{\code{\link{race}}, \code{\link{race.init}}} \keyword{misc}
3479e8484ff7abf4a0cf2aee41d3be7a779d45db
c4a362b0a4dbf892dc257e25e5df07f8ff9a7c2f
/R/centerVAR1data.r
58199b726c840d46a13079a8282fe82446276928
[]
no_license
cran/ragt2ridges
232f816a2817a9a31f86d4ea815fb43381b613b2
5c4c2ebb6adfb5d3e955ca3df6aca80596f43143
refs/heads/master
2020-12-24T07:41:30.024419
2020-01-28T14:30:02
2020-01-28T14:30:02
58,468,203
0
1
null
null
null
null
UTF-8
R
false
false
1,000
r
centerVAR1data.r
centerVAR1data <- function(Y){ ######################################################################## # # DESCRIPTION: # within-individual, covariate-wise centering of the data. # # ARGUMENTS: # -> Y : Three-dimensional array containing the data. The # first, second and third dimensions correspond to # covariates, time and samples, respectively. The # data are assumed to centered covariate-wise. # # DEPENDENCIES: # ... # # NOTES: # ... # ######################################################################## # input checks if (!is(Y, "array")){ stop("Input (Y) is of wrong class.") } if (length(dim(Y)) != 3){ stop("Input (Y) is of wrong dimensions: either covariate, time or sample dimension is missing.") } # centering for (i in 1:dim(Y)[3]){ Y[1:dim(Y)[1], 1:dim(Y)[2],i] <- sweep(Y[,,i,drop=TRUE], 1, apply(Y[,,i,drop=TRUE], 1, mean, na.rm=TRUE)) } return(Y) }
05531cb116fe1e6fe565ef22b78a31fa1a5d9fc2
f0b1bc9d37d67113311f9b09dd559b9abbec166f
/inst/tests/tReadVar3.R
22fc1fca62e848a3ec8be0de5fa8d12311a5d2ee
[]
no_license
GAMS-dev/gdxrrw-miro
b0a8f28c730eaa02fb63887a6832d861f48914c5
91486406f60986429b385cf37b2741648ac5b2e2
refs/heads/master
2023-04-03T17:53:31.082850
2023-03-15T17:24:04
2023-03-15T17:24:04
219,473,354
2
1
null
2021-01-28T09:19:28
2019-11-04T10:19:39
C
UTF-8
R
false
false
27,563
r
tReadVar3.R
#### test rgdx reading a 3-dim variable #### test form=['sparse','full'] X [filtered,unfiltered] #### ['l','m','lo','up','s'] #### wanted lists produced with dump("listName",file="") if (! require(gdxrrwMIRO)) stop ("gdxrrw package is not available") if (0 == igdx(silent=TRUE)) stop ("the gdx shared library has not been loaded") source ("chkSame.R") reqIdent <- TRUE iUels <- c("i1", "i2") iCard <- length(iUels) jUels <- c("j1", "j2") jCard <- length(jUels) kUels <- c("k1", "k2") kCard <- length(kUels) fields <- c('l','m','lo','up','s') nFields <- length(fields) domains <- c("i","j","k") domainsf <- c("i","j","k","_field") cart <- list(iUels,jUels,kUels) cartN <- list('i'=iUels,'j'=jUels,'k'=kUels) cartf <- list(iUels,jUels,kUels,fields) cartfN <- list('i'=iUels,'j'=jUels,'k'=kUels,'_field'=fields) lev <- 1 mar <- 2 low <- 3 upp <- 4 sca <- 5 tryCatch({ print ("testing rgdx on variable reads") rgdx('?') fnIn <- "tReadVar3.gdx" if (! file_test ('-f', fnIn)) { stop (paste("FAIL: File", fnIn, "does not exist")) } ### ---------- reading form=sparse, no filter # all t <- matrix(c( 1,1,1,lev, 0 ,1,1,1,mar, 0 ,1,1,1,low, 0 ,1,1,1,upp, 525 ,1,1,1,sca, 1 ,1,1,2,lev, 1 ,1,1,2,mar, 0.25 ,1,1,2,low, 0 ,1,1,2,upp, Inf ,1,1,2,sca, 1 ,1,2,1,lev, 10 ,1,2,1,mar, 0 ,1,2,1,low,-Inf ,1,2,1,upp, Inf ,1,2,1,sca, 1 ,1,2,2,lev, 11 ,1,2,2,mar, 0.25 ,1,2,2,low, 100 ,1,2,2,upp, Inf ,1,2,2,sca, 1 ,2,1,1,lev, 100 ,2,1,1,mar, 0 ,2,1,1,low, 0 ,2,1,1,upp, 0 ,2,1,1,sca, 1 ,2,1,2,lev, 101 ,2,1,2,mar, 0 ,2,1,2,low, 0 ,2,1,2,upp, Inf ,2,1,2,sca, 1 ,2,2,1,lev, 110 ,2,2,1,mar, 0 ,2,2,1,low, 0 ,2,2,1,upp, Inf ,2,2,1,sca, 10 ,2,2,2,lev, 6 ,2,2,2,mar, 0 ,2,2,2,low, 6 ,2,2,2,upp, 6 ,2,2,2,sca, 1 ), nrow=40, ncol=5, byrow=T) xwantA <- list(name="x", type="variable", dim=3L, val=t, form="sparse", uels=list(iUels,jUels,kUels,fields), domains=domainsf, domInfo="full", field='all', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',field='alL')) chk <- chkRgdxRes 
(x, xwantA, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',unfiltered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='sparse',field='alL'),squeeze=F) chk <- chkRgdxRes (x, xwantA, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',unfiltered,squeeze=F) failed:",chk$msg)) } # level xwantL <- list(name="x", type="variable", dim=3L, val=matrix(c( 1,1,2, 1 ,1,2,1, 10 ,1,2,2, 11 ,2,1,1, 100 ,2,1,2, 101 ,2,2,1, 110 ,2,2,2, 6), nrow=7, ncol=4, byrow=T), form="sparse", uels=list(iUels,jUels,kUels), domains=domains, domInfo="full", field='l', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',field='L')) chk <- chkRgdxRes (x, xwantL, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',unfiltered) failed:",chk$msg)) } xwantL$val <- matrix(c( 1,1,1, 0 ,1,1,2, 1 ,1,2,1, 10 ,1,2,2, 11 ,2,1,1, 100 ,2,1,2, 101 ,2,2,1, 110 ,2,2,2, 6), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',field='L'),squeeze=F) chk <- chkRgdxRes (x, xwantL, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',unfiltered,squeeze=F) failed:",chk$msg)) } # marginal xwantM <- list(name="x", type="variable", dim=3L, val=matrix(c( 1,1,2, .25 ,1,2,2, .25), nrow=2, ncol=4, byrow=T), form="sparse", uels=list(iUels,jUels,kUels), domains=domains, domInfo="full", field='m', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',field='M')) chk <- chkRgdxRes (x, xwantM, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',unfiltered) failed:",chk$msg)) } xwantM$val <- matrix(c( 1,1,1, 0 ,1,1,2, .25 ,1,2,1, 0 ,1,2,2, .25 ,2,1,1, 0 ,2,1,2, 0 ,2,2,1, 0 ,2,2,2, 0 ), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',field='M'),squeeze=F) chk <- chkRgdxRes (x, xwantM, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',unfiltered,squeeze=F) failed:",chk$msg)) } # lower xwantLo <- list(name="x", 
type="variable", dim=3L, val=matrix(c( 1,2,1, -Inf ,1,2,2, 100 ,2,2,2, 6), nrow=3, ncol=4, byrow=T), form="sparse", uels=list(iUels,jUels,kUels), domains=domains, domInfo="full", field='lo', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',field='lo')) chk <- chkRgdxRes (x, xwantLo, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',unfiltered) failed:",chk$msg)) } xwantLo$val <- matrix(c( 1,1,1, 0 ,1,1,2, 0 ,1,2,1, -Inf ,1,2,2, 100 ,2,1,1, 0 ,2,1,2, 0 ,2,2,1, 0 ,2,2,2, 6), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',field='lo'),squeeze=F) chk <- chkRgdxRes (x, xwantLo, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',unfiltered,squeeze=F) failed:",chk$msg)) } # upper xwantUp <- list(name="x", type="variable", dim=3L, val=matrix(c( 1,1,1, 525 ,2,1,1, 0 ,2,2,2, 6), nrow=3, ncol=4, byrow=T), form="sparse", uels=list(iUels,jUels,kUels), domains=domains, domInfo="full", field='up', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',field='up')) chk <- chkRgdxRes (x, xwantUp, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',unfiltered) failed:",chk$msg)) } xwantUp$val <- matrix(c( 1,1,1, 525 ,1,1,2, Inf ,1,2,1, Inf ,1,2,2, Inf ,2,1,1, 0 ,2,1,2, Inf ,2,2,1, Inf ,2,2,2, 6), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',field='up'),squeeze=F) chk <- chkRgdxRes (x, xwantUp, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',unfiltered,squeeze=F) failed:",chk$msg)) } # scale xwantS <- list(name="x", type="variable", dim=3L, val=matrix(c( 2,2,1, 10), nrow=1, ncol=4, byrow=T), form="sparse", uels=list(iUels,jUels,kUels), domains=domains, domInfo="full", field='s', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',field='s')) chk <- chkRgdxRes (x, xwantS, reqIdent=reqIdent) if (!chk$same) { stop (paste("test 
rgdx(x,'s',unfiltered) failed:",chk$msg)) } xwantS$val <- matrix(c( 1,1,1, 1 ,1,1,2, 1 ,1,2,1, 1 ,1,2,2, 1 ,2,1,1, 1 ,2,1,2, 1 ,2,2,1, 10 ,2,2,2, 1), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',field='s'),squeeze=F) chk <- chkRgdxRes (x, xwantS, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'s',unfiltered,squeeze=F) failed:",chk$msg)) } ### ---------- reading form=sparse, filtered # all f <- list(c('i2'),jUels,kUels) t <- matrix(c( 1,1,1,lev, 100 ,1,1,1,mar, 0 ,1,1,1,low, 0 ,1,1,1,upp, 0 ,1,1,1,sca, 1 ,1,1,2,lev, 101 ,1,1,2,mar, 0 ,1,1,2,low, 0 ,1,1,2,upp, Inf ,1,1,2,sca, 1 ,1,2,1,lev, 110 ,1,2,1,mar, 0 ,1,2,1,low, 0 ,1,2,1,upp, Inf ,1,2,1,sca, 10 ,1,2,2,lev, 6 ,1,2,2,mar, 0 ,1,2,2,low, 6 ,1,2,2,upp, 6 ,1,2,2,sca, 1 ), nrow=20, ncol=5, byrow=T) xwantA <- list(name="x", type="variable", dim=3L, val=t, form="sparse", uels=list(f[[1]],f[[2]],f[[3]],fields), domains=domainsf, domInfo="filtered", field='all', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='all')) chk <- chkRgdxRes (x, xwantA, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='all'),squeeze=F) chk <- chkRgdxRes (x, xwantA, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',filtered,squeeze=F) failed:",chk$msg)) } # level f <- list(c('i2'),jUels,kUels) xwantL <- list(name="x", type="variable", dim=3L, val=matrix(c( 1,1,1, 100 ,1,1,2, 101 ,1,2,1, 110 ,1,2,2, 6), nrow=4, ncol=4, byrow=T), form="sparse", uels=f, domains=domains, domInfo="filtered", field='l', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',uels=f)) chk <- chkRgdxRes (x, xwantL, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='sparse',uels=f),squeeze=F) chk <- chkRgdxRes (x, xwantL, 
reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',filtered,squeeze=F) failed:",chk$msg)) } # marginal f <- list(iUels,jUels,kUels) xwantM <- list(name="x", type="variable", dim=3L, val=matrix(c( 1,1,2, .25 ,1,2,2, .25), nrow=2, ncol=4, byrow=T), form="sparse", uels=f, domains=domains, domInfo="filtered", field='m', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='M')) chk <- chkRgdxRes (x, xwantM, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',filtered) failed:",chk$msg)) } xwantM$val <- matrix(c( 1,1,1, 0 ,1,1,2, .25 ,1,2,1, 0 ,1,2,2, .25 ,2,1,1, 0 ,2,1,2, 0 ,2,2,1, 0 ,2,2,2, 0 ), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='M'),squeeze=F) chk <- chkRgdxRes (x, xwantM, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',filtered,squeeze=F) failed:",chk$msg)) } # lower f <- list(iUels,c('j2'),kUels) xwantLo <- list(name="x", type="variable", dim=3L, val=matrix(c( 1,1,1, -Inf ,1,1,2, 100 ,2,1,2, 6), nrow=3, ncol=4, byrow=T), form="sparse", uels=f, domains=domains, domInfo="filtered", field='lo', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='lo')) chk <- chkRgdxRes (x, xwantLo, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',filtered) failed:",chk$msg)) } xwantLo$val <- matrix(c( 1,1,1, -Inf ,1,1,2, 100 ,2,1,1, 0 ,2,1,2, 6), nrow=4, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='lo'),squeeze=F) chk <- chkRgdxRes (x, xwantLo, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',filtered,squeeze=F) failed:",chk$msg)) } # upper f <- list(iUels,jUels,kUels) xwantUp <- list(name="x", type="variable", dim=3L, val=matrix(c( 1,1,1, 525 ,2,1,1, 0 ,2,2,2, 6), nrow=3, ncol=4, byrow=T), form="sparse", uels=f, domains=domains, domInfo="filtered", field='up', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x 
<- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='UP')) chk <- chkRgdxRes (x, xwantUp, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',filtered) failed:",chk$msg)) } xwantUp$val <- matrix(c( 1,1,1, 525 ,1,1,2, +Inf ,1,2,1, +Inf ,1,2,2, +Inf ,2,1,1, 0 ,2,1,2, +Inf ,2,2,1, +Inf ,2,2,2, 6), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',uels=f,field='UP'),squeeze=F) chk <- chkRgdxRes (x, xwantUp, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',filtered,squeeze=F) failed:",chk$msg)) } # scale f <- list(iUels,jUels,kUels) xwantS <- list(name="x", type="variable", dim=3L, val=matrix(c( 2,2,1, 10), nrow=1, ncol=4, byrow=T), form="sparse", uels=f, domains=domains, domInfo="filtered", field='s', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='sparse',field='S',uels=f)) chk <- chkRgdxRes (x, xwantS, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'S',filtered) failed:",chk$msg)) } xwantS$val <- matrix(c( 1,1,1, 1 ,1,1,2, 1 ,1,2,1, 1 ,1,2,2, 1 ,2,1,1, 1 ,2,1,2, 1 ,2,2,1, 10 ,2,2,2, 1), nrow=8, ncol=4, byrow=T) x <- rgdx(fnIn,list(name='x',form='sparse',field='S',uels=f),squeeze=F) chk <- chkRgdxRes (x, xwantS, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'S',filtered,squeeze=F) failed:",chk$msg)) } ### ---------- reading form=full, no filter # all v <- array(0,c(iCard,jCard,kCard,nFields),dimnames=cartfN) for (i in 1:iCard) { for (j in 1:jCard) { for (k in 1:kCard) { v[i,j,k,'l'] <- 100 * (i-1) + 10 * (j-1) + (k-1) v[i,j,k,'up'] <- Inf v[i,j,k,'s'] <- 1 } } } v['i2','j2','k2',c('l','lo','up')] <- 6 v['i1',c('j1','j2'),'k2','m'] <- 0.25 v['i1','j2','k1','lo'] <- -Inf v['i1','j2','k2','lo'] <- 100 v['i1','j1','k1','up'] <- 525 v['i2','j1','k1','up'] <- 0 v['i2','j2','k1','s'] <- 10 xwantA <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=cartfN, domains=domainsf, domInfo="full", field='all', varTypeText='positive', 
typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',field='all')) chk <- chkRgdxRes (x, xwantA, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',full,unfiltered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',field='all'),squeeze=F) chk <- chkRgdxRes (x, xwantA, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',full,unfiltered,squeeze=F) failed:",chk$msg)) } # level v <- array(0,c(iCard,jCard,kCard),dimnames=cartN) for (i in 1:iCard) { for (j in 1:jCard) { for (k in 1:kCard) { v[i,j,k] <- 100 * (i-1) + 10 * (j-1) + (k-1) } } } v['i2','j2','k2'] <- 6 xwantL <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=cartN, domains=domains, domInfo="full", field='l', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full')) chk <- chkRgdxRes (x, xwantL, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',full,unfiltered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full'),squeeze=F) chk <- chkRgdxRes (x, xwantL, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',full,unfiltered,squeeze=F) failed:",chk$msg)) } # marginal v <- array(0,c(iCard,jCard,kCard),dimnames=cartN) v['i1','j1',2] <- .25 v['i1','j2',2] <- .25 xwantM <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=cartN, domains=domains, domInfo="full", field='m', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',field='m')) chk <- chkRgdxRes (x, xwantM, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',full,unfiltered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',field='m'),squeeze=F) chk <- chkRgdxRes (x, xwantM, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',full,unfiltered,squeeze=F) failed:",chk$msg)) } # lower v <- array(0,c(iCard,jCard,kCard),dimnames=cartN) v['i1','j2','k1'] <- -Inf v['i1','j2','k2'] <- 100 
v['i2','j2','k2'] <- 6 xwantLo <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=cartN, domains=domains, domInfo="full", field='lo', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',field='lo')) chk <- chkRgdxRes (x, xwantLo, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',full,unfiltered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',field='lo'),squeeze=F) chk <- chkRgdxRes (x, xwantLo, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',full,unfiltered,squeeze=F) failed:",chk$msg)) } # upper v <- array(+Inf,c(iCard,jCard,kCard),dimnames=cartN) v['i1','j1','k1'] <- 525 v['i2','j1','k1'] <- 0 v['i2','j2','k2'] <- 6 xwantUp <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=cartN, domains=domains, domInfo="full", field='up', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',field='up')) chk <- chkRgdxRes (x, xwantUp, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',full,unfiltered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',field='up'),squeeze=F) chk <- chkRgdxRes (x, xwantUp, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',full,unfiltered,squeeze=F) failed:",chk$msg)) } # scale v <- array(1,c(iCard,jCard,kCard),dimnames=cartN) v[2,2,1] <- 10 xwantS <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=cartN, domains=domains, domInfo="full", field='s', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',field='s')) chk <- chkRgdxRes (x, xwantS, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'s',full,unfiltered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',field='s'),squeeze=F) chk <- chkRgdxRes (x, xwantS, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'s',full,unfiltered,squeeze=F) failed:",chk$msg)) } ### ---------- 
reading form=full, filtered # all f <- list(c('i2'),jUels,kUels) dnames <- f ; dnames[[4]] <- fields ; names(dnames) <- domainsf v <- array(0,c(1,jCard,kCard,nFields),dimnames=dnames) for (j in 1:jCard) { for (k in 1:kCard) { v['i2',j,k,'l'] <- 100 + 10 * (j-1) + (k-1) v['i2',j,k,'up'] <- Inf v['i2',j,k,'s'] <- 1 } } v['i2','j2','k2',c('l','lo','up')] <- 6 v['i2','j1','k1','up'] <- 0 v['i2','j2','k1','s'] <- 10 xwantA <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=dnames, domains=domainsf, domInfo="filtered", field='all', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',field='all',uels=f)) chk <- chkRgdxRes (x, xwantA, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',full,filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',field='all',uels=f),squeeze=F) chk <- chkRgdxRes (x, xwantA, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'all',full,filtered,squeeze=F) failed:",chk$msg)) } # level f <- list(c('i2'),jUels,kUels) dnames <- f ; names(dnames) <- domains v <- array(0,c(1,jCard,kCard),dimnames=dnames) for (j in 1:jCard) { for (k in 1:kCard) { v[1,j,k] <- 100 + 10 * (j-1) + (k-1) } } v['i2','j2','k2'] <- 6 xwantL <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=dnames, domains=domains, domInfo="filtered", field='l', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',uels=f)) chk <- chkRgdxRes (x, xwantL, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',full,filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',uels=f),squeeze=F) chk <- chkRgdxRes (x, xwantL, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'L',full,filtered,squeeze=F) failed:",chk$msg)) } # marginal f <- cart dnames <- f ; names(dnames) <- domains v <- array(0,c(iCard,jCard,kCard),dnames) v['i1',jUels,'k2'] <- 0.25 xwantM <- list(name="x", type="variable", 
dim=3L, val=v, form="full", uels=dnames, domains=domains, domInfo="filtered", field='m', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='M')) chk <- chkRgdxRes (x, xwantM, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',full,filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='M'),squeeze=F) chk <- chkRgdxRes (x, xwantM, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'M',full,filtered,squeeze=F) failed:",chk$msg)) } # lower f <- list(iUels,c('j2'),kUels) dnames <- f ; names(dnames) <- domains v <- array(0,c(iCard,1,kCard),dimnames=dnames) v['i1','j2','k1'] <- -Inf v['i1','j2','k2'] <- 100 v['i2','j2','k2'] <- 6 xwantLo <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=dnames, domains=domains, domInfo="filtered", field='lo', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='lo')) chk <- chkRgdxRes (x, xwantLo, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',full,filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='lo'),squeeze=F) chk <- chkRgdxRes (x, xwantLo, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'lo',full,filtered,squeeze=F) failed:",chk$msg)) } # upper f <- cart dnames <- f ; names(dnames) <- domains v <- array(Inf,c(iCard,jCard,kCard),dimnames=dnames) v['i1','j1','k1'] <- 525 v['i2','j1','k1'] <- 0 v['i2','j2','k2'] <- 6 xwantUp <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=dnames, domains=domains, domInfo="filtered", field='up', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='up')) chk <- chkRgdxRes (x, xwantUp, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',full,filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='up'),squeeze=F) chk <- 
chkRgdxRes (x, xwantUp, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'up',full,filtered,squeeze=F) failed:",chk$msg)) } # scale f <- cart dnames <- f ; names(dnames) <- domains v <- array(1,c(iCard,jCard,kCard),dimnames=dnames) v['i2','j2','k1'] <- 10 xwantS <- list(name="x", type="variable", dim=3L, val=v, form="full", uels=dnames, domains=domains, domInfo="filtered", field='s', varTypeText='positive', typeCode=GMS_VARTYPE$POSITIVE) x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='s')) chk <- chkRgdxRes (x, xwantS, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'s',full,filtered) failed:",chk$msg)) } x <- rgdx(fnIn,list(name='x',form='full',uels=f,field='s'),squeeze=F) chk <- chkRgdxRes (x, xwantS, T, reqIdent=reqIdent) if (!chk$same) { stop (paste("test rgdx(x,'s',full,filtered,squeeze=F) failed:",chk$msg)) } print ("test of rgdx on variable reads passed") TRUE ## all tests passed: return TRUE }, error = function(ex) { print ("test of rgdx on variable reads failed"); print(ex) ; FALSE } )