blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
be9992d5fc5ff7890d48fe6dcfda38815f7d163c
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/iotools/examples/readAsRaw.Rd.R
66c254dc7b84318be5b18abddc4331f9df98452b
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
337
r
readAsRaw.Rd.R
## Runnable example extracted from the iotools help page for readAsRaw
## ("Read binary data in as raw"): round-trips a numeric model matrix
## through a binary file and re-parses it with mstrsplit().
library(iotools)

### Name: readAsRaw
### Title: Read binary data in as raw
### Aliases: readAsRaw

### ** Examples

# Build a numeric model matrix from the built-in iris data set.
mm <- model.matrix(~., iris)

# Serialize it to a binary file using iotools' output format.
f <- file("iris_mm.io", "wb")
writeBin(as.output(mm), f)
close(f)

# Read the file back as raw bytes and re-parse into a numeric matrix;
# nsep="\t" marks the row-name separator written by as.output().
m <- mstrsplit(readAsRaw("iris_mm.io"), type="numeric", nsep="\t")

# Original and round-tripped matrices should match.
head(mm)
head(m)

# Remove the temporary file.
unlink("iris_mm.io")
1cbe5c4bdebb040b0a86b7b115b1e8627e3c1308
3252907f13eb34741fb51b7288abf43bebecc312
/plotSeqsProbHeatmap.r
e2f45b67b9158ad3e57823a6de1ccc152ff26aea
[]
no_license
NarlikarLab/exoDIVERSITY
5ef3d8570ac22629009fcec25021351b60137155
d646fc655d0cf686a11ce29de4731f32fb7a4f6d
refs/heads/master
2023-08-14T01:34:11.678808
2021-10-05T10:35:49
2021-10-05T10:35:49
310,792,452
0
0
null
null
null
null
UTF-8
R
false
false
2,223
r
plotSeqsProbHeatmap.r
## plotSeqsProbHeatmap.r
## Draws a heatmap of per-sequence probabilities (rows = sequences,
## columns = probability columns), with sequences stacked by their mode
## label, separator lines between mode bands/columns, and a gradient legend.
##
## Usage: Rscript plotSeqsProbHeatmap.r <infofile> <inpfile> <outfile>
##   infofile - table whose 2nd column holds each sequence's mode label
##   inpfile  - table of probabilities; columns 2..end are the values
##   outfile  - PNG file to create

library("RColorBrewer")
library("plotfunctions")

args = commandArgs(trailingOnly = TRUE)
infofile = args[1]
inpfile = args[2]
outfile = args[3]

seqprobs = read.table(inpfile)
info = read.table(infofile)

# Distinct mode labels, and the full integer range they span.
modes = as.integer(rownames(table(info[, 2])))
modeOrder = seq(min(modes), max(modes))

# Prepend the mode label to the probabilities, then stack the sequences
# mode by mode in increasing mode order.
lseqs = cbind(info[, 2], seqprobs[, 2:dim(seqprobs)[2]])
newdf = lseqs[which(lseqs[, 1] == modeOrder[1]), ]
rmo = max(modes):min(modes)
for (i in 2:length(modeOrder)) {
  newdf = rbind(newdf, lseqs[which(lseqs[, 1] == modeOrder[i]), ])
}

# Drop the label column; order(rmo) reverses the columns since rmo is a
# decreasing sequence.
df = newdf[, 2:dim(newdf)[2]]
ord = order(rmo)
df = df[, ord]

shape = dim(df)
lenOfSeq = shape[2]
totalSeqs = shape[1]
tdf = t(df)

w = 4   # figure width in inches
h = 10  # figure height in inches

# Per-mode sequence counts (named by mode label) for band boundaries.
modes = table(newdf[, 1])
pal = colorRampPalette(c("white", "gray10"))(256)

png(outfile, w = w, h = h, units = "in", res = 600)
par(xpd = TRUE, cex.lab = 0.7, cex.axis = 0.7, tcl = -0.05,
    mgp = c(0.5, 0.2, 0), las = 1)
image(1:dim(tdf)[1], 1:dim(tdf)[2], tdf, col = pal, xlab = "", ylab = "",
      xaxt = "n", yaxt = "n", zlim = c(0, 1))
axis(1, labels = modeOrder, at = seq(1, length(modeOrder)), col = NA,
     col.ticks = 1, cex.axis = 0.7)

# Horizontal separators between mode bands, plus y-axis label positions
# centred on each band.
ylabels = c()
ylabelposns = c()
j = (length(modeOrder) - 1)
for (i in 1:(length(modeOrder) - 1)) {
  if (i == 1) {
    yval = strtoi(modes[[as.character(modeOrder[i])]])
    mid = yval / 2
    lines(c(0.5, lenOfSeq + 0.5), c(yval, yval), lwd = 0.8)
    #text(x= -0.6,y=mid,labels=j,cex=0.6,col="black")
    ylabels[i] = j
    ylabelposns[i] = mid
  }
  else {
    mid = yval
    yval = yval + strtoi(modes[[as.character(modeOrder[i])]])
    mid = (mid + yval) / 2
    lines(c(0.5, lenOfSeq + 0.5), c(yval, yval), lwd = 0.8)
    #text(x= -0.6,y=mid,labels=j,cex=0.6,col="black")
    ylabels[i] = j
    ylabelposns[i] = mid
  }
  j = j - 1
}

# Label for the final (topmost) mode band.
i = i + 1
mid = yval
yval = yval + strtoi(modes[[as.character(modeOrder[i])]])
mid = (mid + yval) / 2
ylabels[i] = j
# BUG FIX: removed the dead assignment `ylables = modeOrder` — a misspelled
# variable name ("ylables") that was never read anywhere.
ylabelposns[i] = mid
#text(x=-0.6,y=mid,labels=j,cex=0.6,col="black")
axis(2, labels = ylabels, at = ylabelposns, col = NA, col.ticks = 1,
     cex.axis = 0.7)

# Dotted vertical separators between columns.
for (i in 2:length(modeOrder)) {
  lines(c((i - 0.5), (i - 0.5)), c(0, totalSeqs), lwd = 0.5, lty = 3,
        col = "black")
}

par(cex = 0.5)
# coords = TRUE (was the reassignable shorthand `T`).
gradientLegend(valRange = c(0, 1),
               pos = c(1, -(0.04 * totalSeqs), lenOfSeq, -(0.048 * totalSeqs)),
               coords = TRUE, side = 1, color = pal, cex.label = 0.7, seq = 1)
g = dev.off()
9bd89e27651e3a74b3f7ff190b5ca7fd3cc8abbb
c847169d8ce43d5e31a0a428b4d97e5a7c8c230a
/by/process_surface.R
5cbc6e2839ef6f6275cd0e97997a8e297bbaa12a
[]
no_license
xuehangsong/dvz_dust
992e03056f6d0c81bf150bd06ce3e32d5761c4fe
75ac29bdee5191648bef8253d527d851787e7e99
refs/heads/master
2020-03-19T07:27:33.848850
2018-10-02T21:57:44
2018-10-02T21:57:44
136,115,282
0
0
null
null
null
null
UTF-8
R
false
false
1,128
r
process_surface.R
## Script previously named <solute.flux.v3.R>.
## Collects surface-flux output from a set of simulation case directories
## into one list and saves it for later plotting of a single case.
## Written by Xuehang 04/11/2018.
## NOTE(review): rm(list=ls()) wipes the user's workspace — generally
## discouraged in shared scripts; kept here to preserve behavior.
rm(list=ls())
library(RColorBrewer)

# Simulation root: production path commented out, defaults to cwd.
#simu.dir="/pic/projects/dvz/xhs_simus/dust/fy18/by_9a/"
simu.dir="./"
scripts.dir="/people/song884/github/dvz_dust/by/"

# load model parameters
source(paste(scripts.dir,"parameter.R",sep=""))

## List all case directories: names starting with "r" or "l", plus the
## literal "2018" and "base" cases.
cases = c(list.files(paste(simu.dir,sep=""),pattern="^r"),
          list.files(paste(simu.dir,sep=""),pattern="^l"),
          list.files(paste(simu.dir,sep=""),pattern="^2018$"),
          list.files(paste(simu.dir,sep=""),pattern="^base$"))

## Read the surface data from each case's tec_data/surface.dat,
## keeping columns 1 (time), 4 (crib water) and 6 (crib solute).
surface.data = list()
for (icase in cases) {
  print(icase)  # progress indicator
  case.data = read.table(
      paste(simu.dir,icase,"/tec_data/surface.dat",sep=""),
      skip=3)  # skip the 3-line Tecplot header
  surface.data[[icase]] = case.data[,c(1,4,6)]
  colnames(surface.data[[icase]]) = c("time",
                                      "crib.water",
                                      "crib.solute")
}

# Persist the collected list (file extension is .r but it is an RData file).
save(surface.data,file=paste(simu.dir,"surface_data.r",sep=""))
2de6c5ac073e25336a3a7b875d0a5abe416613ac
29585dff702209dd446c0ab52ceea046c58e384e
/ProgGUIinR/inst/Examples/ch-RGtk2/ex-RGtk2-color-tool-button.R
44ea85285c0f69c48fe9efd54630d2a88c0df438
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,727
r
ex-RGtk2-color-tool-button.R
### R code from vignette source 'ex-RGtk2-color-tool-button.Rnw'
### Builds a GTK toolbar containing a color-picker tool button whose
### drop-down menu offers the colors of the current R palette().

###################################################
### code chunk number 1: rgtk2-mennus-toolbar-color-button
###################################################
# A color button initialised to the first palette color.
gdk_color <- gdkColorParse(palette()[1])$color
color_button <- gtkColorButton(gdk_color)

###################################################
### code chunk number 2: rgtk2-menus-toolbar-color-menu
###################################################
# Build one menu item per palette color: a 20x20 swatch (a drawing area
# with its background set to the color) next to the color's name.
colorMenuItem <- function(color) {
  drawing_area <- gtkDrawingArea()
  drawing_area$setSizeRequest(20, 20)
  drawing_area$modifyBg("normal", color)
  image_item <- gtkImageMenuItem(color)
  image_item$setImage(drawing_area)
  image_item
}
color_items <- sapply(palette(), colorMenuItem)
color_menu <- gtkMenu()
for (item in color_items)
  color_menu$append(item)

###################################################
### code chunk number 3: rgtk2-menus-toolbar-color-cb
###################################################
# When a menu item is activated, set the color button to that item's
# color (the item label is the color name).
colorMenuItemActivated <- function(item) {
  color <- gdkColorParse(item$getLabel())$color
  color_button$setColor(color)
}
sapply(color_items, gSignalConnect, "activate", colorMenuItemActivated)

###################################################
### code chunk number 4: ex-RGtk2-color-tool-button.Rnw:54-55
###################################################
toolbar <- gtkToolbar()

###################################################
### code chunk number 5: rgtk2-menus-toolbar-menu
###################################################
# Wrap the color button and menu in a menu tool button and show it
# on a toolbar inside a top-level window.
menu_button <- gtkMenuToolButton(color_button, "Color")
menu_button$setMenu(color_menu)
toolbar$add(menu_button)

## add to GUI
window <- gtkWindow()
window$add(toolbar)
47acd1a6d03da0b747791f794624adb189cd85d8
4df3882cd1d21190b10894a5eea39ba5ebbf7c5d
/usernames.R
2bb4bdff342aaaaeb317152bdb389613aea64580
[]
no_license
yk2684/NYC-Theatre-Reviews-Scrape
4714f0511dddf6e88e194635cd9e26d33b2911ec
94fde09a358b330de7c13e284144658149fdbfa8
refs/heads/master
2021-05-09T05:07:20.213072
2018-01-28T21:37:20
2018-01-28T21:37:20
119,298,866
0
0
null
null
null
null
UTF-8
R
false
false
2,418
r
usernames.R
## Scrapes reviewer usernames from a show-score.com show page, then visits
## each member profile to collect username, location, age and sex, and
## writes the result to Show_username.csv.
library(rvest)
library(stringr)
library(data.table)

getwd()
# set up working directory
setwd("Your_working_directory")
# clear environment
# NOTE(review): rm(list=ls()) wipes the user's workspace — discouraged
# in shared scripts; kept to preserve behavior.
rm(list=ls())

# Load in the show link and add the page numbers - will loop through each
# page to scrape the usernames, e.g.
# ('https://www.show-score.com/broadway-shows/once-on-this-island?page=', 1:23)
Show <- lapply(paste0('Link_to_Show_Of_Your_Choice', 1:100), function(url){
  url %>% read_html() %>% html_nodes(".-light-gray") %>% html_text()
})
Show <- unlist(Show)
Show

# Some cleaning of the scraped usernames: drop admin/UI strings.
# NOTE(review): `remove` shadows base::remove here (harmless in this script).
remove <- c("Edit \n", "Edit Show \n", "View in admin\n", "\nReport Abuse\n", " View in admin\n")
Show_username <- setdiff(Show, remove)
Show_username <- sapply(Show_username, tolower)
Show_username

# Normalise names into profile-URL slugs: spaces/apostrophes -> "-",
# periods removed.
Show_username <- gsub(" ", "-", Show_username, fixed=TRUE)
Show_username <- gsub("'", "-", Show_username, fixed=TRUE)
Show_username <- gsub(".", "", Show_username, fixed=TRUE)
Show_username

# Load in the scraped usernames to access their profile pages and scrape
# their age, gender, and location listed.
Show_username_scrape <- lapply(paste0('https://www.show-score.com/member/', Show_username), function(url){
  url %>% read_html() %>% html_nodes(".user-details__main-info :nth-child(1)") %>% html_text()
})

# NOTE(review): the next line's result is immediately discarded — the
# following gsub() starts again from Show_username_scrape, not from
# Show_username_scrape_2. Presumably dead code; confirm before removing.
Show_username_scrape_2 <- paste(Show_username_scrape, 1)
# Strip the deparsed-list artifacts (c("...")) and turn field separators
# into "=" so the string can be split into 4 columns.
Show_username_scrape_2 <- gsub("c(\"", "", Show_username_scrape, fixed=TRUE)
Show_username_scrape_2 <- gsub("\"", "", Show_username_scrape_2, fixed=TRUE)
Show_username_scrape_2 <- gsub(")", "", Show_username_scrape_2, fixed=TRUE)
Show_username_scrape_2 <- gsub("|", "=", Show_username_scrape_2, fixed=TRUE)
Show_username_scrape_2 <- gsub(",", "=", Show_username_scrape_2, fixed=TRUE)
Show_username_scrape_2 <- str_trim(Show_username_scrape_2)

# Split the dataframe into 4 columns
df.Show_username <- data.frame(str_split_fixed(Show_username_scrape_2, "=", 4))
# Delete the '='s left over in the last column
df.Show_username$X4 <- gsub("=", "", df.Show_username$X4, fixed=TRUE)
# Delete the first 25 rows
df.Show_username <- df.Show_username[-1:-25,]
# Add names to the columns
colnames(df.Show_username) <- c("Username", "Location", "Age", "Sex")
# Write to csv
write.csv(df.Show_username, "Show_username.csv")
46677c0676c2022e3f4e909ec8e3e1b8e27ac159
c4d14f572811fb5c7e94d9c97568c158574f330f
/ui.R
52ca1a7479d6f56830febc77bf0351a3bc625d55
[]
no_license
bigstewosu/Champs
5e4d5048ab7b817ff67c16d8345daaf1945bfd25
2c3ee2ccdb2ed57146965571ac8fc7ef8aadb52e
refs/heads/master
2021-07-25T14:52:54.181296
2017-11-07T22:32:09
2017-11-07T22:32:09
109,896,677
0
0
null
null
null
null
UTF-8
R
false
false
1,464
r
ui.R
## Course Project Data Products Coursera
## Author: "Dale Stewart"
## date: "Nov 7, 2017"
## Build User interface (ui.R), which will control the layout and appearance
## of a Shiny app showing championships per state for 4 major U.S. leagues.

## Load Shiny library
library(shiny)

shinyUI(fluidPage(

  # Application title
  titlePanel("Number of Major Championships per State in 4 Major U.S. Sporting Leagues"),
  br(),
  br(),

  # Sidebar: league picker and year-range slider, with usage instructions.
  sidebarPanel(
    helpText("Documentation and Instructions:"),
    helpText("In the input boxes below please choose one of the 4 major sporting leagues in the United States as well as a range of years. The output on the left will display associated counts for championships won in that state as well as a horizontal bar chart representing the same data."),
    selectInput("league", label = "Choose a league",
                choices = c("NBA", "MLB", "NFL", "NHL"),
                selected = "NBA"),
    sliderInput("year", "Year Range:",
                min = 1950, max = 2017,
                value = c(1993, 2011), sep = ""),
    helpText("Based on the league selection that you have made this page will show you the data and a histogram of the states that have won championships in the time frame selected")),
  br(),
  br(),

  # Main panel: data table, selected-league text, and the bar chart.
  # Call Data table
  mainPanel(dataTableOutput('Details'),
            # Pass state name
            textOutput("league"),
            br(),
            br(),
            # Plot coliform levels state-wise
            plotOutput("plot1"))
  )
)
70b85b1a7aff68bc7f02bc90fdc2db18fcce2b86
c0e4fe657bc91bebdd99dc609e30d6779a050923
/src/miller_signatures.R
bd931d00939ef8b8f799d9cc121b7e7232895712
[]
no_license
caramirezal/tcd8ExscSeq
491d2983ce64d2f2f4087dd69359adaa2ad8f9c2
900b785ac62f7b1e23babe49cf7ae7bdd52abe7b
refs/heads/master
2022-11-10T17:45:43.873223
2022-11-02T10:37:22
2022-11-02T10:37:22
248,523,847
0
0
null
null
null
null
UTF-8
R
false
false
8,365
r
miller_signatures.R
## Visualization of scarches results.
## Pipeline: load Hofmann/Thimme HCV scRNA-seq counts, build a Seurat
## object, annotate cells, integrate samples with SCTransform, score cells
## with the Miller progenitor-like signature, and compute mem-vs-exh DEGs.

## Dependencies
library(Seurat)
library(dplyr)
library(readxl)
library(rio)
library(dittoSeq)
library(viridis)

set.seed(333)

## Setting working directory
path2project <- '/Users/carlosramirez/sc/tcd8ExscSeq/'
setwd(path2project)

#####################################################################
## Loading Hofmann data

## Data preprocessing
## gene names must be standardized
himmer <- read.csv('data/maike2020/nina_thimme_raw_counts.csv',
                   header = TRUE)
r_names <- himmer$X
himmer <- select(himmer, -X)
himmer_mtx <- apply(himmer, 2, as.numeric)
# Strip the "__chr*" suffix from gene names.
rownames(himmer_mtx) <- gsub('__chr.*', '', r_names)

## Seurat object construction
seurat <- CreateSeuratObject(counts = himmer_mtx,
                             project = 'himmer',
                             min.cells = 3,
                             min.features = 3,
                             assay = 'RNA')
remove(himmer, himmer_mtx)  # free the raw matrices

#####################################################################
## Adding Nina annotations

## Reading Nina annotations
nina.anns <- read_xlsx('data/cell_annotation_nina.xlsx') %>%
  as.data.frame()
rownames(nina.anns) <- nina.anns$CELLID
head(nina.anns)

## Annotating seurat file; cells without an annotation keep the string 'NA'.
seurat$'nina_annotations' <- 'NA'
seurat$nina_annotations[rownames(nina.anns)] <- nina.anns$TYPE
seurat$nina_annotations %>% table()

# Standard normalisation / dimensionality-reduction workflow.
seurat <- NormalizeData(seurat) %>% ScaleData()
seurat <- FindVariableFeatures(seurat, nfeatures = 3000)
seurat <- RunPCA(seurat, verbose = FALSE)
seurat <- RunUMAP(seurat, dims = 1:30)

## UMAP of annotated cells before batch correction.
pdf('figures/hcv_umap_before_correction.pdf')
dittoDimPlot(subset(seurat, nina_annotations != 'NA'),
             "nina_annotations", size = 2.5)
dittoDimPlot(subset(seurat, nina_annotations != 'NA'),
             "orig.ident", size = 2.5)
dev.off()

#####################################################################
## Correcting by sample (SCTransform-based integration)

## Using all genes: split by sample, SCTransform each.
seurat.list <- lapply(unique(seurat$orig.ident),
                      function(x) subset(seurat, orig.ident == x))
names(seurat.list) <- unique(seurat$orig.ident)
seurat.list <- lapply(seurat.list, function(x) SCTransform(x))
seurat.features <- SelectIntegrationFeatures(object.list = seurat.list,
                                             nfeatures = 3000)
seurat.list <- PrepSCTIntegration(object.list = seurat.list,
                                  anchor.features = seurat.features,
                                  verbose = FALSE)
seurat.anchors <- FindIntegrationAnchors(object.list = seurat.list,
                                         normalization.method = "SCT",
                                         anchor.features = seurat.features,
                                         verbose = FALSE,
                                         reduction = 'cca',
                                         k.anchor = 30,
                                         k.score = 30,
                                         k.filter = 10)
seurat.integrated <- IntegrateData(anchorset = seurat.anchors,
                                   normalization.method = "SCT",
                                   verbose = FALSE)
seurat.integrated <- FindVariableFeatures(seurat.integrated,
                                          nfeatures = 3000)
seurat.integrated <- RunPCA(seurat.integrated, verbose = FALSE)
seurat.integrated <- RunUMAP(seurat.integrated, dims = 1:30)

## Corrected Hofmann data
pdf('figures/hofmann_corrected.pdf')
DimPlot(subset(seurat.integrated, nina_annotations != 'NA') ,
        group.by = c('orig.ident', 'nina_annotations'),
        pt.size = 2.3)
dev.off()

#####################################################################
## Scoring using Progenitor-like signature

## Reading Progenitor-like signature in Miller (supplementary xlsx).
miller_deg_path <- 'https://static-content.springer.com/esm/art%3A10.1038%2Fs41590-019-0312-6/MediaObjects/41590_2019_312_MOESM4_ESM.xlsx'
## LCMV TEx Progenitor vs Terminally DEG
lcmv_deg <- import(miller_deg_path, which = 1)
str(lcmv_deg)
## TIL TEx Progenitor vs Terminally DEG (loaded but not used below)
til_deg <- import(miller_deg_path, which = 2)

# Top 100 significant genes by fold change, upper-cased for human symbols.
signature.lcmv <- lcmv_deg %>%
  mutate(padj = as.numeric(padj)) %>%
  filter(padj < 0.05) %>%
  arrange(desc(log2FoldChange)) %>%
  head(100) %>%
  select(Gene) %>%
  unlist() %>%
  toupper() %>%
  unname()

# Per-cell module score for the signature (stored as Progenitor.like1).
seurat.integrated <- AddModuleScore(
  seurat.integrated,
  features = list(lcmv=signature.lcmv),
  nbin = 5,
  name = 'Progenitor-like'
)

pdf('figures/umap_miller_prog-like_signature.pdf')
FeaturePlot(subset(seurat.integrated, nina_annotations != 'NA'),
            reduction = 'umap',
            features = c('Progenitor.like1'),
            pt.size = 2.5) +
  scale_color_viridis()
dev.off()

## Plot nina annotations
pdf('figures/vlnplot_miller_prog-like_signature.pdf')
# Fix the display order of the annotation factor levels.
seurat.integrated@meta.data$nina_annotations <-
  factor(seurat.integrated@meta.data$nina_annotations,
         levels = c('mem', 'trans', 'exh'))
dittoPlot(subset(seurat.integrated, nina_annotations != 'NA'),
          "Progenitor.like1",
          group.by = "nina_annotations",
          plots = c("jitter", "vlnplot", "boxplot"),
          jitter.color = "blue",
          jitter.size = 0.5,
          boxplot.color = "white",
          boxplot.width = 0.1,
          boxplot.fill = FALSE) +
  ggtitle('HCV-infected patients samples') +
  ylab('Miller Progenitor-like Signature Score') +
  xlab('')
dev.off()

pdf('figures/umap_corrected_nina_annotations.pdf')
dittoDimPlot(subset(seurat.integrated, nina_annotations != 'NA'),
             "nina_annotations", size = 2.5)
dittoDimPlot(subset(seurat.integrated, nina_annotations != 'NA'),
             "orig.ident", size = 2.5)
dittoDimPlot(subset(seurat.integrated,
                    nina_annotations %in% c('exh', 'mem')),
             "nina_annotations", size = 2.5,
             do.ellipse = TRUE, do.label = TRUE)
dev.off()

##############################################################
## Trying to improve clustering using DEG for dimensional
## reduction
seurat.integrated$nina_annotations %>% table()
seurat.filtered <- subset(seurat.integrated, nina_annotations != 'NA')
Idents(seurat.filtered) <- seurat.filtered$nina_annotations
degs <- FindAllMarkers(seurat.filtered, only.pos = TRUE)
degs <- arrange(degs, desc(avg_logFC))
dim(degs)

# Re-run PCA/UMAP on the DEG genes only.
seurat.filtered <- RunPCA(seurat.filtered,
                          verbose = FALSE,
                          features = degs$gene,
                          npcs = 20)
seurat.filtered <- RunUMAP(seurat.filtered, dims = 1:20)

pdf('figures/improved_umap_only_degs.pdf')
DimPlot(seurat.filtered,
        group.by = 'nina_annotations',
        reduction = 'pca')
DimPlot(subset(seurat.filtered, nina_annotations != 'trans'),
        group.by = 'nina_annotations',
        reduction = 'umap')
dittoDimPlot(subset(seurat.filtered,
                    nina_annotations %in% c('exh', 'mem')),
             "nina_annotations", size = 2.5,
             do.ellipse = TRUE, do.label = TRUE)
FeaturePlot(subset(seurat.filtered, nina_annotations != 'trans'),
            reduction = 'umap',
            features = c('Progenitor.like1'),
            pt.size = 3) +
  scale_color_viridis()
dev.off()

#################################################################
## Calculating DEGs between Memory-like and Terminally exhausted
## cells
Idents(seurat.integrated) <- seurat.integrated$nina_annotations
degs <- FindMarkers(
  subset(seurat.integrated, nina_annotations != 'trans'),
  ident.1 = 'mem',
  logfc.threshold = 0,
  min.pct = 0
)
degs<- degs %>%
  filter(p_val_adj < 0.05)
dim(degs)
head(degs)

# Persist the significant mem-vs-exh DEGs.
saveRDS(degs,
        'analysis/mem_vs_exh_hofmann_degs.rds',
        compress = TRUE)
0626c82cb528022680148e6dfcfd5a66ec832518
75716773a68c416be5b297bb278f6c036729891b
/cacheSolve.R
8f8be12206e5ac4c39bac9d94c545a9ac94a0263
[]
no_license
umaircheema/datasciencecoursera
466e12c06ff049d3ecaff267bec9b48a8084886a
aef42c65bc6d678d0db42723f30bc5af107db712
refs/heads/master
2016-09-05T19:16:36.180907
2015-08-21T10:11:08
2015-08-21T10:11:08
40,152,003
0
0
null
null
null
null
UTF-8
R
false
false
211
r
cacheSolve.R
## Return the matrix inverse of the "cache matrix" object `x`, computing
## it at most once. `x` is a list of accessor closures ($get, $setinverse,
## $getinverse) as produced by the companion makeCacheMatrix() constructor.
##
## Args:
##   x   : cache-matrix object exposing $get, $setinverse, $getinverse
##   ... : further arguments forwarded to solve()
## Returns: the inverse of x$get(); emits a message on a cache hit.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and memoise the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
626761451e495ba8b33b9c0155ac03b4dc085c07
135725be7153ff6ec6cc79b8db29cdea80ecc875
/man/gwasLog.Rd
97fdf137e8474d3f72a362e8714ff8b096963877
[]
no_license
cran/CollapsABEL
f2969755a8fe831eeab9c4971ddd1488a1e39b81
b3be3f831ba5e9500d016aca0ca768a367e79c84
refs/heads/master
2021-01-10T13:18:22.127650
2016-12-11T19:35:07
2016-12-11T19:35:07
55,400,560
1
0
null
null
null
null
UTF-8
R
false
true
356
rd
gwasLog.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/2_plGwas.R \name{gwasLog} \alias{gwasLog} \title{Plink log file} \usage{ gwasLog(pl_gwas) } \arguments{ \item{pl_gwas}{PlGwasC object.} } \value{ character. Path to log file. } \description{ Redirect stdout to this file when plink is running. } \author{ Kaiyin Zhong, Fan Liu }
6b2fd42c781825652d89acedf9da8943b37089b2
48d2c8117c4604e32bef0752f16447641bd82718
/electability/R/ProcessPolls.R
c10a21c8476aa490628060c1f67a8fcd86e2a9cb
[ "MIT", "CC-BY-4.0", "LicenseRef-scancode-public-domain" ]
permissive
tmcintee/electability-2020-pub
b3334cf5ada9c74a43f5cdc9bbb5742cfef290d1
5dd97241c7551633890020b4a5ce92eff78dc468
refs/heads/master
2020-12-13T09:14:24.949548
2020-01-16T17:23:56
2020-01-16T17:23:56
234,372,855
0
0
null
null
null
null
UTF-8
R
false
false
1,883
r
ProcessPolls.R
## Read a raw polls CSV, normalise candidate names, and condense per-poll
## results into one row per candidate with relative bonus/penalty shares
## and a recency/sample-size weight.
##
## Args:
##   poll_file        - path to the raw polls CSV
##   candidate_vector - character vector of cleaned candidate names to keep
## Returns: tibble with columns Candidate, State, Year, Bonus, Penalty,
##          Weight, Type.
## NOTE(review): relies on str_split/parse_date/spread/TimeDecay being in
## scope (stringr/readr/tidyr and a project helper) — loaded by the caller.
ProcessPolls <- function(poll_file,candidate_vector) {
  polls_raw <- read.csv(poll_file,stringsAsFactors = FALSE)
  nameVector <- as.character(polls_raw$candidate_name)
  list_names <- str_split(nameVector," ")
  # Normalise each candidate to "First Last", collapsing known aliases
  # (Julián/Julian Castro, Joseph->Joe, Bernard->Bernie) and dropping
  # generational suffixes (Jr./III); "Blasio" becomes "de Blasio".
  for(i in 1:length(list_names)) {
    first_name <- list_names[[i]][[1]]
    last_name <- list_names[[i]][[length(list_names[[i]])]]
    if(first_name == "Julián") {
      first_name <- "Julian"
    } else if(last_name == "Castro") {
      first_name <- "Julian"
    } else if(first_name == "Joseph") {
      first_name <- "Joe"
    } else if(first_name == "Bernard") {
      first_name <- "Bernie"
    }
    if(last_name == "Jr." | last_name == "III") {
      last_name <- list_names[[i]][[length(list_names[[i]])-1]]
    } else if(last_name == "Blasio") {
      last_name <- "de Blasio"
    }
    name_candidate_cleaned <- paste0(first_name," ",last_name)
    nameVector[[i]] <- paste0(first_name," ",last_name)
  }
  polls_raw$Candidate <- nameVector
  polls_raw$start_date <- parse_date(as.character(polls_raw$start_date),format = "%m/%d/%y")
  # Widen by party, take each question's best DEM/REP share, then compute
  # per-poll bonus/penalty relative to the poll median and a weight that
  # combines sample size with time decay toward election day 2020-11-03.
  polls_condensed <- polls_raw %>%
    spread(key = candidate_party, value = pct) %>%
    group_by(question_id) %>%
    mutate(Share.Democratic = max(DEM,na.rm = TRUE)/100,
           Share.Republican = max(REP, na.rm = TRUE)/100) %>%
    ungroup %>%
    group_by(poll_id) %>%
    mutate(Bonus = Share.Democratic - median(Share.Democratic),
           Penalty = Share.Republican - median(Share.Republican),
           Year = 2020+as.numeric(start_date-parse_date("2020-11-03"))/366,
           Weight = sqrt(sample_size)*TimeDecay(Year),
           State = state,
           Type = "Poll") %>%
    filter(Candidate %in% candidate_vector) %>%
    ungroup %>%
    select(Candidate,State,Year,Bonus,Penalty,Weight,Type)
  return(polls_condensed)
}
c7350e6a53ef721986edf955ed916c85f8a3df3a
c2f267114e9024f6fea4fbc2c3ce530fd44ee753
/man/is.surveyorStats.Rd
7308ed82eae807e4acccbe0d17431e3f420d0d99
[]
no_license
andrie/surveyor
a1e0596be1d910162be0e36c897f8ff11ee150de
b6724141687e31f193e87747e53e4a2b00187ae0
refs/heads/master
2016-09-09T23:47:00.528333
2012-08-28T16:35:20
2012-08-28T16:35:20
1,082,414
0
0
null
null
null
null
UTF-8
R
false
false
292
rd
is.surveyorStats.Rd
\name{is.surveyorStats} \alias{is.surveyorStats} \title{Test object for membership of class "surveyorStats".} \usage{ is.surveyorStats(x) } \arguments{ \item{x}{Object} } \value{ TRUE or FALSE } \description{ Test object for membership of class "surveyorStats". } \keyword{internal}
d054b7b352fba137f3b59453687fc8551de0d6ac
7853c37eebe37fa6a0307e0dd9e197830ee6ac71
/man/cuDeviceGet.Rd
fff5d64937e5a874ac90ade9cbbb00b8a029c81a
[]
no_license
chen0031/RCUDA
ab34ffe4f7e7036a8d39060639f09617943afbdf
2479a3b43c6d51321b0383a88e7a205b5cb64992
refs/heads/master
2020-12-22T22:47:37.213822
2016-07-27T03:50:44
2016-07-27T03:50:44
null
0
0
null
null
null
null
UTF-8
R
false
false
528
rd
cuDeviceGet.Rd
\name{cuDeviceGet} \alias{cuDeviceGet} \title{Returns a handle to a compute device} \description{ Returns a device handle given an ordinal in the range [1, \code{cuDeviceGetCount}()].} \usage{cuDeviceGet(ordinal)} \arguments{ \item{ordinal}{Device number to get handle for} } \seealso{\code{\link{cuDeviceGetAttribute}} \code{\link{cuDeviceGetCount}} \code{\link{cuDeviceGetName}} \code{\link{cuDeviceTotalMem}}} \references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.html}} \keyword{programming} \concept{GPU}
d46dca1ec68204520689b8a629639e62d2d076d0
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
/codeml_files/newick_trees_processed/6890_0/rinput.R
d804caf5b7ed9107a4f03d8ef672e7016a1d0d91
[]
no_license
DaniBoo/cyanobacteria_project
6a816bb0ccf285842b61bfd3612c176f5877a1fb
be08ff723284b0c38f9c758d3e250c664bbfbf3b
refs/heads/master
2021-01-25T05:28:00.686474
2013-03-23T15:09:39
2013-03-23T15:09:39
null
0
0
null
null
null
null
UTF-8
R
false
false
135
r
rinput.R
## Unroot a phylogenetic tree: read the Newick tree for cluster 6890_0,
## remove its root with ape::unroot(), and write the unrooted tree out.
library(ape)

phylo_tree <- read.tree("6890_0.txt")       # rooted input tree
phylo_unrooted <- unroot(phylo_tree)        # drop the root node
write.tree(phylo_unrooted, file="6890_0_unrooted.txt")
308d6679c2a7c5e03cb39ddfed2d4053ebf3a478
0c3fa446b7aa8e5f48201e139fd7e19c351ae54d
/src/run_apeestim/R/apePredPost.R
f77017ea27b57100314935bcb6640425c193ab0e
[]
no_license
mrc-ide/covid19-forecasts-orderly
477b621129ebc93dbd4e75ae4d54fbda0ad1669c
2210ea70dc7b38d91fc1678ab0c2bb9b83c5ddcb
refs/heads/main
2023-02-06T17:58:18.937033
2022-08-02T16:39:50
2022-08-02T16:39:50
254,094,782
33
12
null
2022-08-02T16:39:51
2020-04-08T13:25:08
R
UTF-8
R
false
false
2,980
r
apePredPost.R
######################################################################
## Negative-binomial posterior and APE score
# From: Parag, KV, and Donnelly, CA. (2019) “Optimising Renewal Models for
# Real-Time Epidemic Prediction and Estimation” BioRxiv: 835181.
######################################################################

# Assumptions
# - uses Poisson renewal equation (as in EpiEstim)
# - gamma prior required on R => negative binomial predictions

# Inputs - window length (k), SI distribution (sidistr), total infectiousness (Lday),
# incidence (Iday), max time (tPts), confidence 100*c(a, 1-a)%, gamma shape-scale (Rprior),
# start time for all windows (trunc)
# Output - APE score (ape), prob of next data (prob), R(t) estimate mean and confidence (Rhat, Rhatci),
# I(t+1) prediction mean and confidence (Inexhat, Inexci)
# NOTE(review): `sidistr` is accepted but never used in this body — the SI
# distribution is presumably already folded into Lday by the caller; confirm.
apePredPost <- function(k, sidistr, Lday, Iday, Rprior, a, trunc){
  # Length of time vector to consider
  nPts = length(Iday)
  # Range of index time points - starts from trunc
  ir = seq(trunc, nPts-1)

  # Grouped incidence and infectiousness over the trailing window
  B = rep(0, nPts-1); A = B;

  # Offset Lday so for next point i.e. t+1
  #Lday = Lday[2:length(Lday)]

  # At each time before compute historical R: sum incidence (B) and total
  # infectiousness (A) over the last k (or fewer, near the start) points.
  for(i in seq(1, nPts-1)){
    # Most ancient window time, truncate if negative
    idlast = max(i - k + 1, 1)
    # Look-back window of k (or less) indices
    idback = seq(i, idlast, -1)
    # Relevant incidence sum (B) and total infectiousness sum (A)
    B[i] = sum(Iday[idback]); A[i] = sum(Lday[idback]);
  }

  # Shape-scale gamma R(t) parameters (conjugate update of the prior)
  alpha = Rprior[1] + B
  beta = 1./(1/Rprior[2] + A)

  # Truncate vectors to the evaluated index range
  alpha = alpha[ir]; beta = beta[ir]

  # Posterior predictive negative binomial parameter
  pr = Lday[ir]*beta; pr = pr/(1 + pr)

  # Posterior mean estimate of R(t)
  Rhat = alpha*beta
  # Confidence interval (95%) on R(t) estimate
  Rhatci = matrix(-1, 2, length(Rhat))
  Rhatci[1,] = qgamma(a, shape = alpha, scale = beta)
  Rhatci[2,] = qgamma(1-a, shape = alpha, scale = beta)

  # Posterior mean prediction of I(t+1)
  Inexhat = Lday[ir]*Rhat  # check Lam[i] is for i+1 prediction <---
  #Inexhat = qnbinom(0.5, size = alpha, prob = 1-pr)

  # Confidence interval (95%) on I(t+1) projection
  Inexci = matrix(-1, 2, length(Inexhat))
  Inexci[1,] = qnbinom(a, size = alpha, prob = 1-pr)
  Inexci[2,] = qnbinom(1-a, size = alpha, prob = 1-pr)

  # Probability of next incidence value at t+1s
  prob = dnbinom(Iday[ir+1], size = alpha, prob = 1-pr)

  # APE score at this k, assume log 1/0 = 0 <----- check
  ape = -sum(log(prob[prob != 0]))
  # PMSE score
  pmse = sum((Inexhat - Iday[ir+1])^2)

  # Main outputs including parameters (last expression: the assignment's
  # value is what the function returns)
  apePredPost = list(
    ape = ape,
    pmse = pmse,
    prob = prob,
    rhat = Rhat,
    rhatci = Rhatci,
    post_mean_tplus1 = Inexhat,
    tplus1_ci = Inexci,
    alpha = alpha,
    beta = beta,
    post_negbin_pr = pr
  )
}
b422b8d013001bdfa5779bcd6c3752e5d9e89ad5
8ff96e8db40d0688ddc93c749245883e13a5de88
/man/filesInTreeStructure.Rd
5be4f05114883ba6312f02c27cd866108647ac01
[ "MIT" ]
permissive
McClellandLegge/shinydashtabs
6614372257a960520d26c42a15c670fac5ebe180
79d2e530c89e7e42ad7c678ec780188e980fa6e2
refs/heads/master
2023-01-20T00:36:40.928833
2020-11-20T22:10:26
2020-11-20T22:10:26
305,006,404
1
0
null
null
null
null
UTF-8
R
false
true
463
rd
filesInTreeStructure.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/files-in-tree-structure.R \name{filesInTreeStructure} \alias{filesInTreeStructure} \title{Determine a directory file structure} \usage{ filesInTreeStructure(file, pattern = NULL) } \arguments{ \item{file}{The root directory or file} \item{pattern}{An optional pattern to filter out directories containing files} } \value{ A list } \description{ Determine a directory file structure }
69123b0f426b2924344cd8c21a6bc43f05529c7f
f2aa5c24567724bd3d2d20c21ae4cba4d7e268be
/R-Programming-By-Example-master/Chapter09/fibonacci.R
9675d85dd3a87952c623eaf1bd3412ceb882b459
[]
no_license
cyrsis/RSandBox
ab3a2548cbd2a0abaf6fcf2e8f2d42951d3ab80f
b647cda2e30702de408b1e223cb898d2bb05c01a
refs/heads/master
2020-03-21T19:50:21.523273
2018-06-28T09:31:36
2018-06-28T09:31:36
138,972,521
2
0
null
null
null
null
UTF-8
R
false
false
551
r
fibonacci.R
#
# Chapter 09 - Improving Performance: Fibonacci
#
# R Programming by Example
# Omar Trejo Navarro
# Packt Publishing
# Mexico City
# 2017
#
# Two implementations of the Fibonacci sequence (fib(0) = 0, fib(1) = 1).

# ---- fibonacci-recursive

# Naive recursive Fibonacci (exponential time; for illustration only).
#   n : non-negative integer index.
# Returns fib(n).
fibonacci_recursive <- function(n) {
  if (n <= 1) {
    return(n)
  }
  # BUG FIX: the original recursed into the non-existent
  # fibonacci_recursive_1(), so every call with n > 1 errored out.
  return(fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2))
}

# ---- fibonacci-sequential

# Iterative (linear-time) Fibonacci.
#   n : non-negative integer index.
# Returns fib(n).
fibonacci_sequential <- function(n) {
  if (n <= 0) {
    # BUG FIX: the original returned 1 for n = 0, disagreeing with
    # fibonacci_recursive (fib(0) = 0).
    return(0)
  }
  if (n <= 2) {
    return(1)
  }
  f <- integer(n)
  f[1] <- 1
  f[2] <- 1
  for (i in 3:n) {
    f[i] <- f[i - 2] + f[i - 1]
  }
  return(f[n])
}
9f7e3221b54c2cad812394f12187426f21c37e9c
f46ff3940e124c95c1a6b092eb9faf84f772a489
/man/dint-package.Rd
eb9b0f956757855c17b7497031d2d89f966b5057
[ "MIT" ]
permissive
s-fleck/dint
6724a9e673f351ef03f8b82df7cb1b0d90109c07
16925fe23cb007d996cadbef6d702d459d44e7c2
refs/heads/master
2022-11-07T09:54:49.632281
2022-10-17T06:24:14
2022-10-17T06:24:14
130,039,666
13
4
NOASSERTION
2020-03-03T12:54:28
2018-04-18T09:38:21
R
UTF-8
R
false
true
828
rd
dint-package.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dint-package.R \docType{package} \name{dint-package} \alias{dint} \alias{dint-package} \title{dint: A Toolkit for Year-Quarter, Year-Month and Year-Isoweek Dates} \description{ S3 classes and methods to create and work with year-quarter, year-month and year-isoweek vectors. Basic arithmetic operations (such as adding and subtracting) are supported, as well as formatting and converting to and from standard R date types. } \seealso{ Useful links: \itemize{ \item \url{https://github.com/s-fleck/dint} \item Report bugs at \url{https://github.com/s-fleck/dint/issues} } } \author{ \strong{Maintainer}: Stefan Fleck \email{stefan.b.fleck@gmail.com} (\href{https://orcid.org/0000-0003-3344-9851}{ORCID}) } \keyword{internal}
fc40c7adf12302276648b236722fc076ef1d5fda
dcbd82e150369094a73253fa48cdd2887e1dd778
/IO_M2.1_new/Analysis_Ben_Loading.R
1d5051e7e43084b11610c78d829199bdbc2ed463
[]
no_license
yimengyin16/Model_Main
0e8140ba296d1115bbbc64bcffefb05ac7a4fe86
b63573407aaa0f6489d96083bf2fe09e843ca43f
refs/heads/master
2022-07-03T22:44:47.660461
2018-02-22T01:36:15
2018-02-22T01:36:15
27,349,525
0
2
null
null
null
null
UTF-8
R
false
false
1,545
r
Analysis_Ben_Loading.R
## Load benefit-scenario simulation outputs ("Outputs_B*" RData files) into
## one data frame, then compute funding-risk / contribution-volatility
## metrics and save both results under IO_M2.1_new/Analysis_Ben/.
## NOTE(review): rm(list = ls()) wipes the user's workspace — discouraged
## in shared scripts; kept to preserve behavior.
rm(list = ls())

library(reshape2)
library(plyr)
library(dplyr)
library(tidyr)
library(ggplot2)
library(knitr)
library(magrittr) # to use %<>%
library(zoo)
library(grid)
library(gridExtra)
library(stringr)
library("readr")
library("readxl")
library(xlsx)

# Project helpers: get_metrics(), get_metrics_maxChg(), etc.
source("Functions.R")

IO_folder <- "IO_M2.1_new/"

#****************************************************************************************************
#               ## 1. Loading simulation results  ####
#****************************************************************************************************

# Load one run's RData file and return its results component.
#   runname : file name inside `folder`
#   folder  : directory containing the run outputs
# Assumes the file defines an object `outputs_list` with a $results field.
load_run <- function(runname, folder){
  load(paste0(folder, runname))
  outputs_list$results
}

# All benefit-scenario output files, stacked into one data frame.
run_select <- dir(IO_folder, pattern = "^Outputs_B")
results_all <- ldply(run_select, load_run, IO_folder)

# Market-asset-to-benefit ratio in percent.
results_all %<>% mutate(MA_B = 100 * MA / B)

save(results_all, file = paste0(IO_folder, "/Analysis_Ben/Ben_results.RData"))

#****************************************************************************************************
#               ## 2. Measures of FR risk and contribution volatility  ####
#****************************************************************************************************

# Needs results_all

runs_benefit <- c(paste0("BF075-", c(0:3, "3a") ))

# 30-year metrics, with "BF075-1" as the comparison run.
df_metrics_Ben_y30 <- get_metrics(runs_benefit, 30, "BF075-1")
df_maxChg_Ben_y30 <- get_metrics_maxChg(runs_benefit, 30)

save(df_metrics_Ben_y30, df_maxChg_Ben_y30,
     file = paste0(IO_folder, "/Analysis_Ben/Ben_metrics.RData"))

df_metrics_Ben_y30 %>% kable
ab8aac73ba030136d13666c77449e2b236d7cf1b
a001932cb35ad0f5f1874999044ea1768870502f
/talks-and-lectures-master/CSC-UCSB/R/clusterR/montecarlo.R
5ce13e79b74f746d88824e25951221e8638bd20e
[]
no_license
UCSBMRL/Center-for-Scientific-Computing
619bd772d82ea1cb4ef57c11cc0b2c43b1e206e4
0584e2845c782ff5f4182b529894379a164e56c8
refs/heads/master
2021-08-22T08:29:39.362297
2017-11-29T19:01:13
2017-11-29T19:01:13
112,514,483
3
0
null
null
null
null
UTF-8
R
false
false
1,076
r
montecarlo.R
# Burak Himmetoglu # 10-25-2016 # bhimmetoglu@ucsb.edu # # # Example: Serial Monte Carlo with for loop # # Number of simulations nSim = 1e+5 # Exact result exact <- function(x = 1, nDim){ ((pnorm(x*sqrt(2)) - 0.5)*sqrt(pi))^nDim } # Function for Monte Carlo calculation funMC <- function(nDim){ vec = runif(nDim, min = 0, max = 1) # Draw a random vector from uniform distribution vec_exp = exp(-sum(vec^2)) # The integrand z = vec_exp # Add contribution s2 = vec_exp^2 # Variance contribution # Return results list(z, s2) } # Set seed set.seed(123) # Start the clock t0 <- proc.time() # Compute serial z = 0; s2 =0 for (i in 1:nSim){ temp <- funMC(nDim=10) z <- z + temp[[1]] s2 <- s2 + temp[[2]] } Int = z/nSim; Err = sqrt(s2/nSim - Int^2) / sqrt(nSim) # End the clock t1 <- proc.time(); t_elapsed <- t1-t0 # Print cat("---- Monte Carlo integration ----\n") cat("Calculated Value, Error: ", Int, Err, "\n") cat("Timing:", t_elapsed[1], "seconds\n") cat("Exact results: ", exact(nDim=10))
9feab174dc4588676edb9d64a6df6db5a509d761
7747a3fdf0fdc57b767d8ed199b323afb4d491a2
/man/sim_detect.Rd
21306a2958cdd61fb9d6d75007fad1e22fd10fc1
[]
no_license
ianjonsen/simsmolt
dcafaad041d6caa29cd573cd543dbeab7e14868a
09c9a8b8132bedaa499dd71c5c2fc6e2439256eb
refs/heads/master
2022-07-28T10:03:06.683400
2022-07-07T14:13:08
2022-07-07T14:13:08
155,731,825
0
0
null
null
null
null
UTF-8
R
false
true
843
rd
sim_detect.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim_detect.r \name{sim_detect} \alias{sim_detect} \title{simulate acoustic transmissions & detection, using \code{simulate} & \code{sim_setup} output} \usage{ sim_detect(s, data, delay = c(50, 130), burst = 5, noise = 1) } \arguments{ \item{s}{- a simsmolt class list containing output from sim_setup and sim_move} \item{delay}{- min & max time intervals (s) between transmissions} \item{burst}{- duration of each transmission (s)} \item{noise}{- range 0 - 1; simulate effect of noisy environment. Reduces detection prob w dist by specified proportion; default = 1, no reduction} } \description{ simulates transmissions & detections along simulated track segments within a defined range of acoustic array(s) } \author{ Ian Jonsen \email{ian.jonsen@mq.edu.au} }
2df169fd43e928124a9c25347a629c443ce2117f
72d9009d19e92b721d5cc0e8f8045e1145921130
/lefko3/R/lefko3-package.R
cff774b0d367de3181808804eb7789d1503caaf5
[]
no_license
akhikolla/TestedPackages-NoIssues
be46c49c0836b3f0cf60e247087089868adf7a62
eb8d498cc132def615c090941bc172e17fdce267
refs/heads/master
2023-03-01T09:10:17.227119
2021-01-25T19:44:44
2021-01-25T19:44:44
332,027,727
1
0
null
null
null
null
UTF-8
R
false
false
1,685
r
lefko3-package.R
#' @title Historical and Ahistorical Population Projection Matrix Analysis #' #' @description This package creates population projection matrices for use in population #' ecological analyses. The particular specialty of the package is the construction #' of historical matrices, which are 2-dimensional matrices comprising 3 time #' intervals of demographic information. The package is robust and can produce #' function-based and raw matrices for both standard ahistorical (i.e. 2 time #' interval) and historical analyses. #' #' @details The lefko package provides five categories of functions: #' 1. Data transformation and handling functions #' 2. Functions determining population characteristics from vertical data #' 3. Model building and selection #' 4. Matrix / integral projection model creation functions #' 5. Population dynamics analysis functions #' #' @details It also includes example datasets complete with sample code. #' #' @docType package #' @author Richard P. Shefferson <cdorm@g.ecc.u-tokyo.ac.jp> #' @author Johan Ehrlén #' @references Shefferson, R.P., J. Ehrlen, and S. Kurokawa. \emph{In press}. \emph{lefko3}: analyzing #' individual history through size-classified matrix population models. \emph{Methods in Ecology and Evolution}. #' @import Rcpp #' @importFrom Rcpp evalCpp #' @importFrom stats na.action na.fail na.omit lm glm getCall xtabs sd rnorm setNames #' @importFrom stats as.formula median pchisq var poisson #' @importFrom pscl zeroinfl #' @importFrom MuMIn dredge #' @importFrom MASS glm.nb #' @importFrom lme4 lmer glmer fixef ranef VarCorr #' @importFrom glmmTMB glmmTMB nbinom2 fixef ranef #' @useDynLib lefko3 #' @name lefko3 NULL
2b50ad98367696c52822e270071089b655f628a2
d255d28ece6cbc2967ef00c014eafc859cf68141
/RHRV/LoadBeatVector.R
01434b8e251b542f2b93bce9b72f9366991e2f32
[ "MIT" ]
permissive
med-material/ArduinoLoggerShinyApp
0fd645b7d721efd9bf005c12b46030402e98b35e
a1af24786df19dd4bbd909b5189d77ece88ac49f
refs/heads/master
2023-08-03T17:01:19.812307
2023-07-28T13:11:35
2023-07-28T13:11:35
228,591,063
1
1
MIT
2020-02-18T11:16:30
2019-12-17T10:24:41
R
UTF-8
R
false
false
2,408
r
LoadBeatVector.R
#' Loads beats positions from an R vector #' @description Stores the beat positions from an R vector under the #' \emph{HRVData} data structure. #' @param HRVData Data structure that stores the beats recording and information #' related to it #' @param beatPositions Numeric vector with the heartbeats occurrence's times since #' the beginning of the recording. See \emph{scale} parameter to specify the #' units #' @param scale Numeric value identifying the temporal units in which #' the beat positions are specified: 1 if beat positions is specified in seconds, #' 0.001 if beat positions in milliseconds, etc. #' @param datetime Date and time (DD/MM/YYYY HH:MM:SS) of the beginning of the #' recording #' @examples #' \dontrun{ #' hd = CreateHRVData() #' hd = LoadBeatVector(hd, #' c(0.000, 0.328, 0.715, 0.124, 1.50,1.880, 2.268, 2.656)) #' hd = BuildNIHR(hd) #' # ... continue analyzing the recording #' } #' @return A \emph{HRVData} structure containing the heartbeat positions #' from the \emph{beatPositions} vector. 
LoadBeatVector <- function(HRVData, beatPositions, scale = 1, datetime = "1/1/1900 0:0:0"){ VerboseMessage(HRVData$Verbose, "Loading beats positions") dir = getwd() on.exit(setwd(dir)) VerboseMessage(HRVData$Verbose, paste("Scale:", scale)) beatsaux = beatPositions beatPositions = beatsaux[!duplicated(beatsaux)] if (length(beatsaux) != length(beatPositions)) { warning(paste("Removed", length(beatsaux) - length(beatPositions), "duplicated beat positions")) } datetimeaux = strptime(datetime, "%d/%m/%Y %H:%M:%S") if (is.na(datetimeaux)) { stop("Date/time format is dd/mm/yyyy HH:MM:SS") } VerboseMessage(HRVData$Verbose, paste("Date: ", sprintf("%02d", datetimeaux$mday), "/", sprintf("%02d", 1 + datetimeaux$mon), "/", 1900 + datetimeaux$year, sep = "")) VerboseMessage(HRVData$Verbose, paste("Time: ", sprintf("%02d", datetimeaux$hour), ":", sprintf("%02d", datetimeaux$min), ":", sprintf("%02d", datetimeaux$sec), sep = "")) HRVData$datetime = datetimeaux HRVData$Beat = data.frame(Time = beatPositions * scale) VerboseMessage(HRVData$Verbose, paste("Number of beats:", length(HRVData$Beat$Time))) return(HRVData) }
77403b4cd7d31b86c5061726432d6f6a42471aa0
9f4949d8dfd39f94d75fc06b95b5b584f7ea04e3
/miscfunc.R
bc7859e36e4e4a827c6108010edd80756e742a03
[]
no_license
sickel/cowplot
8be407904f2faa5c1a445f8782b20d215e955432
22391752b77bbddb9b527ea428113d8fbfc0ea7a
refs/heads/master
2021-01-10T03:49:34.124346
2015-01-27T18:18:02
2015-01-27T18:18:02
8,456,736
0
0
null
null
null
null
UTF-8
R
false
false
6,944
r
miscfunc.R
cumuldata=function(lok,type='grazing'){ sqlbase=paste("select count(*) from ",lok,"_adjusted_model where result='",type,"' and newperc <=",sep='') ps=c(0,10,20,30,40,50,60,65,70,75,80,85,90,95,96,97,98,99,100) sets=c() for(p in ps){ sql=paste(sqlbase,p) rs=dbSendQuery(con,statement=sql) ret=fetch(rs,n=-1) sets=c(sets,ret$count[1]) } ref=tail(sets,n=1) df=data.frame(cbind(ps,sets)) df$perc=df$sets/ref*100 return(df) } #" select 100,count(*) from geilo_adjusted_model where result='grazing' and newperc=100" probplot=function(type='grazing'){ size=1.5 cdat_valdres=cumuldata('Valdres',type) cdat_geilo=cumuldata('Geilo',type) plot(cdat_geilo[,1],cdat_geilo[,2]/5359.44,ylab='Percentage of grazing points',xlab='Minimum probability of correct classification',col=5,pch=15,, cex.lab=size, cex.axis=size, cex.main=size, cex.sub=size) points(cdat_valdres[,1],cdat_valdres[,2]/6777.8,col=2,pch=15) legend(0,70,c('Valdres','Hallingdal'),c(2,5),cex=size) } totalcumuls=function(lok){ df=cumuldata(lok,'grazing') df$grazing=df$perc dt=cumuldata(lok,'walking') df$walking=dt$perc dt=cumuldata(lok,'resting') df$resting=dt$perc return(df) } createcdf=function(){ cdf_geilo=totalcumuls('Geilo') cdf_valdres=totalcumuls('Valdres') cdf_valdres$location='Valdres' cdf_geilo$location='Hallingdal' cdf=rbind(cdf_valdres,cdf_geilo) return(melt(data=cdf,id.var=c('location','ps'),measure.var=c('grazing','resting','walking'),variable_name="behaviour")) } if(FALSE){ pchv=1 pchg=2 colg=1 colr=2 colw=3 plot(cdf_valdres$ps,cdf_valdres$grazing,ylim=c(0,100),ylab='Percentage of grazing points',xlab='Cumultative probability of correct classification',col=colg,pch=pchv, cex.lab=size, cex.axis=size, cex.main=size, cex.sub=size) points(cdf_valdres$ps,cdf_valdres$resting,pch=pchv,col=colr) points(cdf_valdres$ps,cdf_valdres$walking,pch=pchv,col=colw) points(cdf_geilo$ps,cdf_geilo$grazing,pch=pchg,col=colg) points(cdf_geilo$ps,cdf_geilo$resting,pch=pchg,col=colr) 
points(cdf_geilo$ps,cdf_geilo$walking,pch=pchg,col=colw) legend(0,100,c('Valdres - grazing',' - walking',' - resting','Hallingdal - grazing',' - walking',' - resting'), col=c(colg,colw,colr,colg,colw,colr), pch=c(pchv,pchv,pchv,pchg,pchg,pchg)) plot(cdf_valdres$ps,cdf_valdres$grazing,ylim=c(0,100),ylab='Percentage of grazing points',xlab='Minimum probability of correct classification',col=colg,pch=pchv, cex.lab=size, cex.axis=size, cex.main=size, cex.sub=size) points(cdf_valdres$ps,cdf_valdres$resting,pch=pchv,col=colr) points(cdf_valdres$ps,cdf_valdres$walking,pch=pchv,col=colw) pchv=1 pchg=2 colg=1 colr=2 colw=3 plot(cdf_valdres$ps,cdf_valdres$grazing,ylim=c(0,100),ylab='Percentage of grazing points',xlab='Cumultative probability of correct classification',col=colg,pch=pchv, cex.lab=size, cex.axis=size, cex.main=size, cex.sub=size) points(cdf_valdres$ps,cdf_valdres$resting,pch=pchv,col=colr) points(cdf_valdres$ps,cdf_valdres$walking,pch=pchv,col=colw) points(cdf_geilo$ps,cdf_geilo$grazing,pch=pchg,col=colg) points(cdf_geilo$ps,cdf_geilo$resting,pch=pchg,col=colr) points(cdf_geilo$ps,cdf_geilo$walking,pch=pchg,col=colw) legend(0,50,c('Valdres - grazing',' - walking',' - resting','Hallingdal - grazing',' - walking',' - resting'), col=c(colg,colw,colr,colg,colw,colr), pch=c(pchv,pchv,pchv,pchg,pchg,pchg)) cdf_valdres=cumuldata('valdres','grazing') cdf_valdres$grazing=cdf_valdres$perc } plotexample=function(x){ # x=runif(11,0,1)*c(1:11) plot(x,axes=FALSE,xlab='',ylab='') lines(c(1,11),c(x[1],x[11]),col='blue',lty='dotdash') lines(x) text(c(1:11)+0.2,x,c(1:11)) legend(7.5,2,c('movement','displacement'),lty=c('solid','dotdash'),col=c('black','blue')) } qplotexample=function(x){ # x=runif(11,0,1)*c(1:11) df=data.frame(x=c(1:11),y=x,type='movement') df2=rbind(df,data.frame(x=c(1,11),y=c(df$y[1],df$y[11]),type='displacement')) plt=ggplot(df2,aes(x=x,y=y,linetype=type))+ geom_line() plt0=plt+ theme_bw() 
plt=plt+theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),panel.border = element_blank(), panel.background = element_blank(),axis.line=element_blank(),axis.text=element_blank(),axis.ticks=element_blank(), axis.title.x=element_blank(),axis.title.y=element_blank(),legend.position=c(0.1,0.9)) plt=plt+geom_point() plt=plt+geom_text(data=df,aes(x=x,y=y+0.3,label=c(1:11))) # plt=plt+scale_colour_manual(breaks=c("Displacement","Movement"),values=c("red", "blue")) plt=plt+ theme(legend.key = element_blank()) plt=plt+ theme(legend.text=element_text(size=12)) plt=plt+theme(legend.title=element_blank()) return(plt) } ggplotcumul=function(){ if(!(exists('cdfm'))){ cdfm<<-createcdf() } plt=ggplot(cdfm,aes(y=ps/100,x=value,colour=location,linetype=behaviour))+geom_line() # plt=plt+theme_bw() plt=plt+scale_colour_manual(values=c('black','#999999')) plt=plt+ylab("Cumultative probability of correct classification")+xlab("Percentage of points") return(plt) } arealist=function(lok){ sql=paste("select shape_area from ",lok,"classified where shape_area>1", sep="") rs=dbSendQuery(con,statement=sql) ret=fetch(rs,n=-1) return(ret$shape_area) } plotareapercentille =function(v,g,col2="darkgray"){ plot(quantile(v,ps),log="y",type='l',ylim=c(10,1000010),ylab="Area m2",xlab="Percentille",axes=FALSE) lines(quantile(g,ps),col=col2,lty="dotdash") legend(700,100,c("Valdres","Hallingdal"),lty=c('solid','dotdash'),col=c('black',col2)) axis(1,at=c(0:10)*100,labels=c(0:10)*10) axis(2,at=c(10,100,1000,10000,100000,1000000),labels=c(10,100,1000,10000,100000,1000000)) } ggplotarperc =function(col2="darkgray",nclass=1000){ ps=c(1:nclass)/nclass al=arealist('Valdres') quant=data.frame(q=quantile(al,ps),n=ps*100,lok="Valdres") al=arealist('Geilo') quant=rbind(quant,data.frame(q=quantile(al,ps),n=ps*100,lok="Hallingdal")) #return(quant) plt=ggplot(quant,aes(x=n,y=q,linetype=lok,type=""))+geom_hline(y=c(50,500,5000,50000,500000),colour="white",size=0.2)+geom_line() plt=plt+ 
theme(legend.key = element_blank()) plt=plt+ theme(legend.text=element_text(size=12)) plt=plt+theme(legend.title=element_blank()) plt=plt+scale_y_continuous(trans = 'log10', breaks=c(100,1000,10000,100000),labels=comma,minor_breaks=c(50,500,5000,50000,500000)) plt=plt+xlab('Percentille')+ylab(expression(paste("Area (",m^2,")",sep=""))) plt=plt+theme(legend.position=c(0.85,0.1)) plt=plt+theme(axis.text=element_text(size=12,colour="black")) #plot(quantile(v,ps),log="y",type='l',ylim=c(10,1000010),ylab="Area m2",xlab="Percentille",axes=FALSE) #lines(quantile(g,ps),col=col2,lty="dotdash") #legend(700,100,c("Valdres","Hallingdal"),lty=c('solid','dotdash'),col=c('black',col2)) #axis(1,at=c(0:10)*100,labels=c(0:10)*10) #axis(2,at=c(10,100,1000,10000,100000,1000000),labels=c(10,100,1000,10000,100000,1000000)) return(plt) }
724d007b0aad92c975b5461815dfbdf67500274e
1b6c2393630ef796e147a5538f14742cfbd14a13
/Rpkg/man/exm_madvise.Rd
c51831225ae3b3002388081e9759d2d98f91f702
[ "BSD-1-Clause", "BSD-2-Clause" ]
permissive
bwlewis/exm
2ba55286cad19152f5b309c97d269adbda04c66b
d6ca3936fc5850c4d02641dd705f6264703e1cdd
refs/heads/master
2018-11-16T02:58:56.304005
2018-09-20T13:50:16
2018-09-20T13:50:16
17,552,567
0
0
null
null
null
null
UTF-8
R
false
true
503
rd
exm_madvise.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/exm.r \name{exm_madvise} \alias{exm_madvise} \title{Lookup the exm backing file for an object} \usage{ exm_madvise(object, advice = c("normal", "random", "sequential")) } \arguments{ \item{object}{Any R object} \item{advice}{Mapping type, one of "normal", "random", or "sequential"} } \value{ Integer return code from the OS-level madvise call, zero means success } \description{ Lookup the exm backing file for an object }
0e63aa0785cd3b33502f211642dbe937194e514a
c87282580bd8547d4def283cf2f738c270046676
/man/getAssistLeaderByYear.Rd
068495fa63b9a379fbf9ee2c3488d205339a5fe2
[]
no_license
rpcvjet/ModernDSHW
6ee72c0995c1c604226a5a6861ec6a87a6ceaf2d
b935b51944d8b7935756a0b43939b769e51ff00e
refs/heads/master
2020-06-04T11:03:40.428227
2019-06-14T21:40:49
2019-06-14T21:40:49
191,995,223
0
0
null
2019-06-14T21:40:50
2019-06-14T19:23:25
R
UTF-8
R
false
true
314
rd
getAssistLeaderByYear.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getAssistLeaderByYear.R \name{getAssistLeaderByYear} \alias{getAssistLeaderByYear} \title{getAssistLeaderByYear} \usage{ getAssistLeaderByYear(year) } \arguments{ \item{year}{} } \description{ gets assist leader by year } \examples{ }
b1256250088a57d713f8b78541fd1952066f85c0
c1c42390b20504827f3e939689bdda634bbac931
/stats/top-n.R
1b7c642a0c59eb28251a77d25b8acb13f84a04be
[]
no_license
jithinjohn7/FaultLocalizationResearch
ddde1aa6dcfb50150b2f7d91853fe931bc674a94
965f47678c672e3f15c272e385944ebf4f793cc0
refs/heads/master
2020-04-03T23:56:10.022504
2018-12-07T17:08:18
2018-12-07T17:08:18
155,635,143
5
4
null
null
null
null
UTF-8
R
false
false
3,197
r
top-n.R
# Script that computes the ratio of defects that the best mbfl, sbfl, and hybrid # techniques localize in the top-5, top-10, and top-200 of the suspiciousness ranking. # # usage: Rscript top-n.R <data_file_real_exploration> <out_dir> # # Read file name of the data file and the output directory args <- commandArgs(trailingOnly = TRUE) if (length(args)!=2) { stop("usage: Rscript top-n.R <data_file_real_exploration> <out_dir>") } data_file <- args[1] out_dir <- args[2] source("util.R") library(ggplot2) # Read data file and add two columns df <- readCsv(data_file) df$Real <- getReal(df) df$FaultType <- ifelse(df$Real, "Real faults", "Artificial faults") df$Scheme <- getScoringSchemes(df) df$Type <- getType(df$Technique) # New (hybrid) techniques: # # "MCBFL", "MCBFL-hybrid-failover", "MCBFL-hybrid-avg", "MCBFL-hybrid-max" # "MRSBFL", "MRSBFL-hybrid-failover", "MRSBFL-hybrid-avg", "MRSBFL-hybrid-max", "MCBFL-hybrid-avg" # Show top-n rankings for two hybrid and all existing techniques flts <- c("MCBFL-hybrid-avg", "MRSBFL-hybrid-avg", "DStar", "Ochiai", "Jaccard", "Barinel", "Tarantula", "Op2", "Metallaxis", "MUSE") df <- df[df$FLT %in% flts,] rank <- rankTopN(df) for (scheme in c("first", "last", "median")) { sorted <- rank[rank$ScoringScheme==scheme & rank$Real==T,]$FLT cat("FLTs sorted (", scheme, "): ", sorted, "\n", file=stderr()) } num_real_bugs <- length(unique(df[df$Real,ID])) ################################################################################ sorted_first <- rank[rank$ScoringScheme=="first" & rank$Real==T,]$FLT sink(paste(out_dir, "top-n.tex", sep="/")) for (flt in sorted_first) { cat(sprintf("%20s", unique(df[df$FLT==flt,]$TechniqueMacro))) for (scheme in c("first", "last", "median")) { mask <- df$ScoringScheme==scheme & df$Real & df$FLT==flt top5 <- nrow(df[mask & df$ScoreAbs<=5,])/num_real_bugs*100 top10 <- nrow(df[mask & df$ScoreAbs<=10,])/num_real_bugs*100 top200 <- nrow(df[mask & df$ScoreAbs<=200,])/num_real_bugs*100 cat(" & ") cat(round(top5, 
digits=0), "\\%", sep="") cat(" & ") cat(round(top10, digits=0), "\\%", sep="") cat(" & ") cat(round(top200, digits=0), "\\%", sep="") } cat("\\\\ \n") } sink() ################################################################################ for (scheme in c("first", "last", "median")) { sink(paste(out_dir, "/", "top-n", initialCap(scheme), ".tex", sep="")) sorted <- rank[rank$ScoringScheme==scheme & rank$Real==T,]$FLT for (flt in sorted) { cat(sprintf("%20s", unique(df[df$FLT==flt,]$TechniqueMacro))) mask <- df$ScoringScheme==scheme & df$Real & df$FLT==flt top5 <- nrow(df[mask & df$ScoreAbs<=5,])/num_real_bugs*100 top10 <- nrow(df[mask & df$ScoreAbs<=10,])/num_real_bugs*100 top200 <- nrow(df[mask & df$ScoreAbs<=200,])/num_real_bugs*100 cat(" & ") cat(round(top5, digits=0), "\\%", sep="") cat(" & ") cat(round(top10, digits=0), "\\%", sep="") cat(" & ") cat(round(top200, digits=0), "\\%", sep="") cat("\\\\ \n") } sink() }
dbefb8ee58ba9f1a55e26a2e7e20cba4e753f29b
316aa351ff9f4b242fd79cf388aecd5bd5ad4f21
/man/fromInd.Rd
26b06b1665b0d6cda37c45915915fe6d984ca980
[]
no_license
jmhewitt/dsdive
5d24b6f9d3e39e9d8fc33c12b6172126a3769dd0
bb55540452917d0c81e039e9ed6fb79f9b233131
refs/heads/master
2022-09-23T06:18:55.498742
2020-05-28T19:36:01
2020-05-28T19:36:01
224,232,874
0
0
null
null
null
null
UTF-8
R
false
true
572
rd
fromInd.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/indexing.R \name{fromInd} \alias{fromInd} \title{Converts array indices to (x,y,z) coordinates} \usage{ fromInd(ind, x.max, y.max) } \arguments{ \item{ind}{index in an array} \item{x.max}{maximum value of x coordinates} \item{y.max}{maximum value of y coordinates} } \description{ Used for converting a flattened index to the \eqn{(x,y,z)} indices for an array with dimensions \eqn{x.max x y.max x n}, where \eqn{n} is arbitrary. } \examples{ fromInd(ind = 100, x.max = 100, y.max = 100) }
8ce06702d1089fdb2beeac578aa71885ac285cfe
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/mondate/examples/format.mondate.rd.R
90404f288920574d352355c90a179ab17c058849
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
289
r
format.mondate.rd.R
library(mondate) ### Name: format.mondate ### Title: Format a mondate ### Aliases: format.mondate ### ** Examples (b<-mondate(1)) # end of first month of millennium format(b) # "01/31/2000" -- with quotes -- in the U.S. locale format(b, format="%Y-%m-%d") # "2000-12-31"
b8d2895fab5a959021c479af5a63c365b4ac1017
39ada063feadfa4fd38a2258e7bf1205a505be1d
/man/directpvalcombi.Rd
26432883d410011fe01ce2333138a057311758b4
[]
no_license
cran/metaMA
4846d0144431eea33be142968721310d07555be9
4ca8d9ae282eded228568d173d15ecc61b49c428
refs/heads/master
2022-04-30T10:23:17.194643
2022-04-12T15:02:36
2022-04-12T15:02:36
17,697,406
0
0
null
null
null
null
UTF-8
R
false
false
1,262
rd
directpvalcombi.Rd
\name{directpvalcombi} \alias{directpvalcombi} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Direct p-value combination } \description{ Combines one sided p-values with the inverse normal method. } \usage{ directpvalcombi(pvalonesided, nrep, BHth = 0.05) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{pvalonesided}{ List of vectors of one sided p-values to be combined. } \item{nrep}{ Vector of numbers of replicates used in each study to calculate the previous one-sided p-values. } \item{BHth}{ Benjamini Hochberg threshold. By default, the False Discovery Rate is controlled at 5\%. } } \value{ List \item{DEindices}{ Indices of differentially expressed genes at the chosen Benjamini Hochberg threshold.} \item{TestStatistic}{Vector with test statistics for differential expression in the meta-analysis.} } \references{ Hedges, L. and Olkin, I. (1985). Statistical Methods for Meta-Analysis. Academic Press. } \author{ Guillemette Marot } \note{ One-sided p-values are required to avoid directional conflicts. Then a two-sided test is performed to find differentially expressed genes. } \keyword{ methods } \keyword{ models }% __ONLY ONE__ keyword per line
6c7592a365b106443166a8b20533dc36ec193689
3dfcad5e4ca29823a6e7899dcd22aaf7f5df971c
/R/CopyNumberDataSetTuple.R
b43029156a0b6c5d47e4fbe34306b2d913aa4616
[]
no_license
HenrikBengtsson/aroma.core
f22c931029acf55f3ad2fdb6eb3bc2f0d2ba04e4
1bf20e2b09f4b8c0ca945dfb26fdf1902c187109
refs/heads/master
2022-12-01T09:01:49.295554
2022-11-15T18:29:17
2022-11-15T18:29:52
20,845,682
2
3
null
2018-04-21T02:06:48
2014-06-15T01:32:43
R
UTF-8
R
false
false
2,753
r
CopyNumberDataSetTuple.R
setMethodS3("getAverageFile", "AromaMicroarrayDataSet", abstract=TRUE) # AD HOC. This is currently only implemented in the AffymetrixCelFile class, # but should be implemented by others too, alternatively be replaced by # a "better" method. /HB 2009-11-18. setMethodS3("getXAM", "AromaMicroarrayDataFile", protected=TRUE, abstract=TRUE) setConstructorS3("CopyNumberDataFile", function(...) { extend(Interface(), "CopyNumberDataFile") }) setMethodS3("as.CopyNumberDataFile", "CopyNumberDataFile", function(this, ...) { this }) setMethodS3("hasAlleleBFractions", "CopyNumberDataFile", abstract=TRUE) setMethodS3("hasStrandiness", "CopyNumberDataFile", abstract=TRUE) setMethodS3("getNumberOfFilesAveraged", "CopyNumberDataFile", protected=TRUE, abstract=TRUE) setConstructorS3("CopyNumberDataSet", function(...) { extend(Interface(), "CopyNumberDataSet") }) setMethodS3("as.CopyNumberDataSet", "CopyNumberDataSet", function(this, ...) { this }) setMethodS3("hasAlleleBFractions", "CopyNumberDataSet", function(this, ...) { if (length(this) == 0L) { throw("Cannot infer hasAlleleBFractions(). No data files: ", getFullName(this)) } hasAlleleBFractions(getOneFile(this)) }) setMethodS3("hasStrandiness", "CopyNumberDataSet", function(this, ...) { if (length(this) == 0L) { throw("Cannot infer hasStrandiness(). No data files: ", getFullName(this)) } hasStrandiness(getOneFile(this)) }) setConstructorS3("CopyNumberDataSetTuple", function(...) { extend(Interface(), "CopyNumberDataSetTuple") }) setMethodS3("as.CopyNumberDataSetTuple", "CopyNumberDataSetTuple", function(this, ...) { this }) setMethodS3("as.CopyNumberDataSetTuple", "list", function(this, ...) { n <- length(this) if (n == 0) { throw("Cannot coerce to CopyNumberDataSetTuple: The list is empty.") } # Infer the data set tuple class by coercing the first data set. 
set <- this[[1]] tuple <- as.CopyNumberDataSetTuple(set) # Then coerce the list newInstance(tuple, this) }) setMethodS3("hasAlleleBFractions", "CopyNumberDataSetTuple", function(this, ...) { cesList <- getSets(this) res <- sapply(cesList, FUN=hasAlleleBFractions) # Sanity check if (length(unique(res)) != 1) { throw("Inconsistent data sets: The ", class(this)[1], " contains data sets where some have combined the alleles and some have not.") } res <- res[1] res }) setMethodS3("hasStrandiness", "CopyNumberDataSetTuple", function(this, ...) { cesList <- getSets(this) res <- sapply(cesList, FUN=hasStrandiness) # Sanity check if (length(unique(res)) != 1) { throw("Inconsistent data sets: The ", class(this)[1], " contains data sets where some have data by strand and some have not.") } res <- res[1] res })
3ef96cdac75901fd28d03f4967d7ba489ec30348
639e8fc3a59d319dfd1ea257570e4f4d71a55dd7
/Course3_DataClean/week 2/hdf5_intro.R
8c43cb65d17439cf85fb55e419a4924d8f3b53e8
[]
no_license
Bttobar/Bttobar-JH_datascience
fc4bdac65dba3948c575ce5130ad48fc7dcf2e1f
40bfbb305eafaf597c3b5fbc2fb5477e5e8082d1
refs/heads/master
2023-01-08T00:37:27.622459
2020-11-02T02:16:56
2020-11-02T02:16:56
291,361,665
0
0
null
null
null
null
UTF-8
R
false
false
1,512
r
hdf5_intro.R
# HDF5 otro formato para guardar los datos. # Caracteristicas: para guardar large data sets + alta variedad de tipos # groups => group header (name + attrib) # => group symbol (lista de objetos) # datasets => header (name + metadata como datatype ...) + arreglo de la data. # www.hdfgroup.org if (!requireNamespace("BiocManager", quietly = TRUE)) install.packages("BiocManager") BiocManager::install(version = "3.11") BiocManager::install("rhdf5") library(rhdf5) # diferencia con require ? con el library cargamos load el paquete. created = h5createFile("example.h5") # con esto creamos un set del tipo hdf5 created = h5createGroup("example.h5","tech") # se crean las diferentes categorias o grupos (rows) created = h5createGroup("example.h5","ml") created = h5createGroup("example.h5","ml/industry ia") # subgrupos. h5ls("example.h5") # leemos el arreglo. A = matrix(1:10,nr = 5, nc= 2) h5write(A,"example.h5","tech/A") B = array(seq(0.1,2.0,0.1),dim = c(5,2,2)) # 2 matrices de 5 filas por 2 columnas attr(B,"scale") <- "liter" h5write(B,"example.h5","ml/industry ia/B") h5ls("example.h5") df = data.frame(1L:5L,seq(0,1,length.out = 5)) #c("ab","cde","fghi","a","s"), stringAsFactors = FALSE) h5write(df,"example.h5","df") h5ls("example.h5") readA = h5read("example.h5","tech/A") readB = h5read("example.h5","ml/industry ia/B") readdf = h5read("example.h5","df") readA; readB; readdf h5write(c(12,13,14),"example.h5","tech/A",index=list(1:3,1)) h5read("example.h5","tech/A")
a566c9702a1d96ff350b714a8426df086082e50b
e55eba9dc09a0eaea779a5fb71e878b53b037a79
/tests/testthat/test-linear.R
8915c4cda056e41b533d8b00d86960ed7728900a
[]
no_license
LTLA/InteractionSet
cb76c66d4e2cbd357c0eac022c0f90fd750e54f2
62cd50ab8dd16807b257621dc68078d7fdfaa350
refs/heads/master
2023-06-28T10:57:07.566004
2023-06-13T21:11:31
2023-06-13T21:11:31
45,793,595
3
3
null
2016-01-11T14:11:08
2015-11-08T18:43:25
R
UTF-8
R
false
false
4,961
r
test-linear.R
# Testing the linearize function for InteractionSet objects.
# library(InteractionSet); library(testthat); source("test-linear.R")
# NOTE(review): these tests require the Bioconductor packages InteractionSet,
# GenomicRanges and S4Vectors to be attached; they are not stand-alone.

# Build a reproducible fixture: random regions on two chromosomes and an
# InteractionSet of Np random anchor pairs with Poisson counts over 4 libraries.
set.seed(200)
N <- 50
all.starts <- round(runif(N, 1, 100))
all.ends <- all.starts + round(runif(N, 5, 20))
all.regions <- GRanges(rep(c("chrA", "chrB"), c(N-10, 10)), IRanges(all.starts, all.ends))
all.regions <- unique(all.regions)
N <- length(all.regions)

Np <- 100
all.anchor1 <- sample(N, Np, replace=TRUE)
all.anchor2 <- sample(N, Np, replace=TRUE)
Nlibs <- 4
counts <- matrix(rpois(Np*Nlibs, lambda=10), ncol=Nlibs)
colnames(counts) <- seq_len(Nlibs)
x <- InteractionSet(counts, GInteractions(all.anchor1, all.anchor2, all.regions))

####################################

test_that("conversion via linearization is working", {
    # Map each anchor index to its position in the sorted region set, since
    # linearize() reports ranges relative to the sorted regions.
    o <- order(all.regions)
    new.regions <- all.regions[o]
    new.pos <- integer(length(o))
    new.pos[o] <- seq_along(new.pos)
    new.anchor1 <- new.pos[all.anchor1]
    new.anchor2 <- new.pos[all.anchor2]

    for (interest in seq_len(N)) {
        cur.reg <- regions(x)[interest]
        out <- linearize(x, interest)
        # Linearizing on region 'interest' should keep exactly the
        # interactions touching that region, with assays preserved.
        chosen <- new.anchor1==interest | new.anchor2==interest
        expect_identical(assay(out), assay(x[chosen,]))
        expect_output(show(out), sprintf("class: RangedSummarizedExperiment dim: %i 4 metadata(0): assays(1): '' rownames: NULL rowData names(0): colnames(4): 1 2 3 4 colData names(0):", sum(chosen)), fixed=TRUE)

        # The reported range should be the anchor that is NOT the reference
        # region (falling back to the second anchor when the first matches).
        new.ranges <- anchors(x, type="first")
        not1 <- new.ranges!=cur.reg
        new.ranges[!not1] <- anchors(x, type="second")[!not1]
        new.ranges <- new.ranges[chosen]
        expect_identical(rowRanges(out), new.ranges)

        # Comparing with equivalent GRanges method.
        out2 <- linearize(x, cur.reg, type="equal")
        expect_equal(out, out2)

        # Also checking with GInteractions methods.
        out3 <- linearize(interactions(x), cur.reg, type="equal")
        expect_identical(out3, rowRanges(out2))
        out3 <- linearize(interactions(x), interest)
        expect_identical(out3, rowRanges(out2))
    }
})

test_that("linearization behaves correctly with silly inputs", {
    # Empty selections (index 0, empty object, unknown chromosome) must all
    # yield zero-row results rather than errors.
    expect_identical(nrow(linearize(x, 0)), 0L)
    expect_identical(nrow(linearize(x[0,], 1)), 0L)
    expect_identical(nrow(suppressWarnings(linearize(x, GRanges("chrC", IRanges(1,1))))), 0L)
})

####################################

test_that("linearization works with multiple overlaps", {
    # Linearize against all of chrA; track the original interaction rows via
    # an Index column so anchors can be recovered after subsetting.
    lenA <- max(end(regions(x)[seqnames(regions(x))=="chrA"]))
    x2 <- x
    mcols(x2)$Index <- seq_len(nrow(x2))
    out <- linearize(x2, GRanges("chrA", IRanges(1, lenA)))
    all.a <- anchors(x2[mcols(out)$Index])
    mcols(out)$Index <- NULL

    # Testing for proper merging of internal ranges.
    is.same <- as.logical(seqnames(all.a$first)==seqnames(all.a$second))
    expect_identical(pmin(start(all.a$first), start(all.a$second))[is.same], start(rowRanges(out))[is.same])
    expect_identical(pmax(end(all.a$first), end(all.a$second))[is.same], end(rowRanges(out))[is.same])
    expect_identical(seqnames(all.a$first)[is.same], seqnames(rowRanges(out))[is.same])

    # Cross-chromosome interactions should report the chrB anchor unchanged.
    is.B1 <- as.logical(seqnames(all.a$first)=="chrB")
    expect_identical(all.a$first[is.B1], rowRanges(out)[is.B1])
    is.B2 <- as.logical(seqnames(all.a$second)=="chrB")
    expect_identical(all.a$second[is.B2], rowRanges(out)[is.B2])

    # A multi-chromosome reference is explicitly unsupported.
    lenB <- max(end(regions(x)[seqnames(regions(x))=="chrB"]))
    expect_error(linearize(x2, GRanges(c("chrA", "chrB"), IRanges(1, c(lenA, lenB)))), "multi-chromosome sets of 'ref' are not supported")
})

test_that("linearization correctly ignores internal interactions", {
    # With internal=FALSE, interactions whose both anchors lie inside the
    # reference region must be dropped entirely.
    lenA <- max(end(regions(x)[seqnames(regions(x))=="chrA"]))
    x2 <- x
    mcols(x2)$Index <- seq_len(nrow(x2))
    out <- linearize(x2, GRanges("chrA", IRanges(1, lenA)), internal=FALSE)
    all.a <- anchors(x2[mcols(out)$Index])
    mcols(out)$Index <- NULL

    is.diff <- as.logical(seqnames(all.a$first)!=seqnames(all.a$second))
    expect_true(all(is.diff))

    is.B1 <- as.logical(seqnames(all.a$first)=="chrB")
    expect_identical(all.a$first[is.B1], rowRanges(out)[is.B1])
    is.B2 <- as.logical(seqnames(all.a$second)=="chrB")
    expect_identical(all.a$second[is.B2], rowRanges(out)[is.B2])
})

# Testing what happens with region and interaction-specific metadata.
set.seed(100)
test_that("linearization preserves metadata", {
    out <- linearize(x, 1)
    # Attach per-interaction (Blah, Index) and per-region (Foo) metadata and
    # check both are carried through to the linearized result.
    mcols(x) <- DataFrame(Blah=runif(nrow(x)), Index=seq_len(nrow(x)))
    mcols(regions(x)) <- DataFrame(Foo=runif(length(regions(x))))
    out2 <- linearize(x, 1)
    expect_identical(colnames(mcols(out2)), c("Foo", "Blah", "Index"))
    expect_identical(mcols(out2)$Blah, mcols(x)$Blah[mcols(out2)$Index])

    # Region metadata should come from the non-reference anchor of each row.
    a1 <- anchors(x, type="first", id=TRUE)
    a2 <- anchors(x, type="second", id=TRUE)
    index <- ifelse(a1!=1L, a1, a2)[mcols(out2)$Index]
    expect_identical(mcols(out2)$Foo, mcols(regions(x))$Foo[index])
})
3a8408ee8aa782f0dc381cfca03cc60304cda71c
9046be09971d3d8ed2c997cc9524593d00a65d74
/R/boxM.R
a741f32863eb6b0459d4967eaed4cb000ea74359
[]
no_license
GabrielHoffman/decorate
e915a43b078d2cebe0db5e1f4381e039767e09c8
9b4109aab277e8e884e387772d567db00e0f6562
refs/heads/master
2023-05-28T10:53:47.066330
2023-05-17T18:28:03
2023-05-17T18:28:03
167,074,817
4
1
null
null
null
null
UTF-8
R
false
false
8,949
r
boxM.R
# Compute the log-determinant from the leading eigenvalues only.
#
# Eigenvalues at or below `tol` are treated as numerically zero and dropped,
# so near-singular correlation/covariance matrices yield a finite partial
# log-determinant instead of -Inf / NaN.
#
# C   : square numeric matrix (e.g. a cor/cov matrix)
# tol : eigenvalues <= tol are discarded from the sum
det_partial = function( C, tol=1e-10 ){

	if( ncol(C) != nrow(C) ){
		stop("Matrix must be square")
	}

	dcmp = eigen( C, only.values=TRUE)
	evalues = dcmp$values

	# log determinant is the sum of log eigen-values;
	# restricted to the eigenvalues exceeding tol (partial sum)
	sum( log(evalues[evalues > tol]) )
}

#' Box's M-test
#'
#' Box's M-test for homogeneity of correlation (or covariance) matrices
#' across the levels of \code{group}.  Log-determinants are computed from
#' eigenvalues above \code{tol}, so rank-deficient matrices are tolerated.
#'
#' @param Y response matrix
#' @param group factor defining groups
#' @param tol tolerance for eigen values
#' @param fxn function used to summarize each group. The default \code{cor}
#'   compares correlation structure; use \code{cov} to compare covariance
#'   structure like in \code{heplots::boxM}
#' @param method specify which correlation method: "pearson", "kendall" or "spearman"
#'
#' @return An object of classes \code{"htest"} and \code{"boxM"} containing
#'   the approximate chi-squared statistic, degrees of freedom, p-value,
#'   per-group matrices, pooled matrix, log-determinants and group means.
#'
#' @examples
#' data(iris)
#'
#' boxM( iris[,1:4], iris[,5])
#'
#' @importFrom stats complete.cases aggregate pchisq
#' @export
boxM <- function(Y, group, tol=1e-10, fxn=cor, method= c("pearson", "kendall", "spearman")){

	dname <- deparse(substitute(Y))
	method = match.arg( method )

	if (!inherits(Y, c("data.frame", "matrix")))
		stop(paste(dname, "must be a numeric data.frame or matrix!"))
	if (length(group) != nrow(Y))
		stop("incompatible dimensions in Y and group!")

	Y <- as.matrix(Y)
	gname <- deparse(substitute(group))
	if (!is.factor(group)) group <- as.factor(as.character(group))

	# Drop rows with missing data in either Y or group.
	valid <- complete.cases(Y, group)
	if (nrow(Y) > sum(valid)){
		warning(paste(nrow(Y) - sum(valid)), " cases with missing data have been removed.")
	}
	Y <- Y[valid,]
	group <- group[valid]

	p <- ncol(Y)
	nlev <- nlevels(group)
	lev <- levels(group)
	dfs <- tapply(group, group, length) - 1
	if( any(dfs < p) ){
		warning("there are one or more levels with fewer observations than variables!")
	}

	mats <- aux <- list()
	for(i in seq_len(nlev) ){
		# BUG FIX: cor() was hard-coded here, silently ignoring the `fxn`
		# argument.  Use `fxn` so fxn=cov compares covariance structure as
		# documented (both cor() and cov() accept `method`).
		mats[[i]] <- fxn( Y[group == lev[i], ], method=method )
		aux[[i]] <- mats[[i]] * dfs[i]
	}
	names(mats) <- lev
	# Pooled (df-weighted) within-group matrix.
	pooled <- Reduce("+", aux) / sum(dfs)

	# Partial log-determinants: eigenvalues <= tol are dropped.
	logdet <- vapply(mats, det_partial, tol=tol, numeric(1))
	logdet_pooled = det_partial( pooled, tol=tol )

	# -2 log M, with Box's small-sample correction factor Co.
	minus2logM <- sum(dfs) * logdet_pooled - sum(logdet * dfs)
	sum1 <- sum(1 / dfs)

	Co <- (((2 * p^2) + (3 * p) - 1) / (6 * (p + 1) * (nlev - 1))) * (sum1 - (1 / sum(dfs)))

	X2 <- minus2logM * (1 - Co)
	dfchi <- (choose(p, 2) + p) * (nlev - 1)
	pval <- pchisq(X2, dfchi, lower.tail = FALSE)

	# Per-group and pooled column means, for reporting.
	means <- aggregate(Y, list(group), mean)
	rn <- as.character(means[,1])
	means <- means[,-1]
	means <- rbind( means, colMeans(Y) )
	rownames(means) <- c(rn, "pooled")

	# NOTE: the reported pooled logDet uses the full determinant, while the
	# test statistic above uses the partial (tol-truncated) one.
	logdet <- c(logdet, pooled=log(det(pooled)))
	df <- c(dfs, pooled=sum(dfs))

	out <- structure(
		list(statistic = c("Chi-Sq (approx.)" = X2),
			parameter = c(df = dfchi),
			p.value = pval,
			cov = mats, pooled = pooled, logDet = logdet, means = means, df=df,
			data.name = dname, group = gname,
			method = "Box's M-test for Homogeneity of Covariance Matrices"
			),
		class = c("htest", "boxM")
		)

	out$logdet = logdet
	# Difference of the first two group log-determinants (two-group case only).
	out$stat_logdet = ifelse(length(logdet) == 3, logdet[2] - logdet[1], NA)

	return(out)
}

# Internal, stripped-down variant of boxM(): no input validation and no
# summary components.  Assumes `group` is already a factor and `Y` is a
# complete numeric matrix.  Returns only the statistic, p-value, df and
# log-determinant summaries.
boxM_basic <- function(Y, group, tol=1e-10, fxn=cor, method= c("pearson", "kendall", "spearman")){

	p <- ncol(Y)
	nlev <- nlevels(group)
	lev <- levels(group)
	dfs <- tapply(group, group, length) - 1

	mats <- aux <- list()
	for(i in seq_len(nlev) ){
		# Same fix as in boxM(): honor `fxn` instead of hard-coding cor().
		mats[[i]] <- fxn( Y[group == lev[i], ], method=method )
		aux[[i]] <- mats[[i]] * dfs[i]
	}
	names(mats) <- lev
	pooled <- Reduce("+", aux) / sum(dfs)

	logdet <- vapply(mats, det_partial, tol=tol, numeric(1))
	logdet_pooled = det_partial( pooled, tol=tol )

	minus2logM <- sum(dfs) * logdet_pooled - sum(logdet * dfs)
	sum1 <- sum(1 / dfs)

	Co <- (((2 * p^2) + (3 * p) - 1) / (6 * (p + 1) * (nlev - 1))) * (sum1 - (1 / sum(dfs)))

	X2 <- minus2logM * (1 - Co)
	dfchi <- (choose(p, 2) + p) * (nlev - 1)
	pval <- pchisq(X2, dfchi, lower.tail = FALSE)

	out = list( statistic = X2, p.value = pval, parameter = c(df = dfchi))
	out$logdet = logdet
	out$stat_logdet = ifelse(length(logdet) == 3, logdet[2] - logdet[1], NA)
	return(out)
}

#' Box's M-test
#'
#' boxM performs the Box's (1949) M-test for homogeneity of covariance
#' matrices obtained from multivariate normal data according to one or more
#' classification factors. The test compares the product of the log
#' determinants of the separate covariance matrices to the log determinant
#' of the pooled covariance matrix, analogous to a likelihood ratio test.
#' The test statistic uses a chi-square approximation.  Uses permutations
#' of the group labels to estimate the degrees of freedom under the null
#'
#' @param Y response variable matrix
#' @param group a factor defining groups, or a continuous variable, number of entries must equal nrow(Y)
#' @param nperm number of permutations of group variable used to estimate degrees of freedom under the null
#' @param method Specify type of correlation: "pearson", "kendall", "spearman"
#'
#' @examples
#' data(iris)
#'
#' boxM_permute(iris[, 1:4], iris[, "Species"])
#'
#' @return list of p.value, test statistic, and df.approx estimated by permutation
#'
#' @importFrom stats optimize pchisq dchisq
#' @seealso heplots::boxM
#' @export
boxM_permute = function(Y, group, nperm=200, method=c("pearson", "kendall", "spearman")){

	dname <- deparse(substitute(Y))
	if (!inherits(Y, c("data.frame", "matrix")))
		stop(paste(dname, "must be a numeric data.frame or matrix!"))
	if (length(group) != nrow(Y))
		stop("incompatible dimensions in Y and group!")

	Y <- as.matrix(Y)
	gname <- deparse(substitute(group))
	if (!is.factor(group)) group <- as.factor(as.character(group))

	valid <- complete.cases(Y, group)
	if (nrow(Y) > sum(valid)){
		warning(paste(nrow(Y) - sum(valid)), " cases with missing data have been removed.")
	}
	Y <- Y[valid,]
	group <- group[valid]

	p <- ncol(Y)
	nlev <- nlevels(group)
	lev <- levels(group)
	dfs <- tapply(group, group, length) - 1
	# The statistic is undefined when any group has fewer rows than columns;
	# return NA placeholders rather than erroring.
	if( any(dfs < p) ){
		warning("there are one or more levels with fewer observations than variables!")
		return(list( p.value = NA,
			statistic = NA,
			df.approx = NA,
			stat_logdet = NA))
	}

	method = match.arg( method )

	# fit statistic for real data
	# NOTE(review): boxM_fast() is defined elsewhere in this package; the code
	# below reads both fit$X2 and fit$statistic — confirm which field it
	# actually returns, as only one is likely populated.
	fit = boxM_fast( Y, group, method)

	# fit statistic for permuted group labels: null distribution of X2
	chisq_stats = vapply( seq_len(nperm), function(i){
		idx = sample.int(length(group), length(group))
		fit = boxM_fast( Y, group[idx], method)
		fit$X2
		}, numeric(1))

	# estimate the df that maximizes the chi-squared log-likelihood of the
	# permuted statistics
	opt = optimize( function(df, y){
		sum(dchisq(y, df=df, log=TRUE))
		}, interval=c(2, 1e6), y=chisq_stats, maximum=TRUE)

	df.approx = opt$maximum

	# compute p-value of the observed statistic under the estimated null df
	p.value = pchisq( fit$X2, df=df.approx, lower.tail=FALSE)
	# sum(chisq_stats > fit$statistic) / nperm

	list( p.value = as.numeric(p.value),
		statistic = fit$statistic,
		df.approx = df.approx,
		stat_logdet = fit$stat_logdet)
}
a09cdccb658883650c243f278e5a328a303baa6e
354066554dd1c3ca7a2d45e3673a2da86330f816
/R/grafico.R
178ed77cf1b7a218830d19f3568f982ebe34474b
[ "MIT" ]
permissive
elemosjr/planejamento
e8bfe3d0d9d255b1216a91d91a0a880fa2f3cb0c
f7cd310c6a59533b8d7b7ec8b03391a874f0cb51
refs/heads/master
2023-04-30T19:34:33.232372
2021-05-20T00:54:16
2021-05-20T00:54:16
366,199,524
0
0
null
null
null
null
UTF-8
R
false
false
1,464
r
grafico.R
#' Base para gerar graficos de comparação #' #' @import ggplot2 #' #' @description Funcao base para graficos de comparacoes multiplas. #' #' @param dados Data frame com colunas separadas para os tratamentos, resultados, e caso haja, blocos #' @param x String com o nome da coluna dos tratamentos #' @param y String com o nome da coluna dos resultados #' @param bloco String com o nome da coluna dos blocos, manter NULL caso não hajam blocos #' @param legenda String com a posição da legenda ("top", "bottom", "left", "right"), padrao = NULL #' #' @return Ggplot com geometria de pontos e facetes da variavel x #' #' @examples #' #' grafico(mtcars, "cyl", "hp", "gear") #' #' @export grafico <- function(dados, x, y, bloco = NULL, legenda = NULL) { if(!is.null(bloco)) { col_ <- bloco if(is.null(legenda)) { legenda <- "top" } } else { col_ <- x if(is.null(legenda)) { legenda <- "none" } } if(!is.factor(dados[[col_]])) dados[[col_]] <- as.factor(dados[[col_]]) dados %>% ggplot(aes(x = "", y = .data[[y]], col = .data[[col_]])) + geom_jitter() + facet_grid(. ~ .data[[x]]) + labs(x = x) + theme(legend.position = legenda, axis.text.x = element_blank(), axis.ticks.x = element_blank(), panel.background = element_rect(fill = "white"), plot.margin = margin(.5, 1, .5, 1, "cm"), plot.background = element_rect(colour = "black")) }
23abc5b40604038d2264359e7b232d7ee07b6f1b
15b43be459adec4f54507aafe759f61f1eadd5d5
/man/as_search.Rd
9033c6bd8e48fd60fd0429e4af354ec25a66b101
[ "MIT" ]
permissive
jpiaskowski/rtimes
7d5c5ffe3bc250f2ee1d5bee98a10dbd98c141b6
4eaef46ca60a35c879db68edba6a697418693850
refs/heads/master
2022-01-28T10:42:27.313696
2019-07-19T17:12:07
2019-07-19T17:12:07
null
0
0
null
null
null
null
UTF-8
R
false
true
4,427
rd
as_search.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as_search.R \name{as_search} \alias{as_search} \title{Article search} \usage{ as_search(q, fq = NULL, sort = NULL, begin_date = NULL, end_date = NULL, key = NULL, fl = NULL, hl = FALSE, page = 0, all_results = FALSE, try_flatten = FALSE, facet_field = NULL, facet_filter = NULL, sleep = 2, ..., callopts = list()) } \arguments{ \item{q}{Search query term. Search is performed on the article body, headline and byline.} \item{fq}{Filtered search query using standard Lucene syntax. The filter query can be specified with or without a limiting field: label. See \code{fq_fields} for the filter query fields.} \item{sort}{(character) Default NULL. One of newest or oldest . By default, search results are sorted by their relevance to the query term (q). Use the sort parameter to sort by pub_date.} \item{begin_date}{Begin date - Restricts responses to results with publication dates of the date specified or later. In the form YYYYMMDD} \item{end_date}{End date - Restricts responses to results with publication dates of the date specified or earlier. In the form YYYYMMDD} \item{key}{your New York Times API key; pass in, or loads from .Renviron as \code{NYTIMES_KEY}. See \code{\link{rtimes-package}} for info on rate limits.} \item{fl}{(character) Vector of fields to return. default: all fields} \item{hl}{(logical) Highlight or not, default: \code{FALSE}} \item{page}{Page number. The value of page corresponds to a set of 10 results (it does not indicate the starting number of the result set). For example, page=0 corresponds to records 0-9. To return records 10-19, set page to 1, not 10.} \item{all_results}{(logical) return all results. Default: \code{FALSE} Internally, we do paging for you. 
We get first up to 10 results, then if there's more we page through results to get all of them.} \item{try_flatten}{(logical) attempt to completely flatten data.frame, which may fail in some cases as its hard to account for all edge cases. Default: \code{FALSE}} \item{facet_field}{(character) Specifies the sets of facet values to include in the facets array at the end of response, which collects the facet values from all the search results. By default no facet fields will be returned. See Details for options.} \item{facet_filter}{(logical) Fields to facet on, as vector. When set to \code{TRUE}, facet counts will respect any applied filters (fq, date range, etc.) in addition to the main query term. To filter facet counts, specifying at least one facet_field is required.} \item{sleep}{(integer) seconds to sleep between successive requests, only used when \code{all_results=TRUE}} \item{...}{Futher args pass into query} \item{callopts}{Curl options (debugging tools mostly) passed to \code{\link[crul]{HttpClient}}} } \description{ Article search } \details{ \code{fl} parameter options are: \itemize{ \item web_url \item snippet \item lead_paragraph \item abstract \item print_page \item blog \item source \item multimedia \item headline \item keywords \item pub_date \item document_type \item news_desk \item byline \item type_of_material \item _id \item word_count } \code{facet_field} param options are: \itemize{ \item section_name \item document_type \item type_of_material \item source \item day_of_week } } \examples{ \dontrun{ # basic search - copyright, metadata, data, and facet slots (x <- as_search(q="bailout", begin_date = "20081001", end_date = '20081005', callopts = list(verbose = TRUE))) x$copyright x$meta x$data x$facet Sys.sleep(1) as_search(q="money", fq = 'The New York Times', fl = c('word_count', 'snippet', 'headline')) Sys.sleep(1) x <- as_search(q="bailout", hl = TRUE) x$data$snippet Sys.sleep(1) # all results (x <- as_search(q="bailout", begin_date = 
"20081001", end_date = '20081003', all_results = TRUE)) x$meta x$data Sys.sleep(1) # facetting as_search(q="bailout", facet_field = 'section_name', begin_date = "20081001", end_date = '20081201') Sys.sleep(1) ## with facet filtering as_search(q="bailout", facet_field = 'section_name', begin_date = "20081001", end_date = '20081201', facet_filter = TRUE) # curl options x <- as_search(q="bailout", begin_date = "20081001", end_date = '20081005', callopts = list(verbose = TRUE)) } } \references{ \url{http://developer.nytimes.com}, \url{https://developer.nytimes.com/docs/articlesearch-product/1/overview} }
836b023482c60609ad46b8bba9d26e4446b37c3e
887d5ff66955a6e9110d95398a8d08bcb3b0ba48
/pollutantmean.R
62dbd2d8f13b73cf998682f30e10264adc4d5ba2
[]
no_license
yelangya3826850/R_Language
a26342120b99430b2953feaac08798466174628f
f571536ce249bac4bc99d78264201b02bdc97ef0
refs/heads/master
2016-09-05T22:47:51.734170
2014-07-20T14:03:23
2014-07-20T14:03:23
null
0
0
null
null
null
null
UTF-8
R
false
false
1,577
r
pollutantmean.R
pollutantmean <- function(directory, pollutant, id = 1:332) { ## 'directory' is a character vector of length 1 indicating ## the location of the CSV files ## 'pollutant' is a character vector of length 1 indicating ## the name of the pollutant for which we will calculate the ## mean; either "sulfate" or "nitrate". ## 'id' is an integer vector indicating the monitor ID numbers ## to be used ## Return the mean of the pollutant across all monitors list ## in the 'id' vector (ignoring NA values) ## store current path ## get in to the path where we store data current_path <- getwd() ## load data id_1 <- id[id > 0 & id < 10] id_2 <- id[id >= 10 & id < 100] id_3 <- id[id >= 100] file_part1 <- paste(current_path, "/", directory, "/", rep("00",times = length(id_1)), id_1, rep(".csv",times = length(id_1)), sep = "") file_part2 <- paste(current_path, "/", directory, "/", rep("0",times = length(id_2)), id_2, rep(".csv",times = length(id_2)), sep = "" ) file_part3 <- paste(current_path, "/", directory, "/", id_3, rep(".csv",times = length(id_3)), sep = "") file <- c(file_part1, file_part2, file_part3) ## extract data we need info_vector <- c() for (filename in file) { filename data <- read.csv(filename) if (pollutant == "sulfate") { temp <- data$sulfate[!is.na(data$sulfate)] }else if(pollutant == "nitrate") { temp <- data$nitrate[!is.na(data$nitrate)] } info_vector <- c(info_vector,temp) } ## return result mean(info_vector) }
dccb42c6f28f6412ce38adecb0e78466e3a415da
914c515a6c85356ee148950f5d93be59ee112b4c
/man/wmed.Rd
40e516cfff24e5a16fc32d1d4f837ad7b86dbec7
[]
no_license
Mufabo/Rrobustsp
917fb8e9a353ffc7a838544fa5cd1d39eae34f6c
1700f4fed27e63cec6dfb67a14a208d8f46e2418
refs/heads/master
2022-06-14T22:15:50.045970
2022-06-05T12:13:32
2022-06-05T12:13:32
222,286,547
0
3
null
2019-12-19T14:17:46
2019-11-17T17:36:00
R
UTF-8
R
false
true
1,101
rd
wmed.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Regression.R \name{wmed} \alias{wmed} \title{weighted median} \usage{ wmed(y, w, verbose = F, tol = 1e-07, iter_max = 2000) } \arguments{ \item{y}{: (numeric) data given (real or complex)} \item{w}{: (number) positive real-valued weights. Inputs need to be of same length} \item{verbose:}{(logical) true or false (default). Set as true if you wish to see convergence as iterations evolve} \item{tol:}{threshold for iteration in complex case \cr default = 1e-7} \item{iter_max:}{number of iterations in complex case \cr default = 2000} } \value{ beta: (numeric) weighted median converged: (logical) flag of convergence (in the complex-valued data case) iter: (numeric) the number of iterations (complex-valued case) } \description{ wmed computes the weighted median for data vector y and weights w, i.e. it solves the optimization problem: } \details{ beta = arg min_b SUM_i | y_i - b | * w_i } \examples{ wmed(1:5, c(1,0,1,2,3)) wmed(1:5 +1i, c(1,0,1,2,3)) }
68c20f33351cbd027c10ccbeb9ab6186dc0403e5
c007f1b6847cf9909102d63d440cf8399a083412
/R/get_cpt_functions.R
efb29abbf3be5170202869ae6ccb2beb85029c89
[]
no_license
aarmiller/delayDX
36c67444f475e624a48ec6c9f78ab1ec44c44f03
fe3d715c6fc4b68ec564979660529843cf313349
refs/heads/master
2023-06-16T18:09:01.527192
2021-07-09T15:37:06
2021-07-09T15:37:06
274,458,924
1
1
null
2021-07-09T15:37:06
2020-06-23T16:43:49
R
UTF-8
R
false
false
6,019
r
get_cpt_functions.R
#' Return outpatient core information for a specific list of cpt codes
#' @name gether_proc_data_outpatient
#' @param table_name The specific outpatient core table to access
#' @param cpt_codes The cpt codes of interest
#' @param proc_vars List of variables to return
#' @param db_path Path to the database
#' @param collect_n The number of observations to return. Default is Inf
#' @return A tibble with cpt code of interest for the specified list of variables
#' @export
gether_proc_data_outpatient <- function(table_name, cpt_codes, proc_vars =c(), db_path, collect_n = Inf) {
  # NOTE(review): src_sqlite() is deprecated in modern dplyr/dbplyr; consider
  # DBI::dbConnect(RSQLite::SQLite(), db_path) when this is next touched.
  db_con <- src_sqlite(db_path)
  tbl_name <- table_name

  # Columns actually present in the table.
  get_vars <- dplyr::tbl(db_con, tbl_name) %>% dplyr::tbl_vars()

  # Keep only the requested variables that exist; NULL means "all columns".
  if (is.null(proc_vars)) {
    get_vars <- get_vars
  } else {
    get_vars <- proc_vars[proc_vars %in% get_vars]
  }

  # Filter rows whose primary procedure code matches, then pull into memory.
  # NOTE(review): select(get_vars) passes an external character vector;
  # dplyr prefers select(all_of(get_vars)) to avoid name ambiguity.
  out <- db_con %>%
    dplyr::tbl(tbl_name) %>%
    dplyr::filter(.data$proc1 %in% cpt_codes) %>%
    dplyr::select(get_vars) %>%
    dplyr::collect(n = collect_n)

  return(out)
}

#' Return inpatient core information for a specific list of cpt codes
#' @name gether_proc_data_inpatient
#' @param table_name The specific inpatient core table to access
#' @param cpt_codes The cpt codes of interest
#' @param proc_vars List of variables to return
#' @param db_path Path to the database
#' @param collect_n The number of observations to return. Default is Inf
#' @return A tibble with cpt code of interest for the specified list of variables
#' @export
gether_proc_data_inpatient <- function(table_name, cpt_codes, proc_vars = c(), db_path, collect_n = Inf) {
  db_con <- src_sqlite(db_path)

  # Derive the companion "proc" table name from the core table name,
  # e.g. "inpatient_core_ccae_2010" -> "inpatient_proc_ccae_2010".
  split_table_name <- stringr::str_split(table_name, "_")[[1]]
  tbl_name1 <- paste0(split_table_name[1], "_proc_", split_table_name[3], "_", split_table_name[4])

  # Procedure rows matching the requested CPT codes.
  out1 <- db_con %>%
    dplyr::tbl(tbl_name1) %>%
    dplyr::filter(.data$proc %in% cpt_codes) %>%
    dplyr::collect(n = collect_n)

  tbl_name <- table_name
  get_vars <- dplyr::tbl(db_con, tbl_name) %>% dplyr::tbl_vars()

  # Keep only the requested variables that exist; NULL means "all columns".
  if (is.null(proc_vars)) {
    get_vars <- get_vars
  } else {
    get_vars <- proc_vars[proc_vars %in% get_vars]
  }

  # Core rows (always include caseid so we can join back to out1).
  out <- db_con %>%
    dplyr::tbl(tbl_name) %>%
    dplyr::select(c("caseid",get_vars)) %>%
    dplyr::collect(n = collect_n)

  # Join procedures to core rows on caseid (implicit join key), rename to
  # match the outpatient output, and drop join-only helper columns.
  out <- out %>%
    inner_join(out1) %>%
    rename(proc1=proc) %>%
    select(-caseid, -proc_num)

  return(out)
}

#' Return inpatient and outpatient core information for a specific list of cpt codes over multiple core tables (in parallel)
#' @name gather_proc_data
#' @param collect_tab A tibble with the specific setting (i.e. inpatient or outpatient), source (i.e. ccae or mdcr), and year to access.
#'        Default is all possible combinations of setting, source, and year
#' @param cpt_codes The cpt codes of interest
#' @param proc_vars List of variables to return
#' @param db_path Path to the database
#' @param collect_n The number of observations to return
#' @param num_cores The number of worker cores to use. If not specified will determine the number of cores based on whichever
#'        is the smallest value between number of rows in collect_tab or detected number of cores - 1
#' @return A tibble with cpt code of interest for the specified list of variables
#' @export
gather_proc_data <- function (collect_tab = collect_table(), cpt_codes,
                              proc_vars=c("enrolid", "proc1", "svcdate", "admdate", "proc","disdate"),
                              db_path, collect_n = Inf, num_cores = NULL) {

  # One database table name per (setting, source, year) combination.
  tab <- collect_tab %>% mutate(table = paste0(setting, "_core_", source, "_", year))
  out_tab <- tab %>% filter(setting == "outpatient")
  in_tab <- tab %>% filter(setting == "inpatient")

  # Local copies so the worker closures capture plain values.
  db_path2 <- db_path
  cpt_codes2 <- cpt_codes
  proc_vars2 <- proc_vars
  collect_n2 <- collect_n

  # set up clusters
  if (is.null(num_cores)) {
    num_cores <- min(nrow(tab), parallel::detectCores() - 1)
  } else {
    num_cores <- num_cores
  }

  cluster <- parallel::makeCluster(num_cores)
  # Workers need the two fetch helpers plus tidyverse/dplyr attached.
  parallel::clusterExport(cluster, varlist = c("gether_proc_data_outpatient", "gether_proc_data_inpatient"))
  parallel::clusterCall(cluster, function() library(tidyverse))
  parallel::clusterCall(cluster, function() library(dplyr))

  # Fetch every outpatient table, then every inpatient table, in parallel.
  tmp_out <- parallel::parLapply(cl = cluster,
                                 1:length(out_tab$table),
                                 function(x){gether_proc_data_outpatient(table_name = out_tab$table[x],
                                                                         cpt_codes = cpt_codes2,
                                                                         proc_vars = proc_vars2,
                                                                         db_path = db_path2,
                                                                         collect_n = collect_n2)})

  tmp_in <- parallel::parLapply(cl = cluster,
                                1:length(in_tab$table),
                                function(x){gether_proc_data_inpatient(table_name = in_tab$table[x],
                                                                       cpt_codes = cpt_codes2,
                                                                       proc_vars = proc_vars2,
                                                                       db_path = db_path2,
                                                                       collect_n = collect_n2)})

  parallel::stopCluster(cluster)
  gc()

  # Nest each table's result into a single cpt_data list-column row, then
  # bind the per-table rows back onto their (setting, source, year) keys.
  # NOTE(review): growing a tibble with bind_rows() inside a for loop is
  # O(n^2); bind_rows(lapply(...)) would do this in one pass.
  out_outpatient <- tibble()
  for (i in 1:length(tmp_out)){
    x <- tmp_out[[i]] %>% nest(cpt_data = everything())
    out_outpatient <- bind_rows(out_outpatient, x)
  }
  outpatient <- bind_cols(out_tab %>% select(-table), out_outpatient)

  out_inpatient <- tibble()
  for (i in 1:length(tmp_in)){
    x <- tmp_in[[i]] %>% nest(cpt_data = everything())
    out_inpatient <- bind_rows(out_inpatient, x)
  }
  inpatient <- bind_cols(in_tab %>% select(-table), out_inpatient)

  out <- bind_rows(outpatient, inpatient)
  return(out)
}
616e986d574add0665bce4d883a48322161fd546
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
/FDA_Pesticide_Glossary/triazole-1-propaneni.R
30e97ed3bd6f0234d277772dd35fbca31e05e2d1
[ "MIT" ]
permissive
andrewdefries/andrewdefries.github.io
026aad7bd35d29d60d9746039dd7a516ad6c215f
d84f2c21f06c40b7ec49512a4fb13b4246f92209
refs/heads/master
2016-09-06T01:44:48.290950
2015-05-01T17:19:42
2015-05-01T17:19:42
17,783,203
0
1
null
null
null
null
UTF-8
R
false
false
276
r
triazole-1-propaneni.R
library("knitr") library("rgl") #knit("triazole-1-propaneni.Rmd") #markdownToHTML('triazole-1-propaneni.md', 'triazole-1-propaneni.html', options=c("use_xhml")) #system("pandoc -s triazole-1-propaneni.html -o triazole-1-propaneni.pdf") knit2html('triazole-1-propaneni.Rmd')
40b98333e44b4c9f9b42ababa5311e0a26d6dc4a
8dda6c31530411c6328481a3b63ac0bb27320741
/R scripts/code_for_week10_part1.R
8568877404e93ff14dd330a0dc24f9031832f413
[ "MIT" ]
permissive
guhanavel/DSA1101-Introduction-to-Data-Science
fc1ccf855a2313a9ab8e04a03094cd9031755ae3
2c5e7f1ef53c2cac77ab3ad20c6d9e2a78e2ac15
refs/heads/master
2022-04-17T16:18:39.661539
2020-03-17T15:24:39
2020-03-17T15:24:39
null
0
0
null
null
null
null
UTF-8
R
false
false
1,158
r
code_for_week10_part1.R
install.packages('arules') install.packages('arulesViz') library('arules') library('arulesViz') data(Groceries) Groceries@itemInfo[1:10,] Groceries@data[,100:110] apply(Groceries@data[,100:105], 2, function(r) paste(Groceries@itemInfo[r,"labels"], collapse=", ")) itemsets <- apriori(Groceries, parameter=list(minlen=1, maxlen=1, support=0.02, target="frequent itemsets")) summary(itemsets) inspect(head(sort(itemsets, by = "support"), 10)) itemsets <- apriori(Groceries, parameter=list(minlen=3, maxlen=3, support=0.02, target="frequent itemsets")) summary(itemsets) inspect(sort(itemsets, by ="support")) itemsets <- apriori(Groceries, parameter=list(minlen=4, maxlen=4, support=0.02, target="frequent itemsets")) summary(itemsets) inspect(sort(itemsets, by ="support"))> itemsets <- apriori(Groceries, parameter=list(minlen=1, support=0.02, target="frequent itemsets")) summary(itemsets) rules <- apriori(Groceries, parameter=list(support=0.001, confidence=0.6, target = "rules")) inspect(head(sort(rules, by="lift"), 3)) highLiftRules <- head(sort(rules, by="lift"), 5) plot(highLiftRules, method="graph", control=list(alpha=1))
09b66a9010223a8171c4e93a75f452bf1701b26f
e351e76746b37fb9c3b2bf0f2fd24eff8c542647
/HandsOnTSA/Chapter 02 Working with date and time objects.R
b4677b5ab068f4c6cbd1e23379c26ab877bc953b
[]
no_license
PyRPy/TimeSeries_R
2a26fcba2cd8fc2388f543e4f8944a20c9199bfb
a94394f1a2882157d4f58c915592f77f568cae83
refs/heads/master
2022-05-20T02:34:47.970015
2022-03-14T01:02:38
2022-03-14T01:02:38
156,481,087
2
3
null
null
null
null
UTF-8
R
false
false
8,081
r
Chapter 02 Working with date and time objects.R
# Chapter 2: Working with date and time objects
# Tutorial script intended to be run chunk-by-chunk, interactively.
# NOTE(review): chunks 5 and 31 read "dates_formats.csv" from the working
# directory, and chunks 30 onward require the lubridate package.

# -------- Code Chunk 1 --------
# The Sys.Date function gets the current date from the system
date <- Sys.Date()
date
class(date)

# The Sys.time function gets the current date and time from the system
# The default format is POSIXct
time_ct <- Sys.time()
time_ct
class(time_ct)

# Converting the POSIXct object to POSIXlt
time_lt <- as.POSIXlt(time_ct)
time_lt
class(time_lt)

# -------- Code Chunk 2 --------
# Unclass the POSIXct object (reveals the underlying numeric value)
unclass(time_ct)

# Unclass the POSIXlt object (reveals the underlying component list)
unclass(time_lt)

# -------- Code Chunk 3 --------
# Querying for the second value
unclass(time_lt)$sec

# Querying for the day-of-the-year value
unclass(time_lt)$yday

# -------- Code Chunk 4 --------
date <- as.Date("2014-5-12")
date
class(date)

# Creating POSIXct and POSIXlt objects
# Setting the time zone to EST
time_ct <- as.POSIXct("2014-5-12 20:05:35", tz = "EST")
time_ct
class(time_ct)

time_lt <- as.POSIXlt("2014-5-12 20:05:35", tz = "EST")
time_lt
class(time_lt)

# -------- Code Chunk 5 --------
# dates_df <- read.csv(url, stringsAsFactors = FALSE)
dates_df <- read.csv("dates_formats.csv", stringsAsFactors = FALSE)

# -------- Code Chunk 6 --------
str(dates_df)

# -------- Code Chunk 7 --------
dates_df$Japanese_format_new <- as.Date(dates_df$Japanese_format)

# -------- Code Chunk 8 --------
head(dates_df[, c("Japanese_format", "Japanese_format_new")])

# -------- Code Chunk 9 --------
identical(dates_df$Japanese_format, dates_df$Japanese_format_new)
class(dates_df$Japanese_format)
class(dates_df$Japanese_format_new)

# -------- Code Chunk 10 --------
as.Date("31-01-2018")

# -------- Code Chunk 11 --------
as.Date("31-01-2018", format = "%d-%m-%Y")

# -------- Code Chunk 12 --------
# Excel US date format
dates_df$US_format[1]
dates_df$US_format_new <- as.Date(dates_df$US_format, format = "%m/%d/%Y")

# Excel US date long format
dates_df$US_long_format[1]
dates_df$US_long_format_new <- as.Date(dates_df$US_long_format, format = "%A, %B %d, %Y")

# Excel Canada format
dates_df$CA_mix_format[1]
dates_df$CA_mix_format_new <- as.Date(dates_df$CA_mix_format, format = "%B %d, %Y")

# Excel South Africa format
dates_df$SA_mix_format[1]
dates_df$SA_mix_format_new <- as.Date(dates_df$SA_mix_format, format = "%d %B %Y")

# Excel New Zealand format
dates_df$NZ_format[1]
dates_df$NZ_format_new <- as.Date(dates_df$NZ_format, format = "%d/%m/%Y")

# -------- Code Chunk 13 --------
date1 <- as.Date("1970-01-01")
date2 <- Sys.Date()
print(c(date1, date2))

# -------- Code Chunk 14 --------
date2 - date1

# -------- Code Chunk 15 --------
as.numeric(date1)
as.numeric(date2)

# -------- Code Chunk 16 --------
head(dates_df$Excel_Numeric_Format)

# -------- Code Chunk 17 --------
dates_df$Excel_Numeric_Format_new <- as.Date(dates_df$Excel_Numeric_Format,
                                             origin = as.Date("1899-12-30"))
head(dates_df$Excel_Numeric_Format_new)

# -------- Code Chunk 18 --------
str(dates_df)

# -------- Code Chunk 19 --------
# Storing a time object as a string
time_str <- "2018-12-31 23:59:59"
class(time_str)

# Converting the time object to POSIXct object
time_posix_ct1 <- as.POSIXct(time_str)
class(time_posix_ct1)

# Let's see how the two objects look
time_str
time_posix_ct1

# Test if the two objects are identical
identical(time_str, time_posix_ct1)

# -------- Code Chunk 20 --------
# Storing a numeric representation of the time object
time_numeric <- 1546318799
class(time_numeric)

# Converting the numeric time object to a POSIXct object
time_posix_ct2 <- as.POSIXct(time_numeric, origin = "1970-01-01")

# Let's print the two objects
print(c(time_posix_ct1, time_posix_ct2))

# Let's check if the two objects are identical
identical(time_posix_ct1, time_posix_ct2)
class(time_posix_ct1)
class(time_posix_ct2)

# -------- Code Chunk 21 --------
# Store the time object as a string input
time_US_str <- "Monday, December 31, 2018 11:59:59 PM"

# Convert the time input to POSIXct format
time_posix_ct3 <- as.POSIXct(time_US_str, format = "%A, %B %d, %Y %I:%M:%S %p")
time_posix_ct3

# Check if the 3 conversions are identical
identical(time_posix_ct1, time_posix_ct2, time_posix_ct3)

# -------- Code Chunk 22 --------
# Returning the location
Sys.timezone()
# or returning the abbreviation
Sys.timezone(location = FALSE)

# -------- Code Chunk 23 --------
# Let's use again the same time object as earlier
time_str <- "2018-12-31 23:59:59"

# Converting the object without declaring the time zone
time_default_tz <- as.POSIXct(time_str)

# Converting the object but declaring the time zone
time_assign_tz <- as.POSIXct(time_str, tz = "GMT")

# Those objects are not identical
print(c(time_default_tz, time_assign_tz))
identical(time_default_tz, time_assign_tz)

# -------- Code Chunk 24 --------
head(OlsonNames(), 20)

# -------- Code Chunk 25 --------
# Creating a daily index with date objects
daily_index <- seq.Date(from = as.Date("2016-01-01"), # Setting the starting date
                        to = as.Date("2018-12-31"), # Setting the end date
                        by = "day" # Setting the time increment
                        )
head(daily_index)

# -------- Code Chunk 26 --------
# Creating an index with a 3-day interval spacing
daily_3_index <- seq.Date(from = as.Date("2016-01-01"), # Setting the starting date
                          to = as.Date("2018-12-31"), # Setting the end date
                          by = "3 days" # Setting the time increment
                          )
head(daily_3_index)

# -------- Code Chunk 27 --------
hourly_index <- seq.POSIXt(from = as.POSIXct("2018-06-01"), by = "hours", length.out = 48)
str(hourly_index)

# -------- Code Chunk 28 --------
# Store the time object as a string input
time_US_str <- "Monday, December 31, 2018 11:59:59 PM"
class(time_US_str)
time_US_str

# -------- Code Chunk 29 --------
# Time object conversion with the "base" package
time_base <- as.POSIXct(time_US_str, format = "%A, %B %d, %Y %I:%M:%S %p")
class(time_base)
time_base

# -------- Code Chunk 30 --------
# Time object conversion with the "lubridate" package
library(lubridate)
time_lubridate <- mdy_hms(time_US_str, tz = "EST")
class(time_lubridate)
time_lubridate

# -------- Code Chunk 31 --------
dates_df <- read.csv("dates_formats.csv", stringsAsFactors = FALSE)
str(dates_df)

dates_df$Japanese_format_new <- ymd(dates_df$Japanese_format)
dates_df$US_format_new <- mdy(dates_df$US_format)
dates_df$US_long_format_new <- mdy(dates_df$US_long_format)
dates_df$CA_mix_format_new <- mdy(dates_df$CA_mix_format)
dates_df$SA_mix_format_new <- dmy(dates_df$SA_mix_format)
dates_df$NZ_format_new <- dmy(dates_df$NZ_format)

# -------- Code Chunk 32 --------
dates_df$Excel_Numeric_Format_new <- as_date(dates_df$Excel_Numeric_Format,
                                             origin = ymd("1899-12-30"))
str(dates_df)

# -------- Code Chunk 33 --------
time_obj <- mdy_hms(time_US_str, tz = "EST")
class(time_obj)
time_obj

yday(time_obj) # Extract the day of the year
qday(time_obj) # Extract the day of the quarter
day(time_obj) # Extract the day of the month
wday(time_obj, label = TRUE) # Extract the day of the week

# -------- Code Chunk 34 --------
hour(time_obj) <- 11 # Modifying the time from 11 p.m. to 11 a.m.
time_obj

# -------- Code Chunk 35 --------
time_obj <- mdy_hms(time_US_str, tz = "EST")
time_obj

round_date(time_obj, unit = "minute") # Rounding by the minute
floor_date(time_obj, unit = "hour") # Rounding down by the hour
ceiling_date(time_obj, unit = "day") # Rounding up by the day
9ea9d6d1ee3185a71be20f5ebbf2f0bfc19e65fd
0e92c0b362b230341f9cc31207df8139dbc3ac18
/R/mean.R
b5ab70572e342130a3833bead2378b4f130594d2
[]
no_license
cran/raster
b08740e15a19ad3af5e0ec128d656853e3f4d3c6
dec20262815cf92b3124e8973aeb9ccf1a1a2fda
refs/heads/master
2023-07-09T20:03:45.126382
2023-07-04T10:40:02
2023-07-04T10:40:02
17,699,044
29
35
null
2015-12-05T19:06:17
2014-03-13T06:02:19
R
UTF-8
R
false
false
5,927
r
mean.R
# Author: Robert J. Hijmans
# Date : October 2008
# revised: October 2011
# Version 1.0
# Licence GPL v3

# Make a value-by-value copy of a RasterLayer. Small objects are copied
# in memory; large ones are streamed block-by-block through the
# writeStart/writeValues/writeStop API.
.deepCopyRasterLayer <- function(x, filename="", ...) {
  out <- raster(x)
  if (canProcessInMemory(x)) {
    return( setValues(out, getValues(x)) )
  } else {
    # block-wise copy: read each chunk of rows and write it straight out
    tr <- blockSize(x)
    pb <- pbCreate(tr$n, label='copy')
    out <- writeStart(out, filename=filename)
    x <- readStart(x, ...)
    for (i in 1:tr$n) {
      v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
      out <- writeValues(out, v, tr$row[i])
      pbStep(pb, i)
    }
    pbClose(pb)
    x <- readStop(x)
    return( writeStop(out) )
  }
}

# Cell-wise mean across the layers of a Raster* object. Extra Raster
# objects passed via ... are stacked onto x; plain numeric arguments in
# ... are treated as constant extra "layers" ('add'). 'trim' is accepted
# only for compatibility with base::mean and is ignored with a warning.
setMethod("mean", signature(x='Raster'),
  function(x, ..., trim=NA, na.rm=FALSE){
    if (!is.na(trim)) { warning("argument 'trim' is ignored") }
    dots <- list(...)
    if (length(dots) > 0) {
      # split ... into Raster layers (stacked with x) and numeric constants
      x <- stack(.makeRasterList(x, ...))
      add <- unlist(.addArgs(...))
    } else {
      add <- NULL
    }
    out <- raster(x)
    d <- dim(x)     # d[3] = number of layers, d[2] = number of columns
    nc <- ncell(out)
    if (is.null(add)) {
      if (nlayers(x) == 1) {
        # mean of a single layer is the layer itself
        return(.deepCopyRasterLayer(x))
      }
      if (canProcessInMemory(x)) {
        x <- getValues(x)
        x <- setValues(out, .rowMeans(x, nc, d[3], na.rm=na.rm))
        return(x)
      }
      # out-of-memory path: process block by block
      tr <- blockSize(x)
      pb <- pbCreate(tr$n, label='mean')
      out <- writeStart(out, filename="")
      x <- readStart(x, ...)
      for (i in 1:tr$n) {
        v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
        v <- .rowMeans(v, tr$nrows[i]*d[2], d[3], na.rm=na.rm)
        out <- writeValues(out, v, tr$row[i])
        pbStep(pb, i)
      }
      pbClose(pb)
      x <- readStop(x)
      return( writeStop(out) )
    } else {
      # numeric constants act as additional (constant-valued) layers
      d3 <- d[3] + length(add)
      if (canProcessInMemory(x)) {
        if (length(add) == 1) {
          x <- cbind(getValues(x), add)
        } else {
          # append all constants to every row of the value matrix
          x <- getValues(x)
          x <- t(apply(x, 1, function(i) c(i, add)))
        }
        x <- setValues(out, .rowMeans(x, nc, d3, na.rm=na.rm))
        return(x)
      }
      tr <- blockSize(x)
      pb <- pbCreate(tr$n, label='mean')
      out <- writeStart(out, filename="")
      x <- readStart(x, ...)
      for (i in 1:tr$n) {
        v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
        v <- t(apply(v, 1, function(i) c(i, add)))
        v <- .rowMeans(v, tr$nrows[i]*d[2], d3, na.rm=na.rm)
        out <- writeValues(out, v, tr$row[i])
        pbStep(pb, i)
      }
      pbClose(pb)
      x <- readStop(x)
      return( writeStop(out) )
    }
  }
)

# Cell-wise sum across layers. If 'add' is given, its (scalar) sum is
# appended as one extra constant layer before summing.
.sum <- function(x, add=NULL, na.rm=FALSE){
  out <- raster(x)
  d <- dim(x)
  nc <- ncell(out)
  if (is.null(add)) {
    if (canProcessInMemory(x)) {
      return( setValues(out, .rowSums(getValues(x), nc, d[3], na.rm=na.rm)) )
    }
    # block-wise path for large objects
    tr <- blockSize(x)
    pb <- pbCreate(tr$n, label='sum')
    out <- writeStart(out, filename="")
    x <- readStart(x)
    for (i in 1:tr$n) {
      v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
      v <- .rowSums(v, tr$nrows[i]*d[2], d[3], na.rm=na.rm)
      out <- writeValues(out, v, tr$row[i])
      pbStep(pb, i)
    }
    pbClose(pb)
    x <- readStop(x)
    return ( writeStop(out) )
  } else {
    # collapse the numeric extras to a single constant layer
    add <- sum(add, na.rm=na.rm)
    d3 <- d[3] + 1
    if (canProcessInMemory(x)) {
      return( setValues(out, .rowSums(cbind(getValues(x), add), nc, d3, na.rm=na.rm)) )
    }
    tr <- blockSize(x)
    pb <- pbCreate(tr$n, label='sum')
    out <- writeStart(out, filename="")
    x <- readStart(x)
    for (i in 1:tr$n) {
      v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
      v <- .rowSums(cbind(v, add), tr$nrows[i]*d[2], d3, na.rm=na.rm)
      out <- writeValues(out, v, tr$row[i])
      pbStep(pb, i)
    }
    pbClose(pb)
    x <- readStop(x)
    writeStop(out)
  }
}

# Cell-wise minimum across layers; a numeric 'add' is reduced to its
# minimum and compared as one extra constant layer.
.min <- function(x, add=NULL, na.rm=FALSE) {
  out <- raster(x)
  if (is.null(add)) {
    if (canProcessInMemory(x)) {
      return( setValues(out, .rowMin(getValues(x), na.rm=na.rm)) )
    }
    tr <- blockSize(x)
    pb <- pbCreate(tr$n, label='min')
    out <- writeStart(out, filename="")
    # NOTE(review): readStart/readStop are commented out in this branch
    # (unlike .sum/.max) -- confirm this is intentional.
    #x <- readStart(x)
    for (i in 1:tr$n) {
      v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
      v <- .rowMin(v, na.rm=na.rm)
      out <- writeValues(out, v, tr$row[i])
      pbStep(pb, i)
    }
    pbClose(pb)
    #x <- readStop(x)
    return ( writeStop(out) )
  } else {
    add <- min(add, na.rm=na.rm)
    if (canProcessInMemory(x)) {
      x <- setValues(out, .rowMin(cbind(getValues(x), add), na.rm=na.rm))
      return(x)
    }
    tr <- blockSize(x)
    pb <- pbCreate(tr$n, label='min')
    out <- writeStart(out, filename="")
    x <- readStart(x)
    for (i in 1:tr$n) {
      v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
      v <- .rowMin(cbind(v, add), na.rm=na.rm)
      out <- writeValues(out, v, tr$row[i])
      pbStep(pb, i)
    }
    pbClose(pb)
    x <- readStop(x)
    return ( writeStop(out) )
  }
}

# Cell-wise maximum across layers; mirror image of .min.
.max <- function(x, add=NULL, na.rm=FALSE){
  out <- raster(x)
  if (is.null(add)) {
    if (canProcessInMemory(x)) {
      return( setValues(out, .rowMax(getValues(x), na.rm=na.rm)) )
    }
    tr <- blockSize(x)
    pb <- pbCreate(tr$n, label='max')
    out <- writeStart(out, filename="")
    x <- readStart(x)
    for (i in 1:tr$n) {
      v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
      v <- .rowMax( v, na.rm=na.rm)
      out <- writeValues(out, v, tr$row[i])
      pbStep(pb, i)
    }
    pbClose(pb)
    x <- readStop(x)
    return( writeStop(out) )
  } else {
    add <- max(add, na.rm=na.rm)
    if (canProcessInMemory(x)) {
      x <- setValues(out, .rowMax(cbind(getValues(x), add), na.rm=na.rm))
      return(x)
    }
    tr <- blockSize(x)
    pb <- pbCreate(tr$n, label='max')
    out <- writeStart(out, filename="")
    x <- readStart(x)
    for (i in 1:tr$n) {
      v <- getValues( x, row=tr$row[i], nrows=tr$nrows[i] )
      v <- .rowMax( cbind(v, add), na.rm=na.rm)
      out <- writeValues(out, v, tr$row[i])
      pbStep(pb, i)
    }
    pbClose(pb)
    x <- readStop(x)
    return( writeStop(out) )
  }
}
1d4ed04086a1263489c786cfca34f3fb7ee52a3a
02536fad62b930b2e4da91f55d800d805bcb9bce
/staphopia/man/get_samples.Rd
7b1f78f9505690f91be34318b2f3166378b3cd0e
[]
no_license
staphopia/staphopia-r
ef22481b947e0717cbcdda1ae7e7755fd0af1b88
df19fa91421e18a990d861b5d138698acf0a731c
refs/heads/master
2023-08-07T15:43:10.300845
2023-08-01T21:06:34
2023-08-01T21:06:34
59,863,793
4
3
null
2018-04-09T17:17:13
2016-05-27T21:18:55
HTML
UTF-8
R
false
true
377
rd
get_samples.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sample.R \name{get_samples} \alias{get_samples} \title{get_samples} \usage{ get_samples(sample_id) } \arguments{ \item{sample_id}{A Sample ID} } \value{ Parsed JSON response. } \description{ Given a sample ID(s) return sample information. } \examples{ get_samples(5000) get_samples(c(5000, 5001)) }
52ada2b7749f07062889809c04bd4fc35850bed3
568962b96f03714be82940029fe707facf4bffe0
/man/read10xVisiumWrapper.Rd
525d73d8f6de5bba59505a6cd28356363de0714e
[]
no_license
LieberInstitute/spatialLIBD
7064a3ddebd5de3be65556ad738a3816fd6296a4
66f7b7ba72be86ed65ed70bb1251f314bcdc332e
refs/heads/devel
2023-07-07T09:15:10.011687
2023-07-04T02:19:03
2023-07-04T02:19:03
225,913,568
53
13
null
2023-04-28T13:08:17
2019-12-04T16:46:09
HTML
UTF-8
R
false
true
3,048
rd
read10xVisiumWrapper.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read10xVisiumWrapper.R \name{read10xVisiumWrapper} \alias{read10xVisiumWrapper} \title{Load data from a 10x Genomics Visium experiment and make it spatialLIBD-ready} \usage{ read10xVisiumWrapper( samples = "", sample_id = paste0("sample", sprintf("\%02d", seq_along(samples))), type = c("HDF5", "sparse"), data = c("filtered", "raw"), images = c("lowres", "hires", "detected", "aligned"), load = TRUE, reference_gtf = NULL, chrM = "chrM", gtf_cols = c("source", "type", "gene_id", "gene_version", "gene_name", "gene_type"), verbose = TRUE ) } \arguments{ \item{samples}{Passed to \code{\link[SpatialExperiment:read10xVisium]{SpatialExperiment::read10xVisium()}}.} \item{sample_id}{Passed to \code{\link[SpatialExperiment:read10xVisium]{SpatialExperiment::read10xVisium()}}.} \item{type}{Passed to \code{\link[SpatialExperiment:read10xVisium]{SpatialExperiment::read10xVisium()}}.} \item{data}{Passed to \code{\link[SpatialExperiment:read10xVisium]{SpatialExperiment::read10xVisium()}}.} \item{images}{Passed to \code{\link[SpatialExperiment:read10xVisium]{SpatialExperiment::read10xVisium()}}.} \item{load}{Passed to \code{\link[SpatialExperiment:read10xVisium]{SpatialExperiment::read10xVisium()}}.} \item{reference_gtf}{A \code{character(1)} specifying the path to the reference \code{genes.gtf} file. If not specified, it will be automatically inferred from the \code{web_summary.html} file for the first \code{samples}.} \item{chrM}{A \code{character(1)} specifying the chromosome name of the mitochondrial chromosome. Defaults to \code{chrM}.} \item{gtf_cols}{A \code{character()} specifying which columns to keep from the GTF file. 
\code{"gene_name"} and \code{"gene_id"} have to be included in \code{gtf_cols}.} \item{verbose}{A \code{logical(1)} specifying whether to show progress updates.} } \value{ A \link[=SpatialExperiment-class]{SpatialExperiment} object with the clustering and dimension reduction (projection) results from SpaceRanger by 10x Genomics as well as other information used by \code{run_app()} for visualzing the gene expression data. } \description{ This function expands \code{\link[SpatialExperiment:read10xVisium]{SpatialExperiment::read10xVisium()}} to include analysis results from SpaceRanger by 10x Genomics as well as add information needed by \code{run_app()} to visualize the data with the \code{spatialLIBD} shiny web application. } \examples{ ## See 'Using spatialLIBD with 10x Genomics public datasets' for ## a full example using this function. if (interactive()) { browseVignettes(package = "spatialLIBD") } ## Note that ?SpatialExperiment::read10xVisium doesn't include all the files ## we need to illustrate read10xVisiumWrapper(). } \seealso{ Other Utility functions for reading data from SpaceRanger output by 10x Genomics: \code{\link{add10xVisiumAnalysis}()}, \code{\link{read10xVisiumAnalysis}()} } \concept{Utility functions for reading data from SpaceRanger output by 10x Genomics}
3a509d1b4b6fc6bd1a61e7af7da86a29ecf0b3d5
536cf445a4a9465270d79f0c37e0eb4f8d61403a
/man/similarity_matrix.Rd
cf7812582e9ead1e2cbd92d2e45030d3ac2609ba
[ "MIT" ]
permissive
sollano/forestmangr
a01cc90925e582c69c3c77033c806adae5b38781
e14b62fafd859871277755cfa097d8d032a4af82
refs/heads/master
2023-02-19T21:53:44.213144
2023-02-15T21:57:42
2023-02-15T21:57:42
152,138,499
13
7
NOASSERTION
2022-12-11T22:43:03
2018-10-08T19:59:13
R
UTF-8
R
false
true
2,714
rd
similarity_matrix.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/similarity_matrix.R \name{similarity_matrix} \alias{similarity_matrix} \title{Get the similarity matrix of an area} \usage{ similarity_matrix( df, species, comparison, NI_label = "", index = "Sorensen", dendrogram = FALSE, n_groups = 3 ) } \arguments{ \item{df}{A data frame.} \item{species}{Quoted name of the scientific names variable, or any variable used to differentiate the different species found in data. If supplied, will be used to classify the species in the diameter data.} \item{comparison}{Quoted name of the variable containing levels to be compared with each other.} \item{NI_label}{Label used for Species not identified. This parameter works along with species. The level supplied here will not be considered in the classification. Default \code{""}.} \item{index}{Character value for the desired index to be used. Can be either \code{"Jaccard"}, for a matrix based on the Jaccard index of similarity, \code{"Sorensen"}, for a matrix based the Sorensen index of similarity, or \code{"all"}, for a list with matrices for both indexes. Default: \code{"Sorensen"}.} \item{dendrogram}{If \code{TRUE}, a dendrogram will also be created. Default: \code{FALSE}.} \item{n_groups}{Number of groups in the dendrogram. Default \code{3}.} } \value{ a matrix object with a similarity matrix, or a list, according to the \code{"index"} and \code{"dendrogram"} arguments. } \description{ Calculates the Jaccard similarity index and Sorensen similarity index. } \examples{ library(forestmangr) data("exfm20") head(exfm20) # To get the similarity matrix of an area, we simply need to provide # the species variable name, and a subdivision variable name, like # transect. 
By default we get a a matrix based on the Sorensen index: similarity_matrix(exfm20, "scientific.name", "transect") # To get the similarity matrix of Jaccard, use the index argument: similarity_matrix(exfm20, "scientific.name", "transect", index = "Jaccard") # To get a dendrogram with the matrix, use dendrogram=TRUE: similarity_matrix(exfm20, "scientific.name", "transect", index = "Jaccard", dendrogram = TRUE) # To get a list with both matrices, use index="all": similarity_matrix(exfm20, "scientific.name", "transect", index = "all") # If the data supplied only has 2 levels, a paired comparison is made instead: ex_pair <- exfm20[exfm20$transect \%in\% c("T01", "T02") , ] ex_pair similarity_matrix(ex_pair, "scientific.name", "transect", index = "all") } \references{ Souza, A. L. and Soares, C. P. B. (2013) Florestas Nativas: estrutura, dinamica e manejo. Vicosa: UFV. } \author{ Eric Bastos Gorgens \email{e.gorgens@gmail.com} }
36f1fe2bbeb461c3288dfac242f0dd040554a5aa
f4d77c60cc21b50656522d272b49243b5993d571
/01_gene-level-analysis/utils/load_data.R
5e66618bd50e5b0bf727c940e5d7fc98d8e6b479
[]
no_license
tanminkang/CB2-Experiments
453c20a723bcc8563a18da73109d293690596454
8c5245c7bd63987f01cffb20a57a00218e9d7924
refs/heads/master
2020-09-29T16:22:50.469016
2019-03-25T21:33:13
2019-03-25T21:33:13
null
0
0
null
null
null
null
UTF-8
R
false
false
669
r
load_data.R
load_data <- function(study_id) {
  # Collect all per-method FDR result tables for one CRISPR study into a
  # single data frame, annotating each gene as essential / non-essential.
  #
  # Args:
  #   study_id: name of the study sub-directory under results/ and data/.
  # Returns: a data frame with one row per (gene, method) pair and the
  #   columns from the FDR files plus `method`, `dataset` and `essential`.
  require(tidyr)
  require(readr)    # read_csv()   -- was missing; read_csv was undefined
  require(stringr)  # str_replace() -- was missing
  require(magrittr)
  require(dplyr)
  require(glue)

  # Reference gene sets: essential (e) and non-essential (n).
  e <- scan("data/{study_id}/essential-genes.txt" %>% glue, what ="character")
  n <- scan("data/{study_id}/non-essential-genes.txt" %>% glue, what ="character")

  # Read one result file. Paths look like
  # results/<study>/<dataset>/FDR/<method>.csv, so path component 3 is
  # the dataset name and the basename (minus extension) is the method.
  mk_df <- function(f) {
    parts <- strsplit(f, "/")[[1]]
    read_csv(f) %>%
      mutate(
        # anchored, escaped pattern: ".csv" as a regex would match any
        # character followed by "csv" anywhere in the name
        method = str_replace(basename(f), "\\.csv$", ""),
        dataset = parts[3]) %>%
      filter(gene %in% c(e, n)) %>%
      mutate(essential = gene %in% e)
  }

  files <- Sys.glob("results/{study_id}/*/FDR/*" %>% glue)
  all_df <- lapply(files, mk_df) %>% bind_rows
}
10b369e0106b6d204f960350f66aaa40a9785aa9
0a8b6890acc567c4536af126c0c0d1a197b6f62a
/code/model.R
1cec36aa45168f1898683e5da1c16107e17b7365
[]
no_license
xhq318/628module1-bodyfat
eeaa57c1296473e353f4866cbe96a60ad466ae15
0baf4626bdde3e3295b14b94bc8dd2149d895f44
refs/heads/master
2020-04-18T14:04:57.252018
2019-02-07T05:46:40
2019-02-07T05:46:40
167,579,647
0
0
null
null
null
null
UTF-8
R
false
false
1,436
r
model.R
# Model selection for the body-fat data set.
# Assumes `dat` (the cleaned bodyfat data frame) is already in the
# workspace; column 2 is body density, which is dropped because BODYFAT
# is computed directly from density and would leak the answer.
dat=dat[,-2]#remove density

# Best-subset search (sequential replacement) over up to 4 predictors.
#use stepwise method
library(leaps)
steps <- regsubsets(BODYFAT~., data = dat, nvmax = 4, method = "seqrep")
summary(steps)

# Bookkeeping for the candidate models.
# NOTE(review): R2 is created but never filled -- confirm whether the
# R-squared of each step was meant to be collected alongside `model`.
#create accuracy matric
model=c()
R2=c()

# Candidate models with 1-4 predictors, following the regsubsets output.
#one predictor
step1<-lm(BODYFAT~ABDOMEN,data=dat)
summary(step1)
model <- c(model, "step1")
#two predictor
step2<-lm(BODYFAT~ABDOMEN+WEIGHT,data=dat)
summary(step2)
model <- c(model, "step2")
#three predictor
step3<-lm(BODYFAT~ABDOMEN+WEIGHT+WRIST,data=dat)
summary(step3)
model <- c(model, "step3")
#four predictor
step4<-lm(BODYFAT~ABDOMEN+WEIGHT+FOREARM+WRIST,data=dat)
summary(step4)
model <- c(model, "step4")

# Variance inflation factors for the 2-predictor model (collinearity check).
#test collinearity
library(carData)
library(car)
vif(step2)

# Ridge regression to handle the ABDOMEN/WEIGHT collinearity.
# MASS::select (which masks any dplyr::select) reports the lambda chosen
# by GCV from the grid; the refit below uses lambda = 0.2, presumably the
# selected value -- confirm against the select() output.
#ridge regression
library(MASS)
ridge2=lm.ridge(BODYFAT~ABDOMEN+WEIGHT,data=dat,lambda = seq(0,10,0.1))
select(ridge2)
ridge2=lm.ridge(BODYFAT~ABDOMEN+WEIGHT,data=dat,lambda = 0.2)

# Manual prediction: intercept + coefficients on the original scale.
pred.ridge2 <- coef(ridge2)[1] + coef(ridge2)[2]*dat$ABDOMEN + coef(ridge2)[3]*dat$WEIGHT

# Goodness of fit for the ridge model.
# calculate R squared
y=dat$BODYFAT
y_predicted=pred.ridge2
sst <- sum((y - mean(y))^2) #Sum of Squares Total
sse <- sum((y_predicted - y)^2) #SSE
rsq <- 1 - sse / sst # R squared
MSE=mean((y_predicted - y)^2)

# Residual normality check for the ridge model.
#Diagnostic for ridge Model
res=y_predicted - y
shapiro.test(res)

# Final model: ABDOMEN main effect plus a WEIGHT:WRIST interaction term.
#final model
fit=lm(BODYFAT~ABDOMEN+WEIGHT:WRIST,data=dat)
coefficients(fit)

# Visual check of the predictors used in the final model; WW is the raw
# WEIGHT*WRIST product (looked up from the global environment by plot()).
WW=dat$WEIGHT*dat$WRIST
plot(BODYFAT~WEIGHT,data=dat)
plot(BODYFAT~WRIST,data=dat)
plot(BODYFAT~WW,data=dat)
dd1842986a3c85a57c6b788b7be0fe74467caa76
6f47a7832483a437b68cd73e025d4a389c1ed2a9
/R/newfunctions-Survival.R
06fb0b32acf2b2604030dcf51502af8032d0ce8a
[]
no_license
cran/partDSA
6ba5af11a537621b9c41a6dd34ecb425a2ff8939
2ccc836d61817c037b175a29565fa2089672b3ca
refs/heads/master
2021-01-22T08:19:35.583443
2017-01-05T09:39:59
2017-01-05T09:39:59
17,698,233
0
0
null
null
null
null
UTF-8
R
false
false
14,132
r
newfunctions-Survival.R
## This is the top level of the survival function which is called. The first if clause is for Brier and it repeats the lower level survival function the number of times ##corresponding to the length of the brier.vec. It then sums the risk over these times. The second if clause is for IPCW and just runs the lower risk function once. survival.overall.risk <- function(bas.fx,y,wt,opts){ if(opts$loss.fx=="Brier"){ ##Defines the function to be called for each cutpoint in Brier.vec get.each.risk <- function(index, bas.fx, y, wt, opts){ ##creates a binary y vector given a specific values of T.star T.star=opts$brier.vec[index] y.Brier=as.numeric(y[,1]>T.star) ##Finds the corresponding weights for this y vector wt.Brier=wt[,index] ##Gets the risk for this particular y, wt, and bas.fx combination get.risk=survival.risk(bas.fx,y.Brier,wt.Brier,opts) return(get.risk) } ##risks is a list with each element corresponding to the risk for each cutpoint in brier.vec risks=lapply(1:length(opts$brier.vec),get.each.risk, bas.fx=bas.fx, y=y, wt=wt, opts=opts) ## get the overall risk by summing risks for each individual brier cutpoint. added in multiplication by IBS.wt get.overall.risk=sum(unlist(risks) * opts$IBS.wt) } else if(opts$loss.fx=="IPCW" ){ ## For IPCW, just called the survival risk function once. get.overall.risk=survival.risk(bas.fx,y[,1],wt,opts) } return(get.overall.risk) } ## Note that y is just the survival time (without the censoring component). For IPCW, this will be a continuous variable and for brier it will be binary ## derived from the continuous variable and the cutpoint. 
survival.risk<-function(bas.fx,y,wt,opts){ ## probably don't need this "if" statement if (opts$loss.fx=="IPCW"|opts$loss.fx=="Brier"){ ## calculate the average within each basis function vector by first multiplying the weights by the y-values numerats<-y*wt denomins<-wt ## Do some formatting if(!is.matrix(bas.fx)){ if(is.vector(bas.fx)){ bas.fx<-as.matrix(bas.fx,ncol=1) }else bas.fx<-as.matrix(bas.fx,ncol=ncol(bas.fx)) } if (!is.vector(numerats)){ numerats=as.vector(numerats) } if (!is.vector(denomins)){ denomins=as.vector(denomins) } ## Then sum the numerats within each basis function and divide by the total weight in that basis function. betas will be predicted values in each basis function. betas<- apply(bas.fx*numerats,2,sum)/(apply(bas.fx*denomins,2,sum)) ### Added by KL on 10/25/11 to deal with case when all weights in node are 0 if(length(which(is.na(betas)))>0){ toReplace=which(is.na(betas)) for(k in toReplace){ betas[k]=max(y[which(bas.fx[,k]==1)]) } } ## Get predicted value for each observation by multiplying bas.fx by betas. 
pred.val <- bas.fx %*% betas ##calculate loss by comparing y to predicted value get.loss <- sum(wt * (y - pred.val) ^ 2)/sum(wt) return(get.loss) } } ##### get.at.surv.times #### from AM "get.at.surv.times"<- function(surv.cens, times) { # # surv.cens is an object created by survfit # times is a vector of times at which you want # an estimate of the survival function # nt <- length(times) outs <- rep(0, nt) survv <- surv.cens$surv ns <- length(survv) timev <- surv.cens$time for(i in 1:nt) { if(times[i] < timev[1]) { outs[i] <- 1 } else if(times[i] >= timev[ns]) { outs[i] <- survv[ns] } else { outs[i] <- survv[timev == max(timev[timev <= times[i]]) ][1] } } no <- length(outs[outs == 0]) outs[outs == 0] <- rep(survv[ns - 1], no) return(outs) } #### get.init.am ### from AM "get.init.am"<- function(surv.cens, coeff.cox, w, delta, ttilde,deltaMod=delta){ w <- ifelse(is.na(w),0,w) if(!is.matrix(w)){ w <- matrix(w,nrow=length(w),ncol=1) } nn <- length(ttilde) coeff.cox <- matrix(coeff.cox,nrow=length(coeff.cox),ncol=1) coeff.cox[is.na(coeff.cox)] <- 0 linpred <- w %*%coeff.cox sum.surv.cond <- surv.cens^(exp(linpred)) sum.surv.cond <- ifelse(sum.surv.cond<.2,.2,sum.surv.cond) if(is.na(min(sum.surv.cond))){ if(delta[is.na(sum.surv.cond)]==0){ sum.surv.cond[is.na(sum.surv.cond)] <- 1 } } ##### For IPCW, deltaMod is just delta#### B <- (deltaMod)/sum.surv.cond return(B) } ################## Assign Survival Weights ################################################################### ## Note x is only passed in if we are doing IPCW using the cox method to calculate the weights. 
assign.surv.wts <- function (x,y,opts,T.star=T.star, cox.vec=cox.vec){ ##note: default for IPCW in opts will be cox cens=y[,2] censC=1-cens Time=y[,1] #case 1 with loss function as IPCW if (opts$loss.fx=="IPCW") { ## The method for calculating the weights for IPCW can be cox or KM, first option is cox if(opts$wt.method=="Cox"){ ## if condition means that at least some patients are censored if(sum(censC)>0){ mm <- model.matrix(~.,data.frame(x[,cox.vec])) keep.for.cox <- apply(rbind(apply(data.frame(x[,cox.vec]),1,is.na)),2,sum) Time.cox<- Time[keep.for.cox==0] censC.cox <- censC[keep.for.cox==0] cens.cox <- cens[keep.for.cox==0] cov.tr <- as.matrix(mm[,-1]) surv.cox <- coxph(Surv(Time.cox, censC.cox) ~ cov.tr) # Get coefficients from cox model coeff.cox <- surv.cox$coefficients # Get baseline survival # AMM changed 10/15/11 - DUe to change Therneau made in survfit for baseline for mean instead of 0 #surv.cens <- survfit(surv.cox, newdata = data.frame(cov.tr = 0),type = "kaplan-meier") surv.cens <- survfit(surv.cox, type = "kaplan-meier") # Vector of baseline survival basesurv <- get.at.surv.times(surv.cens, Time.cox) # UG(D) for each subject ic0 <- get.init.am(basesurv, coeff.cox, cov.tr, cens.cox, Time.cox) ## what to do with missing data in \bar{G} now give them a one or 0 cox.wt1 <- rep(NA,length(keep.for.cox)) cox.wt1[keep.for.cox==0] <- ic0 cox.wt1[keep.for.cox==1] <- cens[keep.for.cox==1] } else cox.wt1<-cens return(as.vector(cox.wt1)) }else if(opts$wt.method=="KM"){ #case 2 where loss function is IPCW and wt.method is KM #create kaplan meier model KMfit=survfit(Surv(Time,censC)~1) #calculate weights by finding the "survival" probability corresponding to the given Brier time G=NULL for (i in 1:length(Time)){ # FIXME: floating point equality is failing, causing exception #G[i]<-KMfit$surv[which(KMfit$time==Time[i])] G[i]<-KMfit$surv[which.min(abs(KMfit$time - Time[i]))] # XXX must be better solution if(G[i]<.2){G[i]=.2} } #the actual weight is 1/G KM.wt1<-1/G 
#set those people censored to have weight 0 KM.wt1[which(cens==0)] <- 0 return (KM.wt1) } }else if(opts$loss.fx=="Brier"){ #now with loss function as brier, we use kaplan meier or cox to calculate weights # set T.star to be the next smallest Time value T.star.closest=Time[(Time-T.star)>=0][which.min((Time-T.star)[(Time-T.star)>=0])] #For people with event time after T.star, set their brier time equal to T* in order to find the weight according to Graf equation Time_Brier=ifelse(Time>T.star,T.star.closest,Time) if (opts$wt.method=="Cox"){ Time_Brier=ifelse(Time>T.star,T.star,Time) ## if condition means that at least some patients are censored if(sum(censC)>0){ mm <- model.matrix(~.,data.frame(x[,cox.vec])) keep.for.cox <- apply(rbind(apply(data.frame(x[,cox.vec]),1,is.na)),2,sum) Time.cox<- Time_Brier[keep.for.cox==0] censC.cox <- censC[keep.for.cox==0] cens.cox <- cens[keep.for.cox==0] cov.tr <- as.matrix(mm[,-1]) surv.cox <- coxph(Surv(Time.cox, censC.cox) ~ cov.tr) # Get coefficients from cox model coeff.cox <- surv.cox$coefficients # Get baseline survival # AMM changed 10/15/11 - DUe to change Therneau made in survfit for baseline for mean instead of 0 #surv.cens <- survfit(surv.cox, newdata = data.frame(cov.tr = 0),type = "kaplan-meier") surv.cens <- survfit(surv.cox, type = "kaplan-meier") # Vector of baseline survival basesurv <- get.at.surv.times(surv.cens, Time.cox) # UG(D) for each subject ic0 <- get.init.am(basesurv, coeff.cox, cov.tr, cens.cox, Time_Brier,deltaMod=rep(1,length(cens.cox))) ## what to do with missing data in \bar{G} now give them a one or 0 cox.wt1 <- rep(NA,length(keep.for.cox)) cox.wt1[keep.for.cox==0] <- ic0 cox.wt1[keep.for.cox==1] <- cens[keep.for.cox==1] cox.wt1[which(Time<=T.star& cens==0)] <- 0 }else { cox.wt1<-cens } return(as.vector(cox.wt1)) }else if(opts$wt.method=="KM"){ #create kaplan meier model KMfit=survfit(Surv(Time,censC)~1) #calculate weights by finding the "survival" probability corresponding to the given Brier time 
G=NULL for (i in 1:length(Time)){ # FIXME: floating point equality is failing, causing exception #G[i]<-KMfit$surv[which(KMfit$time==Time_Brier[i])] G[i]<-KMfit$surv[which.min(abs(KMfit$time - Time_Brier[i]))] # XXX must be better solution if(G[i]<.2){G[i]=.2} } #the actual weight is 1/G KM.wt1<-1/G #set those people censored before T.star to have weight 0 KM.wt1[which(Time<=T.star& cens==0)] <- 0 return (KM.wt1) } } } ### We will only go into this function for the brier loss function. We will reassign the coefficients in the model constructed on the ### training set by recalculating the predicted values based on the binary y and the weight for a given time point. calculate.brier.risk <- function(model, x, y, wt, x.test, y.test, wt.test, opts){ test.set.risk.DSA=array(0,length(model$coefficients)) #brier.vec is a vector containing the times we are looking at for brier brier.vec=opts$brier.vec for (i in 1:length(brier.vec)){ T.star=brier.vec[i] #create binary versions of y based on this particular T.star value y.Brier=as.numeric(y[,1]>T.star) y.test.Brier=as.numeric(y.test[,1]>T.star) #find weights using this particular T.star value wt.Brier=wt[,i] wt.test.Brier=wt.test[,i] #using these weights and y-values, modify the field ty$coefficients to represent the new predicted values for each node fun <- function(k) get.coefs(k, dat=x, y=y.Brier, wt=wt.Brier)$coef get.coef.from.training <- lapply(model$IkPn, fun) model$coefficients <- get.coef.from.training #use this updated ty, to predict values based on the test set pred.test.DSA <- predict.dsa(model,x.test) #calculate the loss and make cv.risk the sum over all values in brier.vec tmp <- as.vector(wt.test.Brier )* (as.vector(y.test.Brier) - pred.test.DSA) ^ 2 test.set.risk.DSA <- test.set.risk.DSA + opts$IBS.wt[i] * apply(tmp, 2, sum) / sum(wt.test.Brier) } return (test.set.risk.DSA) }
8832253ba9ca92169c1fa82ef2404539db16674f
ec9ad043b7eb8c868e972fc21011b89e32a0433e
/man/fars_read.Rd
9974f94504dfc161cc14848365135bbe983a7ebf
[]
no_license
olb1605/FARS_Package_Coursera
ef65a1063435af897926a4780b2518ef1409481b
f16459798041f02cef306f24d67bf77763a29310
refs/heads/master
2020-03-14T03:59:55.807754
2018-04-28T21:13:33
2018-04-28T21:13:33
131,431,675
0
0
null
null
null
null
UTF-8
R
false
true
1,341
rd
fars_read.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read}
\alias{fars_read}
\title{FARS functions
These functions use data from the US National Highway Traffic Safety
Administration's Fatality Analysis Reporting System (FARS), which is a
nationwide census providing the American public yearly data regarding fatal
injuries suffered in motor vehicle traffic crashes.
Function "fars_read" - Open CSV file}
\usage{
fars_read(filename)
}
\arguments{
\item{filename}{A character string giving the file name with its location.
This parameter is generated by fars_map_state.}
}
\value{
This function returns a data frame that contains the data from the CSV file.
If the file does not exist, it returns an error message.
}
\description{
This function reads a CSV file and opens it in data frame form.
}
\details{
This function opens a CSV file from FARS. If the file with the specified name
does not exist, the function gives the error message "file ... does not exist".
The function suppresses any warning messages that can be generated by the
read_csv function used to open the CSV file. It contributes to the function
fars_read_years.
}
\examples{
\dontrun{
library(dplyr)
library(readr)
library(magrittr)
library(tidyr)
library(dplyr)
library(maps)
library(graphics)
fars_read("accident_2015.csv.bz2")}
}
7244facc3899312413b6aad91a1d9e34c003366b
7c0ce79410cb02e36fc524346a3c316c2108376b
/tests/testthat/test-save-configuration.R
da29389040c9f81a9bcb921412e2b43d660e6fd2
[]
no_license
bheavner/minidtools
471ba86953ec06e9467298cf6c3d17f3262d3e97
7be9895f40c93b318f169c3cfc00cd354477ccd0
refs/heads/master
2022-11-27T05:59:11.045330
2020-07-29T14:46:37
2020-07-29T14:46:37
61,151,866
0
0
null
null
null
null
UTF-8
R
false
false
1,964
r
test-save-configuration.R
context("test_save_configuration - unit tests") test_that("save_configuration() writes expected JSON", { config <- configuration( server = "http://minid.bd2k.org/minid", user = "Jane Smith", email = "jsmith@example.com", orcid = "0000-0003-2898-9044", code = "XXXX-XXX-XXXXX-XXXX-XXXX" ) save_configuration(config, config_path = "./test_save.json", overwrite = TRUE) target_json <- paste0('{"default":{"minid_server":["http://minid.', 'bd2k.org/minid"],"user":["Jane Smith"],"email":', '["jsmith@example.com"],"orcid":', '["0000-0003-2898-9044"],', '"code":["XXXX-XXX-XXXXX-XXXX-XXXX"]}}\n') expect_identical(readr::read_file("./test_save.json"), target_json) }) test_that("save_configuration() writes expected python config", { config <- configuration( server = "http://minid.bd2k.org/minid", #nolint user = "Jane Smith", email = "jsmith@example.com", orcid = "0000-0003-2898-9044", code = "XXXX-XXX-XXXXX-XXXX-XXXX" ) save_configuration(config, config_path = "./test_save.cfg", python_style = TRUE, overwrite = TRUE) target_cfg <- paste0("[general]\nminid_server: http://minid.bd2k.org/", "minid\nuser: Jane Smith\nemail: jsmith@", "example.com\norcid: 0000-0003-2898-9044\n", "code: XXXX-XXX-XXXXX-XXXX-XXXX\n") expect_identical(readr::read_file("./test_save.cfg"), target_cfg) }) test_that("save_configuration() gives error on attempted overwrite", { config <- configuration( server = "http://minid.bd2k.org/minid", #nolint user = "Jane Smith", email = "jsmith@example.com", orcid = "0000-0003-2898-9044", code = "XXXX-XXX-XXXXX-XXXX-XXXX" ) expect_error(save_configuration(config, config_path = "./test_save.cfg", python_style = TRUE, overwrite = FALSE)) })
cbb3431f38ded2dfaf3d7be92035d0d6f375b6f7
f1de7628bebc854846128bddddbeccee79f65102
/plot1.R
eab3cb1504cc5ddd4fa356d55a93c70276bf2dba
[]
no_license
nilomartin/ExploratoryAnalisysAssignement1
e83c007829237f55df19b44b59bb6a5467a6f9c0
bcfc1a87c7a68637cbe90a8bc10e8efe28db25cc
refs/heads/master
2023-03-19T13:02:58.251614
2021-03-08T17:35:06
2021-03-08T17:35:06
345,739,302
0
0
null
null
null
null
UTF-8
R
false
false
616
r
plot1.R
# Exploratory analysis, plot 1: histogram of Global Active Power for
# 2007-02-01 and 2007-02-02, saved as plot1.png.

# Use sqldf to subset the file while reading it, so only the two target
# dates are ever loaded into memory.
library(sqldf)
# Fix: spell out TRUE instead of the reassignable shorthand T.
data <- read.csv.sql("household_power_consumption.txt", header = TRUE,
                     sep = ";",
                     sql = "select * from file where Date == '1/2/2007' OR Date =='2/2/2007'")

# Convert the Date column (day/month/year strings) to Date class.
data$Date <- as.Date(data$Date, "%d/%m/%Y")

# Convert the Time column to a times object with the chron package.
library(chron)
data$Time <- times(data$Time)

# Render the histogram straight to a 480x480 PNG device.
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
cef609b850c55876b92272d129188cf4ca541e59
ba702a1551dd6c01e19b6d7aaec19c2130ae66d8
/R/expression/DE/preprocess-GSE76097.R
51f5873e0dc0a28462d640b9aad58afb07c91aa5
[ "MIT" ]
permissive
skinnider/spinal-cord-injury-elife-2018
a6d5d971a1eef566a68283d2b3d8feaebe9b73a6
a8e26008d1bc34ff98a88cd42ee32ebf5d467252
refs/heads/master
2020-03-25T12:06:23.001331
2018-09-09T19:33:39
2018-09-09T19:33:39
143,760,533
1
3
null
null
null
null
UTF-8
R
false
false
3,501
r
preprocess-GSE76097.R
# Preprocess the Anderson et al. RNA-seq dataset.
# Pipeline: parse sample metadata from the GEO SOFT file, read the raw count
# matrix, map gene symbols to mouse Ensembl IDs, quantile-normalize, map to
# human orthologs, and save the resulting matrix plus the targets table.
setwd("~/git/spinal-cord-injury-elife-2018")
options(stringsAsFactors = F)
library(tidyverse)
library(magrittr)
library(preprocessCore)
library(org.Mm.eg.db)
source("R/expression/DE/DE-functions.R")

# set expt
expt = "GSE76097"

# read SOFT file
lines = readLines("data/expression/geo/GSE76097/GSE76097_family.soft.gz")

# create design: pull sample IDs, titles, genotype and condition from the
# corresponding SOFT header lines, stripping everything up to the delimiter.
samples = lines[startsWith(lines, "^SAMPLE")] %>%
  gsub("^.*= ", "", .)
names = lines[startsWith(lines, "!Sample_title")] %>%
  gsub("^.*= ", "", .)
genotype = lines[startsWith(
  lines, "!Sample_characteristics_ch1 = genotype")] %>%
  gsub("^.*: ", "", .)
condition = lines[startsWith(
  lines, "!Sample_characteristics_ch1 = condition")] %>%
  gsub("^.*: ", "", .)

# adjust genotype: recode to the two-level labels used downstream
genotype = ifelse(genotype == "Wild type", "WT", "STAT3")

# create targets
targets = data.frame(sample = samples,
                     name = names,
                     genotype = genotype,
                     condition = condition)

# keep only flow through fraction
targets = targets %>%
  filter(grepl("FLOW", name))

# read gene count matrix, aggregate ensembl, reshape
expr = read.csv(
  "data/expression/geo/GSE76097/GSE76097_Complete_geneList_flow_only.csv.gz")
expr = expr %>%
  dplyr::select(X, ends_with("raw_count"))

# map probe IDs (gene symbols in column X) to Ensembl
# NOTE: this `select` is AnnotationDbi::select, not dplyr's.
map = select(org.Mm.eg.db, expr$X, keytype = "SYMBOL",
             columns = c("SYMBOL", "ENSEMBL"))
# get only first match for duplicates
map = map[match(unique(map$ENSEMBL), map$ENSEMBL), ]

# collapse probesets to Ensembl IDs by median
ensembl.list = map$ENSEMBL[match(expr[, 1], map$SYMBOL)]
expr = aggregate(expr[, -1], by = list(ensembl.list), FUN = median)
rownames(expr) = expr[, 1]
expr = expr[, -1]

# filter genes expressed only in a single sample
keep = rowSums(expr > 0) > 1
expr = expr[keep, ]

# convert to matrix for processing
mat = as.matrix(expr)

# quantile normalize to average empirical distribution across all samples
mat = normalize.quantiles(mat)

# quantile normalize each gene to standard normal distribution
quantNorm = function(x) qnorm(
  rank(x, ties.method = "average") / (length(x) + 1))
mat.n = apply(mat, 2, quantNorm)

# convert back to data frame to set dimension names
exprEnsembl = as.data.frame(mat.n)
exprEnsembl = cbind(rownames(expr), exprEnsembl)
colnames(exprEnsembl) = c("Gene", colnames(expr))

# load GTEx modules and seed proteins
modules = read.delim("data/modules/GTEx-modules.tsv")

# map to human consensus orthologs
orthologs = read.delim("data/orthologs/biomart-ensembl-orthologs-mouse.tsv")
orthologs = orthologs[orthologs[, 2] != "", ] # remove unmapped orthologs
# pick a single ortholog when multiple match
# NOTE(review): pick.orthologs is defined in the sourced DE-functions.R.
orthologs = pick.orthologs(orthologs, modules$gene)
exprEnsembl[, 1] = orthologs[match(exprEnsembl[, 1], orthologs[, 1]), 2]
exprEnsembl = exprEnsembl[!is.na(exprEnsembl[, 1]),] # remove NA
exprEnsembl = exprEnsembl[which(exprEnsembl[, 1] != ""),] # remove blank

# aggregate again, if needed (several mouse genes can map to one human gene)
exprEnsembl = aggregate(exprEnsembl[, -1], by = list(exprEnsembl[, 1]),
                        FUN = median)

# re-format as matrix
geneNames = exprEnsembl[, 1]
exprEnsembl = exprEnsembl[, -1]
rownames(exprEnsembl) = geneNames
colnames(exprEnsembl) = targets$sample
exprEnsembl = as.matrix(exprEnsembl)

# save the matrix
save(exprEnsembl, file = paste0(
  "data/expression/geo/GSE76097/", expt, "-human.Rdata"))

# save targets
write.table(targets, paste0("data/expression/geo/GSE76097/", expt,
                            "-targets.txt"),
            quote = F, row.names = F, sep = "\t")
ec530aab54f71c80f4e8716346076c34e9a21211
f9963618c8d61c7ea5ab1d7ea6c201975f04fe4e
/R/expressedGenes.R
e31f0363b44be92e3002efe19790ea0699186020
[]
no_license
Kraus-Lab/groHMM
7f31d0f0ac026e99dd7c8eaab74bb5b5411973c3
514f58a04befc2d3c9c8e10ce63ce30f5f05560e
refs/heads/master
2022-10-19T16:00:39.726593
2022-09-28T20:51:02
2022-09-28T20:51:02
47,837,375
1
1
null
2015-12-11T16:23:24
2015-12-11T16:23:24
null
UTF-8
R
false
false
7,910
r
expressedGenes.R
###########################################################################
##
## Copyright 2013, 2014 Charles Danko and Minho Chae.
##
## This program is part of the groHMM R package
##
## groHMM is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
## for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program. If not, see <http://www.gnu.org/licenses/>.
##
##########################################################################

#' Function identifies expressed features using the methods introduced in
#' Core, Waterfall, Lis; Science, Dec. 2008.
#'
#' Supports parallel processing using mclapply in the 'parallel' package.
#' To change the number of processors use the argument 'mc.cores'.
#'
#' @param features A GRanges object representing a set of genomic coordinates.
#' The meta-plot will be centered on the start position.
#' There can be optional "ID" column for gene ids.
#' @param reads A GRanges object representing a set of mapped reads.
#' @param Lambda Measurement of assay noise. Default: 0.04 reads/ kb in a
#' library of 10,751,533 mapped reads. (background computed in Core,
#' Waterfall, Lis. (2008) Science.).
#' @param UnMap List object representing the position of un-mappable reads.
#' Default: not used.
#' @param debug If set to true, returns the number of positions.
#' Default: FALSE.
#' @param ... Extra argument passed to mclapply
#' @return Returns a data.frame representing the expression p.values for
#' features of interest.
#' @author Charles G. Danko
## This identifes genes that are expressed in a given cell, based on short
## read data.
## f == genes/annotations; columns represent: Chr, Start, End, Strand, ID.
## p == short reads; columns represent: Chr, Start, End, Strand, ID.
## UnMAQ == unmappable regions in the genome of interest.
##
## Function defines expression as in Core, Waterfall, Lis; Science, Dec. 2008.
expressedGenes <- function(features, reads, Lambda= NULL, UnMap=NULL,
    debug=FALSE, ...) {
    # Order -- Make sure, b/c this is one of our main assumptions. Otherwise
    # violated for DBTSS.
    reads <- reads[order(as.character(seqnames(reads)), start(reads)),]

    # Chromosomes present in the annotation; each is processed independently.
    C <- sort(unique(as.character(seqnames(features))))

    # Default background rate: 0.04 reads/kb in a 10,751,533-read library,
    # scaled to this library's size and expressed per base.
    if(is.null(Lambda))
        Lambda <- 0.04*NROW(reads)/10751533/1000 #NROW(reads)/genomeSize

    ## Run parallel version: one worker per chromosome.
    mcp <- mclapply(seq_along(C), expressedGenes_foreachChrom, C=C,
        features=features, reads=reads, Lambda=Lambda, UnMap=UnMap,
        debug=debug, ...)

    ## Unlist... preallocate one slot per feature, then scatter each
    ## chromosome's results back into the original feature order via the
    ## per-chromosome ordering vector "ord".
    ANSgeneid <- rep("char", NROW(features))
    ANSpvalue <- rep(0,NROW(features))
    ANScounts <- rep(0,NROW(features))
    ANSunmapp <- rep(0,NROW(features))
    ANSgsize <- rep(0,NROW(features))

    for(i in seq_along(C)) {
        indxF <- which(as.character(seqnames(features)) == C[i])
        indxPrb <- which(as.character(seqnames(reads)) == C[i])
        if((NROW(indxF) >0) & (NROW(indxPrb) >0)) {
            ANSgeneid[indxF][mcp[[i]][["ord"]]] <- mcp[[i]][["ANSgeneid"]]
            ANSpvalue[indxF][mcp[[i]][["ord"]]] <- mcp[[i]][["ANSpvalue"]]
            ANScounts[indxF][mcp[[i]][["ord"]]] <- mcp[[i]][["ANScounts"]]
            ANSunmapp[indxF][mcp[[i]][["ord"]]] <- mcp[[i]][["ANSunmapp"]]
            ANSgsize[indxF][mcp[[i]][["ord"]]] <- mcp[[i]][["ANSgsize"]]
        }
    }
    return(data.frame(ID= ANSgeneid, pval= ANSpvalue, readCounts= ANScounts,
        nonMappablePositions= ANSunmapp, size= ANSgsize))
}

## Worker for one chromosome C[i]: counts reads per feature, optionally
## corrects for unmappable positions, and returns per-feature Poisson
## p-values. Returns integer(0) when the chromosome has no features or no
## reads.
expressedGenes_foreachChrom <- function(i, C, features, reads, Lambda,
    UnMap, debug) {
    if(debug) {
        message("Doing chromosome ", C[i])
    }

    # Which KG? prb?  (indices of features/reads on this chromosome)
    indxF <- which(as.character(seqnames(features)) == C[i])
    indxPrb <- which(as.character(seqnames(reads)) == C[i])

    if((NROW(indxF) >0) & (NROW(indxPrb) >0)) {
        # Order -- Make sure, b/c this is one of our main assumptions.
        # Otherwise violated for DBTSS.
        Ford <- order(start(features[indxF,]))

        # Type coersions.
        FeatureStart <- start(features[indxF,][Ford])
        FeatureEnd <- end(features[indxF,][Ford])
        FeatureStr <- as.character(strand(features[indxF,][Ford]))

        PROBEStart <- start(reads[indxPrb,])
        PROBEEnd <- end(reads[indxPrb,])
        PROBEStr <- as.character(strand(reads[indxPrb,]))

        # Set dimensions.  NOTE(review): the C entry points below appear to
        # expect matrix-shaped inputs -- confirm against the groHMM C source.
        dim(FeatureStart) <- c(NROW(FeatureStart), NCOL(FeatureStart))
        dim(FeatureEnd) <- c(NROW(FeatureEnd), NCOL(FeatureEnd))
        dim(FeatureStr) <- c(NROW(FeatureStr), NCOL(FeatureStr))
        dim(PROBEStart) <- c(NROW(PROBEStart), NCOL(PROBEStart))
        dim(PROBEEnd) <- c(NROW(PROBEEnd), NCOL(PROBEEnd))
        dim(PROBEStr) <- c(NROW(PROBEStr), NCOL(PROBEStr))

        # Per-feature read counts, computed in compiled code.
        NUMReads <- .Call("CountReadsInFeatures", FeatureStart, FeatureEnd,
            FeatureStr, PROBEStart, PROBEEnd, PROBEStr, PACKAGE = "groHMM")

        ## Calculate UN-MAQable regions...
        if(!is.null(UnMap)) {
            ## Count start index.
            chr_indx <- which(UnMap[[1]][[1]] == C[i])
            CHRSIZE <- as.integer(UnMap[[1]][[2]][chr_indx])
            CHRSTART <- as.integer(0)
            if(chr_indx > 1) {
                ## Running on 1:0 gives c(1, 0)
                CHRSTART <- as.integer(
                    sum(UnMap[[1]][[2]][ c(1:(chr_indx-1)) ]) +1)
            }

            if(debug) {
                message(C[i],": Counting unMAQable regions.")
                message("CHRSIZE:", CHRSIZE, "CHRSTART:", CHRSTART)
            }

            ## Count unMAQable regions, and size of everything ...
            nonmappable <- .Call("CountUnMAQableReads", FeatureStart,
                FeatureEnd, UnMap[[2]], CHRSTART, CHRSIZE,
                PACKAGE = "groHMM")

            ## Adjust size of gene body.
            MappablePositions <- (FeatureEnd - FeatureStart) -
                nonmappable + 1

            if(debug) {
                print(summary(nonmappable))
                print(summary(MappablePositions))
            }
        } else {
            # No mappability data: assume a single unmappable position.
            nonmappable <- 1
        }
        MappablePositions <- (FeatureEnd - FeatureStart) - nonmappable + 1

        ## Calculate poisson prob. of each: upper-tail p-value of seeing
        ## NUMReads given the background rate over the mappable length.
        if ("ID" %in% colnames(mcols(features))) { # there is "ID" column
            ANSgeneid_c <- elementMetadata(features[indxF,])$ID
        } else {
            ANSgeneid_c <- rep(NA, NROW(indxF))
        }
        ANSpvalue_c <- ppois(NUMReads, (Lambda*MappablePositions),
            lower.tail=FALSE)
        ANScounts_c <- NUMReads
        ANSunmapp_c <- nonmappable
        ANSgsize_c <- (FeatureEnd-FeatureStart) #[indxF][Ford]

        return(list(ANSgeneid= ANSgeneid_c, ANSpvalue= ANSpvalue_c,
            ANScounts= ANScounts_c, ANSunmapp= ANSunmapp_c,
            ANSgsize= ANSgsize_c, ord= Ford))
    }
    return(integer(0))
}
e1f3577609e6b6fc1c3066a6fcbc47bbf8bcb9b4
565132aa83ca2a741b21c7fde1f00e1ab9ef1cd2
/BMA_development/SF_mimic/indicator_bma/run_sims.r
7a7d09d50c8c05ce9b072343e3e8beb3a28da981
[]
no_license
JHHatfield/BRCindicators
e1c61744887b01a567982d2d47fe9e42038949b2
fb80e7aa0b31025e4f0d1eac2f57993eeaa0380b
refs/heads/master
2022-02-21T22:23:52.130275
2019-10-16T14:51:35
2019-10-16T14:51:35
null
0
0
null
null
null
null
UTF-8
R
false
false
2,167
r
run_sims.r
######################################### download and install the package
# Demonstration script for the BRCindicators BMA indicator models, run on
# simulated data.
library(devtools)
install_github(repo = 'drnickisaac/BRCindicators')
library(BRCindicators)
# load the help file
vignette("BRCindicators")

######################################## simulate some data
data_in <- simulate_indicator(nsp=25, nyears=40, sigma=0.18, mg= -0.01,
                              sdg=0.005)
# NB this is the dataset I generated in January.
# It has more species than Steve's original.
# Also, the growth rate varies more among species than Steve's.
# Both parameters were chosen to resemble the UKBMS.

# plot the simulated data
library(ggplot2)
qplot(data=data_in, y=index, x=year) + facet_wrap(~species)

# Now it is in the right format
head(data_in)
# .. but we need an SE column, which the simulations do not contain
# This does not work with NAs, so set them all to the same value
#data_in$se <- 0.125 # if all have the same value then stoch_smooth2 does not smooth
data_in$se <- runif(n=nrow(data_in), min=0.01, max=2)

######################################## simple growth-rate model
bma_indicator_FNgr <- bma(data_in, model = 'FNgr2', m.scale = "log10",
                          parallel=TRUE, n.iter = 5000)
plot_indicator(indicator = bma_indicator_FNgr[,'Index'],
               CIs = bma_indicator_FNgr[,c(3,4)],
               main = 'Simulation - FNgr2')

######################################## deterministic smoother
bma_indicator_smooth_det <- bma(data_in, model = 'smooth_det2',
                                m.scale = "log10", parallel=TRUE,
                                n.iter = 5000)
plot_indicator(indicator = bma_indicator_smooth_det[,'Index'],
               CIs = bma_indicator_smooth_det[,c(3,4)],
               main = 'Simulation - smooth_det2')

######################################## digging deeper
# look at the R code that does the work
bma
# look at the model object
str(bma_indicator_smooth_det)
# get the bugs model output
bugs_model <- attr(bma_indicator_smooth_det, 'model')
# find out where the bugs code is stored
bugs_model$modfile
# print out the bugs code to the screen
read.delim(bugs_model$modfile)
1f1984215dca34649f6f0aa683bcf24ece735244
d7400f2feeb54bc3a7bf1048d7040c1f29c55f54
/man/orderedLasso.cv.Rd
0fcf16753674de07bd2f394587a4ad0d152f7e78
[]
no_license
cran/orderedLasso
30e9238aa217024635982aace7da38969be1b2b6
6e3d1b2291e543aedbfbf91c6b3ec9a4fd8c47ba
refs/heads/master
2021-06-03T05:32:39.972806
2019-01-07T11:21:23
2019-01-07T11:21:23
null
0
0
null
null
null
null
UTF-8
R
false
false
2,226
rd
orderedLasso.cv.Rd
\name{orderedLasso.cv}
\alias{orderedLasso.cv}
\title{Cross-validation function for the ordered lasso}
\usage{
orderedLasso.cv(x, y, lamlist = NULL, minlam = NULL, maxlam = NULL,
  nlam = 50, flmin = 5e-04, strongly.ordered = FALSE, intercept = TRUE,
  standardize = TRUE, nfolds = 10, folds = NULL, niter = 500,
  iter.gg = 100, method = c("Solve.QP", "GG"), trace = FALSE,
  epsilon = 1e-05)
}
\arguments{
\item{x}{A matrix of predictors, where the rows are the samples and the
columns are the predictors}

\item{y}{A vector of observations, where length(y) equals nrow(x)}

\item{lamlist}{Optional vector of values of lambda (the regularization
parameter)}

\item{minlam}{Optional minimum value for lambda}

\item{maxlam}{Optional maximum value for lambda}

\item{nlam}{Number of values of lambda to be tried. Default nlam = 50.}

\item{flmin}{Fraction of maxlam; minlam = flmin*maxlam. If computation is
slow, try increasing flmin to focus on the sparser part of the path.
Default flmin = 1e-4.}

\item{strongly.ordered}{An option which allows users to order the
coefficients in absolute value.}

\item{intercept}{True if there is an intercept in the model.}

\item{standardize}{Standardize the data matrix.}

\item{nfolds}{Number of cross-validation folds.}

\item{folds}{(Optional) user-supplied cross-validation folds. If provided,
nfolds is ignored.}

\item{niter}{Number of iterations the ordered lasso takes to converge.
Default niter = 500.}

\item{iter.gg}{Number of iterations of the generalized gradient method;
default 100}

\item{method}{Two options available, Solve.QP and Generalized Gradient.
Details of the two options can be seen in the orderedLasso description.}

\item{trace}{Output option; trace=TRUE gives verbose output}

\item{epsilon}{Error tolerance parameter for convergence criterion;
default 1e-5}
}
\description{
Uses cross-validation to estimate the regularization parameter for the
ordered lasso model.
} \examples{ set.seed(3) n = 50 b = c(4,3,1,0) p = length(b) x = matrix(rnorm(n*p),nrow = n) sigma = 5 y = x \%*\% b + sigma * rnorm(n, 0, 1) cvmodel = orderedLasso.cv(x,y, intercept = FALSE, trace = TRUE, method = "Solve.QP", strongly.ordered = TRUE) print(cvmodel) plot(cvmodel) }
b8fa7192057fd4272f2034e956a110ed903e64b5
b8a19cc9c443d367da8ce10c435a8c7d9bbffa9b
/tests/testthat/test-convert_dates.R
c412ab8080fa453299334393ee74f0c0521d8fad
[ "MIT" ]
permissive
g3rley/healthcareai-r
9b0a68cc5406f2af2a85dc5318769a94075787a6
9b00c1be1daaa5c387ecee910d7571684447e4ff
refs/heads/main
2023-07-21T19:08:15.174485
2022-09-01T17:16:55
2022-09-01T17:16:55
451,652,775
1
0
NOASSERTION
2022-01-24T22:20:57
2022-01-24T22:20:57
null
UTF-8
R
false
false
3,709
r
test-convert_dates.R
context("Testing convert_date_cols") # Setup ------------------------------------------------------------------------ d <- data.frame( a_DTS = c("2018-3-25", "2018-3-26"), b_DTS = c("2018-03-25", "2018-03-26"), c_DTS = c("3-25-2018", "3-26-2018"), d_DTS = c("03-25-2018", "03-26-2018"), e_DTS = c("Mar 25 2018", "Mar 26 2018"), f_DTS = c("March 25th, 2018", "March 26th, 2018"), stringsAsFactors = FALSE ) dt <- data.frame( aa_DTS = c("2018-3-25 22:30:00", "2018-3-26 22:30:00"), bb_DTS = c("2018-03-25 22:30:00", "2018-03-26 22:30:00"), cc_DTS = c("3-25-2018 22:30:00", "3-26-2018 22:30:00"), dd_DTS = c("03-25-2018 22:30:00", "03-26-2018 22:30:00"), ee_DTS = c("Mar 25 2018 22:30:00", "Mar 26 2018 22:30:00"), ff_DTS = c("March 25th, 2018 22:30:00", "March 26th, 2018 22:30:00"), stringsAsFactors = FALSE ) d_mixed <- data.frame( z_DTS = c("2018-3-5", "2018-3-26", "2019-6-5"), b_nums = c(2, 4, 6), c_DTS = c("03-05-2018", "03-26-2018", "06-05-2019"), d_chars = c("a", "b", "d"), e_date = lubridate::mdy(c("3-05-2018", "3-26-2018", "06-05-2019")), f_DTS = c("2000-1-1 13:30:05", "2000-1-10 1:50:10", "2001-5-10 12:50:10"), stringsAsFactors = FALSE ) # Tests ------------------------------------------------------------------------ test_that("convert dates finds bad DTS columns", { d <- dplyr::bind_cols(d, not_DTS = c("string cheese", "string cheese"), typo_DTS = c("Marches 25th, 2018 22:30:00", "Marches 25th, 2018 22:30:00")) expect_error(convert_date_cols(d), "not_DTS") }) test_that("convert dates finds bad DTS columns in tibble", { d <- dplyr::bind_cols(tibble::as_tibble(d), not_DTS = c("string cheese", "string cheese"), typo_DTS = c("Marches 25th, 2018 22:30:00", "Marches 26th, 2018 22:30:00")) expect_error(convert_date_cols(d), "not_DTS") }) test_that("all common formats are converted", { out <- convert_date_cols(d) expect_true(all(purrr::map_lgl(out, is.Date))) expect_equal(dim(out), c(2, 6)) out <- convert_date_cols(dt) expect_true(all(purrr::map_lgl(out, is.POSIXt))) 
expect_equal(dim(out), c(2, 6)) }) test_that("all common formats are converted in tibble", { out <- convert_date_cols(tibble::as_tibble(d)) expect_true(all(purrr::map_lgl(out, is.Date))) expect_equal(dim(out), c(2, 6)) out <- convert_date_cols(tibble::as_tibble(dt)) expect_true(all(purrr::map_lgl(out, is.POSIXt))) expect_equal(dim(out), c(2, 6)) }) test_that("Mixed data frame converts all date columns", { out <- convert_date_cols(d_mixed) # check type on all but date time column expect_equal(as.character(purrr::map_chr(select(out, -f_DTS), class)), c("Date", "numeric", "Date", "character", "Date")) # check type on date time column expect_equal(as.character(class(out$f_DTS)), c("POSIXct", "POSIXt")) # check converted contents expect_equal(as.character(out$z_DTS), c("2018-03-05", "2018-03-26", "2019-06-05")) expect_equal(as.character(out$c_DTS), c("2018-03-05", "2018-03-26", "2019-06-05")) expect_equal(as.character(out$e_date), c("2018-03-05", "2018-03-26", "2019-06-05")) expect_equal(as.character(out$f_DTS), c("2000-01-01 13:30:05", "2000-01-10 01:50:10", "2001-05-10 12:50:10")) }) test_that("Mixed tibble converts all date columns", { out <- convert_date_cols(tibble::as_tibble(d_mixed)) expect_equal(as.character(purrr::map_chr(select(out, -f_DTS), class)), c("Date", "numeric", "Date", "character", "Date")) expect_equal(as.character(class(out$f_DTS)), c("POSIXct", "POSIXt")) })
5f77cd8dcf4c5149de92a627b66fc151f0317336
a16f28d10e26710c831a6f66902abd985c51c096
/analysis_Step7.R
290540b66b0a0c2299176d85af3f409df1bef6e9
[]
no_license
sandyschumann/cogflexreplication
2e9dbf81aae7fcb935f7d7aeee78aeb1f8e74d1e
1b06dda8c3cc3f8a818296c208cffe4dc528a8c2
refs/heads/main
2023-05-31T03:11:07.058933
2021-06-24T08:31:27
2021-06-24T08:31:27
333,808,201
0
0
null
null
null
null
UTF-8
R
false
false
827
r
analysis_Step7.R
###Goals: Complete additional analyses to examine scale properties of
###willingness to fight and die

###Requirements
# Fix: install lavaan only when it is missing, so the script is idempotent
# and does not hit the network / prompt for a mirror on every run.
if (!requireNamespace("lavaan", quietly = TRUE)) {
  install.packages("lavaan")
}
library(lavaan)

# NOTE(review): `fulldataset` must already exist in the workspace (loaded by
# an earlier analysis step) and contain the killandfight_1..5 and die items.

###Confirmatory factor analysis Model 1: one latent factor
# All five fight items and the die item load on a single latent factor.
CFA_model1<- 'progroup =~ killandfight_1+killandfight_2+killandfight_3+killandfight_4+killandfight_5+die'

# estimator = "MLM": maximum likelihood with robust standard errors.
CFA_model1fit <- cfa(CFA_model1, data=fulldataset, estimator = "MLM")
summary(CFA_model1fit, fit.measures=TRUE)

###Confirmatory factor analysis Model 2: two correlated factors
# Fight items load on one factor; die is its own (freely scaled) factor.
CFA_model2<- '##two latent factors willingness to fight and willingness to die
progroup2 =~ killandfight_1+killandfight_2+killandfight_3+killandfight_4+killandfight_5
die_l=~ NA*die'

CFA_model2fit <- cfa(CFA_model2, data=fulldataset, estimator = "MLM")
summary(CFA_model2fit, fit.measures=TRUE)
67c2998ef3dcaac05bb2c036093df19854e7d4e4
e105a5cc2ab6cdb71c788e5957ff2e8cbf27dfc7
/scripts/1.Pre-processing/A. Predictors/remove_extra_lines_from_dbf.R
bed05d15396726f7693199a4988ea9069ee01708
[]
no_license
mrc-ide/DENV_risk_maps
214c4c199457f4173dccd9b0e7524fd879faf3c9
32a1ca0669757d3d015dbfa02798aac27500d0e1
refs/heads/master
2022-03-30T01:50:45.494730
2020-01-21T09:09:02
2020-01-21T09:09:02
120,633,767
0
0
null
null
null
null
UTF-8
R
false
false
2,449
r
remove_extra_lines_from_dbf.R
# Scan admin-level shapefiles for DBF attribute columns that contain embedded
# "\r\n" sequences; record the affected countries and, when REPAIR is TRUE,
# write a cleaned copy of each affected shapefile.
# (Removed `rm(list=ls())`: wiping the caller's workspace is an anti-pattern.)

# load R packages
library(maptools)

# load data: per-admin-level lookup tables of country names and codes
all_countries_in_world_shp_admin_1 <- read.csv(
  file.path("output", "datasets", "all_adm_1_countries.csv"),
  header = TRUE, sep = ",", stringsAsFactors = FALSE)

all_countries_in_world_shp_admin_2 <- read.csv(
  file.path("output", "datasets", "all_adm_2_countries.csv"),
  header = TRUE, sep = ",", stringsAsFactors = FALSE)

# Indices of elements of x containing an embedded CRLF.
my_fun <- function(x) {
  grep("\r\n", x)
}

### Define parameters
Dataset <- all_countries_in_world_shp_admin_2
adm_code <- 2
shp_folder <- "shapefiles"
REPAIR <- FALSE

### Collect affected rows in a preallocated list and bind once at the end
### (fix: avoids growing a data frame with rbind() inside the loop).
bad_rows <- vector("list", nrow(Dataset))

for (j in seq_len(nrow(Dataset))) {  # fix: seq_len() is safe for 0 rows

  country_name <- Dataset[j, "country"]
  cat("country name = ", country_name, "\n")

  country_code <- Dataset[j, "country_code"]
  cat("country code = ", country_code, "\n")

  shp_path <- file.path("data", shp_folder,
                        paste(country_code, "adm_shp", sep = "_"),
                        paste0(country_code, "_adm", adm_code, ".shp"))

  if (file.exists(shp_path)) {

    print("shp file exists")

    shp_file <- readShapePoly(shp_path)

    # For every attribute column, find values containing CRLF.
    check_list <- apply(shp_file@data, 2, my_fun)
    check_vec <- sapply(check_list, length)

    if (any(check_vec > 0)) {

      print("extra lines are present in the dbf file")
      bad_rows[[j]] <- Dataset[j, ]

      if (REPAIR) {

        for (col_index in which(check_vec > 0)) {
          # NOTE(review): the first alternative "\r\n" already matches any
          # CRLF, making the longer alternative redundant; the pattern is
          # kept verbatim for behavioural parity.
          new_names <- sapply(shp_file@data[[col_index]], function(x) {
            gsub("\r\n|\r\nAlderney\r\nAlderney\r\n", "", x)
          })
          shp_file@data[[col_index]] <- as.factor(new_names)
        }

        dir.create(file.path("data", "repaired_shapefiles",
                             paste(country_code, "adm_shp", sep = "_")),
                   FALSE, TRUE)

        writeSpatialShape(shp_file,
                          file.path("data", "repaired_shapefiles",
                                    paste(country_code, "adm_shp", sep = "_"),
                                    paste0(country_code, "_adm", adm_code)))
      }
    }
  }
}

# NULL when no country was affected, matching the original behaviour of
# write.table() receiving an empty result.
output <- do.call(rbind, bad_rows)

file_name <- sprintf("adm_%s_countries_dbf_extra_lines%s", adm_code, ".csv")
write.table(output, file.path("output", "datasets", file_name),
            row.names = FALSE, sep = ",")
f69dc712a1e531c1df704544c433909c1c84e0a6
08a550500164fd09ba110fc9a9403053406c5801
/Jackie/text_analysis.R
b18f67acdd2ffbecf78b4e8461c0f4517232ba67
[]
no_license
jliangus/Final
85acf4cc86bf9c3be8b3783187bfb54bfcb956fb
4292fb447bafbfb22fe519e67f74a6600bc0d11c
refs/heads/master
2020-04-12T00:21:44.620952
2018-12-18T00:28:49
2018-12-18T00:28:49
null
0
0
null
null
null
null
UTF-8
R
false
false
1,342
r
text_analysis.R
# Text analysis of wine reviews: fit a linear model of review points on
# TF-IDF term frequencies and extract terms with positive ("good") and
# negative ("bad") coefficients.
# NOTE(review): depends on the tm package and on globals `corpus` and `wine`
# being defined before this script runs -- confirm against the calling code.

#linear model
Model <- function(corpus){
  # Document-term matrix over the whole corpus.
  dtm<-DocumentTermMatrix(corpus)
  sparse<-removeSparseTerms(dtm, sparse = 0.6)
  # Restrict to terms appearing at least 1000 times.
  term_freq<-findFreqTerms(dtm,1000)
  # (Removed unused local `dtm1`.)
  # Rebuild as a TF-IDF-weighted term-document matrix over frequent terms.
  dtm<-TermDocumentMatrix(corpus,control= list(dictionary=term_freq,removeNumbers=TRUE, stopwords=TRUE, weighting=weightTfIdf))
  dtm_matrix<-as.matrix(dtm)
  # Transpose so rows are documents (wines), columns are terms.
  winecharacter<-t(dtm_matrix)
  colnames(winecharacter) <- names(dtm_matrix[,1])
  winecharacter<-data.frame(winecharacter,country=wine$country, designation=wine$designation, points=wine$points, price=wine$price)
  # Regress points on the first 200 term columns.
  winecharacter1<-data.frame(winecharacter[,1:200],points=winecharacter$points)
  model<-lm(points~.,winecharacter1)
  return(model)
}

model <- Model(corpus)

# Pad a character vector with NA to length n, so vectors of different
# lengths can share a data.frame.
pad_na <- function(x, n) {
  c(x, rep(NA, n - length(x)))
}

create_table <- function(corpus){
  model <- Model(corpus)
  goodword<-names(model$coefficients[model$coefficients>0])
  badword<-names(model$coefficients[model$coefficients<0])
  # Fix: data.frame(goodword, badword) errors when the two vectors differ
  # in length (positive vs negative coefficient counts rarely match).
  n <- max(length(goodword), length(badword))
  df <- data.frame(goodword = pad_na(goodword, n),
                   badword = pad_na(badword, n),
                   stringsAsFactors = FALSE)
  # Fix: return the table explicitly instead of relying on the invisible
  # value of the final assignment.
  return(df)
}

goodword<-names(model$coefficients[model$coefficients>0])
badword<-names(model$coefficients[model$coefficients<0])
n <- max(length(goodword), length(badword))
df <- data.frame(goodword = pad_na(goodword, n),
                 badword = pad_na(badword, n),
                 stringsAsFactors = FALSE)
goodword
08fc558f1dbde3b3e2ac8a1076ca9f83fca690b3
140126d428822112ccbdb7f7044e8e5a209f2607
/main.r
8b60bcf29fbed32c6692c77cfe2381c5fb593964
[]
no_license
mziegler611/ProgLanProj-R
3a2ef07e3de66ff25fad3e0c4691347ba3b992ca
d40acf7abde73831eab7e01fdc20841d05655302
refs/heads/main
2023-01-15T20:22:15.798859
2020-11-20T01:56:46
2020-11-20T01:56:46
312,638,092
0
0
null
null
null
null
UTF-8
R
false
false
2,436
r
main.r
#--------------------------------------
#Author: Lei Chen, Morgan Ziegler
#Date: November 17,2020
#Comp321 final
#--------------------------------------

# Load helper functions (printHeader, findMin, findMax, findInfo, findAvg,
# findFrequency, read_excel, ...).
# NOTE(review): hard-coded absolute path -- must be edited per machine.
source("/Users/leianna/Documents/321F20/ProgLanProj-R/functions.R", chdir = TRUE)
#source("/Users/morganziegler/Desktop/ProgLanProj-R-main/functions.R", chdir = TRUE)

# Print the menu of available commands.
commands <- function(){
  print("Functions Available:", quote = FALSE)
  print("1. Print Data", quote = FALSE)
  print("2. Print Headers", quote = FALSE)
  print("3. Find Minimum", quote = FALSE)
  print("4. Find Maximum", quote = FALSE)
  print("5. Find Info", quote = FALSE)
  print("6. Find Average of Column", quote = FALSE)
  print("7. Find Frequency of A Value in Column", quote = FALSE)
  print("8. Reprint Commands", quote = FALSE)
}

# Interactive REPL over the cats-vs-dogs spreadsheet: reads a menu choice,
# dispatches to the matching helper, and loops until the user enters 'n'.
main <- function(){
  #myData <- read_excel("/Users/morganziegler/Desktop/ProgLanProj-R-main/catsvdogs.xlsx")
  myData <- read_excel("/Users/leianna/Documents/321F20/ProgLanProj-R/catsvdogs.xlsx")

  # Show every row/column of the tibble when printing.
  options(tibble.print_max = Inf)
  options(tibble.width = Inf)

  list_header<-names(myData)
  # (Removed unused local `input`.)

  commands()
  userInputValue <- readline(prompt="What would you like to do: ")

  while (userInputValue != 'n'){
    if (userInputValue==1){
      print(myData)
    }
    else if(userInputValue == 2){
      printHeader(myData)
    }
    else if(userInputValue ==3){
      userInputColumn <- readline(prompt="Enter a column name/key or all: ")
      print(findMin(myData,userInputColumn))
    }
    else if(userInputValue == 4){
      userInputColumn <- readline(prompt="Enter a column name/key or all: ")
      print(findMax(myData,userInputColumn))
    }
    else if(userInputValue == 5){
      userInputColumn <- readline(prompt="Enter column name/key: ")
      # Fix: read the search value into its own variable instead of
      # clobbering the loop-control variable `userInputValue`.
      searchValue <- readline(prompt="Enter value to Find: ")
      # NOTE(review): unlike the other helpers, findInfo is not passed
      # myData -- confirm its signature in functions.R.
      print(findInfo(userInputColumn, searchValue))
    }
    else if(userInputValue == 6){
      columnInput <- readline(prompt="What Column do you want to average: ")
      print(findAvg(myData, columnInput))
    }
    else if(userInputValue == 7){
      columnInput <- readline(prompt="What Column do you want to look in: ")
      valueInput <- readline(prompt="What Value are you looking for: ")
      print(findFrequency(myData, columnInput, valueInput))
    }
    else if (userInputValue ==8) {
      commands()
    }
    userInputValue <- readline(prompt="What would you like to do (enter 'n' to quit): ")
  }
}

main()
7d2105b188bb2bc4e3dd4f6bbe7d8ecd4548f317
20746cd95fee322979cc3464f01e8970796d1d2e
/data/union.R
9e2bc8a19bdad99080f23060df419de5ddad809c
[ "Apache-2.0" ]
permissive
92amartins/hacka-ime
a27730b9e9341ddf54271d2acdedecc9db4525bc
5a06c7364a90b7bced738477b3768f10781b47e1
refs/heads/master
2021-06-21T19:29:14.105379
2017-08-20T11:34:07
2017-08-20T11:34:07
null
0
0
null
null
null
null
UTF-8
R
false
false
1,124
r
union.R
# Build a per-author text corpus from Scopus export CSVs: stack the files,
# concatenate abstract + keyword fields, and aggregate to one row per
# (author, affiliation).
library(dplyr)

# NOTE(review): setwd() in a script is fragile; kept because all file paths
# below are relative to the data directory.
setwd("../../data/")

paths <- c("cs/sarajane.csv", "cs/clodoaldo.csv", "cs/karina.csv",
           "cs/fabio_kon.csv", "cs/ivandre.csv", "fis/alencar.csv",
           "fis/salvadori.csv", "med/kalil.csv", "med/ludhmilla.csv",
           "med/sommer.csv")

# Fix: read every CSV and bind once, instead of growing a data frame with
# rbind() inside a loop (quadratic copying).
df <- bind_rows(lapply(paths, read.csv, stringsAsFactors = FALSE))

# Concatenate the abstract and both keyword fields into one text column.
res <- mutate(df, Text = paste(Abstract, Author.Keywords,
                               Index.Keywords, sep = ' ')) %>%
  select(Author, Text, Affiliation)

# One row per (author, affiliation), with all of the author's texts joined
# by ";"; keep the last 10 rows as in the original workflow.
df2 <- aggregate(res$Text, by = list(res$Author, res$Affiliation),
                 paste, collapse = ";") %>%
  tail(10)

names(df2) <- c("Author", "Affiliation", "Text")

write.csv(df2, file = "data.csv")
373b445dbe0695616da49881b8f25cd9e8d1710d
e213599e52e0f943a5dd99aa0f4716ebc3493fc3
/Figure/FigureS13/S13.04.A.02.R
0f9de0feb0653a0dee71e57b3d1425a7a376a31f
[]
no_license
hegu2692/VAEN
d78a6580b2430babe62702f9b775cd2e741a84a1
69b90b80c1c1419679c51c65a681c071fdba2779
refs/heads/master
2023-05-07T14:36:49.427975
2021-05-30T06:25:42
2021-05-30T06:25:42
null
0
0
null
null
null
null
UTF-8
R
false
false
1,134
r
S13.04.A.02.R
#setwd("/path/to/VAEN/Figure/FigureS13") source("../../code/multiplot.R") library(ggplot2) library(ggrepel) abc = read.table("FigureS13.A.txt", as.is=T, header=T, sep="\t") dat.plot = data.frame(FC = as.numeric(abc[,5]), log10p=-log10(abc[,4]), nMutProp = as.numeric(abc[,7]), color = as.factor(abc$color) ) lab = apply(abc, 1, function(u)paste(u[1], u[2], u[3], sep="-") ) dat.plot$lab = lab p2 = ggplot(dat.plot, aes(x=FC, y=log10p, size=nMutProp, color=color)) + geom_point(alpha=0.2) + scale_color_manual(values=c("black", "blue", "red")) + theme(legend.position = c(0.9, 0.9), plot.title = element_text(hjust = 0.5, size=8) ) + geom_vline(xintercept = 0, linetype="dashed", color = "red", size=1) + ylab("-log10(p)") + xlab("Difference in means of MT and WT samples") + ggtitle("(A) Association results for mutations") + guides(color = FALSE) + labs(size = "% MT") which(dat.plot$log10p > 8) -> ii new.dat.plot = dat.plot[ii, ] p3 = p2+geom_text_repel(data=new.dat.plot, aes(label=lab), color="black", size=2) tiff("S13.A.tif", width=1800, height=2000, res=300) print(p3) dev.off()
bdcc1138611c71e38d99bea0fd872d438c45d60e
bc304dc82564cf44b7fe73d8722f1ac6e680dacc
/BiHLaws/script/2018_exctract_laws_and_votes.R
7466b96a689f174114e2c5b6b5e207205726c90d
[]
no_license
rs2903/BiH
7f993a06724e6a72a73810d2f59823c67dba8882
6c2f9b7413634ea0e0bd347cd74756cb989986ae
refs/heads/master
2020-06-19T04:09:30.983391
2019-07-12T09:34:33
2019-07-12T09:34:33
196,551,852
0
0
null
null
null
null
UTF-8
R
false
false
4,100
r
2018_exctract_laws_and_votes.R
# description ------------------------------------------------------------- # scraps links to laws # scraps info on developments pertaining to each law => basis for 2018_law_analysis # scraps from each law development links to votes # scraps pdfs behind each link on votes => basis for tesseract_extract_individual_votes.R # setup ------------------------------------------------------------------- library(rvest) library(tidyverse) library(glue) wdr <- getwd() # scrap links for all laws ------------------------------------------------ page_seq<- seq(1:17) page_links <- glue("http://parlament.ba/oLaw/GetOLawsByStatus?page={page_seq}&MandateId=4&Status=-1") %>% enframe(name=NULL) pb <- progress_estimated(nrow(page_links)) fn_scrap <- function(x){ pb$tick()$print() x %>% read_html() %>% html_nodes("a") %>% html_attr(.,"href") %>% enframe(name=NULL) %>% filter(str_detect(value, "OLawDetails")) %>% mutate(link_to_law=paste0("http://parlament.ba",value)) } df_links_to_laws <- page_links$value %>% set_names() %>% map_dfr(., possibly(fn_scrap, otherwise=NULL), .id="page_id") write_csv(df_links_to_laws, paste0(wdr, "/data/2014_2018_links_to_laws.csv")) df_links_to_laws <- readr::read_csv(paste0(wdr, "/data/2014_2018_links_to_laws.csv")) nrow(df_links_to_laws) #163 laws # law timeline ------------------------------------------------------------ df_links_to_laws <- readr::read_csv(paste0(wdr, "/data/2014_2018/2014_2018_links_to_laws.csv")) pb <- dplyr::progress_estimated(nrow(df_links_to_laws)) fn_case.details <- function(x) { pb$tick()$print() x %>% read_html() %>% html_nodes("table") %>% html_table(., trim = T, fill = T) %>% map(., ~ map(., as_tibble)) %>% map(., bind_cols) %>% bind_rows() } df_law_details <- df_links_to_laws$link_to_law %>% set_names() %>% map_dfr(., possibly(fn_case.details, otherwise = NULL), .id = "link_to_law") %>% mutate_at(vars(value, value1), iconv, from="UTF-8", to="windows-1253") write_csv2(df_law_details, 
paste0(wdr,"/data/2014_2018/2014_2018_law_details.csv")) #here analysis on aggregate of outcomes #origin of law # get links to voting pdfs ------------------------------------------------------- df_links_to_laws <-readr::read_csv(paste0(wdr, "/data/2014_2018_links_to_laws.csv")) fn_scrap_listing <- function(x) { pb$tick()$print() x %>% read_html() %>% #html_nodes("li") %>% html_nodes(xpath="//a[contains(text(), 'Listing')]") %>% #filters links based on text/name of links html_attr('href') %>% #extracts links enframe(name=NULL) %>% mutate(link_to_vote=paste0("http://parlament.ba", value)) } pb <- dplyr::progress_estimated(nrow(df_links_to_laws)) df_links_to_votes <- df_links_to_laws$link_to_law %>% set_names() %>% map_dfr(., possibly(fn_scrap_listing, otherwise=NULL), .id="link_to_law") #write_csv(df_links_to_votes, path=paste0(wdr, "/data/2014_2018_links_to_votes.csv")) # download voting records ------------------------------------------------- df_links_to_votes <- readr::read_csv(paste0(wdr, "/data/2014_2018_links_to_votes.csv")) %>% mutate(law_id=stringr::str_extract(link_to_law, "[:digit:]+")) %>% mutate(vote_id=stringr::str_extract(link_to_vote, "[:digit:]+")) length(unique(df_links_to_votes$link_to_law)) #only 151 laws; means 12 laws without votes? length(unique(df_links_to_votes$link_to_vote)) #793 votes df_links_to_votes %>% group_by(link_to_law) %>% summarise(n_votes_per_law=n()) %>% mutate(law_id=stringr::str_extract(link_to_law, "[:digit:]+")) %>% group_by(n_votes_per_law) %>% summarise(n_laws_in_group=n()) %>% ggplot()+ geom_bar(aes(x=reorder(n_votes_per_law, -n_laws_in_group), y=n_laws_in_group), stat="identity") download_destinaton <- glue("{wdr}/data/voting_records/law_id_{df_links_to_votes$law_id}_vote_id_{df_links_to_votes$vote_id}.pdf") walk2(df_links_to_votes$link_to_vote, download_destinaton, download.file, mode = "wb")
60c3fab7e264d4e7124aa4a2ef11fcb9b68cc724
1115ed2b4ab8073a9fb7937387ff2758258ed4dd
/Simulation/sim_bootstrap.R
f31068dd1f4cac8cb04ccf089f10c0bdfc594d05
[]
no_license
chxyself25/Change-pointDetection
e343bc8cf47e141f579ab6f6b136257087f72a72
4a1b1b60074efb887d1f71c2b83ca3630fc9e409
refs/heads/master
2022-12-03T10:29:51.145653
2020-08-08T21:07:10
2020-08-08T21:07:10
285,974,409
0
0
null
null
null
null
UTF-8
R
false
false
3,810
r
sim_bootstrap.R
library(doParallel) library(devtools) library(fda, lib.loc = "/home/xchang/R/x86_64-pc-linux-gnu-library/3.6/") library(Rcpp, lib.loc = "/home/xchang/R/x86_64-pc-linux-gnu-library/3.6/") library(fdapace, lib.loc = "/home/xchang/R/x86_64-pc-linux-gnu-library/3.6/") sourceDir <- function(path, trace = TRUE, ...) { for (nm in list.files(path, pattern = "[.][RrSsQq]$")) { if(trace) cat(nm,":") source(file.path(path, nm), ...) if(trace) cat("\n") } } sourceDir("../fdapace/R/", trace = FALSE) devtools::load_all("../SRfpca/.") print(sessionInfo()) source("../Funcs/urban_detect_correct.R") registerDoParallel(cores = 50) fres <- data.frame(type = rep(c("slow", "fast"), each = 3), sigma = rep(c(0.3,0.6,1.2), 2), fve = c(99.5, 99.9, 99.9, 99, 99.9, 99.9)) ## information set up type <- "slow"; sigma <- 1.2 dir <- paste0("./", type, "/sigma_", sigma, "/") est.res <- readRDS(paste0(dir, "sim_500_", type, "_", sigma, "_est_res.rds")) ## best FVE for mSUM opt.fve <- fres$fve[fres$type == type & fres$sigma == sigma] est <- subset(est.res, method == "mSUM" & fve == opt.fve) cs <- c(0, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.25) ## get centered functional data yeari <- 1:100; c = cs[8] dat <- readRDS(paste0(dir, "sim_500_data_", type, "_", c, ".rds")) BSdist <- list(list(S = list(), A = list()), list(S = list(), A = list()), list(S = list(), A = list())) mBSdist <- list(S = list(), A = list()) #BSdist <- readRDS(paste0(dir, "sim_500_uv_bootstrap_", type, "_", c, ".rds")) #mBSdist <- readRDS(paste0(dir, "sim_500_mv_bootstrap_", type, "_", c, ".rds")) for (id in 1:500) { cat("doing the id ", id, "\n") dati <- dat[[id]] Ly <- dati$Ly; Lt <- dati$Lt P <- est$estimate[est$c == c & est$pointID == id] Ly1 <- Ly[yeari<=P]; Lt1 <- Lt[yeari<=P] Ly2 <- Ly[yeari>P]; Lt2 <- Lt[yeari>P] mui1 <- RFPCAMu(Ly1, Lt1, optns = list(dataType = "Sparse", userBwMu = "GCV", userBwCov = "GCV", mfdName = "Euclidean", kernel = "epan", maxK = 20, methodMuCovEst = "smooth")) mui2 <- RFPCAMu(Ly2, Lt2, optns = 
list(dataType = "Sparse", userBwMu = "GCV", userBwCov = "GCV", mfdName = "Euclidean", kernel = "epan", maxK = 20, methodMuCovEst = "smooth")) resid1 <- lapply(1:length(Ly1), function(i) { Ly1[[i]] - mui1$muObs[, match(Lt1[[i]], mui1$obsGrid)] }) resid2 <- lapply(1:length(Ly2), function(i) { Ly2[[i]] - mui2$muObs[, match(Lt2[[i]], mui2$obsGrid)] }) resid <- append(resid1, resid2) for (b in 1:3) { cat("doing band ", b, "\n") Lyb <- lapply(resid, function(x) {x[b,]}) #b.idx <- sample(1:100, size = 100, replace = TRUE) #pcab <- FPCA(Ly[b.idx], Lt[b.idx], optns = list(dataType = "Sparse", kernel = "epan", methodBwMu = "GCV", methodBwCov = "GCV", # maxK = 20, methodMuCovEst = "smooth", methodXi = "CE", FVEthreshold = 1)) #cat(length(pcab), "\n") btsib <- fBTSampling(Lyb, Lt, K = 20, bs.n = 100, resample.n = 500) BSdist[[b]]$S[[id]] <- btsib$S BSdist[[b]]$A[[id]] <- btsib$A } btsi <- mfBTSampling(resid, Lt, K = 20, bs.n = 100, resample.n = 500) mBSdist$S[[id]] <- btsi$S mBSdist$A[[id]] <- btsi$A cat(id, " is finished", "\n") saveRDS(BSdist, file = paste0(dir, "sim_500_uv_bootstrap_", type, "_", c, ".rds")) saveRDS(mBSdist, file = paste0(dir, "sim_500_mv_bootstrap_", type, "_", c, ".rds")) } # backup in lss folder #saveRDS(BSdist, file = paste0("/lss/research/zhuz-lab/xchang/Change-pointDetection/Simulation/", type, "/sigma_", sigma, "/sim_500_uv_bootstrap_", type, "_", c, ".rds")) #saveRDS(mBSdist, file = paste0("/lss/research/zhuz-lab/xchang/Change-pointDetection/Simulation/", type, "/sigma_", sigma, "/sim_500_mv_bootstrap_", type, "_", c, ".rds"))
9732e975ef39296a643c9d80d8671038b8e2f207
5e20d81caa0e6585588a4474c73025f900b4c43c
/section3_assessment.R
d9c634c9adb1c589e0d52c92a1464d50d3ec0e33
[ "MIT" ]
permissive
RitRa/edx-Data-Science-Professional-Certificate
9bb39e906ef6e4fdb4ee7d7b6e29fd3010ca79e4
82228d4f623d451dda0e658181d361b91ce5a553
refs/heads/master
2022-12-25T21:07:03.926941
2020-09-14T18:45:23
2020-09-14T18:45:23
291,427,152
0
0
null
null
null
null
UTF-8
R
false
false
3,351
r
section3_assessment.R
library(dslabs) data(heights) options(digits = 3) heights #Q1. First, determine the average height in this dataset. Then create a logical vector ind with the indices for those individuals who are above average height. # # How many individuals in the dataset are above average height? summary(heights) #68.3 isClass(height) ind <- heights %>% filter(height > mean(height)) NROW(ind) #532 mean_height <- mean(heights$height) ind <- heights$height < mean_height mean(ind) #Q2. How many individuals in the dataset are above average height and are female? summary(ind) ind #31 # #Q3. If you use mean() on a logical (TRUE/FALSE) vector, it returns the proportion of observations that are TRUE. # What proportion of individuals in the dataset are female? women <- heights$sex == "Female" mean(women) #0.227 # Q4 a. Determine the minimum height in the heights dataset. min(heights$height) # 50 # 4b. Use the match() function to determine the index of the first individual with the minimum height. min_height <- min(heights$height) match(min_height, heights$height) #1032 #4c Subset the sex column of the dataset by the index in 4b to determine the individual’s sex. subset(heights,heights$height ==50 ) # sex height # 1032 Male 50 # 1045 Male 50 #5a. Determine the maximum height. max_height <- max(heights$height) #82.7 #5b.Which integer values are between the maximum and minimum heights? For example, if the minimum height is 10.2 and the maximum height is 20.8, your answer should be x <- 11:20 to capture the integers in between those values. (If either the maximum or minimum height are integers, include those values too.) max_height<- as.integer(max_height) min_height<-as.integer(min_height) x <- (max_height:min_height) NROW(x) x <- (50:82) # [1] 50 51 52 53 54 55 56 57 58 59 60 61 62 63 # [15] 64 65 66 67 68 69 70 71 72 73 74 75 76 77 # [29] 78 79 80 81 82 # 5c. How many of the integers in x are NOT heights in the dataset? sum(!x %in% heights$height) # 6. 
Using the heights dataset, create a new column of heights in centimeters named ht_cm. Recall that 1 inch = 2.54 centimeters. Save the resulting dataset as heights2. heights <-mutate(heights, ht_cm = height*2.54) heights[18,] # sex height ht_cm # 18 Female 64 163 # 6b. What is the mean height in centimeters? mean_ht_cm <- mean(heights$ht_cm) # 174 # 7a. How many females are in the heights2 dataset? females <- filter(heights, sex=="Female") count(females) #238 female_mean <- mean(females$ht_cm) # 165 # 8.The olive dataset in dslabs contains composition in percentage of eight fatty acids found in the lipid fraction of 572 Italian olive oils: library(dslabs) data(olive) head(olive) #Plot the percent palmitic acid versus palmitoleic acid in a scatterplot. What relationship do you see? plot(olive$palmitic, olive$palmitoleic) # positive linear relationship # 9. Create a histogram of the percentage of eicosenoic acid in olive. hist(olive$eicosenoic) # the most common value of eicosenoic is below 0.05% # 10. Make a boxplot of palmitic acid percentage in olive with separate distributions for each region. boxplot(palmitic~region, olive) # Which region has the highest median palmitic acid percentage? # Southern Italy #Which region has the most variable palmitic acid percentage? # Southern Italy
bc0b89d0d65c026009dbae6c5ad8ab400d834188
1e705bd7f0363075e574393ea0d61795a01a8752
/assigment/analysis.r
87e56707f4b7777dc447c0cbe459a22ac01c66ff
[]
no_license
cajoho99/ese
ed24b445971dc14bacc05bb7d2e55440ba11acd8
64b40ce2db7195f353f6b0eed1a8dc8d2a4583b1
refs/heads/main
2023-08-18T20:41:43.567631
2021-10-15T17:43:27
2021-10-15T17:43:27
null
0
0
null
null
null
null
UTF-8
R
false
false
1,746
r
analysis.r
library(rethinking) library(readr) path <- paste(getwd(), "/assigment/data.csv", sep = "") print(path) d <- read_delim(path, delim = ";", col_types = cols( .default = "?", category = "factor", technique = "factor" ) ) d$category_id <- as.numeric(d$category) d$technique_id <- as.numeric(d$technique) ot_le <- d[which(d$technique_id == 1 & d$category_id == 1), ] ot_me <- d[which(d$technique_id == 1 & d$category_id == 2), ] nt_le <- d[which(d$technique_id == 2 & d$category_id == 1), ] nt_me <- d[which(d$technique_id == 2 & d$category_id == 2), ] d_ot_le <- density(ot_le$tp) d_ot_me <- density(ot_me$tp) d_nt_le <- density(nt_le$tp) d_nt_me <- density(nt_me$tp) df <- data.frame( technique = c("OT", "OT", "NT", "NT"), experience = c("LE", "ME", "LE", "ME"), mean = c(mean(ot_le$tp), mean(ot_me$tp), mean(nt_le$tp), mean(nt_me$tp)), sd = c(sd(ot_le$tp), sd(ot_me$tp), sd(nt_le$tp), sd(nt_me$tp)) ) print(head(df)) plot(d_ot_le, main = "Density graphs", ylab = "", type = "l", col = "cyan", ylim = c(0, 0.3), xlim = c(0, 10) ) lines(d_ot_me, col = "magenta") lines(d_nt_le, col = "red") lines(d_nt_me, col = "green") legend( "topleft", c("OT-LE", "OT-ME", "NT-LE", "NT-ME"), fill = c("cyan", "magenta", "red", "green") ) # Negative binomial approach # först vanlig poisson dat <- list( T = d$technique_id, E = d$category_id, S = d$subject, F = d$tp ) str(dat) m1.1 <- ulam( alist( F ~ dpois(lambda), log(lambda) <- a[T] + b[E], a[T] ~ dnorm(0, 1.5), b[E] ~ dnorm(0, 1.5) ), data = dat, chains = 4 ) # Det finns inget övre limit av möjliga faults så därför är det inte orimligt med # en Poisson
068db692af260e2bb773202021210d2cc21e2836
9719ea69f693adfddc62b27eaf948fc7b16f6ad0
/R/map_nests.R
1c15f1b3081c86af50a2522eea052286bf3454e8
[]
no_license
dbca-wa/wastdr
49fe2fb1b8b1e518f6d38549ff12309de492a2ad
5afb22d221d6d62f6482798d9108cca4c7736040
refs/heads/master
2022-11-18T01:00:41.039300
2022-11-16T08:32:12
2022-11-16T08:32:12
86,165,655
2
0
null
null
null
null
UTF-8
R
false
false
2,667
r
map_nests.R
#' Map tagged Turtle Nests interactively. #' #' \lifecycle{maturing} #' #' @details Creates a Leaflet map with an interactive legend. #' The maps auto-zooms to the extent of data given. #' #' @param data The output of \code{wastd_data$nest_tags}. #' @template param-wastd_url #' @template param-fmt #' @template param-cluster #' @return A leaflet map #' @family wastd #' @export #' @examples #' \dontrun{ #' data("wastd_data") #' wastd_data$nest_tags %>% map_nests() #' } map_nests <- function(data, wastd_url = wastdr::get_wastd_url(), fmt = "%d/%m/%Y %H:%M", cluster = FALSE) { co <- if (cluster == TRUE) leaflet::markerClusterOptions() else NULL pal <- leaflet::colorFactor(palette = "RdYlBu", domain = data$encounter_status) url <- sub("/$", "", wastd_url) l <- leaflet_basemap() %>% leaflet::addAwesomeMarkers( data = data, lng = ~encounter_longitude, lat = ~encounter_latitude, icon = leaflet::makeAwesomeIcon( icon = "tag", text = ~ toupper(substring(data$encounter_status, 1, 1)), markerColor = ~ pal(encounter_status) ), label = ~ glue::glue( "{datetime} {encounter_status} {flipper_tag_id} {date_nest_laid} {tag_label}" ), popup = ~ glue::glue(' <h3>{flipper_tag_id} {date_nest_laid} {tag_label}</h3> <span class="glyphicon glyphicon-globe" aria-hidden="true"></span> {encounter_area_name} - {encounter_site_name}</br> <span class="glyphicon glyphicon-calendar" aria-hidden="true"></span> {datetime} AWST</br> <span class="glyphicon glyphicon-eye-open" aria-hidden="true"></span> {encounter_observer_name}<br/> <span class="glyphicon glyphicon-pencil" aria-hidden="true"></span> {encounter_reporter_name}<br/> <span class="glyphicon glyphicon-wrench" aria-hidden="true"></span> {humanize(encounter_status)}<br/> <span class="glyphicon glyphicon-comment" aria-hidden="true"></span> {encounter_comments} {comments}<br/> Survey {encounter_survey_id} at {encounter_site_name}<br/>wa {encounter_survey_start_time}-{encounter_survey_end_time} AWST</p> <a 
href="{url}{encounter_survey_absolute_admin_url}" class="btn btn-xs btn-secondary" target="_" rel="nofollow"> Edit survey in WAStD</a> <p><a class="btn btn-xs btn-secondary" target="_" rel="nofollow" href="{url}{encounter_absolute_admin_url}">Edit on WAStD</a></p> '), group = "Nests", clusterOptions = co ) %>% leaflet::addLayersControl( baseGroups = c("Basemap"), overlayGroups = c("Nests"), options = leaflet::layersControlOptions(collapsed = FALSE) ) l } # usethis::use_test("map_nests")
ef0466d608c7567b45fff1babf81a84053082c34
7917fc0a7108a994bf39359385fb5728d189c182
/cran/paws.management/man/ssm_describe_maintenance_window_tasks.Rd
819be7ef4be49da5bb43e91d328c9da1ac3ac316
[ "LicenseRef-scancode-unknown-license-reference", "Apache-2.0" ]
permissive
TWarczak/paws
b59300a5c41e374542a80aba223f84e1e2538bec
e70532e3e245286452e97e3286b5decce5c4eb90
refs/heads/main
2023-07-06T21:51:31.572720
2021-08-06T02:08:53
2021-08-06T02:08:53
396,131,582
1
0
NOASSERTION
2021-08-14T21:11:04
2021-08-14T21:11:04
null
UTF-8
R
false
true
2,382
rd
ssm_describe_maintenance_window_tasks.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ssm_operations.R \name{ssm_describe_maintenance_window_tasks} \alias{ssm_describe_maintenance_window_tasks} \title{Lists the tasks in a maintenance window} \usage{ ssm_describe_maintenance_window_tasks(WindowId, Filters, MaxResults, NextToken) } \arguments{ \item{WindowId}{[required] The ID of the maintenance window whose tasks should be retrieved.} \item{Filters}{Optional filters used to narrow down the scope of the returned tasks. The supported filter keys are WindowTaskId, TaskArn, Priority, and TaskType.} \item{MaxResults}{The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.} \item{NextToken}{The token for the next set of items to return. (You received this token from a previous call.)} } \value{ A list with the following syntax:\preformatted{list( Tasks = list( list( WindowId = "string", WindowTaskId = "string", TaskArn = "string", Type = "RUN_COMMAND"|"AUTOMATION"|"STEP_FUNCTIONS"|"LAMBDA", Targets = list( list( Key = "string", Values = list( "string" ) ) ), TaskParameters = list( list( Values = list( "string" ) ) ), Priority = 123, LoggingInfo = list( S3BucketName = "string", S3KeyPrefix = "string", S3Region = "string" ), ServiceRoleArn = "string", MaxConcurrency = "string", MaxErrors = "string", Name = "string", Description = "string" ) ), NextToken = "string" ) } } \description{ Lists the tasks in a maintenance window. For maintenance window tasks without a specified target, you cannot supply values for \code{--max-errors} and \code{--max-concurrency}. Instead, the system inserts a placeholder value of \code{1}, which may be reported in the response to this command. These values do not affect the running of your task and can be ignored. 
} \section{Request syntax}{ \preformatted{svc$describe_maintenance_window_tasks( WindowId = "string", Filters = list( list( Key = "string", Values = list( "string" ) ) ), MaxResults = 123, NextToken = "string" ) } } \keyword{internal}
689fe1be6c77710e6afe4712c844caab77069df6
29585dff702209dd446c0ab52ceea046c58e384e
/Scale/R/Scale.R
84a0c4e46cc73f7854d1c99bf9484083c093f4a9
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
6,095
r
Scale.R
Scale <- function(data=data.frame(), orders=list(), orders_id=integer(), reverse=c(), items=character(), col_names=character()){ # A class to represent scale data out <- list(data=data, orders=orders, orders_id=orders_id, reverse=reverse, items=items, col_names=col_names) class(out) <- "ScaleData" return(out) } PreProc <- function(sc){ # operates on a ScaleData object if (class(sc)!= "ScaleData") stop("Input should be of class ScaleData.") if (!(any(class(sc$data) == "data.frame"))){ stop("Data has to be a data.frame.\n See ?data.frame, ?cbind, ?matrix") } if (length(sc$orders) != 0){ # This should handle various orderings of printed questionnaires if (length(sc$orders_id)==0) stop("You need to provide orders_id.\n This is an indicator of which order each participant received.") reordered <- lapply(seq_along(sc$orders), function(x){ df <- sc$data[sc$orders_id==x,] df <- df[,order(sc$orders[[x]])] df }) data <- data.frame(Reduce(rbind, reordered)) } else{data <- sc$data} if (length(sc$reverse)!=0){ data[,sc$reverse] <- (max(data, na.rm=T)+1) - data[,sc$reverse] } else {warning("Continue without reverse items.")} if (length(sc$col_names)!=0) colnames(data) <- sc$col_names if (length(sc$items)!=0){ items <- sc$items } if (length(sc$items)==0){ items <- NULL warning("Items' vector unspecified. 
out$items is NULL...")} return(list(data=data, items=items)) } .item_set <- function(x, rcut=0.4){ low_cor <- which(x$item.stats$r.cor < rcut) a_drop <- which(x$alpha.drop$raw_alpha >= x$total$raw_alpha) list(low_cor=low_cor, a_drop=a_drop) } ItemAnalysis <- function(prep, method="spearman", fm="gls", nfactors=1, rcut=0.3, score_type = "z", exclude=c()){ data <- if (length(exclude)!=0) prep$data[-exclude] else prep$data if (method=="polychoric") cor_matrix <- psych::polychoric(data)$rho if (method=="spearman") cor_matrix <- Hmisc::rcorr(as.matrix(data), type="spearman")$r al <- psych::alpha(cor_matrix) sug <- .item_set(al, rcut) rely <- list(alpha= al, k=ncol(data), title=deparse(substitute(prep)), method=method, suggest = sug) class(rely) <- "reliability" my_kmo <- psych::KMO(cor_matrix) my_bartlett <- psych::cortest.bartlett(cor_matrix, n=nrow(data)) my_fa <- psych::fa(cor_matrix, fm=fm, nfactors=nfactors) scores <- apply(data, 1, function(x) sum(scale(x) * my_fa$loadings[,1])) if (score_type== "t") scores <- (scores*10)+50 if (score_type == "sten") scores <- (scores*2)+5.5 valid <- list(model=my_fa, method=fm,loadings=my_fa$Structure[,1], kmo=my_kmo, bartlett=my_bartlett, scores=scores) class(valid) <- "validity" items <- prep$items out <- list(data=data, items=items, rely=rely, valid=valid) class(out) <- "ItemAnalysis" out } print.ItemAnalysis<- function(x,...){ print(x$rely) print(x$valid) } print.validity <- function(x,...){ mod <- x$model$fa cat("A", x$method, "factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:\n") print(sort(x$loadings)) } print.reliability <- function(x,...){ ## or Rmd file? cat("\nReliability Analysis of", x$title, "ScaleData object.", "\n\nA",x$method, "correlation matrix of", x$k, "items was calculated and submitted to Reliability analysis. 
\nThe overall Cronbach's Alpha was", round(x$alpha$total$raw_alpha, 2), ".\n") if (length(x$suggest$low_cor)!=0){ cat("Item(s) that exhibited low correlation with the rest of the scale were:\n", .comma_and(x$suggest$low_cor), ".\n")} if (length(x$suggest$a_drop)!=0){ cat("Furthermore, deleting item(s)", .comma_and(x$suggest$a_drop), "may improve reliability.")} } .comma_and <- function(x){ if (length(x)==1) return(x) paste(paste(x[1:(length(x)-1)], collapse=","), "and", x[length(x)]) } ReportTable <- function(it, write_file=FALSE, sep=";"){ item_means <- colMeans(it$data, na.rm=T) item_sds <- apply(it$data, 2, sd, na.rm=T) out1 <- data.frame(Item = colnames(it$data), Corr = it$rely$alpha$item.stats[,"r.drop"], Loadings = it$valid$loadings, Mean = item_means, SD = item_sds) out1 <- out1[order(out1$Loadings, decreasing=T),] colnames(out1) <- c("Item", "Corr. to scale", "Factor Loading", "Mean", "SD") if (write_file==TRUE){write.table(out1, "ItemAnalysisOutput.csv", sep=sep, row.names=F)} out1 } GetScores <- function(it, write_file=FALSE, sep=";", scale_name="My_Scale"){ my_scores <- data.frame(it$valid$scores) colnames(my_scores) <- scale_name if (write_file==TRUE){ fname <- paste(scale_name, ".csv") write.table(my_scores, fname, sep=sep, row.names=F)} my_scores } ChooseBest <- function(it, n=5){ loads <- it$valid$loadings if (n > length(loads)) stop("This n exceeds the number of available items!") names(head(sort(loads, T), n)) } ShowItems <- function(it, n=5, write_file=FALSE, scale_name="MyItems"){ if (is.null(it$items)) stop("You have not specified the items argument, when defining the ScaleData Object. See ?ScaleData.") best <- ChooseBest(it, n) ind <- which(colnames(it$data) %in% best) out <- it$items[ind] if (write_file) { fn <- paste(scale_name, ".txt", collapse='') writeLines(out, fn)} list(best, out) } ReadItems <- function(filename, enc="UTF-8"){ readLines(filename, encoding=enc) }
f90f7066f5ec64708dab498354751f67eb6f7f5c
306a2f9b10c63884e56f3eddd77c83c3b96df55e
/Sims_svn/gensys/tag/gensys/man/qzdiv.Rd
0a05c53ee28fba6f32ab08cf2c80c5a596472687
[]
no_license
Allisterh/VAR_Sims_rfvar
0f74bda155a1d2e0768dcacc08f7a5fa6de8eddd
befb94ec18da7a7c88c581249e2382e86a319ebc
refs/heads/master
2023-03-17T13:32:11.188302
2017-10-11T19:15:22
2017-10-11T19:15:22
null
0
0
null
null
null
null
UTF-8
R
false
false
1,072
rd
qzdiv.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/qzdiv.R \name{qzdiv} \alias{qzdiv} \title{qzdiv} \usage{ qzdiv(stake, qzlist, flip = FALSE) } \arguments{ \item{stake}{the real number about which the roots are sorted} \item{qzlist}{the qz transform list. The \code{qz} from the QZ package uses different names in its output list, which have to be translated as at the start of \code{gensys} before being passed to this program.} \item{flip}{If \code{flip} is TRUE, then cases with \code{abs(b[i,i]/a[i,i]) < stake} are in lower right.} } \value{ The input list, with all matrices re-ordered. } \description{ Sort the roots in a qz decomposition around a value } \details{ Takes a list containing matrices \code{a, b}, orthonormal matrices \code{q,z}, and rearranges them so that all cases of \code{abs(b[i,i] / a[i,i]) > stake} are in lower right corner, while preserving U.T. and orthonormal properties and \code{q \%*\% a \%*\% t(z)} and \code{q \%*\% b \%*\% t(z)}. } \seealso{ \link{gensys}, \link{QZ::qz}, \link{qzswitch} }
52fc74a7093eabfd80cbecb9874db313ccfeeb6f
0aaecd6991a7f16759a1f8d2b3be6093f8a183af
/inst/snippet/mom-norm.R
2408ab2ed76e7bbd70dbfaa7811b64e6798b7557
[]
no_license
cran/fastR
3f0e3959dad4e5d361c341eb6bea670eab6bfdcc
572a5dc31e5aa85af4126662f95268329179c87b
refs/heads/master
2021-01-21T04:55:14.927487
2017-07-27T19:52:06
2017-07-27T19:52:06
17,695,981
0
0
null
null
null
null
UTF-8
R
false
false
108
r
mom-norm.R
x<-c(57.9, 70.8, 86.3, 92.3, 94.2, 117.0, 118.4, 122.4, 125.8, 134.4); mean(x); sd(x); sqrt(9/10 * var(x));
fb8be5fef246bef0c0eb3b6a987c50cc8d5380b9
fd570307c637f9101ab25a223356ec32dacbff0a
/src-local/specpr/src.specpr/newplot/setwav.r
fcd104ca5f6155ce9242bc62f2875725add3d8f0
[]
no_license
ns-bak/tetracorder-tutorial
3ab4dd14950eff0d63429291c648820fb14bb4cb
fd07c008100f6021c293ce3c1f69584cc35de98a
refs/heads/master
2022-07-30T06:04:07.138507
2021-01-03T22:19:09
2021-01-03T22:49:48
null
0
0
null
null
null
null
UTF-8
R
false
false
1,405
r
setwav.r
subroutine setwav implicit integer*4 (i-n) #ccc Version date: @(#)setwav.r 2.3 08/08/85 12:49:31 include "../common/pltcnt" real*4 temp if (iptype == 1 | iptype == -1) { if (xlabel==' ') #OLD way xlabel = 'Wavelength (!m!m)' xlabel = 'WAVELENGTH (!m!m)' } else if (iptype == 2) { if (wminp==0.0) { # case inverse waves if (wmaxp<0.0) wminp= -1.0e-30 else wminp = 1.0e-30 } if (wmaxp==0.0) { if (wminp<0.0) wmaxp= -1.0e-30 else wmaxp = 1.0e-30 } if (wmaxp < wminp) { temp = wminp wminp = wmaxp wmaxp = temp } if (xlabel==' ') #OLD way xlabel = 'Wavenumbers (cm}-1@)' xlabel = 'WAVENUMBERS (cm}-1@)' } else if (iptype == 3) { if (wminp==0.0) { # case reverse inverse waves if (wmaxp<0.0) wminp= -1.0e-30 else wminp = 1.0e-30 } if (wmaxp==0.0) { if (wminp<0.0) wmaxp= -1.0e-30 else wmaxp = 1.0e-30 } if (wmaxp > wminp) { # make sure range is in reverse order temp = wminp wminp = wmaxp wmaxp = temp } if (xlabel==' ') #OLD way xlabel = 'Wavenumbers (cm}-1@)' xlabel = 'WAVENUMBERS (cm}-1@)' } if (!nhscle) { if (wmaxp > wminp) { temp = (wmaxp-wminp)*0.02 wmaxp = wmaxp+temp wminp = wminp-temp } if (wmaxp < wminp) { # reverse wavenumber case temp = (wminp-wmaxp)*0.02 wmaxp = wmaxp-temp wminp = wminp+temp } } return end
d80c9f3d9e433bfd80468f91187428301bc7cf85
e9f297561abaa4d9870f760507705fda34983183
/man/arthropodaPhyloProfile.Rd
9ddd0c83402a3f45b56e260961a933ef815e74a6
[ "MIT" ]
permissive
mueli94/PhyloProfileData
f1c846fc029b45aa5ba47e294ec291c82e75d4f8
00a7a81a022c2994511063b5c2849edfcf95e69d
refs/heads/master
2020-05-25T22:41:22.562566
2019-05-23T14:18:29
2019-05-23T14:18:29
188,020,000
0
0
MIT
2019-05-23T13:56:06
2019-05-22T10:54:35
R
UTF-8
R
false
true
764
rd
arthropodaPhyloProfile.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{arthropodaPhyloProfile} \alias{arthropodaPhyloProfile} \title{Phylogenetic profiles of Arthropoda taxon set} \format{A data frame with 5 columns and 54656 rows: \itemize{ \item{geneID}{Gene ID, e.g. "97421at6656"} \item{ncbiID}{Taxonomy ID of the search species, e.g. "ncbi9598"} \item{orthoID}{Ortholog ID, e.g. "97421at6656|PANTR@9598@1|H2QTF9|1"} \item{FAS_F}{Forward FAS score, e.g. "0.6872810"} \item{FAS_B}{Backward FAS score, e.g. "0.9654661"} }} \usage{ data(arthropodaPhyloProfile) } \description{ Write the description here (what are the scores, how many taxa, how many proteins - what are those proteins) } \keyword{datasets}
123639a8ed37194215effc535ffa34c0d2a843b9
dec5e66aefd642fcc8a9b51438806f5284e5f58f
/Src/GetSatisfaction 1.0.R
519fc29fec0ba57cf5005f72ec3d58c008913d03
[ "Apache-2.0" ]
permissive
satcos/HappyNurse
dde546034563bf33f8b2fae33cebedd1efea7a77
ce34fdc32a9b129ad8e460a9c855b63e85798a66
refs/heads/master
2021-01-10T23:39:11.836287
2016-10-10T04:00:42
2016-10-10T04:00:42
70,410,197
1
0
null
null
null
null
UTF-8
R
false
false
432
r
GetSatisfaction 1.0.R
# Input: void # Output: Void cat("\nLoading GetSatisfaction 1.0.R") GetSatisfaction <- function(modelSchedule, Solution, noOfOptVar) { modelSchedule <- modelSchedule[, 3:22] Solution <- Solution[, 3:22] Solution[modelSchedule == 0] <- 0 modelSchedule[modelSchedule < 0 ] <- 0 modelSchedule[modelSchedule > 0 ] <- 1 satisfactionRate <- round(sum(modelSchedule == Solution) * 100 / noOfOptVar, 2) return(satisfactionRate) }
d46bfae38dfac291de1c15e3b0476534abea8891
e8584fdf4b9302610483dcb8e87740b238f50cb7
/plot1.R
b8f6614a768c4f733ded4aa105fc7d4ad71408fd
[]
no_license
roseapple0803/PM2.5Plots
c08c2ef69027fadbd1259f3af577d429c823eacd
18fec1837ad175eea488226baebce87ab2fd9570
refs/heads/master
2016-09-10T18:36:01.695020
2015-08-08T14:09:24
2015-08-08T14:09:24
39,744,343
0
0
null
null
null
null
UTF-8
R
false
false
1,796
r
plot1.R
library(plyr) library(dplyr) ## go retrieve these two files on the remote site if they don't exist drawPlot1 <- function() { ##Search and download packages if not present if("ggplot2" %in% rownames(installed.packages()) == FALSE) { install.packages("ggplot2") } library(ggplot2) if (!file.exists("summarySCC_PM25.rds") | !file.exists("Source_Classification_Code.rds")) { fileUrl <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip" download.file(fileUrl, "source.zip") unzip("source.zip", exdir=".") } ## the two files exist already summaryFile <- "summarySCC_PM25.rds" sourceFile <- "Source_Classification_Code.rds" ## read in these two files NEI <- readRDS(summaryFile) SCC <- readRDS(sourceFile) ## see the total pm2.5 from all sources for each of the years 1999, 2002, 2005, and 2008 by_year <- group_by(NEI, year) pm25 <- summarize(by_year, total=sum(Emissions)) png(file="plot1.png", width=480, height=480) ## call a graphics device and send the plot to the file, plot1.png par(mar=c(5.1,5.1,4.1,2.1)) with(pm25, plot(year,total, col=rgb(1,0,0,0.5), pch=19, type="b", xlab="YEAR", ylab="", main="PM2.5 emissions in USA", xaxt="n", yaxt="n")) mtext("TOTAL EMISSIONS (TONS)",side=2,line=4) ## adjust the text on y-axis axis(1, at = seq(1999, 2008, by = 3)) axis(2, at = seq(3e+6, 8e+6, by = 1e+6), las=1) ## consider using: ## xlab="Years", ylab=expression("Total" ~ PM[2.5] ~ "Emission, tons"), ## main=expression("Total emissions of" ~ PM[2.5] ~ "in the US")) ## or ## barplot( ## (Totalperyear$Emissions)/10^6, ## names.arg=Totalperyear$year, ## xlab="Year", ylab="PM2.5 Emissions (10^6 Tons)", ## main="PM2.5 Emissions From All Sources in US", ##col = "wheat" ) dev.off() ## close the device }
d783dadc7eb720195300c6ee3a74d2da57d5a317
e943294fe503d00513175402cc3f81089618e409
/man/rebuildPackage.Rd
63eab8289e881e3dadb45aa8ad3a1d8e279e7be6
[ "MIT" ]
permissive
sfeuerriegel/ResearchGroupTools
395aa520315003b7626b7a5707a03df963059d3e
96c0238f7261d471d522628be941a32aa9fc042a
refs/heads/master
2020-04-24T01:22:59.561316
2017-02-03T17:21:50
2017-02-03T17:21:50
66,145,324
3
0
null
null
null
null
UTF-8
R
false
true
480
rd
rebuildPackage.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/devtools.R \name{rebuildPackage} \alias{rebuildPackage} \title{Builds, loads and checks package} \usage{ rebuildPackage(updateReadme = FALSE) } \arguments{ \item{updateReadme}{Flag whether to update the README.Rmd (default: FALSE).} } \description{ Simple wrapper to rebuild, load and check package. This function specifically updates the manual as well. } \examples{ \dontrun{ rebuildPackage() } }
63eda15f450107d36dfecc3aa26e49f8095e0cf1
31d259895d0abb1625edc9b3f118bfafdcdddd3b
/HW/SPRMHW.R
75261d6af621c665ad35139d85061bfc6339e3cd
[]
no_license
cortlandwatson/Experimental-Design
0f9f66e9c6149253669c9e8a77aac59cf5560783
6ded6b3e52a2a9640b28a4fbac161b56815bc8f9
refs/heads/master
2020-03-19T06:54:30.214227
2018-07-20T15:50:20
2018-07-20T15:50:20
136,065,928
0
0
null
null
null
null
UTF-8
R
false
false
1,533
r
SPRMHW.R
####SPRM HomeWork library(mosaic) library(dplyr) library(tidyverse) library(ggplot2) library(CARS) library(pander) library(MASS) library(gplots) library(car) ### 3 Auxin <- read_csv("HW/auxin.csv") Auxin$plant <- as.factor(Auxin$plant) View(Auxin) #Descriptive favstats(Auxin$days~Auxin$plant) favstats(Auxin$days~Auxin$auxin) favstats(Auxin$days~Auxin$deblading) par(mfrow=c(3,2)) boxplot(Auxin$days~Auxin$auxin, id.method="y") boxplot(Auxin$days~Auxin$deblading, id.method="y") boxplot(Auxin$days~Auxin$plant, id.method="y") interaction.plot(Auxin$auxin,Auxin$deblading, Auxin$days) interaction.plot(Auxin$deblading,Auxin$auxin, Auxin$days) plotmeans(Auxin$days~Auxin$auxin, error.bars="se") plotmeans(Auxin$days~Auxin$deblading, error.bars="se") plotmeans(Auxin$days~Auxin$plant, error.bars="se") #Inferential Statistics splitplot <- aov(Auxin$days ~ Auxin$auxin + Auxin$plant + Auxin$auxin*Auxin$deblading) summary(splitplot) F=309.8/40.31 1-pf(F,3,12) par(mfrow=c(1,2)) plot(splitplot,which=1:2) hist(splitplot$residuals) qqPlot(splitplot$residuals) #### 4 x <- rep(NA,20) for (i in 2:20){ x[i] <- power.anova.test(groups=4, between.var=var(c(0,0,0,6)), within.var=4.7^2, sig.level=0.05, n=i)$power } x i n <- c(1:20) scatterplot(x~n, reg.line=FALSE, spread=FALSE, boxplots=FALSE) Injury <- read_csv("HW/headinjury.csv") View(Injury) 1-###Project sample(1:9, 9, replace=FALSE) power.anova.test(groups=9, between.var=var(c(0,0,0,0,0,0,0,0,12)),within.var=77, sig.level=0.05, power=.80) sqrt(77)
8a86c1abb8dd095e95ddadd2afdf222d9db2bede
6a4e4d968f5b8d0aefc4177a0d794a607e10f9f0
/man/pmle.norm.Rd
f664071523a69297c3f2502fedca8c45f9d3cc84
[]
no_license
cran/MixtureInf
07ebf123aadd2a2eeddcf370690434fd24a287ee
aa55d75f9e34568f00f5dc240f35e09a7b74fcca
refs/heads/master
2021-01-18T21:19:28.145574
2016-04-07T01:10:26
2016-04-07T01:10:26
17,680,979
0
0
null
null
null
null
UTF-8
R
false
false
2,464
rd
pmle.norm.Rd
\name{pmle.norm} \alias{pmle.norm} \title{ compute the PMLE or MLE of the parameters under a mixture of normals with unequal variance } \description{ Compute the PMLE or MLE of the parameters under a mixture of normals with unequal variance. } \usage{ pmle.norm(x, m0 = 1, lambda = 0, inival=NULL, len = 10, niter = 50, tol = 1e-06, rformat = FALSE) } \arguments{ \item{x}{ data, can be either a vector or a matrix with the 1st column being the observed data values and the 2nd column being the corresponding frequencies. } \item{m0}{ order of the finite mixture model, default value: m0 = 1. } \item{lambda}{ level of penalty for mixing proportions, default value: lambda = 0. } \item{inival}{ initial values for the EM-algorithm, a 3m0-dimension vector including m0 mixing proportions, m0 component means and m0 component variances, or a matrix with 3m0 columns, default value: inival = NULL. (if not provided, random initial values are used.) } \item{len}{ number of random initial values for the EM-algorithm, default value: len = 10. } \item{niter}{ number of iterations for all initial values in the EM-algorithm. The algorithm runs EM-iteration niter times from each initial value. The iteration will restart from the parameter value with the highest likelihood value at the point and run until convergence. default value: niter = 50. } \item{tol}{ tolerance level for the convergence of the EM-algorithm, default value: tol = 1e-6. } \item{rformat}{ form of the digital output: default of R package is used when rformat = T; If rformat = T, the digital output is rounded to the 3rd dicimal place if it is larger than 0.001, keeps 3 significant digits otherwise. The default value of rformat is F. } } \value{ Return the PMLE or MLE of the parameters with order = m0 (mixing proportions, mixing means and mixing variances), log-likelihood value at the PMLE or MLE and the penalized log-likelihood value at the PMLE. 
} \author{ Shaoting Li, Jiahua Chen and Pengfei Li } \seealso{ emtest.norm, plotmix.norm, rmix.norm } \examples{ #load the pearson's crab data, #fit the 2 and 3 component normal mixture models, #plot the histgorams of the observations and the fitted densities. data(pearson) out1 <- pmle.norm(pearson,2,1) plotmix.norm(pearson,out1) \dontrun{ out2 <- pmle.norm(pearson,3,1) plotmix.norm(pearson,out2) par(mfrow=c(1,1)) } } \keyword{models}
034620aeb1124d9d2d8b1d089209ef42dbc13a43
37b38b3858dd099c8b878d8e90f8b90d3d8c559d
/Taxi_Data.R
f1a96ce400df588d5524fd39babba803c3989908
[]
no_license
sanket1sinha/Data-Science-with-R
8b3fc66c7c5f037831719c27cb0190843107624b
bcabf6873511f9b2457e3b47b90930f9c4fb48ca
refs/heads/master
2021-01-24T01:01:54.816192
2018-02-25T02:11:18
2018-02-25T02:11:18
122,792,889
0
0
null
null
null
null
UTF-8
R
false
false
10,658
r
Taxi_Data.R
### ##Part a) #Loading the green_tripdata_2015-09 dataset into taxi_df library(data.table) taxi_df <- fread("https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2015-09.csv") ##Part b) # Reporting number of rows and columns in the dataframe taxi_df number_of_row=nrow(taxi_df) number_of_column=ncol(taxi_df) ### DATA CLEANING min(taxi_df$Trip_distance) # We can observe that the minimum value of trip distance is 0 which can not be possible, hence we will remove all those #values from our dataframe for which we have trip distance as 0. min(taxi_df$Fare_amount) # The minimum value of fare amount is -475, it shows that there are certain values for which the fare amount is negative. We will remove #all those values from our dataframe. min(taxi_df$Tip_amount) # The minimum value of fare amount is -50, it shows that there are certain values for which the tip amount is negative. We will remove #all those values from our dataframe. min(taxi_df$Total_amount) # The minimum value of total amount is also -475, it shows that there are certain values for which the total amount is negative. We will remove #all those values from our dataframe. 
#Now we will filter our dataframe based on these values which are mentioned above #nrow(taxi_df) #min(taxi_df$Tip_amount) taxi_df<-taxi_df[with(taxi_df, Trip_distance >0), ] taxi_df<-taxi_df[with(taxi_df, Fare_amount >=2.5), ] taxi_df<-taxi_df[with(taxi_df, Tip_amount >=0), ] nrow(taxi_df) ### ##Part a) #Plotting a Histogram using ggplot install.packages("ggplot2") library("ggplot2") #dev.off()---use this function in case ggplot does not work after loading ggplot(data=taxi_df, aes(taxi_df$Trip_distance))+ geom_histogram( binwidth =1,color = "red", fill = "blue") + xlim(0, 30)+ labs(title="Histogram for Trip distance", x="Distance", y="Count") ggplot(data=taxi_df, aes(taxi_df$Trip_distance))+ geom_density( color = "red", fill = "blue" ) + xlim(0, 30)+ labs(title="Density plot for Trip distance", x="Distance", y="Count") ##Part b) #The Histogram obtained for "Trip distance" is right skewed. It can be observed that #the mean is larger than the median. #Hypothesis:The trips are not normally distributed hence they cannot be random. It seems that maximum #number of trips happen at certain time i.e. during peak hours. ### ##Part a) # Fetching hours from pickup time and adding these hours into a new column "hours" of taxi_df. 
#NOTE: The 0th hour is corresponding to 12 AM pickup_time <- as.POSIXlt(taxi_df$lpep_pickup_datetime) taxi_df$hours<-pickup_time$hour #Making 2 new columns which have values of trip day and trip week according to month taxi_df$day_of_trip=pickup_time$mday #I am assuming that a week is from Sunday to Saturday taxi_df$week_of_month[taxi_df$day_of_trip >=1 & taxi_df$day_of_trip <= 5]<-1 taxi_df$week_of_month[taxi_df$day_of_trip >=6 & taxi_df$day_of_trip <= 12]<-2 taxi_df$week_of_month[taxi_df$day_of_trip >=13 & taxi_df$day_of_trip <= 19]<-3 taxi_df$week_of_month[taxi_df$day_of_trip >=20 & taxi_df$day_of_trip <= 26]<-4 taxi_df$week_of_month[taxi_df$day_of_trip >=27 & taxi_df$day_of_trip <= 30]<-5 #calculating and plotting mean and median by hours mean_trip_distance= aggregate(taxi_df$Trip_distance, list(hours=taxi_df$hours), mean) plot(mean_trip_distance,col = 'blue', pch = 19, cex = 0.5,main = "Mean distance based on hour", xlab = "Hours", ylab = "Mean",type='l') median_trip_distance= aggregate(taxi_df$Trip_distance, list(hours=taxi_df$hours), median) plot(median_trip_distance,col = 'red', pch = 19, cex = 0.5,main = "Median distance based on hour", xlab = "Hours", ylab = "Median",type='l') ##Part b) #creating a new column which has the value 'Yes' if the ratecodeid is 2(for JFK) or 3(for Newark) and 'No' for rest of the values. 
taxi_df$airport_pick_or_drop[taxi_df$RateCodeID==2 | taxi_df$RateCodeID==3]<-'Yes' taxi_df$airport_pick_or_drop[taxi_df$RateCodeID==1 | taxi_df$RateCodeID==4 | taxi_df$RateCodeID==5 | taxi_df$RateCodeID==6]<-'No' library(plyr) count(taxi_df$airport_pick_or_drop) # using count from library 'plyr' we can observe that a total of 4280 trips originated or ended at new york airports average_fair= aggregate(taxi_df$Fare_amount, list(taxi_df$airport_pick_or_drop),mean,drop=FALSE) #the average_fair from or to airports is 53.24755 average_tip_amount= aggregate(taxi_df$Tip_amount, list(taxi_df$airport_pick_or_drop),mean,drop=FALSE) #average tip for trips from and to airports is 5.395044 ### ##Part a) #Calculating tip percent and storing all those values in a new column 'taxi_percent' in taxi_df taxi_df$tip_percent=round(100*taxi_df$Tip_amount/taxi_df$Total_amount,2) ##Part b) # Building a model which predict the tip percentge based on the values of Total amount, Trip duration and Trip distance # Calculating the total duration of the trip in minutes pickup_time = as.POSIXct(taxi_df$lpep_pickup_datetime,format='%Y-%m-%d %H:%M:%S') drop_time= as.POSIXct(taxi_df$Lpep_dropoff_datetime,format='%Y-%m-%d %H:%M:%S') taxi_df$trip_duration= round((drop_time-pickup_time)/60,2) taxi_df$trip_duration= as.numeric(taxi_df$trip_duration) # Now as few of the values in trip duration were 0 We will remove these values from our dataset. 
taxi_df<-taxi_df[with(taxi_df, trip_duration >0), ] #Creating a traning and testing data set for model prediction #Training data set having 1000000 number of rows train_data=taxi_df[1:1000000] # Testing data set having rest of the rows from taxi_df test_data=taxi_df[1000001:1468366] #creating a variable test count which contains value of tip percent from testing dataset test_counts= test_data$tip_percent #Using lm() for prediction prediction_model = lm(train_data$tip_percent ~ train_data$Total_amount + train_data$trip_duration + train_data$Trip_distance , data = train_data) summary(prediction_model) # Looking at the values obatined from Summary function we can obsereve how the value of tip percent changes when we use total amount, #trip duration and trip distance as predictor variables. plot(train_data$tip_percent ~ train_data$Total_amount + train_data$trip_duration + train_data$Trip_distance ) #Predicting the model p = predict(prediction_model, test_data) #Plotting the predicted model plot(p - test_counts) #Also plotting a histogram to showcase the tip percentage ggplot(data=taxi_df, aes(taxi_df$tip_percent)) + geom_histogram(bins=50)+labs( x = "Tip in Percentage", y = "count") ### ##Part a) #calculating the average speed in miles per hour and adding that value in our dataframe taxi_df$average_speed = round(taxi_df$Trip_distance/(taxi_df$trip_duration/60),2) #Now as the maximum speed should be below or equal to 25mph in NYC "http://nymag.com/daily/intelligencer/2014/11/things-to-know-about-nycs-new-speed-limit.html". #We will clean the data based on the average speed. taxi_df<-taxi_df[with(taxi_df, average_speed <= 25), ] ##Part b) #HYPOTHESIS TESING #Finding the averrage speed according to the week of month average_speed_weekwise= aggregate(taxi_df$average_speed, list(week=taxi_df$week_of_month),mean) #Hypothesis Testing using Paired student t-test. We are using this method as we are concerned with average speeds for each week. 
#Let's define null and alternate hypothesis #Null Hypothesis(H0): Average speed for each week is same and true difference in mean is zero for both samples. #Alternate Hypothesis(H1): Average speeds are different for different week and true difference is not zero for the samples. #making samples of 1st 100 elements from each week week1_sample=taxi_df$average_speed[taxi_df$week_of_month==1][1:100] week2_sample=taxi_df$average_speed[taxi_df$week_of_month==2][1:100] week3_sample=taxi_df$average_speed[taxi_df$week_of_month==3][1:100] week4_sample=taxi_df$average_speed[taxi_df$week_of_month==4][1:100] week5_sample=taxi_df$average_speed[taxi_df$week_of_month==5][1:100] #performing Hypothesis testing for week1 and week2 sample t.test(week1_sample,week2_sample, paired=TRUE) #performing Hypothesis testing for week1 and week3 sample t.test(week1_sample,week3_sample, paired=TRUE) #performing Hypothesis testing for week1 and week4 sample t.test(week1_sample,week4_sample, paired=TRUE) #performing Hypothesis testing for week1 and week5 sample t.test(week1_sample,week5_sample, paired=TRUE) #performing Hypothesis testing for week2 and week3 sample t.test(week2_sample,week3_sample, paired=TRUE) #performing Hypothesis testing for week2 and week4 sample t.test(week2_sample,week4_sample, paired=TRUE) #performing Hypothesis testing for week2 and week5 sample t.test(week2_sample,week5_sample, paired=TRUE) #performing Hypothesis testing for week3 and week4 sample t.test(week3_sample,week4_sample, paired=TRUE) #performing Hypothesis testing for week3 and week5 sample t.test(week3_sample,week5_sample, paired=TRUE) #performing Hypothesis testing for week4 and week5 sample t.test(week4_sample,week5_sample, paired=TRUE) #After performing all the above mentioned tests it is evident that p-value for all the test is greater than 0.05 and hence # we can REJECT NULL HYPOTHESIS in favor of Alternative Hypothesis. #Conclusion: The average speeds of NYC green taxi is different for each week. 
# Plotting a boxplot for the average speed based on each week. boxplot(taxi_df$average_speed~taxi_df$week_of_month,data=taxi_df, col=(c("gold","darkgreen")),main="Boxplot for average speed on week", xlab="Week", ylab="Speed in miles per hour") #part c) #Plotting a boxplot for the average speed based on each hour of day boxplot(taxi_df$average_speed~taxi_df$hours,data=taxi_df, col=(c("gold","darkgreen")),main="Boxplot for average speed on hours", xlab="Hours per day", ylab="Speed in miles per hour") #HYPOTHESIS TESTING #Hypothesis Testing using One-way Analysis of Variance (ANOVA). We are using this method as we are concerned with average speeds for each hour. #Let's define null and alternate hypothesis #Null Hypothesis(H0): Average speed for each hour is same and true difference in mean is zero. #Alternate Hypothesis(H1): Average speeds are different for different hour and true difference is not zero for the samples. model_on_hours <- lm(taxi_df$average_speed~taxi_df$hours,data=taxi_df) summary(model_on_hours) anova(model_on_hours) #Using the ANOVA test it is clear that the speeds are different for each hour. Hence we will reject Null Hypothesis in favor of #Alertnate Hypothesis. #Using the boxplot we can also infer that the speed is maximum at 5 AM and gradually decreases from there. The speed starts increasing slowly #after 6 PM. ######## THE END
6a3689bb4908b5ea55e84d6c5d497cd06ff462bb
48aa070e16208befa4c2181ac48ad0fbd9e4fa4b
/merging_NRE_data.R
0ec71df4cf064a4049581648d9110dfca0e0c94b
[]
no_license
saahithi1/Research
81ce56cdd437987310d1054284037c419a337f2d
e0d88fd82a39bf9bb006a983c0137cd05cf4b65b
refs/heads/master
2021-07-09T17:30:44.743986
2020-06-22T20:56:09
2020-06-22T20:56:09
143,492,725
0
0
null
null
null
null
UTF-8
R
false
false
552
r
merging_NRE_data.R
setwd("/Users/saahithi/Desktop/Research/merging") # Reading in data coding <- data.frame(read.csv("AnneChrisMerged.csv", header = TRUE)) NRE <- data.frame(read.csv("Latest_NRE_data_feb_2018_w_group.csv", header = TRUE)) # Looking at column names names(coding) names(NRE) # Renaming column B in coding to match column Z in NRE coding$DescribePerjuryorFalseAccusation <- coding$CDescribePerjuryorFalseAccusation # Merge by column total <- merge(NRE, coding, by="DescribePerjuryorFalseAccusation") # Download .csv write.csv(total, file="merged.csv")
0c1a3bcf2f003f9b17bbb98b1edb60d47e5fa894
d1be17ffee0a234f18792f993e92b923d1ba6ba1
/script.contours_for_google_earth.R
57d2cbe5c66abd785a9cf253c66dcde4d9bf4604
[]
no_license
AndresEGonzalez/GIS_TOPO_KML
e86263ecf9ec32d00999980d25b21cb70efd0813
69bd3946ce5f706e1b4f2014fe869b27195cecf9
refs/heads/master
2021-01-20T18:54:41.068991
2016-08-02T19:18:49
2016-08-02T19:18:49
63,365,997
0
0
null
null
null
null
UTF-8
R
false
false
4,379
r
script.contours_for_google_earth.R
# SCRIPT CREATE GEARTH KML FILES WITH CONTOURS LINES (.tif RASTER) # USE AFTER THE -> script.GIS.hgt.to.tif.R rm(list=ls()) # sudo apt-get install libgdal1-dev libproj-dev # http://stackoverflow.com/questions/30424608/r-error-in-fetchkey-lazy-load-database # http://stackoverflow.com/questions/15248815/rgdal-package-installation?lq=1 # http://stackoverflow.com/questions/30320583/unable-to-install-rgdal-in-ubuntu-14-04-undefined-to-reference-to-pj-ctx-fclos # http://stackoverflow.com/questions/7852679/trouble-loading-rgdal-in-rstudio?rq=1 install.packages("rgdal", dependencies=T) install.packages("raster", dependencies=T) install.packages("maptools", dependencies=T) install.packages("rgeos", dependencies=T) install.packages("RCurl", dependencies=T) install.packages("magrittr") # remove.packages("rgdal")- # install.packages("gdalUtils", dependencies=T) library(rgdal) library(raster) library(maptools) library(rgeos) library(RCurl)#loading required package: bitops library(plyr) # dem in hgt-format downloaded from http://www.viewfinderpanoramas.org/dem3.html#alps # as done here: # http://thebiobucket.blogspot.co.at/2013/06/use-r-to-bulk-download-digital.html # splitted into tiles for easier handling and saved to tiff as done here: # http://thebiobucket.blogspot.co.at/2014/03/use-gdal-from-r-console-to-split-raster.html setwd("~/Documents/1_WORKING/DATA/GIS_Database/DEM_2/SELECT_RASTER") (filenames <- gsub(".tif", "", dir(pattern = ".tif"))) ## funtion make_kml_contours ## arguments ## step: altitude in between contours, starting at 0 m ## simplify: 1-0, 1 is no generalization, 0 is straight line ## ftp: optional ftp uload make_kml_contours <- function(filename, step = 25, simplify = 0.001, ftp = F) { ## coerce into SpatialGridDataFrame dem <- readGDAL(paste0("~/Documents/1_WORKING/DATA/GIS_Database/DEM_2/SELECT_RASTER/", filename, ".tif")) dem$band1[is.na(dem$band1)]<-0 #AGL ## make image object for contourLines function im <- as.image.SpatialGridDataFrame(dem) # check: 
summary(im$z) cl <- contourLines(im, levels = seq(0, max(im$z), step))#cero ## back convert to SpatialLinesDataFrame SLDF <- ContourLines2SLDF(cl) proj4string(SLDF) <- CRS("+proj=longlat +datum=WGS84") ## simplify simplSLDF <- gSimplify(SLDF, tol = simplify) ## view results # image(dem, col = gray.colors(20)) # plot(simplSLDF, add = T) ## convert simplified SLDF to KML (btw, that's how to extract IDs unlist(lapply(slot(simplSLDF, 'lines'), function(x) slot(x, 'ID'))) ) out <- sapply(slot(simplSLDF, "lines"), function(x) { # get meter level, by picking from sequence by ID: ID = 1 -> 1*step m, ID = 2, 2*step m, etc. # m <- seq(min(im$z), max(im$z), step)[as.numeric(gsub("C_", "", slot(x, "ID")))]#m <- seq(0, max(im$z), step)[as.numeric(gsub("C_", "", slot(x, "ID")))] # Round minimal isoline to 100' (or 194 -> 200) m <- seq(round_any(min(im$z), 100, f = ceiling), max(im$z), step)[as.numeric(gsub("C_", "", slot(x, "ID")))] # make thicker lines at 100 and 500 m Isolines, and color white kmlLine(x, name = m, description = paste0(m, "m-Isoline"), col = "#FFFefb45", lwd = ifelse(m%%100 == 0, ifelse(m%%500, 3, 1.25), 0.45)) }) # write KML tf <- tempfile() kmlFile <- file(tf, "w") cat(kmlLine(kmlname = filename, kmldescription = "<i>Contour lines by A. González based on script K. 
Cechini, see <a href=\"htp://gimoya.bplaced.net/terrain-overlays.blogspot.co.at\">Terrain-Overlays</a> for details</i>")$header, file = kmlFile, sep = "\n")# kmlname = "Contour Lines"-> filename cat(unlist(out["style", ]), file = kmlFile, sep = "\n") cat(unlist(out["content", ]), file = kmlFile, sep = "\n") cat(kmlLine()$footer, file = kmlFile, sep = "\n") close(kmlFile) kmlName <- paste0("TOPO_", filename, ".kml") file.copy(tf, kmlName, overwrite = FALSE)#file.copy -> file.rename error: https://github.com/wch/vtest/issues/14 if (ftp == T) ftpUpload(kmlName, paste0('ftp://gimoya:password@gimoya.bplaced.net/Terrain-Overlays/downloads/', kmlName)) } for (filename in filenames[1:length(filenames)]) { tryCatch(make_kml_contours(filename, step = 25, simplify = 0.000001, ftp = F),#0.000001 error = function(e) message(paste0("\n..something happend with dataset ", filename, ":\n", e))) cat("File ", filename, " done!..\n") }
d88716da03fd4ff4ce1af48c5bceb2eed901e4df
f2478ed514a0559e433fb8f20c20fd2ea631c8f9
/man/DataSource.Rd
28bbae8c5434c5f275489577f506574375f83ae7
[ "CC0-1.0" ]
permissive
qdread/useeior
44555c6fc941a5750ce46a36243a22a4820d36ce
53494293c34ca53a2bb9e92c418476754003bf80
refs/heads/master
2020-09-15T13:39:34.957252
2019-11-22T23:20:04
2019-11-22T23:20:04
223,462,878
0
0
CC0-1.0
2019-11-22T18:32:36
2019-11-22T18:32:35
null
UTF-8
R
false
true
531
rd
DataSource.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ConfigDocumentation.R \docType{data} \name{DataSource} \alias{DataSource} \title{DataSource} \format{A list of data source attributes \describe{ \item{Title}{string, Name of source}, \item{DataYear}{numeric, year of data}, \item{Author}{string, name or orgs or individuals} \item{URL}{string, URL data retrieved from} \item{Primary}{boolean, TRUE if this is the primary source} }} \usage{ DataSource } \description{ DataSource } \keyword{datasets}
4eb17a76dea046bd4a2f22f34bed1930b070f78e
2bdff965877caa16d587e27879e899abf48f174e
/gp.combat.R
1956ef3628462b4dd8e368db72a7eb9d3b4d9e06
[]
no_license
genepattern/ComBat
0c56f358384f377b3c3832dcd7372439587e7c2e
f4ce91f00bbfc79c755069182fa84e6896d2bdf2
refs/heads/master
2023-04-14T15:37:48.305636
2021-02-27T00:54:20
2021-02-27T00:54:20
52,296,184
0
0
null
2021-02-27T00:54:20
2016-02-22T18:40:14
R
UTF-8
R
false
false
7,765
r
gp.combat.R
setup <- function(libdir) { source(paste(libdir, "ComBat.R", sep='')) source(paste(libdir, "common.R", sep='')) #setLibPath(libdir) #install.required.packages(libdir) } parseCmdLine <- function(...) { suppressMessages(.parseCmdLine(...)) } .parseCmdLine <- function(...) { args <- list(...) input.file.name <- '' sample.info.file.name <- '' covariates <- '' filter <- '' prior.plots <- '' par.prior <- '' output.file.name <- '' libdir <- '' for(i in 1:length(args)) { flag <- substring(args[[i]], 0, 2) value <- substring(args[[i]], 3, nchar(args[[i]])) if(flag=='-i') { input.file.name <- value } else if(flag=='-s') { sample.info.file.name <- value } else if(flag == '-c') { if(tolower(value) == "all" || tolower(value) == "none") { covariates <- tolower(value) } else { value <- strsplit(value, ",") value <- suppressWarnings(lapply(value, as.integer)) value <- lapply(value, unlist) if(regexpr(TRUE, lapply(value, is.na)) != -1) { stop("Covariate columns can only be set to all, none, or a list of one or more column indices.") } covariates <- value } } else if(flag=='-f') { filter <- as.numeric(value) } else if(flag=='-p') { prior.plots <- as.logical(value) } else if(flag=='-m') { par.prior <- as.logical(value) } else if(flag=='-o') { output.file.name <- value } else if(flag=='-l') { libdir <- value } else { stop(paste("unknown flag ", flag, " value ", value, sep=""), .call=FALSE) } } gp.combat.R(input.file.name = input.file.name, sample.info.file.name =sample.info.file.name, libdir = libdir, output.file.name = output.file.name, prior.plots = prior.plots , par.prior = par.prior, filter = filter, covariates = covariates) } gp.combat.R <- function(input.file.name, sample.info.file.name, libdir, output.file.name, prior.plots, par.prior, filter, covariates) { setup(libdir) dataset <- read.dataset(input.file.name) data.matrix <- dataset$data sample.info <- read.table(sample.info.file.name, header=T, sep='\t',comment.char='') if(length(colnames(sample.info)) < 3) { stop("The sample 
info file must have the 3 columns: Array, Sample, Batch.") } if(colnames(sample.info)[1] != "Array") { stop("Error: 'Array' column must be the first column in the sample info file.") } if(colnames(sample.info)[2] != "Sample") { stop("Error: 'Sample' column must be the second column in the sample info file.") } if(colnames(sample.info)[3] != "Batch") { stop("Error: 'Batch' column must be the third column in the sample info file.") } if(is.null(covariates) || covariates == '' || tolower(covariates) == "all") { covariates <- 'all' } else if(tolower(covariates) == "none") { covariates <- c(3) } else if(min(unlist(covariates)) <= 3 || max(unlist(covariates)) > length(colnames(sample.info))) { stop("Invalid covariates column parameter setting. Covariates column(s) must be less than or equal to the number of columns in the sample info file and greater than 3.") } else covariates <- c(3, covariates, recursive = TRUE) if(!is.null(dataset$calls)) { if(is.null(filter) || filter == '') filter <- 1 else if (filter <= 0 || filter >= 1) stop("Absent calls filter must be greater than 0 and less than 1") } else { filter <- F } gct.ext <- regexpr(paste(".gct","$",sep=""), tolower(output.file.name)) res.ext <- regexpr(paste(".res","$",sep=""), tolower(output.file.name)) if(gct.ext[[1]] != -1 || res.ext[[1]] != -1) { output.file.name <- substr(output.file.name, 0, (nchar(output.file.name)-4)) } combat.input.file.name <- paste(output.file.name, ".temp.txt", sep='') on.exit(unlink(combat.input.file.name)) column.names <- c("ProbeID", colnames(data.matrix)) input.table <- data.matrix #check if it is a res file if(!is.null(dataset$calls)) { column.names <- NULL input.table <- NULL for(i in 1:ncol(data.matrix)) { col.name <- colnames(data.matrix)[i] calls.column.name <- paste(col.name, "call", sep=' ') column.names <- c(column.names, col.name, calls.column.name) input.table <- cbind(input.table, data.matrix[,i]) input.table <- cbind(input.table, as.character(dataset$calls[,i])) } 
row.names(input.table) <- row.names(data.matrix) column.names <- c("ProbeID", column.names) } cat(column.names, file=combat.input.file.name, sep="\t") cat("\n", file = combat.input.file.name, append=TRUE) write.table(input.table, file=combat.input.file.name, sep="\t", col.names = FALSE, quote=FALSE, append=TRUE) if(prior.plots && par.prior) { if (.Platform$OS.type == "windows") { windows(width = 17, height = 14) } else if (capabilities("jpeg")) { jpeg(filename = paste(output.file.name, ".plot.jpeg", sep=''), width = 800, height = 720) } else { library(Cairo) CairoPNG(filename = paste(output.file.name, ".plot.png", sep=''), width = 800, height = 720, pointsize = 12, quality = 75, bg = "white") } } combat.result <- ComBat(combat.input.file.name, sample.info.file.name, filter = filter, skip = 1, write = F, prior.plots = prior.plots, par.prior = par.prior, covariates = covariates) if(is.character(combat.result)) stop(paste("An error occurred which running ComBat. ", combat.result, sep='')) if(!is.list(combat.result)) unlink(paste(output.file.name, "*", sep='')) if(prior.plots && par.prior) { if (.Platform$OS.type == "windows") { savePlot(filename = paste(output.file.name, ".plot", sep=''), type ="jpeg", device = dev.cur()) } dev.off(); } gene.info <- combat.result[,1] combat.result <- subset(combat.result, select = colnames(data.matrix)) row.names(combat.result) <- gene.info dataset$data <- combat.result if(!is.null(dataset$row.descriptions)) { descriptions.row <- rbind(row.names(data.matrix), dataset$row.descriptions) colnames(descriptions.row) <- row.names(data.matrix) descriptions.row <- subset(descriptions.row, select = row.names(combat.result)) dataset$row.descriptions <- descriptions.row[2,] } if(!is.null(dataset$column.descriptions)) { dataset$column.descriptions <- dataset$column.descriptions[ dataset$column.descriptions!=""] descriptions.column <- rbind(colnames(data.matrix), dataset$column.descriptions) column.names <- colnames(combat.result) 
colnames(descriptions.column) <- descriptions.column[1,] descriptions.column <- subset(descriptions.column, select=column.names) descriptions.column <- descriptions.column[2,] dataset$column.descriptions <- descriptions.column } if(!is.null(dataset$calls)) { rownames <- row.names(dataset$calls) calls <- t(dataset$calls) colnames(calls) <- rownames calls <- subset(calls, select = row.names(combat.result)) calls <- t(calls) row.names(calls) <- gene.info colnames(calls) <- colnames(dataset$calls) dataset$calls <- calls write.res(dataset, output.file.name) } else { write.gct(dataset, output.file.name) } }
7c4d586b46fb2b32a267459562075d22860187f6
f19dc4ea6af08834bff90a22b2786be169c07db1
/R/ChibLL.R
0082551ae93f42acb1a83bc7698209eca51784f0
[]
no_license
hoanguc3m/fatBVARS
71df1bff45b963f06258d2efa4e92036a6e7209b
472ea0e344975bb4538182e3e5302708c9fd1a7c
refs/heads/master
2022-12-21T13:42:35.405562
2022-12-12T18:00:38
2022-12-12T18:00:38
226,090,978
6
2
null
null
null
null
UTF-8
R
false
false
44,099
r
ChibLL.R
#' Marginal log likelihood of BVAR model #' #' This function returns a marginal log likelihood density of BVAR-SV-fatTail model. #' @param Chain The fatBVARSV object from command BVAR. #' @return The marginal log likelihood density of of BVAR model #' @export #' @examples #' \dontrun{ #' marginal_dens1 <- marginalLL(Chain1) #' } #' #' @export ChibLLP <- function(Chain, inits, ndraws = NULL, numCores = NULL){ RhpcBLASctl::blas_set_num_threads(1) K <- Chain$K p <- Chain$p m <- K + K*K*p dist <- Chain$dist prior <- Chain$prior SV <- Chain$prior$SV y <- Chain$y yt <- t(y) t_max <- nrow(y) if (is.null(Chain$y0)){ y0 <- matrix(0, ncol = K, nrow = p) } else { y0 <- Chain$y0 } xt <- makeRegressor(y, y0, t_max, K, p) # y_combine <- rbind( tail(y0, p) , y) mcmc <- Chain$mcmc if (is.null(ndraws)) ndraws <- nrow(mcmc) B_mat <- get_post(mcmc, element = "B") A_mat <- get_post(mcmc, element = "a") Gamma_mat <- get_post(mcmc, element = "gamma") # B_gen <- apply(B_mat, MARGIN = 2, FUN = mean) # A_gen <- apply(A_mat, MARGIN = 2, FUN = mean) # Gamma_gen <- apply(Gamma_mat, MARGIN = 2, FUN = mean) B_gen <- as.numeric(inits$B0) A_gen <- as.numeric(inits$A0[lower.tri(inits$A0)]) Gamma_gen <- inits$gamma Sigma_mat <- get_post(mcmc, element = "sigma") # No SV is sigma / SV is sigma^2 # Sigma_gen <- apply(Sigma_mat, MARGIN = 2, FUN = mean) if (SV) { Sigma_gen <- diag(inits$sigma_h) } else { Sigma_gen <- inits$sigma } H_mat <- get_post(mcmc, element = "h") H0_mat <- get_post(mcmc, element = "lh0") Nu_mat <- as.matrix(get_post(mcmc, element = "nu")) W_mat <- get_post(mcmc, element = "w") # H_gen <- apply(H_mat, MARGIN = 2, FUN = mean) # H0_gen <- apply(H0_mat, MARGIN = 2, FUN = mean) # Nu_gen <- apply(Nu_mat, MARGIN = 2, FUN = mean) H_gen <- as.numeric(inits$h) Nu_gen <- inits$nu sum_log <- rep(0, ndraws) lprior <- 0 if (is.null(numCores)) numCores <- parallel::detectCores()*0.5 if (SV){ h_mean <- H_gen #################### End prior ########################### if (dist == "Gaussian") { 
lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + # sum(dnorm(H0_gen, mean = log(prior$sigma), sd = sqrt(4), log = T)) + sum(dgamma(Sigma_gen, shape = 0.5, rate = 0.5 * prior$sigma_S0, log = T)) sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma_h <- Sigma_gen h0 <- 2 * log(prior$sigma) # h0 <- H0_gen ytilde <- as.numeric(A %*% (yt - B %*% xt)) # from dim K * t_max to (Kt_max) * 1 Chibint_h_Gaussian(ytilde = ytilde, h0 = h0, sigma_h = sigma_h, t_max = t_max, K = K) }, mc.cores = numCores) } else { if (dist == "Student") { w_mean <- apply(W_mat, MARGIN = 2, mean) lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(dgamma(Sigma_gen, shape = 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + # sum(dnorm(H0_gen, mean = log(prior$sigma), sd = sqrt(4), log = T)) + dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T) + log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) # Truncated sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma_h <- Sigma_gen h0 <- 2 * log(prior$sigma) # h0 <- H0_gen nu <- Nu_gen llw <- Chibint_h_StudentSV(yt = yt, xt = xt, B = B, A = A, h0 = h0, sigma_h = sigma_h, nu = nu, h_mean = h_mean, w_mean = w_mean, t_max = t_max, K = K, R = 100) llw }, mc.cores = numCores) } if (dist == "Skew.Student") { w_mean <- apply(W_mat, MARGIN = 2, mean) lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(dgamma(Sigma_gen, shape = 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + # sum(dnorm(H0_gen, mean = log(prior$sigma), sd = sqrt(4), 
log = T)) + dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T) + log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) + # Truncated mvnfast::dmvn(X = Gamma_gen, mu = prior$gamma_prior, sigma = prior$V_gamma_prior, log = T) sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) gamma <- Gamma_gen sigma_h <- Sigma_gen h0 <- 2 * log(prior$sigma) # h0 <- H0_gen nu <- Nu_gen llw <- Chibint_h_SkewSV(yt = yt, xt = xt, B = B, A = A, h0 = h0, sigma_h = sigma_h, nu = nu, gamma = gamma, h_mean = h_mean, w_mean = w_mean, t_max = t_max, K = K, R = 1000) llw }, mc.cores = numCores) } if (dist == "MT") { w_mean <- matrix(apply(W_mat, MARGIN = 2, mean), nrow = K) lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(dgamma(Sigma_gen, shape = 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + # sum(dnorm(H0_gen, mean = log(prior$sigma), sd = sqrt(4), log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) # Truncated sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma_h <- Sigma_gen h0 <- 2 * log(prior$sigma) # h0 <- H0_gen nu <- Nu_gen llw <- Chibint_h_MSTSV(yt = yt, xt = xt, B = B, A = A, h0 = h0, sigma_h = sigma_h, nu = nu, h_mean = h_mean, w_mean = w_mean, t_max = t_max, K = K, R = 100) llw }, mc.cores = numCores) } if (dist == "MST") { w_mean <- matrix(apply(W_mat, MARGIN = 2, mean), nrow = K) lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(dgamma(Sigma_gen, shape = 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + # sum(dnorm(H0_gen, mean = 
log(prior$sigma), sd = sqrt(4), log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) + # Truncated mvnfast::dmvn(X = Gamma_gen, mu = prior$gamma_prior, sigma = prior$V_gamma_prior, log = T) sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) gamma <- Gamma_gen sigma_h <- Sigma_gen h0 <- 2 * log(prior$sigma) # h0 <- H0_gen nu <- Nu_gen llw <- Chibint_h_MSTSV(yt = yt, xt = xt, B = B, A = A, h0 = h0, sigma_h = sigma_h, nu = nu, gamma = gamma, h_mean = h_mean, w_mean = w_mean, t_max = t_max, K = K, R = 100) llw }, mc.cores = numCores) } if (dist == "OT") { w_mean <- matrix(apply(W_mat, MARGIN = 2, mean), nrow = K) lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(dgamma(Sigma_gen, shape = 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + # sum(dnorm(H0_gen, mean = log(prior$sigma), sd = sqrt(4), log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) # Truncated sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma_h <- Sigma_gen h0 <- 2 * log(prior$sigma) # h0 <- H0_gen nu <- Nu_gen llw <- Chibint_h_OTSV(yt = yt, xt = xt, B = B, A = A, h0 = h0, sigma_h = sigma_h, nu = nu, h_mean = h_mean, w_mean = w_mean, t_max = t_max, K = K, R = 100) llw }, mc.cores = numCores) } if (dist == "OST") { w_mean <- matrix(apply(W_mat, MARGIN = 2, mean), nrow = K) lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(dgamma(Sigma_gen, shape = 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + # 
sum(dnorm(H0_gen, mean = log(prior$sigma), sd = sqrt(4), log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) + # Truncated mvnfast::dmvn(X = Gamma_gen, mu = prior$gamma_prior, sigma = prior$V_gamma_prior, log = T) sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) gamma <- Gamma_gen sigma_h <- Sigma_gen h0 <- 2 * log(prior$sigma) # h0 <- H0_gen nu <- Nu_gen llw <- Chibint_h_OSTSV(yt = yt, xt = xt, B = B, A = A, h0 = h0, sigma_h = sigma_h, nu = nu, gamma = gamma, h_mean = h_mean, w_mean = w_mean, t_max = t_max, K = K, R = 100) llw }, mc.cores = numCores) } } # end if student } else { if (dist == "Gaussian") { lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(invgamma::dinvgamma(Sigma_gen^2, shape = prior$sigma_T0 * 0.5, rate = 0.5 * prior$sigma_S0, log = T)) sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma <- Sigma_gen sum(mvnfast::dmvn(X = (y - t(xt) %*% t(B)), mu = rep(0,K), sigma = t(solve(A) %*% diag(sigma, nrow = K)), log = T, isChol = T)) }, mc.cores = numCores) } else { # Fat tail if (dist == "Student") { lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(invgamma::dinvgamma(Sigma_gen^2, shape = prior$sigma_T0 * 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T) + log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) # Truncated sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma <- Sigma_gen nu <- Nu_gen llw <- 
int_w_TnonSV(y = y, xt = xt, A = A, B = B, sigma = sigma, nu = nu, t_max = t_max, K = K) llw }, mc.cores = numCores) } if (dist == "Skew.Student") { lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(invgamma::dinvgamma(Sigma_gen^2, shape = prior$sigma_T0 * 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) + # Truncated mvnfast::dmvn(X = Gamma_gen, mu = prior$gamma_prior, sigma = prior$V_gamma_prior, log = T) sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) gamma <- Gamma_gen sigma <- Sigma_gen nu <- Nu_gen llw <- int_w_STnonSV(y = y, xt = xt, A = A, B = B, sigma = sigma, nu = nu, gamma = gamma, t_max = t_max, K = K) llw }, mc.cores = numCores) } if (dist == "MT") { lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(invgamma::dinvgamma(Sigma_gen^2, shape = prior$sigma_T0 * 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) # Truncated sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma <- Sigma_gen nu <- Nu_gen llw <- int_w_MSTnonSV(y = y, xt = xt, A = A, B = B, sigma = sigma, nu = nu, t_max = t_max, K = K, R = 100) llw }, mc.cores = numCores) } if (dist == "MST") { lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(invgamma::dinvgamma(Sigma_gen^2, shape = 
prior$sigma_T0 * 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) + # Truncated mvnfast::dmvn(X = Gamma_gen, mu = prior$gamma_prior, sigma = prior$V_gamma_prior, log = T) sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) gamma <- Gamma_gen sigma <- Sigma_gen nu <- Nu_gen llw <- int_w_MSTnonSV(y = y, xt = xt, A = A, B = B, sigma = sigma, nu = nu, gamma = gamma, t_max = t_max, K = K, R = 100) llw }, mc.cores = numCores) } if (dist == "OT") { lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(invgamma::dinvgamma(Sigma_gen^2, shape = prior$sigma_T0 * 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) # Truncated sum_log <- parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) sigma <- Sigma_gen nu <- Nu_gen llw <- int_w_OSTnonSV(y = y, xt = xt, A = A, B = B, sigma = sigma, nu = nu, t_max = t_max, K = K) llw }, mc.cores = numCores) } if (dist == "OST") { lprior <- mvnfast::dmvn(X = B_gen, mu = prior$b_prior, sigma = prior$V_b_prior, log = T) + mvnfast::dmvn(X = A_gen, mu = prior$a_prior, sigma = prior$V_a_prior, log = T) + sum(invgamma::dinvgamma(Sigma_gen^2, shape = prior$sigma_T0 * 0.5, rate = 0.5 * prior$sigma_S0, log = T)) + sum(dgamma(Nu_gen, shape = prior$nu_gam_a, rate = prior$nu_gam_b, log = T)) + K * log( 1/(pgamma(100, shape = 2, rate = 0.1) - pgamma(4, shape = 2, rate = 0.1) ) ) + # Truncated mvnfast::dmvn(X = Gamma_gen, mu = prior$gamma_prior, sigma = prior$V_gamma_prior, log = T) sum_log <- 
parallel::mclapply(1:ndraws, FUN = function(j) { B <- matrix(B_gen, nrow = K) A <- a0toA(A_gen, K) gamma <- Gamma_gen sigma <- Sigma_gen nu <- Nu_gen llw <- int_w_OSTnonSV(y = y, xt = xt, A = A, B = B, sigma = sigma, nu = nu, gamma = gamma, t_max = t_max, K = K) llw }, mc.cores = numCores) } } # end Student } # end SV sum_log <- unlist(sum_log) short_sumlog = matrix(sum_log, nrow = ndraws/20, ncol = 20) max_sumlog = apply(short_sumlog, MARGIN = 2 , FUN = max) bigml = log( apply(exp(short_sumlog-reprow(max_sumlog,ndraws/20)), MARGIN = 2, FUN = mean )) + max_sumlog lc = mean(bigml) # log conditional mlstd = sd(bigml)/sqrt(20) return( list( lprior = as.numeric(lprior), LL = as.numeric(lc + lprior), lc = lc, std = mlstd, sum_log = sum_log)) } #' @export Chibint_h_Gaussian <- function(ytilde, h0, sigma_h, t_max, K, R = 100){ s2 = ytilde^2 max_loop = 100 prior_var_h0 <- 4 Hh = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = rep(1,t_max*K)) - sparseMatrix( i = (K+1):(t_max*K), j = 1:((t_max-1)*K), x = rep(1,(t_max-1)*K), dims = c(t_max*K, t_max*K)) SH = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = c(1./ (sigma_h + prior_var_h0), rep(1./sigma_h, t_max-1))) # Prior for h1 \sim N(2 log sigma, sigmah^2 + 4 ) HinvSH_h = Matrix::t(Hh) %*% SH %*% Hh alph = Matrix::solve(Hh, sparseMatrix(i = 1:K, j = rep(1,K), x = h0, dims = c(t_max*K,1))) e_h = 1 ht = log(s2+0.001) count = 0 while ( e_h> .01 & count < max_loop){ einvhts2 = exp(-ht)*s2 gh = - HinvSH_h %*% (ht-alph) - 0.5 * (1-einvhts2) Gh = - HinvSH_h -.5*sparseMatrix(i = 1:(t_max*K),j = 1:(t_max*K), x = einvhts2) newht = ht - Matrix::solve(Gh,gh) e_h = max(abs(newht-ht)); ht = newht; count = count + 1; } if (count == max_loop){ ht = rep(h0,t_max) einvhts2 = exp(-ht)*s2 Gh = - HinvSH_h -.5*sparseMatrix(i = 1:(t_max*K),j = 1:(t_max*K), x = einvhts2) } Kh = -Gh CKh = Matrix::t(Matrix::chol(Kh)) c_pri = -t_max*K*0.5*log(2*pi) -.5*(t_max-1)*sum(log(sigma_h)) -.5*sum(log(sigma_h + prior_var_h0)) c_IS = -t_max*K*0.5*log(2*pi) 
+ sum(log(Matrix::diag(CKh))) store_llike = rep(0,R) for (i in c(1:R)){ hc = ht + Matrix::solve(Matrix::t(CKh), rnorm(t_max*K)) llike = -t_max*K*0.5*log(2*pi) - 0.5*sum(hc) - 0.5 * sum(ytilde^2 * exp(-hc)) store_llike[i] = as.numeric(llike + (c_pri -.5*Matrix::t(hc-alph)%*%HinvSH_h%*%(hc-alph)) - (c_IS -.5*Matrix::t(hc-ht)%*%Kh%*%(hc-ht))) } maxllike = max(store_llike) llk = log(mean(exp(store_llike-maxllike))) + maxllike return(llk) } #' @export Chibint_h_StudentSV <- function(yt, xt, B, A, h0, sigma_h, nu, gamma = rep(0,K), h_mean, w_mean, t_max, K, R = 100){ y_tilde = A %*% ( (yt - B %*% xt) ) # Student dist with Identity correlation matrix s2 = as.numeric(y_tilde)^2 max_loop = 10000 prior_var_h0 <- 4 Hh = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = rep(1,t_max*K)) - sparseMatrix( i = (K+1):(t_max*K), j = 1:((t_max-1)*K), x = rep(1,(t_max-1)*K), dims = c(t_max*K, t_max*K)) SH = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = c(1./ (sigma_h + prior_var_h0), rep(1./sigma_h, t_max-1))) # Prior for h1 \sim N(2 log sigma, sigmah^2 + 4 ) HinvSH_h = Matrix::t(Hh) %*% SH %*% Hh alph = Matrix::solve(Hh, sparseMatrix(i = 1:K, j = rep(1,K), x = h0, dims = c(t_max*K,1))) # e_h = 1 ht = log(s2+0.001) # count = 0 errh_out = 1; while (errh_out> 0.001){ # E-step s2_mod <- as.numeric(reprow(colSums(matrix(s2 * exp(-ht), nrow = K)), K)) # Multivariate student rowSum(y^2 exp(-h)) Eilam = (nu+K)/(nu + s2_mod) s2Eilam = s2*Eilam # M-step htt = ht errh_in = 1 while (errh_in> 0.001){ eht = exp(htt) sieht = s2Eilam/eht fh = -.5 + .5*sieht Gh = .5*sieht Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) newht = Matrix::solve(Kh, fh+Gh*htt+ HinvSH_h %*% alph) errh_in = max(abs(newht-htt)) htt = newht } errh_out = max(abs(ht-htt)) ht = htt } # compute negative Hessian Gh = (nu+K)/(2*nu) * (s2*exp(ht)) / ((exp(ht) + s2/nu )^2) Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) CKh = Matrix::t(Matrix::chol(Kh)) c_pri = -t_max*K*0.5*log(2*pi) 
-.5*(t_max-1)*sum(log(sigma_h)) -.5*sum(log(sigma_h + prior_var_h0)) c_IS = -t_max*K*0.5*log(2*pi) + sum(log(Matrix::diag(CKh))) store_llike = rep(0,R) c_LL <- lgamma(0.5*(nu+K)) - lgamma(0.5*nu) - 0.5*K*log(nu) - 0.5 * K * log(pi) for (i in c(1:R)){ hc = ht + Matrix::solve(Matrix::t(CKh), rnorm(t_max*K)) hnew <- matrix(hc, nrow = K) # allw <- sum( unlist(lapply(1:t_max, FUN = function(t) { # mvnfast::dmvt(X = y_tilde[,t], # mu = rep(0,K), # sigma = diag(exp(hnew[,t]/2), nrow = K), df = nu, log = T, isChol = T) # })) ) allw <- c_LL * t_max + sum(- 0.5 * colSums(hnew) - 0.5*(nu+K) * log(1 + colSums(y_tilde^2 * exp(-hnew)) / nu )) store_llike[i] = as.numeric(allw + (c_pri -.5*Matrix::t(hc-alph)%*%HinvSH_h%*%(hc-alph)) - (c_IS -.5*Matrix::t(hc-ht)%*%Kh%*%(hc-ht))) } maxllike = max(store_llike) llk = log(mean(exp(store_llike-maxllike))) + maxllike return(llk) } #' @export Chibint_h_SkewSV <- function(yt, xt, B, A, h0, sigma_h, nu, gamma = rep(0,K), h_mean, w_mean, t_max, K, R = 100){ u <- yt - B %*% xt + nu/(nu-2) * gamma y_tilde = A %*% (( u - reprow(w_mean,K)*gamma ) ) # Approximation multivariate Student dist s2 = as.numeric(y_tilde)^2 max_loop = 10000 prior_var_h0 <- 4 Hh = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = rep(1,t_max*K)) - sparseMatrix( i = (K+1):(t_max*K), j = 1:((t_max-1)*K), x = rep(1,(t_max-1)*K), dims = c(t_max*K, t_max*K)) SH = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = c(1./ (sigma_h + prior_var_h0), rep(1./sigma_h, t_max-1))) # Prior for h1 \sim N(2 log sigma, sigmah^2 + 4 ) HinvSH_h = Matrix::t(Hh) %*% SH %*% Hh alph = Matrix::solve(Hh, sparseMatrix(i = 1:K, j = rep(1,K), x = h0, dims = c(t_max*K,1))) # e_h = 1 ht = log(s2+0.001) # count = 0 errh_out = 1; while (errh_out> 0.001){ # E-step s2_mod <- as.numeric(reprow(colSums(matrix(s2 * exp(-ht), nrow = K)), K)) # Multivariate student rowSum(y^2 exp(-h)) Eilam = (nu+K)/(nu + s2_mod) s2Eilam = s2*Eilam # M-step htt = ht errh_in = 1 while (errh_in> 0.001){ eht = exp(htt) sieht = 
s2Eilam/eht fh = -.5 + .5*sieht Gh = .5*sieht Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) newht = Matrix::solve(Kh, fh+Gh*htt+ HinvSH_h %*% alph) errh_in = max(abs(newht-htt)) htt = newht } errh_out = max(abs(ht-htt)) ht = htt } # compute negative Hessian Gh = (nu+K)/(2*nu) * (s2*exp(ht)) / ((exp(ht) + s2/nu )^2) Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) CKh = Matrix::t(Matrix::chol(Kh)) c_pri = -t_max*K*0.5*log(2*pi) -.5*(t_max-1)*sum(log(sigma_h)) -.5*sum(log(sigma_h + prior_var_h0)) c_IS = -t_max*K*0.5*log(2*pi) + sum(log(Matrix::diag(CKh))) store_llike = rep(0,R) #y_til = A %*% ( (yt - B %*% xt) ) # is Skew Student distribution for (i in c(1:R)){ hc = ht + Matrix::solve(Matrix::t(CKh), rnorm(t_max*K)) hnew <- matrix(hc, nrow = K) # if (K == 1){ # allw <-sum( unlist(lapply(1:t_max, FUN = function(t) { # ghyp::dghyp(u[,t], object = ghyp::ghyp(lambda = -0.5*nu, chi = nu, psi = 0, mu = rep(0,K), # sigma = diag(exp(hnew[,t]/2), nrow = K), # gamma = gamma), logvalue = T) # use sigma for univariable # # # })) ) # # } else { # # allw <-sum( unlist(lapply(1:t_max, FUN = function(t) { # ghyp::dghyp(u[,t], object = ghyp::ghyp(lambda = -0.5*nu, chi = nu, psi = 0, mu = rep(0,K), # sigma = solve(A) %*% diag(exp(hnew[,t]), nrow = K) %*% t(solve(A)), # gamma = gamma), logvalue = T) # use Sigma = AA' for multivariable # })) ) # } lambda = -0.5*nu; chi = nu; psi = 0; inv.vola <- exp(-0.5*hnew) xtilde <- A %*% u * inv.vola gtilde <- as.vector(A %*% gamma) * inv.vola Q.vec <- colSums(xtilde^2) skewnorm.vec <- colSums(gtilde^2) skewness.scaled.vec <- colSums(xtilde*gtilde) interm.vec <- sqrt((chi + Q.vec) * skewnorm.vec) lambda.min.d.2 <- lambda - K/2 log.const.top <- -lambda * log(chi) - lambda.min.d.2 * log(skewnorm.vec) log.const.bottom <- K/2 * log(2 * pi) + 0.5 * colSums(hnew) + lgamma(-lambda) - (lambda + 1) * log(2) log.top <- log(besselK(interm.vec, lambda.min.d.2, expon.scaled = TRUE)) - interm.vec + 
skewness.scaled.vec log.bottom <- - lambda.min.d.2 * log(interm.vec) allw <- sum(log.const.top + log.top - log.const.bottom - log.bottom) # # mvnfast::dmvt(X = u[,t], # mu = rep(0,K), # sigma = diag(exp(hnew[,t]/2), nrow = K), df = nu, log = T, isChol = T) # dt(x = u[,t]/diag(exp(hnew[,t]/2), nrow = K), df = nu, log = T) - hnew[,t]/2 store_llike[i] = as.numeric(allw + (c_pri -.5*Matrix::t(hc-alph)%*%HinvSH_h%*%(hc-alph)) - (c_IS -.5*Matrix::t(hc-ht)%*%Kh%*%(hc-ht))) } maxllike = max(store_llike) llk = log(mean(exp(store_llike-maxllike))) + maxllike return(llk) } #' @export Chibint_h_MTSV <- function(yt, xt, B, A, h0, sigma_h, nu, h_mean, w_mean, t_max, K, R = 100){ u <- (yt - B %*% xt) y_tilde <- (A %*% u) # Mimic univariate Student s2 = as.numeric(y_tilde)^2 max_loop = 10000 prior_var_h0 <- 4 Hh = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = rep(1,t_max*K)) - sparseMatrix( i = (K+1):(t_max*K), j = 1:((t_max-1)*K), x = rep(1,(t_max-1)*K), dims = c(t_max*K, t_max*K)) SH = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = c(1./ (sigma_h + prior_var_h0), rep(1./sigma_h, t_max-1))) # Prior for h1 \sim N(2 log sigma, sigmah^2 + 4 ) HinvSH_h = Matrix::t(Hh) %*% SH %*% Hh alph = Matrix::solve(Hh, sparseMatrix(i = 1:K, j = rep(1,K), x = h0, dims = c(t_max*K,1))) ht = log(s2+0.001) nu_vec <- rep(nu, t_max) errh_out = 1; while (errh_out> 0.001){ # E-step s2_mod <- s2 * exp(-ht) # Univariate student (y^2 exp(-h)) Eilam = (nu_vec+1)/(nu_vec + s2_mod) s2Eilam = s2*Eilam # M-step htt = ht errh_in = 1 while (errh_in> 0.001){ eht = exp(htt) sieht = s2Eilam/eht fh = -.5 + .5*sieht Gh = .5*sieht Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) newht = Matrix::solve(Kh, fh+Gh*htt+ HinvSH_h %*% alph) errh_in = max(abs(newht-htt)) htt = newht } errh_out = max(abs(ht-htt)) ht = htt } # compute negative Hessian Gh = (nu_vec+1)/(2*nu_vec) * (s2*exp(ht)) / ((exp(ht) + s2/nu_vec )^2) Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) CKh = 
Matrix::t(Matrix::chol(Kh)) c_pri = -t_max*K*0.5*log(2*pi) -.5*(t_max-1)*sum(log(sigma_h)) -.5*sum(log(sigma_h + prior_var_h0)) c_IS = -t_max*K*0.5*log(2*pi) + sum(log(Matrix::diag(CKh))) store_llw = rep(0,R) for (i in c(1:R)){ hc = ht + Matrix::solve(Matrix::t(CKh), rnorm(t_max*K)) hnew <- matrix(hc, nrow = K) allw <- int_w_MTnonSV(y = t(yt), xt = xt, A = A, B = B, sigma = exp(hnew/2), nu = nu, t_max = t_max, K = K, R = 100) store_llw[i] = as.numeric(allw + (c_pri -.5*Matrix::t(hc-alph)%*%HinvSH_h%*%(hc-alph)) - (c_IS -.5*Matrix::t(hc-ht)%*%Kh%*%(hc-ht))) } maxllike = max(store_llw) llk = log(mean(exp(store_llw-maxllike))) + maxllike return(llk) } #' @export Chibint_h_MSTSV <- function(yt, xt, B, A, h0, sigma_h, nu, gamma = rep(0,K), h_mean, w_mean, t_max, K, R = 100){ u <- (yt - B %*% xt - w_mean * gamma + nu/(nu-2)*gamma) y_tilde <- (A %*% u) # Mimic Univariate Student s2 = as.numeric(y_tilde)^2 max_loop = 10000 prior_var_h0 <- 4 Hh = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = rep(1,t_max*K)) - sparseMatrix( i = (K+1):(t_max*K), j = 1:((t_max-1)*K), x = rep(1,(t_max-1)*K), dims = c(t_max*K, t_max*K)) SH = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = c(1./ (sigma_h + prior_var_h0), rep(1./sigma_h, t_max-1))) # Prior for h1 \sim N(2 log sigma, sigmah^2 + 4 ) HinvSH_h = Matrix::t(Hh) %*% SH %*% Hh alph = Matrix::solve(Hh, sparseMatrix(i = 1:K, j = rep(1,K), x = h0, dims = c(t_max*K,1))) ht = log(s2+0.001) nu_vec <- rep(nu, t_max) errh_out = 1; while (errh_out> 0.001){ # E-step s2_mod <- s2 * exp(-ht) # Univariate student (y^2 exp(-h)) Eilam = (nu_vec+1)/(nu_vec + s2_mod) s2Eilam = s2*Eilam # M-step htt = ht errh_in = 1 while (errh_in> 0.001){ eht = exp(htt) sieht = s2Eilam/eht fh = -.5 + .5*sieht Gh = .5*sieht Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) newht = Matrix::solve(Kh, fh+Gh*htt+ HinvSH_h %*% alph) errh_in = max(abs(newht-htt)) htt = newht } errh_out = max(abs(ht-htt)) ht = htt } # compute negative Hessian Gh = 
(nu_vec+1)/(2*nu_vec) * (s2*exp(ht)) / ((exp(ht) + s2/nu_vec )^2) Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) CKh = Matrix::t(Matrix::chol(Kh)) c_pri = -t_max*K*0.5*log(2*pi) -.5*(t_max-1)*sum(log(sigma_h)) -.5*sum(log(sigma_h + prior_var_h0)) c_IS = -t_max*K*0.5*log(2*pi) + sum(log(Matrix::diag(CKh))) store_llw = rep(0,R) for (i in c(1:R)){ hc = ht + Matrix::solve(Matrix::t(CKh), rnorm(t_max*K)) hnew <- matrix(hc, nrow = K) allw <- int_w_MSTnonSV(y = t(yt), xt = xt, A = A, B = B, sigma = exp(hnew/2), nu = nu, gamma = gamma, t_max = t_max, K = K, R = 100) store_llw[i] = as.numeric(allw + (c_pri -.5*Matrix::t(hc-alph)%*%HinvSH_h%*%(hc-alph)) - (c_IS -.5*Matrix::t(hc-ht)%*%Kh%*%(hc-ht))) } maxllike = max(store_llw) llk = log(mean(exp(store_llw-maxllike))) + maxllike return(llk) } #' @export Chibint_h_OTSV <- function(yt, xt, B, A, h0, sigma_h, nu, h_mean, w_mean, t_max, K, R = 100){ u <- (yt - B %*% xt) y_tilde <- (A %*% u) # Univarite Student s2 = as.numeric(y_tilde)^2 max_loop = 10000 prior_var_h0 <- 4 Hh = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = rep(1,t_max*K)) - sparseMatrix( i = (K+1):(t_max*K), j = 1:((t_max-1)*K), x = rep(1,(t_max-1)*K), dims = c(t_max*K, t_max*K)) SH = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = c(1./ (sigma_h + prior_var_h0), rep(1./sigma_h, t_max-1))) # Prior for h1 \sim N(2 log sigma, sigmah^2 + 4 ) HinvSH_h = Matrix::t(Hh) %*% SH %*% Hh alph = Matrix::solve(Hh, sparseMatrix(i = 1:K, j = rep(1,K), x = h0, dims = c(t_max*K,1))) # e_h = 1 ht = log(s2+0.001) # count = 0 nu_vec <- rep(nu, t_max) errh_out = 1; while (errh_out> 0.001){ # E-step s2_mod <- s2 * exp(-ht) # Univariate student (y^2 exp(-h)) Eilam = (nu_vec+1)/(nu_vec + s2_mod) s2Eilam = s2*Eilam # M-step htt = ht errh_in = 1 while (errh_in> 0.001){ eht = exp(htt) sieht = s2Eilam/eht fh = -.5 + .5*sieht Gh = .5*sieht Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) newht = Matrix::solve(Kh, fh+Gh*htt+ HinvSH_h %*% 
alph) errh_in = max(abs(newht-htt)) htt = newht } errh_out = max(abs(ht-htt)) ht = htt } # compute negative Hessian Gh = (nu_vec+1)/(2*nu_vec) * (s2*exp(ht)) / ((exp(ht) + s2/nu_vec )^2) Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) CKh = Matrix::t(Matrix::chol(Kh)) c_pri = -t_max*K*0.5*log(2*pi) -.5*(t_max-1)*sum(log(sigma_h)) -.5*sum(log(sigma_h + prior_var_h0)) c_IS = -t_max*K*0.5*log(2*pi) + sum(log(Matrix::diag(CKh))) store_llike = rep(0,R) y_tilde = A %*% ( (yt - B %*% xt) ) # is ortho-Student distribution c_LL <- lgamma(0.5*(nu_vec+1)) - lgamma(0.5*nu_vec) - 0.5 * log(nu_vec) - 0.5 * log(pi) for (i in c(1:R)){ hc = ht + Matrix::solve(Matrix::t(CKh), rnorm(t_max*K)) hnew <- matrix(hc, nrow = K) # allw <- rep(0,K) # for (k in c(1:K)){ # allw[k] <- sum(dt(y_tilde[k,]/exp(hnew[k,]/2), df = nu[k], log = T)) - 0.5 * sum(hnew[k,]) # } store_llike[i] = as.numeric( sum(c_LL - 0.5 * (nu_vec+1) * log(1 + y_tilde^2*exp(-hnew) / nu_vec ) - 0.5 * hnew ) + (c_pri -.5*Matrix::t(hc-alph)%*%HinvSH_h%*%(hc-alph)) - (c_IS -.5*Matrix::t(hc-ht)%*%Kh%*%(hc-ht))) } maxllike = max(store_llike) llk = log(mean(exp(store_llike-maxllike))) + maxllike return(llk) } #' @export Chibint_h_OSTSV <- function(yt, xt, B, A, h0, sigma_h, nu, gamma = rep(0,K), h_mean, w_mean, t_max, K, R = 100){ u <- (yt - B %*% xt) y_tilde <- (A %*% u - w_mean * gamma + nu/(nu-2)*gamma) # Mimic univariate Student distribution s2 = as.numeric(y_tilde)^2 max_loop = 10000 prior_var_h0 <- 4 Hh = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = rep(1,t_max*K)) - sparseMatrix( i = (K+1):(t_max*K), j = 1:((t_max-1)*K), x = rep(1,(t_max-1)*K), dims = c(t_max*K, t_max*K)) SH = sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = c(1./ (sigma_h + prior_var_h0), rep(1./sigma_h, t_max-1))) # Prior for h1 \sim N(2 log sigma, sigmah^2 + 4 ) HinvSH_h = Matrix::t(Hh) %*% SH %*% Hh alph = Matrix::solve(Hh, sparseMatrix(i = 1:K, j = rep(1,K), x = h0, dims = c(t_max*K,1))) ht = log(s2+0.001) nu_vec <- 
rep(nu, t_max) errh_out = 1; while (errh_out> 0.001){ # E-step s2_mod <- s2 * exp(-ht) # Univariate student (y^2 exp(-h)) Eilam = (nu_vec+1)/(nu_vec + s2_mod) s2Eilam = s2*Eilam # M-step htt = ht errh_in = 1 while (errh_in> 0.001){ eht = exp(htt) sieht = s2Eilam/eht fh = -.5 + .5*sieht Gh = .5*sieht Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) newht = Matrix::solve(Kh, fh+Gh*htt+ HinvSH_h %*% alph) errh_in = max(abs(newht-htt)) htt = newht } errh_out = max(abs(ht-htt)) ht = htt } # compute negative Hessian Gh = (nu_vec+1)/(2*nu_vec) * (s2*exp(ht)) / ((exp(ht) + s2/nu_vec )^2) Kh = HinvSH_h + sparseMatrix(i = 1:(t_max*K), j = 1:(t_max*K), x = Gh) CKh = Matrix::t(Matrix::chol(Kh)) c_pri = -t_max*K*0.5*log(2*pi) -.5*(t_max-1)*sum(log(sigma_h)) -.5*sum(log(sigma_h + prior_var_h0)) c_IS = -t_max*K*0.5*log(2*pi) + sum(log(Matrix::diag(CKh))) store_llike = rep(0,R) y_tilde = A %*% ( (yt - B %*% xt) ) + nu/(nu-2)*gamma # is ortho-Skew Student distribution for (i in c(1:R)){ hc = ht + Matrix::solve(Matrix::t(CKh), rnorm(t_max*K)) hnew <- matrix(hc, nrow = K) # allw <- matrix(NA, nrow = K, ncol = t_max) # for (k in c(1:K)){ # for (t in c(1:t_max)){ # allw[k,t] <- ghyp::dghyp(y_tilde[k,t], object = ghyp::ghyp(lambda = -0.5*nu[k], chi = nu[k], psi = 0, mu = 0, # gamma = gamma[k], sigma = exp(hnew[k,t]/2)), # logvalue = T) # } # } # Skew Student density lambda = -0.5*nu; chi = nu; psi = 0; sigma = exp(hnew/2); x = as.vector(y_tilde) sigma <- as.vector(sigma) Q <- (x/sigma)^2 d <- 1 n <- length(x) inv.sigma <- 1/sigma^2 skewness.scaled <- x * (inv.sigma * gamma) skewness.norm <- gamma^2 * inv.sigma lambda.min.0.5 <- lambda - 0.5 interm <- sqrt((chi + Q) * as.vector(skewness.norm)) log.const.top <- -lambda * log(chi) - lambda.min.0.5 * log(as.vector(skewness.norm)) log.const.bottom <- 0.5 * log(2 * pi) + log(sigma) + lgamma(-lambda) - (lambda + 1) * log(2) log.top <- log(besselK(interm, lambda.min.0.5, expon.scaled = TRUE)) - interm + skewness.scaled 
log.bottom <- -lambda.min.0.5 * log(interm) allw <- log.const.top + log.top - log.const.bottom - log.bottom store_llike[i] = as.numeric( sum(allw) + (c_pri -.5*Matrix::t(hc-alph)%*%HinvSH_h%*%(hc-alph)) - (c_IS -.5*Matrix::t(hc-ht)%*%Kh%*%(hc-ht))) } maxllike = max(store_llike) llk = log(mean(exp(store_llike-maxllike))) + maxllike return(llk) }
90a0a0a2f061612ba399a4d3715aeddacfb5db49
57eeb8bc61e099a71dee6291de36de05aa3014a2
/run_Analysis.R
c251d95ed9ac4cc1dbd8f16a8879633e8dd6e415
[]
no_license
anatesan/DataCleansingFinalProject
26c9c1c43b7bd8027ff6bc388860d92ec51cdb29
d19877d41cb9e5e73997441be0c204aec3ad8f7b
refs/heads/master
2021-01-17T17:15:21.397316
2016-06-06T06:51:54
2016-06-06T06:51:54
60,500,457
0
0
null
null
null
null
UTF-8
R
false
false
644
r
run_Analysis.R
source("tidyHAR.R")
source("createHARAvgDataset.R")

# Driver for the HAR tidy-data pipeline. All real work happens in the two
# sourced workhorses: tidyHAR() builds the tidy observation data set and
# createHARAvgDataset() derives the per-subject/activity averages from it.
run_Analysis <- function() {
  # Step 1: tidy data set of mean/std-deviation measurements
  print("Creating tidy data set for HAR observations - mean and std deviations...")
  har_data <- tidyHAR()

  # Step 2: averages grouped by subject and activity, derived from step 1
  print("Creating tidy data set for HAR observations - mean grouped by subject and activity ...")
  createHARAvgDataset(harDF = har_data)
}
243c38a4f25dd86fe05ab3749492001ce6fe5753
6d94af002ecbd876df3e22c33ec69d11d985ed37
/man/ExpPower.Rd
752ed9e0aa1a7266ead53dddc2d9898fd81d44e5
[]
no_license
statwonk/reliaR
c82365b33dcab7a66d87f70bc2887608b305c265
f35ce9fbe14e7a0cf89ad4880b2e666879fdc2a0
refs/heads/master
2021-05-14T02:28:29.110165
2010-10-28T00:00:00
2010-10-28T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
2,973
rd
ExpPower.Rd
\name{ExpPower} \alias{ExpPower} \alias{dexp.power} \alias{pexp.power} \alias{qexp.power} \alias{rexp.power} \title{The Exponential Power distribution} \description{ Density, distribution function, quantile function and random generation for the Exponential Power distribution with shape parameter \code{alpha} and scale parameter \code{lambda}.} \usage{ dexp.power(x, alpha, lambda, log = FALSE) pexp.power(q, alpha, lambda, lower.tail = TRUE, log.p = FALSE) qexp.power(p, alpha, lambda, lower.tail = TRUE, log.p = FALSE) rexp.power(n, alpha, lambda) } \arguments{ \item{x,q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{alpha}{shape parameter.} \item{lambda}{scale parameter.} \item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).} \item{lower.tail}{logical; if TRUE (default), probabilities are \eqn{P[X \le x]} otherwise, \eqn{P[X > x]}.} } \details{ The probability density function of exponential power distribution is \deqn{f(x; \alpha, \lambda) = \alpha \lambda^\alpha x^{\alpha - 1} e^{\left({\lambda x}\right)^\alpha} \exp\left\{{1 - e^{\left({\lambda x}\right)^\alpha}}\right\};\;(\alpha, \lambda) > 0, x > 0.}{ f(x; \alpha, \lambda) = \alpha \lambda^\alpha x^{\alpha - 1} exp{(\lambda x)^\alpha} exp{1 - exp{(\lambda x)^\alpha}}; (\alpha, \lambda) > 0, x > 0.} where \eqn{\alpha} and \eqn{\lambda} are the \code{shape} and \code{scale} parameters, respectively. } \value{ \code{dexp.power} gives the density, \code{pexp.power} gives the distribution function, \code{qexp.power} gives the quantile function, and \code{rexp.power} generates random deviates. } \seealso{ \code{\link{.Random.seed}} about random number; \code{\link{sexp.power}} for Exponential Power distribution survival / hazard etc. functions; } \references{ Chen, Z.(1999). 
\emph{Statistical inference about the shape parameter of the exponential power distribution}, Journal: Statistical Papers, Vol. 40(4), 459-468.

Pham, H. and Lai, C.D.(2007).
\emph{On Recent Generalizations of the Weibull Distribution},
IEEE Trans. on Reliability, Vol. 56(3), 454-458.

Smith, R.M. and Bain, L.J.(1975).
\emph{An exponential power life-test distribution},
Communications in Statistics - Simulation and Computation, Vol.4(5), 469 - 481
}
\examples{
## Load data sets
data(sys2)
## Maximum Likelihood(ML) Estimates of alpha & lambda for the data(sys2)
## alpha.est = 0.905868898, lambda.est = 0.001531423
dexp.power(sys2, 0.905868898, 0.001531423, log = FALSE)
pexp.power(sys2, 0.905868898, 0.001531423, lower.tail = TRUE, log.p = FALSE)
qexp.power(0.25, 0.905868898, 0.001531423, lower.tail=TRUE, log.p = FALSE)
rexp.power(30, 0.905868898, 0.001531423)
}
\keyword{distribution}
b0cc12b729a3d44b0bd5ac0877f584ac3caad3aa
35e85a8945e271680d569d91336cd5584c5f2297
/Load_Bgen/LoadBgen.R
ebe8c61840308217b8c5dcf510b3e58d361b3c3a
[]
no_license
zmx21/polyresponse
377f2f15477b85c4754b6b0ffef231ef03de272a
800bbf6c2d7037933fcd12cf44d29f5e408266f0
refs/heads/master
2021-07-13T11:25:30.640388
2020-08-11T09:52:31
2020-08-11T09:52:31
178,338,183
0
0
null
null
null
null
UTF-8
R
false
false
3,979
r
LoadBgen.R
library(dplyr)
library(rbgen)
library(abind)
library(DBI)
library(RSQLite)

# Look up annotation rows for a set of rsids in the SQLite snp-stats database.
#
# Args:
#   sql_path:    directory containing the SQLite database file.
#   sql_db_name: file name of the SQLite database.
#   rsid:        character vector of rsids to look up.
#
# Returns a data frame of annotation rows, de-duplicated on
# (position, chromosome) so a single row remains per genomic position.
LoadSNPAnnotations <- function(sql_path, sql_db_name, rsid){
  curwd <- getwd()
  setwd(sql_path)
  # Restore the working directory and close the connection even if the
  # query errors out (the original left both dangling on error).
  on.exit(setwd(curwd), add = TRUE)
  con <- RSQLite::dbConnect(SQLite(), dbname = sql_db_name)
  on.exit(RSQLite::dbDisconnect(con), add = TRUE)
  # Parameterized query instead of pasting the rsid into the SQL string:
  # same results, but robust to any quoting characters in the input.
  annotation <- do.call(rbind, lapply(rsid, function(x)
    RSQLite::dbGetQuery(con,
                        'SELECT * FROM all_snp_stats WHERE rsid = ?',
                        params = list(x))))
  # Remove duplicated rsid (different mutation at same pos)
  annotation <- dplyr::distinct(annotation, position, chromosome, .keep_all = TRUE)
  annotation
}

# Load genotype dosages for a set of rsids from per-chromosome bgen files.
#
# Args:
#   path:             directory containing the bgen files.
#   bgen_file_prefix: file-name prefix; a '#' character is replaced by the
#                     chromosome number of each SNP.
#   rsIDs:            character vector of rsids to load.
#   chr:              optional vector of chromosome numbers matching rsIDs;
#                     if NULL it is looked up from the annotation database.
#
# Returns a matrix of expected allele dosages (P(g=1) + 2*P(g=2)) with one
# row per rsid and one column per sample.
LoadBgen <- function(path, bgen_file_prefix, rsIDs, chr = NULL){
  curwd <- getwd()
  setwd(path)
  on.exit(setwd(curwd), add = TRUE)  # restore wd even on error
  # If chr information of each snp is not provided, consult the annotation db.
  if(is.null(chr)){
    chr <- LoadSNPAnnotations(sql_path = "~/bsu_scratch/SQL/",
                              sql_db_name = "all_snp_stats.sqlite",
                              rsid = rsIDs) %>%
      dplyr::select(chromosome) %>% t() %>% as.numeric()
  }
  # Replace all wildcards with the determined chromosomes.
  bgen_file_prefix <- sapply(chr, function(x) gsub(pattern = '#', replacement = x, x = bgen_file_prefix))
  # Load each distinct file once, pulling only the rsids that live in it.
  uniqueFiles <- unique(bgen_file_prefix)
  resultList <- vector(mode = 'list', length = length(uniqueFiles))
  for(i in seq_along(uniqueFiles)){
    currentFile <- uniqueFiles[i]
    currentrsIDs <- rsIDs[which(bgen_file_prefix == currentFile)]
    currentAlleleProbMatrix <- rbgen::bgen.load(filename = paste0(currentFile, '.bgen'),
                                                rsids = currentrsIDs)$data
    # Expected dosage from the genotype probability slices.
    resultList[[i]] <- currentAlleleProbMatrix[,,'g=1'] + 2*currentAlleleProbMatrix[,,'g=2']
    if(is.null(rownames(resultList[[i]]))){
      # Single-SNP case: the slice dropped to a vector, so restore the
      # one-row matrix shape and reattach the rsid as its row name.
      resultList[[i]] <- as.matrix(t(resultList[[i]]))
      rownames(resultList[[i]]) <- currentrsIDs
    }
  }
  dosageMatrix <- do.call(rbind, resultList)
  return(dosageMatrix)
}

# Read a .sample companion file for a bgen data set.
# NOTE(review): the file name is built with paste0, so `path` is assumed to
# end with '/' (all existing callers pass a trailing slash) — kept for
# backward compatibility.
LoadSamples <- function(path, sample_file_prefix){
  samplesTbl <- read.table(file = paste0(path, sample_file_prefix, '.sample'),
                           header = FALSE, stringsAsFactors = FALSE)
  colnames(samplesTbl) <- samplesTbl[1,]  # first row holds the column names
  samplesTbl <- samplesTbl[-c(1,2),]      # drop the header row and the type-descriptor row
  return(samplesTbl)
}

# Return all rsids on a chromosome passing optional MAF / imputation-info
# thresholds, using the rsid annotation database.
#
# Args:
#   chr:  chromosome identifier to filter on.
#   MAF:  minimum minor allele frequency (default -Inf = no filter).
#   Info: minimum imputation info score (default -Inf = no filter).
FindAllRSIds <- function(chr, MAF = -Inf, Info = -Inf){
  # Connect to rsid annotation database.
  anno_sql_name <- "all_snp_stats.sqlite"
  path <- '~/bsu_scratch/SQL/'
  curwd <- getwd()
  setwd(path)
  on.exit(setwd(curwd), add = TRUE)  # restore wd even on error
  anno_con <- RSQLite::dbConnect(SQLite(), dbname = anno_sql_name)
  on.exit(RSQLite::dbDisconnect(anno_con), add = TRUE)
  anno_db <- dplyr::tbl(anno_con, 'all_snp_stats')
  # Get all rsids meeting criteria (filter is translated to SQL by dbplyr).
  rsIDs <- anno_db %>%
    dplyr::filter(chromosome == chr &
                    as.numeric(minor_allele_frequency) > MAF &
                    as.numeric(info) > Info) %>%
    dplyr::select(rsid) %>%
    collect()
  return(rsIDs)
}

# test <- LoadSNPAnnotations(sql_path = "~/bsu_scratch/SQL/",sql_db_name = "all_rsid.sqlite",rsid = 'rs193250821')
# test <- LoadBgen('~/bsu_scratch/UKB_Data/','ukb_imp_chr#_HRConly',c('rs193250821','rs537869321'))
8868fc28b6fb9eb831d3e36072b2d7f494f49e7e
e6d1b0df8392757469026cd5c3b74a9b65c7a966
/ensemble2.R
5139c1a2658387c7644df78d466f31b19d86da67
[]
no_license
miguel-conde/Kaggle_SCS
1dd8e6dd11bfc7ac240bf349b51a88214a341bb8
7af5f6504cb4d13decf6c55ff410b0c82c39af1d
refs/heads/master
2021-01-10T06:30:57.408488
2016-04-14T07:53:30
2016-04-14T07:53:30
55,915,625
0
0
null
null
null
null
UTF-8
R
false
false
7,282
r
ensemble2.R
# ensemble2.R — Kaggle Santander Customer Satisfaction ensembling script.
# Pipeline: split the tidy training data three ways, fit a list of caret
# models on set 1, compare/select them on set 2, build ensembles whose
# meta-features are the submodel probabilities, validate the ensembles on
# set 3, and finally write a submission file.
# NOTE(review): relies on objects defined in common.R (mySeed, numCVs, maxBP,
# modelsListPerformance, getLowCorrModels, listProbs, ...) — confirm there.

# Initiate ----------------------------------------------------------------
source("common.R")

# Start Parallel Processing
cl <- makeCluster(max(detectCores()-1,1))
registerDoParallel(cl)

# Data --------------------------------------------------------------------
# Load tidy_train and tidy_test
load(file = file.path(".", "Data", "tidySets.Rda"))

set.seed(mySeed)

# Divide tidy_train into 3 stratified sets:
#   set 1 -> train the base models; set 2 -> select/stack them;
#   set 3 -> validate the ensembles.
trainIdx_1 <- createDataPartition(tidy_train$TARGET, p = 1/3, list = FALSE)
trainingSet_1 <- tidy_train[trainIdx_1, ]
trainingSet_23 <- tidy_train[-trainIdx_1, ]

trainIdx_2 <- createDataPartition(trainingSet_23$TARGET, p = 1/2, list = FALSE)
trainingSet_2 <- trainingSet_23[trainIdx_2, ]
trainingSet_3 <- trainingSet_23[-trainIdx_2, ]

# caret needs factor outcomes with valid R names for class probabilities
trainingClass_1 <- factor(trainingSet_1$TARGET, labels = c("SATISFIED", "UNSATISFIED"))
trainingClass_2 <- factor(trainingSet_2$TARGET, labels = c("SATISFIED", "UNSATISFIED"))
trainingClass_3 <- factor(trainingSet_3$TARGET, labels = c("SATISFIED", "UNSATISFIED"))

# Keep predictors only (drop the id and the outcome)
predLabels <- setdiff(names(tidy_train), c("ID", "TARGET"))
trainingSet_1 <- trainingSet_1[,predLabels]
trainingSet_2 <- trainingSet_2[,predLabels]
trainingSet_3 <- trainingSet_3[,predLabels]

save(trainingSet_1, trainingSet_2, trainingSet_3,
     trainingClass_1, trainingClass_2, trainingClass_3,
     file = file.path(".", "Data", "trainingSets_123.Rda"))

# Don't need this any more
rm (tidy_train)

# Build Single Models List ------------------------------------------------
# We'll build a bunch of single models training them on trainingSet_1.
# Later we'll validate them twofold: on CV basis and using trainingSet_2 as
# validation set

classifModels <- c("gbm", "glm", "LogitBoost")
# classifModels <- c("gbm", "ranger", "svmRadial", "xgbLinear", "AdaBag",
#                    "AdaBoost.M1", "glm", "glmboost", "xgbTree", "LogitBoost")

# Shared resampling control: fixed folds so every model sees the same CV splits
trCtrl <- trainControl(method = "cv", number = numCVs,
                       # repeats = 3,
                       verboseIter = TRUE,
                       savePredictions = "final",
                       classProbs = TRUE,
                       summaryFunction = twoClassSummary,
                       allowParallel = TRUE,
                       index = createFolds(trainingClass_1, numCVs))

tuneModels <- list(nn = caretModelSpec(method = "nnet", trace = FALSE))

set.seed(mySeed)
modelsList <- caretList(# Arguments to pass to train() as '...' ###
                        x = trainingSet_1,
                        y = trainingClass_1,
                        metric = "ROC",
                        ###########################################
                        # caretList() specific arguments ##########
                        trControl = trCtrl,
                        methodList = classifModels,
                        tuneList = tuneModels
                        ###########################################
)

save(modelsList, file = file.path(".", "Models", "modelsList2.Rda"))

# Compare Models ----------------------------------------------------------
## ON CV basis
resamps <- resamples(modelsList)
summary(resamps)
modelsROC_CV <- summary(resamps)$statistics$ROC[,"Mean"]

xyplot(resamps, what = "BlandAltman")
dotplot(resamps)
densityplot(resamps)
bwplot(resamps)
splom(resamps)
parallelplot(resamps)

diffs <- diff(resamps)
summary(diffs)

## ON validation set trainingSet_2
modelsPerfVal <- modelsListPerformance(modelsList, trainingSet_2, trainingClass_2,
                                       lev = c("SATISFIED", "UNSATISFIED"))
modelsROC_Val <- modelsPerfVal[,"ROC"]

## CV and Validation Set Methods - summary
summaryModelsPerf <- modelsListPerf_CV_VAL(modelsList, trainingSet_2, trainingClass_2,
                                           lev = c("SATISFIED", "UNSATISFIED"))
summaryModelsPerf

# Choosing models ---------------------------------------------------------
## Best performers (based on CV)
numModels <- min(length(modelsList), maxBP)
bestModelsOrder <- sort(resamps, decreasing = TRUE, metric = "ROC")[1:numModels]
bestModelsOrder
best_modelsLists <- modelsList[names(modelsList[bestModelsOrder])]
class(best_modelsLists) <- "caretList"

## Low correlated models (based on ?? Could be both on training and on
# validation sets)
lowCorrModels <- getLowCorrModels(modelsList, trainingSet_2, "UNSATISFIED",
                                  cutoff = 0.75, printTable = TRUE)
lowCorrModels
LC_modelsLists <- modelsList[names(modelsList[lowCorrModels])]
class(LC_modelsLists) <- "caretList"

## From now on i don't need modelsList

# Ensemble 1: a caretList ensemble ----------------------------------------

# Ensemble 2: a caretList stack -------------------------------------------

# Ensemble 3: a tailor made ensemble averaging probs ----------------------
predProbs_2 <- avgListProbs(LC_modelsLists, trainingSet_2, "UNSATISFIED")

# Ensemble 4: tailor made ensembles fitting models from submodels --------
# We'll use as predictors the probs predicted by single low correlated
# submodels on trainingSet_2. The ensemble models obtained will be
# Cross Validated on trainingSet_2 and validated on trainingSet_3.

set.seed(mySeed)
predictors_2 <- listProbs(LC_modelsLists, trainingSet_2, "UNSATISFIED")

modelsList2 <- caretList(# Arguments to pass to train() as '...' ###
                         x = predictors_2,
                         y = trainingClass_2,
                         preProc = c("BoxCox"),
                         metric = "ROC",
                         ###########################################
                         # caretList() specific arguments ##########
                         trControl = trCtrl,
                         methodList = classifModels,
                         tuneList = tuneModels
                         ###########################################
)

save(modelsList2, file = file.path(".", "Models", "modelsList22.Rda"))

# Compare Models
## ON CV basis
# NOTE(review): `resamps` is reassigned here, shadowing the base-model
# resamples above — intentional reuse, but confusing for later inspection.
resamps <- resamples(modelsList2)
summary(resamps)
modelsROC_CV <- summary(resamps)$statistics$ROC[,"Mean"]

xyplot(resamps, what = "BlandAltman")
dotplot(resamps)
densityplot(resamps)
bwplot(resamps)
splom(resamps)
parallelplot(resamps)

diffs <- diff(resamps)
summary(diffs)

listEnsembleCV_AUC(LC_modelsLists, modelsList2)

## ON validation set trainingSet_3
predictors_3 <- listProbs(LC_modelsLists, trainingSet_3, "UNSATISFIED")
modelsPerfVal <- modelsListPerformance(modelsList2, predictors_3, trainingClass_3,
                                       lev = c("SATISFIED", "UNSATISFIED"))
modelsROC_Val <- modelsPerfVal[,"ROC"]

listEnsembleAUC(LC_modelsLists, modelsList2, trainingSet_3, "UNSATISFIED", trainingClass_3)

## CV and Validation Set Methods - summary
summaryModelsPerf <- modelsListPerf_CV_VAL(modelsList2, predictors_3, trainingClass_3,
                                           lev = c("SATISFIED", "UNSATISFIED"))
summaryModelsPerf

# Making a submission
makeAnEnsembleSubmission(LC_modelsLists, modelsList2, "nn", tidy_test, "ens1NN.csv")
# 0.677362

# Clean and Exit ----------------------------------------------------------
# Stop Parallel Processing
stopCluster(cl)
8cccc122ade4a52486eaeb4397c34945819458d0
085d56e1c39afa030dd000512a7385c70bb3570e
/3_Script/1_Code/03_Processing_Data/batch_ItemBaseData.R
8e1eb14167fc077625b408013bbbe9306b9c0209
[]
no_license
datvuong/OMS_Data_Update
86fb0e48a12482b28d80c401b9e68ad36a7d7ae2
178c4695322d8a1dbdb5c9f2f5d9ec83fada5d62
refs/heads/master
2021-01-16T23:15:17.800460
2016-07-06T05:05:03
2016-07-06T05:05:03
62,691,886
0
0
null
null
null
null
UTF-8
R
false
false
6,438
r
batch_ItemBaseData.R
# batch_ItemBaseData.R — build the sales-order-item base data set for one
# venture. Invoked in batch mode as: Rscript batch_ItemBaseData.R <ventureShort>
# Pipeline: pull OMS tables (via GetXMonthData), attach status timestamps,
# SKU, package, seller, order and shipment-provider attributes through a
# chain of joins, then save the consolidated table. Large intermediates are
# rm()'d + gc()'d aggressively to keep memory bounded.
args <- commandArgs(trailingOnly = TRUE)
ventureShort <- as.character(args[1])

source("3_Script/1_Code/00_init.R")
source("3_Script/1_Code/01_Loading/00_LoadingData_Init.R")
source("3_Script/1_Code/03_Processing_Data/UpdateSOITimeStamp.R")
source("3_Script/1_Code/03_Processing_Data/GetSixMonthData.R")
source("3_Script/1_Code/03_Processing_Data/UpdateSOIBaseData.R")
# Venture-specific address-region resolver, e.g. <ventureShort>_GetSOAddressRegion.R
source(file.path("3_Script/1_Code/03_Processing_Data", ventureShort,
                 paste0(ventureShort, "_GetSOAddressRegion.R")))
source("3_Script/1_Code/03_Processing_Data/BuildPackageData.R")

load(file.path(ventureDataFolder, "addressRegionData.RData"))
load(file.path(ventureDataFolder, "oms_shipment_provider.RData"))

# futile.logger setup: log to console and to a per-run file
reportName <- paste0(ventureShort, "_Build_Item_Base_Data")
flog.appender(appender.tee(file.path("3_Script/2_Log",
                                     paste0(reportName, "_", dateReport, ".csv"))),
              name = reportName)
layout <- layout.format(paste0(timeReport, '|', reportName, '|[~l]|[~t]|[Item_Base_Data]|~m'))
flog.layout(layout, name = reportName)

salesOrderAddress <- GetXMonthData(ventureShort = ventureShort, dbName = "oms_live",
                                   tableName = "ims_sales_order_address",
                                   xMonth = "all",
                                   columnsList = c("id_sales_order_address",
                                                   "ward",
                                                   "city",
                                                   "postcode",
                                                   "fk_customer_address_region"),
                                   keyField = "id_sales_order_address")
SOAddressRegion <- GetSOAddressRegion(salesOrderAddress, addressRegionData)
rm(salesOrderAddress)
gc()

flog.info("Consolidate OMS Data", name = reportName)
##### SOI History Timestamp #####
soiHistoryData <- GetXMonthData(ventureShort = ventureShort, dbName = "oms_live",
                                tableName = "ims_sales_order_item_status_history",
                                xMonth = 6,
                                keyField = "id_sales_order_item_status_history")
soiTimestampData <- UpdateSOITimeStamp(soiHistoryData)
# Keep only items that have a ready-to-ship (rts) timestamp
soiTimestampData %<>% filter(!is.na(rts))
rm(soiHistoryData)
gc()

##### SOI Based Data ####
salesOrderItem <- GetXMonthData(ventureShort = ventureShort, dbName = "oms_live",
                                tableName = "ims_sales_order_item",
                                xMonth = 12,
                                columnsList = c("id_sales_order_item",
                                                "bob_id_sales_order_item",
                                                "fk_sales_order",
                                                "fk_sales_order_item_status",
                                                "fk_marketplace_merchant",
                                                "unit_price",
                                                "paid_price",
                                                "sku",
                                                "amount_paid",
                                                "shipping_fee",
                                                "shipping_discount_amount",
                                                "shipping_surcharge"),
                                keyField = "id_sales_order_item")
# inner join: only items with a usable timestamp survive
salesOrderItemBased <- inner_join(salesOrderItem, soiTimestampData,
                                  by = c("id_sales_order_item" = "fk_sales_order_item"),
                                  copy = TRUE)
rm(salesOrderItem, soiTimestampData)
gc()

##### SKU #####
load(file.path(ventureDataFolder, "skuData.RData"))
salesOrderItemBased <- left_join(salesOrderItemBased, skuData, by = "sku", copy = TRUE)
rm(skuData)
gc()

salesOrderItemBased %<>% mutate(volumetricDimension = package_length * package_width * package_height)

##### Package Item #####
packageItemData <- GetXMonthData(ventureShort = ventureShort, dbName = "oms_live",
                                 tableName = "oms_package_item",
                                 xMonth = 12,
                                 columnsList = c("fk_package", "fk_sales_order_item"),
                                 keyField = "id_package_item")
salesOrderItemBased <- left_join(salesOrderItemBased, packageItemData,
                                 by = c("id_sales_order_item" = "fk_sales_order_item"),
                                 copy = TRUE)
rm(packageItemData)
gc()

##### Package Dispatching #####
packageDispatchingData <- GetXMonthData(ventureShort = ventureShort, dbName = "oms_live",
                                        tableName = "oms_package_dispatching",
                                        xMonth = 12,
                                        columnsList = c("id_package_dispatching",
                                                        "fk_package",
                                                        "fk_shipment_provider",
                                                        "tracking_number"),
                                        keyField = "id_package_dispatching")
# Fixed: `by` must be built with c(); the previous `("fk_package" = "fk_package")`
# only worked by accident because both key names are identical.
salesOrderItemBased <- left_join(salesOrderItemBased, packageDispatchingData,
                                 by = c("fk_package" = "fk_package"),
                                 copy = TRUE)
rm(packageDispatchingData)
gc()

##### Seller Data ####
load(file.path(ventureDataFolder, "SCSellerData.RData"))
salesOrderItemBased <- left_join(salesOrderItemBased,
                                 SCSellerData %>% filter(!is.na(src_id)),
                                 by = c("fk_marketplace_merchant" = "src_id"),
                                 copy = TRUE)
rm(SCSellerData)
gc()

##### Sales Order Data ####
salesOrder <- GetXMonthData(ventureShort = ventureShort, dbName = "oms_live",
                            tableName = "ims_sales_order",
                            xMonth = 12,
                            columnsList = c("id_sales_order",
                                            "order_nr",
                                            "payment_method",
                                            "fk_sales_order_address_shipping"),
                            keyField = "id_sales_order")
salesOrderItemBased <- left_join(salesOrderItemBased, salesOrder,
                                 by = c("fk_sales_order" = "id_sales_order"),
                                 copy = TRUE)
rm(salesOrder)
gc()

##### Package Number Data #####
packageNumberData <- GetXMonthData(ventureShort = ventureShort, dbName = "oms_live",
                                   tableName = "oms_package",
                                   xMonth = 12,
                                   columnsList = c("id_package",
                                                   "package_number",
                                                   "isdeleted"),
                                   keyField = "id_package")
salesOrderItemBased <- left_join(salesOrderItemBased, packageNumberData,
                                 by = c("fk_package" = "id_package"),
                                 copy = TRUE)
rm(packageNumberData)
gc()

##### Shipment Provider #####
salesOrderItemBased <- left_join(salesOrderItemBased,
                                 oms_shipment_provider %>%
                                   select(id_shipment_provider, shipment_provider_name),
                                 by = c("fk_shipment_provider" = "id_shipment_provider"),
                                 copy = TRUE)

# Drop working columns that were only needed to build derived fields / joins
salesOrderItemBased %<>% select(-c(package_length, package_width, package_height,
                                   fk_package, id_package_dispatching,
                                   fk_shipment_provider, fk_sales_order_address_shipping))

save(salesOrderItemBased,
     file = file.path(ventureDataFolder, "salesOrderItemBased.RData"),
     compress = TRUE)

flog.info("Done!!!", name = reportName)
rm(list = ls())
33a97838b7aa086fd7b357f496c75b3c119787ba
3b6fd9148a6c86cd05fc3b9db05cee8b7cbb96fa
/R/outside.R
b5a157e6d8066b90e140029f90d45f2aacdf7f1e
[ "MIT" ]
permissive
markusdumke/fluid
604faa7023539c843d44ff6d970663e507c18068
e7c4c566495e9f76d6b4bacec475ab8a1bb04686
refs/heads/master
2020-04-18T09:41:31.133937
2019-09-23T18:48:40
2019-09-23T18:48:40
167,443,315
0
0
null
null
null
null
UTF-8
R
false
false
543
r
outside.R
#' Inside / Outside
#'
#' Infix range tests built on data.table's `%between%`: `x %inside% y`
#' reports whether each element of `x` lies within the closed interval
#' described by `y`, and `x %outside% y` is its negation.
#'
#' @param .x Vector to test.
#' @param .y Vector or list of length 2.
#'   The first element is interpreted as the lower bound,
#'   the second element as the upper limit.
#'
#' @return A logical vector of the same length as `.x`.
#'
#' @export
#' @import data.table
#' @rdname outside
#'
#' @examples
#' 1 %outside% c(2, 5)
#' 1 %inside% c(2, 5)
`%outside%` <- function(.x, .y) {
  in_range <- .x %between% .y
  !in_range
}

#' @rdname outside
#' @export
#' @import data.table
`%inside%` <- function(.x, .y) {
  .x %between% .y
}
76ad120514e5e550aae4d296f3508ca81f068d10
2b4ad1be75d23c3081ab01797822ecf49cab7488
/plot2.R
73e375d8cc45ebba9611b93c79450edd3671727f
[]
no_license
galc/ExData_Plotting1
44cb8c24f06daec488e3a2a0ad57fc2a1a19cbbe
6a3ce21f9f022b567a0541a39f8e7ebe4897c03a
refs/heads/master
2021-01-22T00:46:13.954241
2014-05-10T17:28:49
2014-05-10T17:28:49
null
0
0
null
null
null
null
UTF-8
R
false
false
942
r
plot2.R
## plot2.R - Timeline Plot of Global_active_power
##
## Reads the UCI household power consumption data, keeps the two days
## 2007-02-01 and 2007-02-02, and writes a 480x480 line plot of
## Global_active_power over time to plot2.png.

## Read Data ('?' marks missing values in this file; first two columns are
## Date and Time strings, the remaining seven are numeric measurements)
data <- read.table('Data/household_power_consumption.txt', sep=';', header=TRUE,
                   colClasses = c('character', 'character', 'numeric', 'numeric',
                                  'numeric', 'numeric', 'numeric', 'numeric',
                                  'numeric'),
                   na.strings='?')

## Convert dates and subset on two days in February 2007.
## as.POSIXct() is applied because strptime() returns POSIXlt, which is a
## list-like structure and a poor fit for a data-frame column.
data$DateTime <- as.POSIXct(strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S"))
data <- subset(data, as.Date(DateTime) >= as.Date("2007-02-01") &
                     as.Date(DateTime) <= as.Date("2007-02-02"))

## Create a file named plot2.png with dimension of 480x480
png("plot2.png", height=480, width=480)

## Draw the timeline in a single call; type = "l" replaces the original
## empty plot() (pch = NA) followed by lines() with identical output.
plot(data$DateTime, data$Global_active_power, type = "l",
     xlab="", ylab="Global Active Power (kilowatts)")

# Close PNG file
dev.off()
91c1756ad208b7149389295e2123e1ea20e97665
146d7de2db9f1bffeaa24e7f01200c38ea2e9d75
/tests/testthat/test_wtr.layer.R
199d32630b410e2da4175ffa25dcbf2adffeafc9
[]
no_license
lawinslow/rLakeAnalyzer
13e9b11da4b44042fea0dd88210d7e47a9557c5c
69749f58a665358dac8263a2d08d92f9100f1ff3
refs/heads/master
2021-01-21T03:13:08.229100
2018-09-13T19:49:51
2018-09-13T19:49:51
25,272,757
1
0
null
null
null
null
UTF-8
R
false
false
1,295
r
test_wtr.layer.R
# testthat suite for rLakeAnalyzer::wtr.layer (water-column layer detection).
# Uses the packaged `latesummer` profile (depth/temper columns) as fixture.
context("Testing wtr.layer")
data("latesummer")

test_that("Ensure that unconstrained and specified layer approach result in the same answer", {
  # Fitting with nseg fixed to the value the unconstrained fit selected must
  # reproduce the unconstrained result (first four output columns compared).
  expect_equal(
    wtr.layer(depth = latesummer$depth, measure = latesummer$temper)[, 1:4],
    wtr.layer(
      depth = latesummer$depth, measure = latesummer$temper,
      nseg = wtr.layer(depth = latesummer$depth, measure = latesummer$temper)$nseg
    )[, 1:4]
  )
})

test_that("NA's are returned when profile has less than 30 readings", {
  # Data with less than 10 readings
  # NOTE(review): the comment above says "less than 10" but z has 25 values;
  # the test title's threshold of 30 appears to be the intended one — confirm.
  z <- 1:25
  sigmavar <- rnorm(z)
  # Short profiles should warn rather than silently fit
  expect_warning(wtr.layer(depth = z, measure = sigmavar))
  expect_warning(wtr.layer(data = latesummer[1:20, ], depth = depth, measure = temper))
})

test_that("Omitting the data argument results in a valid data.frame with constrained and uncontrained scenarios", {
  # Vector interface: both unconstrained and nseg-constrained calls run cleanly
  expect_silent(wtr.layer(depth = latesummer$depth, measure = latesummer$temper))
  expect_silent(wtr.layer(depth = latesummer$depth, measure = latesummer$temper, nseg = 5))
})

test_that("Including the data argument results in a valid data.frame with constrained and uncontrained scenarios", {
  # Data-frame interface with unquoted column names
  expect_silent(wtr.layer(data = latesummer, depth = depth, measure = temper))
  expect_silent(wtr.layer(data = latesummer, depth = depth, measure = temper, nseg = 5))
})
e3d6a25cf6b10053e7c8d91917793b3dd5ff79ff
c8ce461387ababba10c5116dd047325d873a17a4
/diversification_analyses/Castor/louca_pennell_stats_truncated.R
43303677940ecc7bc86bb0cfc7d51851006f7b10
[]
no_license
elsemikk/tyranni
8ff58e6c39e2f402766d13fda413b3504023ec78
859f162e55124c72603f25b21172ac0b63188203
refs/heads/master
2023-05-01T05:57:18.018181
2021-01-29T16:15:03
2021-01-29T16:15:03
null
0
0
null
null
null
null
UTF-8
R
false
false
1,859
r
louca_pennell_stats_truncated.R
# louca_pennell_stats_truncated.R — fit a pulled diversification rate (PDR)
# model (castor::fit_hbd_pdr_on_grid) to the Tyranni timetree, truncated to
# the most recent 35 My, and plot PDR through time with a 50% CI band.
setwd("tyranni/diversification_analyses/Castor")
getwd()

library(ape)
library(phytools)
library(castor)

# Get mean crown age of families
sample.data <- read.csv("../../Species_name_map_uids_Jetz.csv", header=TRUE)
family.map <- sample.data$howardmoore.family
names(family.map) <- sample.data$tipnamecodes

tree <- read.tree('tyranni/species_trees/final_timetrees/T400F_AOS_HowardMoore.tre')

# Fit pulled diversification rate to whole tree (do 100x for final run)
#pdr <- fit_hbd_pdr_on_grid(tree, Ntrials=1)
#pdr
# PDR
#pdr$fitted_PDR
# Present-day speciation rate
#pdr$fitted_rholambda0

# Fit PDR using a grid of times
# NOTE(review): the grid is truncated at 35 My rather than the full tree
# height (commented alternatives kept below) — the "truncated" in the
# file name presumably refers to this choice.
tree.height <- max(branching.times(tree))
#ages <- seq(0, tree.height, length.out=25)
ages <- seq(0, 35, length.out=25)
#pdr.grid <- fit_hbd_pdr_on_grid(tree, oldest_age=tree.height, age_grid=ages, Ntrials=1)
pdr.grid <- fit_hbd_pdr_on_grid(tree, oldest_age=35, age_grid=ages, Ntrials=10,
                                Nbootstraps=100, min_PDR=-100, max_PDR=+100,
                                Nthreads=2, max_model_runtime=1)
save(pdr.grid, file="pdr_fit_truncated.Rdata")

# Smooth the fitted PDR (and its 50% CI bounds) onto a dense time grid
x.target <- seq(0, 35, length.out=1000)
spline_vals <- evaluate_spline(ages, pdr.grid$fitted_PDR, splines_degree=3, Xtarget=x.target)
spline_vals.lower <- evaluate_spline(ages, pdr.grid$CI50lower$PDR, splines_degree=3, Xtarget=x.target)
spline_vals.upper <- evaluate_spline(ages, pdr.grid$CI50upper$PDR, splines_degree=3, Xtarget=x.target)

pdf("PDR_through_time_truncated_SE.pdf")
plot(1, type="n", xlim=c(0,30), ylim=c(-20,20), ylab="Pulled Diversification Rate", xlab="Time (My)")
# shaded 50% CI band, then the fitted PDR curve on top
polygon(c(x.target, rev(x.target)), c(spline_vals.upper, rev(spline_vals.lower)), col="lightgray", border=NA)
lines(x=x.target, y=spline_vals, xlim=c(0,30))
# "A relatively constant pdr over time would be indicative of constant - or only slowly changing - speciation and extinction rates"
dev.off()

#load("pdr_fit_truncated.Rdata")
8b94be5fbb2e51db451edc4c55942cb3f555b20e
aebca85114388224fc24481fdfce04be048110db
/man/getModels_Caret.Rd
e1ac41f7bdcef7698bdc42601e18800905201871
[]
no_license
mssm-msf-2019/BiostatsALL
4f79f2fbb823db8a0cbe60172b3dcd54eac58539
0623dd13db576b2501783b31d08ae43340f2080b
refs/heads/master
2020-05-25T15:16:01.949307
2019-05-21T18:11:12
2019-05-21T18:11:12
187,864,190
2
0
null
null
null
null
UTF-8
R
false
false
1,261
rd
getModels_Caret.Rd
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/getModels_Caret.R, R/getModels_Caret_JCR.R
\name{getModels_Caret}
\alias{getModels_Caret}
\title{A function that tells you what are the most N dissimilar models available in caret for a certain problem (tag)}
\usage{
getModels_Caret(method = "Regression", N = 5, include = "")
}
\arguments{
\item{method}{is the kind of model Classification / Regression}

\item{N}{the number of dissimilar models to be extracted}

\item{include}{a character to separate the names of the columns}

\item{method:}{type of methods. Options are Regression, Classification and Dual use}

\item{N:}{number of methods to select}

\item{include:}{name of a method to be included in the set}
}
\description{
It selects the N most dissimilar models among those in caret suite by maximizing the Jaccard dissimilarity between sets of models.

this function extracts a list of models in caret that are dissimilar
}
\examples{
getModels_Caret(method = "Regression", N = 5, include = "cubist")
getModels_Caret('Regression', 5, '')
}
\keyword{caret}
\keyword{classification}
\keyword{regression}
7be4f4bc6bb8151a0caad3d14d35e98bdc90e7bb
448e3fbc77e3290c1c4196c2258b3d444c8c0d45
/R/facets-procreads.R
e9b6393352d4cdd07ca2904bf455cd180d7586a0
[]
no_license
mskcc/facets
1fe15ddabe7b157c4ffdeddb048797c8a1f4f83b
f3c93ee65b09fc57aaed22a2eb9faa05586a9dc0
refs/heads/master
2023-06-09T01:38:44.938368
2021-10-12T12:04:39
2021-10-12T12:04:39
35,636,429
135
71
null
2023-06-04T17:04:27
2015-05-14T20:54:49
R
UTF-8
R
false
false
4,616
r
facets-procreads.R
# heterozygous and keep flags of the SNPs
#
# procSnps: filter and normalize a raw read-count matrix.
# Expects columns Chromosome, Position, NOR.DP/NOR.RD (normal depth / ref
# reads) and TUM.DP/TUM.RD (tumor). Returns a data frame with columns
# chrom, maploc, rCountT, rCountN, vafT, vafN, het, keep where vaf* are
# variant allele fractions (1 - refReads/depth), het flags heterozygous
# SNPs and keep marks the SNPs retained after neighborhood thinning.
procSnps <- function(rcmat, ndepth=35, het.thresh=0.25, snp.nbhd=250, nX=23, unmatched=FALSE, ndepthmax=1000) {
    # keep only chromsomes 1-22 & X for humans and 1-19, X for mice
    # for other genomes (gbuild = udef) nX is number of autosomes plus 1
    chromlevels <- c(1:(nX-1),"X")
    chr.keep <- rcmat$Chromosome %in% chromlevels
    # keep only snps with normal read depth between ndepth and ndepthmax
    depthN.keep <- (rcmat$NOR.DP >= ndepth) & (rcmat$NOR.DP < ndepthmax)
    # reduce the data frame to these snps
    rcmat <- rcmat[chr.keep & depthN.keep,]
    # output data frame; will use rcmat to return output instead of a new one
    row.names(rcmat) <- NULL # reset row names so that it's 1:nsnps
    # convert ref-read counts into variant allele fractions in place
    rcmat$NOR.RD = 1 - rcmat$NOR.RD/rcmat$NOR.DP
    rcmat$TUM.RD = 1 - rcmat$TUM.RD/rcmat$TUM.DP
    names(rcmat) = c("chrom", "maploc", "rCountN", "vafN", "rCountT", "vafT")
    # make chromosome ordered and numeric (X becomes nX)
    rcmat$chrom <- as.numeric(ordered(rcmat$chrom, levels = chromlevels))
    # call a snp heterozygous if min(vafN, 1-vafN) > het.thresh
    if (unmatched) {
        # without a matched normal, call het from the tumor VAF with a
        # stricter default threshold and a minimum tumor depth of 50
        if (het.thresh == 0.25) het.thresh <- 0.1
        rcmat$het <- 1 * (pmin(rcmat$vafT, 1 - rcmat$vafT) > het.thresh & rcmat$rCountT >= 50)
    } else {
        rcmat$het <- 1 * (pmin(rcmat$vafN, 1 - rcmat$vafN) > het.thresh)
    }
    # scan maploc for snps that are close to one another (within snp.nbhd bases)
    # keep all the hets (should change if too close) and only one from a nbhd
    rcmat$keep <- scanSnp(rcmat$maploc, rcmat$het, snp.nbhd)
    # reorder columns: chrom, maploc, rCountT, rCountN, vafT, vafN, het, keep
    rcmat[,c(1,2,5,3,6,4,7,8)]
}

# Thin SNPs by genomic neighborhood via the compiled "scansnp" routine.
# Returns a 0/1 keep flag per position; maploc is assumed sorted within
# chromosome (as produced upstream) — confirm if reusing elsewhere.
scanSnp <- function(maploc, het, nbhd) {
    n <- length(maploc)
    zzz <- .Fortran("scansnp",
                    as.integer(n),
                    as.double(maploc),
                    as.double(het),
                    keep=double(n),
                    as.double(nbhd))
    zzz$keep
}

# obtain logR and logOR from read counts and GC-correct logR
#
# counts2logROR: for the SNPs flagged keep==1, compute
#   gcbias  - lowess-estimated GC bias of the tumor/normal depth ratio,
#   cnlr    - GC-corrected copy-number log-ratio,
#   valor   - minor-allele log-odds-ratio (hets only; NA otherwise),
#   lorvar  - its variance,
# and append them as columns to `out`.
counts2logROR <- function(out, gbuild, unmatched=FALSE, ugcpct=NULL, f=0.2) {
    out <- out[out$keep==1,]
    # gc percentage
    out$gcpct <- rep(NA_real_, nrow(out))
    # get GC percentages from pctGCdata package
    # loop thru chromosomes
    nchr <- max(out$chrom) # IMPACT doesn't have X so only 22
    for (i in 1:nchr) {
        ii <- which(out$chrom==i)
        # allow for chromosomes with no SNPs i.e. not targeted
        if (length(ii) > 0) {
            if (gbuild == "udef") {
                # user-defined genome: GC percentages supplied by the caller
                out$gcpct[ii] <- getGCpct(i, out$maploc[ii], gbuild, ugcpct)
            } else {
                out$gcpct[ii] <- getGCpct(i, out$maploc[ii], gbuild)
            }
        }
    }
    ##### log-ratio with gc correction and maf log-odds ratio steps
    chrom <- out$chrom
    maploc <- out$maploc
    rCountN <- out$rCountN
    rCountT <- out$rCountT
    vafT <- out$vafT
    vafN <- out$vafN
    het <- out$het
    gcpct <- out$gcpct
    # compute gc bias: total tumor vs normal depth per GC stratum,
    # smoothed with lowess (span f) over GC percentage
    ncount <- tapply(rCountN, gcpct, sum)
    tcount <- tapply(rCountT, gcpct, sum)
    pctgc <- as.numeric(names(ncount))
    tscl <- sum(ncount)/sum(tcount)  # global tumor->normal depth scaling
    gcb <- lowess(pctgc, log2(tcount*tscl)-log2(ncount), f=f)
    jj <- match(gcpct, gcb$x)
    gcbias <- gcb$y[jj]
    # compute cn log-ratio (gc corrected); +1 guards against log2(0)
    cnlr <- log2(1+rCountT*tscl) - log2(1+rCountN) - gcbias
    # minor allele log-odds ratio and weights (hets only; NA elsewhere)
    lorvar <- valor <- rep(NA_real_, length(maploc))
    if (unmatched) {
        # read count matrix for odds ratio etc (tumor alt / ref counts)
        rcmat <- round(cbind(vafT[het==1]*rCountT[het==1], (1-vafT[het==1])*rCountT[het==1]))
        # folded log of Tukey (with 1/6 correction)
        valor[het==1] <- log(rcmat[,1]+1/6) - log(rcmat[,2]+1/6)
        # variance - approximation using delta method
        lorvar[het==1] <- 1/(rcmat[,1]+1/6) + 1/(rcmat[,2]+1/6)
    } else {
        # read count matrix for odds ratio etc (tumor then normal alt/ref)
        rcmat <- round(cbind(vafT[het==1]*rCountT[het==1], (1-vafT[het==1])*rCountT[het==1], vafN[het==1]*rCountN[het==1], (1-vafN[het==1])*rCountN[het==1]))
        # log-odds-ratio (Haldane correction)
        valor[het==1] <- log(rcmat[,1]+0.5) - log(rcmat[,2]+0.5) - log(rcmat[,3]+0.5) + log(rcmat[,4]+0.5)
        # variance of log-odds-ratio (Haldane; Gart & Zweifel Biometrika 1967)
        lorvar[het==1] <- (1/(rcmat[,1]+0.5) + 1/(rcmat[,2]+0.5) + 1/(rcmat[,3]+0.5) + 1/(rcmat[,4]+0.5))
    }
    # put them together: initialize the four output columns, then fill
    out$lorvar <- out$valor <- out$cnlr <- out$gcbias <- rep(NA_real_, nrow(out))
    out$gcbias <- gcbias
    out$cnlr <- cnlr
    out$valor <- valor
    out$lorvar <- lorvar
    out
}
fcedbc2d4e6638a86d576846792d88f87aa2acf9
04335cc0a2db41c50d2083600c3ceb19efe16db9
/videoseries/05_Maximum_likelihood.R
cd97c595fb78a7f1312ada29566385932148253f
[]
no_license
tchintchie/VU-Supervised-Learning-Parametric-and-Semi-Parametric
33ae29a55699a19cef7f59d6e04a3fff2ea110e0
d388acbf6460ff1e222fb9607eba7c7e03d9a170
refs/heads/master
2023-03-20T08:04:11.498705
2021-03-14T08:59:03
2021-03-14T08:59:03
347,587,323
0
0
null
null
null
null
UTF-8
R
false
false
1,255
r
05_Maximum_likelihood.R
# Maximum-likelihood estimation of a Gaussian linear model, compared with OLS.
# Model: Ozone ~ Solar.R + Temp + Wind on the (complete-case) airquality data.
# The MLE coefficients, fitted via optim(), should agree with the closed-form
# OLS solution and with lm().

library(ggplot2)  # NOTE(review): loaded but never used below -- confirm it is needed

data("airquality")
aq <- na.omit(subset(airquality, select = c(Ozone, Solar.R, Wind, Temp)))
head(aq, n = 2)

# Response vector y
y <- aq$Ozone
head(y)

# Design matrix X with an explicit intercept column
X <- as.matrix(cbind("(Intercept)" = 1, subset(aq, select = -Ozone)))
head(X, n = 2)

# Closed-form OLS estimate: beta_hat = (X'X)^{-1} X'y
beta <- solve(t(X) %*% X) %*% t(X) %*% y

# Negative Gaussian log-likelihood for optim().
# The last element of `beta` is log(sd) -- log-parameterised so the optimiser
# searches an unconstrained scale; the remaining elements are the regression
# coefficients.
ll <- function(beta, y, X) {
  sd <- exp(beta[length(beta)])
  mu <- X %*% beta[-length(beta)]
  loglik <- dnorm(y, mean = mu, sd = sd, log = TRUE)
  -sum(loglik)
}

mf <- model.frame(Ozone ~ Solar.R + Temp + Wind, data = aq)
# ncol(mf) starting values for the coefficients (intercept + 3 covariates,
# since model.matrix adds the intercept in place of the response column),
# plus one extra for log(sd)
start <- c(rep(0, ncol(mf)), 1)
opt <- optim(start, ll, method = "BFGS", control = list(reltol = 1e-20),
             y = model.response(mf), X = model.matrix(mf, aq))

# MLE coefficients should match the OLS / lm() estimates
opt$par
as.vector(beta)
coef(lm(Ozone ~ Solar.R + Temp + Wind, data = aq))
opt$par[-length(opt$par)]

# evaluate: negative log-likelihood at the optimum ...
opt$value
# Proof: ... compared with lm()'s log-likelihood (sign flipped)
logLik(lm(Ozone ~ Solar.R + Temp + Wind, data = aq))

# OLS loss vs Gaussian log-likelihood as functions of the residual:
# minimising squared error is equivalent to maximising the Gaussian likelihood
x <- seq(-10, 10, length = 501)
xlab <- expression(paste("y - ", hat(y)))
old_par <- par(mfrow = c(1, 3))
plot(x, x^2, type = "l", xlab = xlab, main = "Squared Error")
plot(x, dnorm(x, log = TRUE), type = "l", xlab = xlab,
     main = "Gaussian Loglikelihood")
plot(x^2, dnorm(x, log = TRUE), type = "l", xlab = xlab, main = "Comparison")
par(old_par)  # restore previous graphics settings
2bb8401b2808dfc2e1b3cb37ee0f0ff5bc94a9cd
0b84d6023413e08965e0ce2cca29c6431ddc3d53
/codes/main/step8_results.R
338f3a998ca639b73864ac9ab0a7969af86f5d0d
[]
no_license
jasa-acs/Covariance-Based-Sample-Selection-for-Heterogeneous-Data-Applications-to-Gene-Expression-and-Auti...
8d615fe9b73991ebf066bbc545aeebadfd31ec75
1ed8f1f509098c8d34b92496b7258b28d2309d4f
refs/heads/master
2022-08-30T02:09:03.023416
2020-05-26T20:27:21
2020-05-26T20:27:21
266,931,290
0
0
null
null
null
null
UTF-8
R
false
false
1,105
r
step8_results.R
# Step 8 of the analysis pipeline: compile summary statistics from the earlier
# steps and save the full workspace for downstream figure generation.
# NOTE(review): `verbose`, `genes_pfc35`, `genes_nodawn`, `genes_our`,
# `genes_our_intersect`, `adj_pfc35`, `adj_our`, `dat_list`, `idx_our`,
# `save_filepath` and `filepath_suffix` are assumed to be created by the
# preceding pipeline steps -- confirm this script only runs after them.

if(verbose) print(paste0(Sys.time(), "Start of step 8: Compiling the results"))

# Reference list of validated genes shipped with the covarianceSelection package
validated_genes <- covarianceSelection::validated_genes$Gene

# Overlap of each candidate gene set with the validated genes
num_pfc35 <- length(intersect(genes_pfc35, validated_genes))
num_nodawn <- length(intersect(genes_nodawn, validated_genes))
num_our <- length(intersect(genes_our, validated_genes))
num_intersect <- length(intersect(genes_our_intersect, validated_genes))

# output some summaries: overlap counts, then total gene-set sizes
c(num_pfc35, num_nodawn, num_our, num_intersect)
c(length(genes_pfc35), length(genes_nodawn), length(genes_our), length(genes_our_intersect))

# Edge counts of the two networks (presumably symmetric adjacency matrices,
# so each edge is counted twice -- hence the division by 2)
c(sum(as.matrix(adj_pfc35))/2, sum(as.matrix(adj_our))/2)

# Scale-free fit statistic for each network
c(covarianceSelection::compute_scale_free(as.matrix(adj_pfc35)), covarianceSelection::compute_scale_free(as.matrix(adj_our)))

# Sample sizes of the selected datasets, and their bin assignments
cbind(names(dat_list[idx_our]), sapply(dat_list[idx_our], nrow))
covarianceSelection::binning(names(dat_list)[idx_our])

# computing eigen decompositions for the figure diagnostics
eigen_pfc35 <- eigen(as.matrix(adj_pfc35))
eigen_our <- eigen(as.matrix(adj_our))

# Persist the entire workspace (all objects above) for the figure scripts
save.image(file = paste0(save_filepath, "/step8_results", filepath_suffix, ".RData"))
56b18129783b7131ba1a1d1215538ffd638c9fab
9e6c6d3ea78d408a6746fcdeca6ff0d3a8a3308c
/R/biomisation.R
3d14172921930a9c760bff5c4b02468a1ee37383
[]
no_license
stineb/rbeni
36f28d38f58301d2af24255e9d63fe5ac6809ebe
2f9d26d0a286c550cb90ee9d30a1f2b6c3b112f6
refs/heads/master
2023-02-18T22:18:52.856980
2023-02-16T17:29:09
2023-02-16T17:29:09
167,402,490
3
6
null
2020-09-25T09:35:32
2019-01-24T16:49:15
R
UTF-8
R
false
false
6,500
r
biomisation.R
## ----------------------------------------------------------------
## BIOME CLASSIFICATION ALGORITHM FOR LPX-Bern OUTPUT
## Using variables GDD, FPC_GRID and vegetation height, after
## Prentice et al. (2011). Adopted from the Fortran subroutine
## 'findbiome.F' implemented within LPX-Bern.
##
## Biome codes returned:
##   1 Tropical forest         7 Temperate parkland
##   2 Warm temperate forest   8 Boreal parkland
##   3 Temperate forest        9 Desert
##   4 Boreal forest          10 Dry grassland/shrubland
##   5 Tropical Savanna       11 Shrub tundra
##   6 Sclerophyll woodland   12 Tundra
##
## Arguments:
##   gdd      : annual growing degree days for the years preceding (and
##              including) the current year; averaged internally
##   fpc_grid : fractional plant cover of the current year, length 9
##              (one entry per LPX PFT; 1:7 trees, 8:9 grasses)
##   hgt      : vegetation height per PFT, length 9
##   lunat    : gridcell area fraction of natural land (scalar)
##
## Returns the biome code (1..12), or NA when the gridcell has
## essentially no natural land.
## ----------------------------------------------------------------
biomisation <- function( gdd, fpc_grid, hgt, lunat=1.0 ) {

  ## Tuning constants (Prentice et al. 2011 used a GDD cutoff of 350)
  gdd_cutoff  <- 1000.0                       # warm/cold separation (GDD)
  fpc_cutoff1 <- c( 0.1, 0.1 )                # desert/tundra threshold, by gdd_class
  fpc_cutoff2 <- c( 9999.0, 0.6 )             # grassland threshold, by gdd_class
  hgt_cutoff  <- c( 10.0, 6.0, 4.0, 4.0, 2.0 )  # parkland/forest height splits

  ## Climate class from GDD averaged over the supplied years
  gdd_avg <- mean( gdd, na.rm=TRUE )
  gdd_class <- if (gdd_avg > gdd_cutoff) 2 else 1   # 2 = warm, 1 = cold

  ## Total FPC and tree FPC
  fpc_tree <- sum( fpc_grid[1:7], na.rm=TRUE )
  fpc_tot  <- sum( fpc_grid[1:9], na.rm=TRUE )

  ## FPC-weighted mean tree height (zero when there are no trees)
  hgt_avg <- if (fpc_tree == 0.0) {
    0.0
  } else {
    sum( hgt[1:7] * fpc_grid[1:7], na.rm=TRUE ) / fpc_tree
  }

  ## Shared forest/savanna decision tree. The warm and cold branches of the
  ## original code duplicated this block verbatim, differing only in which
  ## PFT indices count as "tropical present" (1:2 warm, 1 cold).
  classify_forest <- function(trop_idx) {
    if (max(fpc_grid[trop_idx]) > 0.0) {
      ## tropical PFTs are present
      if (hgt_avg < hgt_cutoff[1]) 5 else 1   # Tropical savanna / forest
    } else if (fpc_grid[4] > fpc_grid[5] || fpc_grid[4] > fpc_grid[3]) {
      ## temperate broadleaved evergreen PFT dominant
      if (hgt_avg < hgt_cutoff[2]) 6 else 2   # Sclerophyll woodland / warm temperate forest
    } else if (fpc_grid[3] > 0.0 || fpc_grid[5] > 0.0) {
      ## temperate needleleaved or temperate summergreen PFT present
      if (hgt_avg < hgt_cutoff[3]) 7 else 3   # Temperate parkland / forest
    } else {
      ## boreal fallback: "warm/cold biome areas" may lack all other PFTs
      if (hgt_avg < hgt_cutoff[4]) 8 else 4   # Boreal parkland / forest
    }
  }

  if (lunat < 0.001) {
    ## Essentially no natural land in the gridcell: not classifiable
    biome <- NA
  } else if (gdd_class == 2) {
    ## Warm biomes
    if (fpc_tot < fpc_cutoff1[gdd_class]) {
      biome <- 9                      # Desert
    } else if (fpc_tot < fpc_cutoff2[gdd_class]) {
      biome <- 10                     # Dry grassland/shrubland
    } else {
      ## NOTE(review): the original treats PFTs 1:2 as tropical here but only
      ## PFT 1 in the cold branch -- asymmetry preserved as-is.
      biome <- classify_forest(1:2)
    }
  } else {
    ## Cold biomes
    if (fpc_tot < fpc_cutoff1[gdd_class]) {
      biome <- 12                     # Tundra
    } else if (hgt_avg < hgt_cutoff[5]) {
      biome <- 11                     # Shrub tundra
    } else {
      biome <- classify_forest(1)
    }
  }

  biome
}
4c4745bff1e7beb899e18c25f24652f3c4f43bd3
2beba1c70d0b5ee75f083e71cf186dc826a1aca2
/plot4.R
c77e6c5826b23057fbf33c470e92cfa8a7fb38ae
[]
no_license
thomasemurphy/ExData_Plotting1
5bb3b253b1e13acab265417e37b1f583e19bc4dc
6ce2f47cab3891b917fc3052a77f6ee29f9987a2
refs/heads/master
2020-12-13T16:23:42.133979
2020-01-31T05:01:37
2020-01-31T05:01:37
234,470,790
0
0
null
2020-01-17T04:31:36
2020-01-17T04:31:35
null
UTF-8
R
false
false
1,645
r
plot4.R
# plot4.R -- four-panel summary of household power consumption for
# 2007-02-01 and 2007-02-02, written to plot4.png.

# Read the full dataset and return only the rows for Feb 1-2, 2007.
# The file is ';'-separated with Date and Time in separate columns; a
# combined POSIXct "dateTime" column is added for filtering and plotting.
load_data <- function(filename = "household_power_consumption.txt") {
  all_data <- read.table(filename, header = TRUE, sep = ";")
  all_data[, "dateTime"] <- as.POSIXct(paste(all_data$Date, all_data$Time),
                                       format = "%d/%m/%Y %H:%M:%S")
  # upper bound is exclusive, so this keeps Feb 1 and Feb 2 only
  subset(all_data,
         dateTime >= as.POSIXct("2007-02-01", format = "%Y-%m-%d") &
           dateTime < as.POSIXct("2007-02-03", format = "%Y-%m-%d"))
}

# BUG FIX: the original script defined load_data() but never called it, so
# every use of `feb_data` below failed unless the object already happened to
# exist in the workspace.
feb_data <- load_data()

# Convert measurement columns to numeric via character (they arrive as
# factor/character -- presumably because the raw file marks missing values
# with a non-numeric token such as "?"; TODO confirm). Unparsable entries
# become NA.
feb_data$Global_active_power <- as.numeric(as.character(feb_data$Global_active_power))
feb_data$Sub_metering_1 <- as.numeric(as.character(feb_data$Sub_metering_1))
feb_data$Sub_metering_2 <- as.numeric(as.character(feb_data$Sub_metering_2))
feb_data$Sub_metering_3 <- as.numeric(as.character(feb_data$Sub_metering_3))
feb_data$Voltage <- as.numeric(as.character(feb_data$Voltage))
feb_data$Global_reactive_power <- as.numeric(as.character(feb_data$Global_reactive_power))

png("plot4.png")
par(mfcol = c(2, 2))

# top left: global active power over time
plot(feb_data$dateTime, feb_data$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")

# bottom left: the three sub-metering series overlaid
plot(feb_data$dateTime, feb_data$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(feb_data$dateTime, feb_data$Sub_metering_2, col = "red")
lines(feb_data$dateTime, feb_data$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = "solid", bty = "n")

# top right: voltage over time
plot(feb_data$dateTime, feb_data$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

# bottom right: global reactive power over time
plot(feb_data$dateTime, feb_data$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
5bc180e1225a046df9a66eaea7bcb744915fce9a
cc23547152744fdc4af49bd25f4c72cc1af44067
/tests/testthat/testloadGEO.R
5e241b2599f669d4c67b30ceb00edb2a7032279f
[]
no_license
rsablina/phantasus
0ea1e7ed8fb4b5a7e8649658aefc3930c924a111
60bec95d906666bb80681c8aa44ffe685131f03c
refs/heads/master
2021-04-27T19:24:29.074081
2018-02-18T07:39:27
2018-02-18T07:39:27
122,356,605
0
0
null
2018-02-21T15:48:53
2018-02-21T15:48:53
null
UTF-8
R
false
false
2,077
r
testloadGEO.R
# testthat suite for the phantasus GEO-loading utilities (loadGEO, getES,
# checkGPLs, reparseCachedESs, getGSE).
# NOTE(review): most tests hit a remote mirror (genome.ifmo.ru) and GEO, so
# they are network-dependent; options() set here are reset at the end of each
# test body, but not via on.exit, so a mid-test failure can leak state --
# confirm whether that is acceptable for this suite.
context("Load GEO and its utils")
library(jsonlite)
library(Biobase)

# loadGEO should return JSON wrapping a protobuf-serialised list of
# ExpressionSet-like objects; a series with two platforms yields two of them,
# and pinning the platform (GSE27112-GPL6885) yields one.
test_that("loadGEO finishes with result", {
  options(phantasusMirrorPath = "https://genome.ifmo.ru/files/software/phantasus")
  x <- loadGEO("GSE27112")
  expect_is(x, "json")
  ess <- protolite::unserialize_pb(readBin(fromJSON(x), what="raw", n=100000000))
  expect_equal(length(ess), 2)
  x <- loadGEO("GSE27112-GPL6885")
  expect_is(x, "json")
  ess <- protolite::unserialize_pb(readBin(fromJSON(x), what="raw", n=100000000))
  expect_equal(length(ess), 1)
  expect_is(loadGEO("GSE14308"), "json")
  expect_is(loadGEO("GDS4885"), "json")
  # an unknown accession must error rather than return silently
  expect_error(loadGEO("WRONGNAME"))
  options(phantasusMirrorPath = NULL)
})

# A dataset fetched into a cache directory should be found again by
# reparseCachedESs.
test_that("reparseCachedGSEs works", {
  cacheDir <- tempdir()
  getES("GSE14308", destdir = cacheDir)
  expect_true("GSE14308" %in% reparseCachedESs(destdir = cacheDir))
})

# checkGPLs should report one entry per platform of a series (two for
# GSE27112/GSE10000, one for single-platform accessions) and warn on
# accessions it cannot resolve.
test_that("checkGPLs counts gpls correctly", {
  options(phantasusMirrorPath = "https://genome.ifmo.ru/files/software/phantasus")
  expect_equal(fromJSON(checkGPLs("GSE14308")), c("GSE14308"))
  expect_equal(fromJSON(checkGPLs("GDS4885")), c("GDS4885"))
  expect_length(fromJSON(checkGPLs("GSE27112")), 2)
  expect_length(fromJSON(checkGPLs("GSE10000")), 2)
  expect_warning(checkGPLs("GSE101"))
  expect_warning(checkGPLs("GSE201"))
  options(phantasusMirrorPath = NULL)
})

# With an unreachable mirror, checkGPLs should fall back to whatever is
# already cached: a message about the connection, only the locally cached
# platform for GSE27112, and a warning for an uncached accession.
test_that("checkGPLs counts existing files correctly without connection", {
  options(phantasusMirrorPath = "https://notworkingdomain",
          phantasusCacheDir = system.file("testdata", package="phantasus"))
  expect_message(checkGPLs("GSE27112"), regexp = "Problems establishing connection")
  expect_length(fromJSON(checkGPLs("GSE27112")), 1)
  expect_warning(checkGPLs("GSE14308"))
  options(phantasusCacheDir = NULL, phantasusMirrorPath = NULL)
})

# getGSE against the bundled ARCHS4 test data should produce a non-empty
# expression matrix.
test_that("getGSE works with ARCHS4", {
  ess <- getGSE("GSE99709", destdir=system.file("testdata", package="phantasus"))
  expect_gt(nrow(ess[[1]]), 0)
  expect_gt(ncol(ess[[1]]), 0)
})