blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb051a7e15ad9f6046b70a4f2c4f75770d943866
|
326cc069990dc4a1bb1e020344749ec4188d8827
|
/CREATE_TIMELINE_LAB_RESULTS.r
|
a0f2eee8972ee4f0a33596220ab043e3f61557c8
|
[] |
no_license
|
iuabhmalat/mmrf_scripts
|
a622072d968782c876357c80cc98f6842956f635
|
272faaa029e77fabced1a1ed1f31c8d87e374039
|
refs/heads/master
| 2023-04-14T22:52:11.626776
| 2021-04-19T21:23:58
| 2021-04-19T21:23:58
| 288,565,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,340
|
r
|
CREATE_TIMELINE_LAB_RESULTS.r
|
library(purrr)  # NOTE(review): purrr does not appear to be used in this script
library(dplyr)

# Build cBioPortal-style "LAB_TEST" timeline files from the CoMMpass
# per-patient-visit flat file.
# NOTE(review): hard-coded local path; breaks on any other machine.
setwd('c:/Users/abhmalat/OneDrive - Indiana University/MMRF_CoMMpass_IA16a')

per_visit <- read.csv('clinical_flat_files/CoMMpass_IA16_FlatFiles/MMRF_CoMMpass_IA16_PER_PATIENT_VISIT.csv', sep=",", header=TRUE)
head(per_visit)

# Bone-assessment columns recorded for each visit (this vector was previously
# duplicated as a commented-out block).
bone_cols <- c("PUBLIC_ID", "BA_WASABONEASSES", "BA_DAYOFASSESSM", "BA_TYPEOFBONEASS", "BA_SPECIFY", "BA_IMAGEINTERPRE",
               "BA_LYTICLESIONS", "BA_OFLYTICLESION", "BA_OSTEOPENIAOST", "BA_PATHOLOGICFRA", "BA_MULTIPLEDISSE",
               "BA_SKULL", "BA_SPINE", "BA_CERVICAL", "BA_THORACIC", "BA_LUMBAR", "BA_SACRAL", "BA_PELVIS", "BA_RIGHT",
               "BA_LEFT", "BA_FEMUR", "BA_RIGHT2", "BA_LEFT2", "BA_HUMERUS", "BA_RIGHT3", "BA_LEFT3", "BA_RIBS", "BA_RIGHT4",
               "BA_NUMBERAFFECTE", "BA_LEFT4", "BA_NUMBERAFFECTE2", "BA_SPECIFY2")
bone_assessment <- per_visit[, bone_cols]

# Keep only visits where a bone assessment was actually performed.
ba_true <- dplyr::filter(bone_assessment, BA_WASABONEASSES == "Yes")

# Fold the free-text "Other" type into the assessment-type column.
ba_true$BA_TYPEOFBONEASS <- ifelse(ba_true$BA_TYPEOFBONEASS %in% "Other", ba_true$BA_SPECIFY, ba_true$BA_TYPEOFBONEASS)

# BUG FIX: the original call was -c(BA_SPECIFY, BA_WASABONEASSES, ) with a
# trailing empty argument, which errors when the selection is evaluated.
ba_true <- dplyr::select(ba_true, -c(BA_SPECIFY, BA_WASABONEASSES))
head(ba_true)

# Timeline bookkeeping columns required by the timeline file format.
ba_true$STOP_DATE <- ""
ba_true$EVENT_TYPE <- "LAB_TEST"
ba_true$TEST <- "Bone Assessment"
colnames(ba_true)

# Human-readable names, positionally matched to the remaining columns
# (PUBLIC_ID -> PATIENT_ID, BA_DAYOFASSESSM -> START_DATE, ...).
names(ba_true) <- c("PATIENT_ID", "START_DATE", "Type of bone assessment", "Image Interpretation", "Lytic Lesions",
                    "Number of Lytic Lesions", "Osteopenia / Osteoporosis", "Pathologic fracture", "Multiple disseminated lesions",
                    "Skull", "Spine", "Cervical", "Thoracic", "Lumbar", "Sacral", "Pelvis", "Right pelvis", "Left pelvis",
                    "Femur", "Right femur", "Left femur", "Humerus", "Right humerus", "Left humerus", "Ribs", "Right ribs",
                    "Number of ribs affected (right side)", "Left ribs", "Number of ribs affected (left side)",
                    "Other myeloma-related bone lesion", "STOP_DATE", "EVENT_TYPE", "TEST")
summary(bone_assessment)

outputFile <- "data_timeline_lab_bone_assessment.txt"
# NOTE(review): writing of the bone-assessment table is currently disabled;
# the original commented-out write.table() call wrote ba_true (with the
# bookkeeping columns first) to outputFile as a tab-separated file.
# M-protein progression events: keep only visits flagged with the >= 25%
# increase checkbox, then split into serum / urine / FLC tables below.
mprotein <- per_visit[, c("PUBLIC_ID", "VISIT", "AT_INCREASEOF25F", "AT_SERUMMCOMPONE", "AT_URINEMCOMPONE","AT_ONLYINPATIENT","AT_ONLYINPATIENT2")] %>% dplyr::filter(AT_INCREASEOF25F == "Checked")
# Timeline bookkeeping columns required by the timeline file format.
mprotein$STOP_DATE <- ""
mprotein$EVENT_TYPE <- "LAB_TEST"
mprotein$TEST <- ""
#mprotein <- dplyr::select (mprotein,-AT_INCREASEOF25F)
#Serum m protein
# Visits where the serum M-component box was checked.
serum <- select(mprotein, c("PUBLIC_ID", "VISIT", "STOP_DATE", "EVENT_TYPE", "TEST", "AT_SERUMMCOMPONE")) %>% dplyr::filter(AT_SERUMMCOMPONE == "Checked")
serum$TEST <- "Serum M-Component"
# Positional rename; VISIT becomes the event START_DATE.
names(serum) <- c("PATIENT_ID", "START_DATE", "STOP_DATE", "EVENT_TYPE", "TEST", "Serum M-component (absolute increase must be >= 0.5g/dL)")
outputFile <- "data_timeline_lab_m_serum.txt"
write.table(serum, outputFile, sep='\t', col.names = TRUE, row.names=FALSE, quote = FALSE, append = FALSE, na="NA")
# Companion cBioPortal-style meta file describing the data file written above.
metaFile <- file("meta_timeline_lab_m_serum.txt")
writeLines(c(
"cancer_study_identifier: mmrf_2020",
"genetic_alteration_type: CLINICAL",
"datatype: TIMELINE",
paste("data_filename: ", outputFile)
), metaFile
)
close(metaFile)
# Urine M-protein timeline events: visits where the urine M-component box was
# checked at a visit flagged with a >= 25% increase.
urine <- select(mprotein, c("PUBLIC_ID", "VISIT", "STOP_DATE", "EVENT_TYPE", "TEST", "AT_URINEMCOMPONE")) %>%
  dplyr::filter(AT_URINEMCOMPONE == "Checked")
urine$TEST <- "Urine M-Component"
# BUG FIX: the last column was labelled "Serum M-component (absolute increase
# must be >= 0.5g/dL)" — a copy-paste from the serum section — although it
# holds the urine M-component flag.
# TODO(review): confirm the exact urine increase-threshold wording against the CRF.
names(urine) <- c("PATIENT_ID", "START_DATE", "STOP_DATE", "EVENT_TYPE", "TEST",
                  "Urine M-component")
outputFile <- "data_timeline_lab_m_urine.txt"
write.table(urine, outputFile, sep='\t', col.names = TRUE, row.names=FALSE, quote = FALSE, append = FALSE, na="NA")
# Companion cBioPortal-style meta file describing the data file written above.
metaFile <- file("meta_timeline_lab_m_urine.txt")
writeLines(c(
  "cancer_study_identifier: mmrf_2020",
  "genetic_alteration_type: CLINICAL",
  "datatype: TIMELINE",
  paste("data_filename: ", outputFile)
), metaFile)
close(metaFile)
#FLC
# FLC / bone-marrow plasma-cell events: visits where either of the
# "only in patients without measurable ..." boxes was checked.
flc_bmpcp <- select(mprotein, c("PUBLIC_ID", "VISIT", "STOP_DATE", "EVENT_TYPE", "TEST", "AT_ONLYINPATIENT", "AT_ONLYINPATIENT2")) %>% dplyr::filter(AT_ONLYINPATIENT == "Checked" | AT_ONLYINPATIENT2 == "Checked")
flc_bmpcp$TEST <- "FLC and Bone Marrow Plasma %"
# Positional rename; VISIT becomes the event START_DATE.
names(flc_bmpcp) <- c("PATIENT_ID", "START_DATE", "STOP_DATE", "EVENT_TYPE", "TEST",
"Only in patients without measurable serum and M-Protein, the difference between involved and uninvolved FLC levels (absolute increase must be > 10mg/dL)",
"Only in patients without measurable serum and urine M-protein and without measurable disease by FLC levels, bone marrow plasma cell percentage (absolute % must be >= 10%)"
)
outputFile <- "data_timeline_lab_flc.txt"
write.table(flc_bmpcp, outputFile, sep='\t', col.names = TRUE, row.names=FALSE, quote = FALSE, append = FALSE, na="NA")
# Companion cBioPortal-style meta file describing the data file written above.
metaFile <- file("meta_timeline_lab_flc.txt")
writeLines(c(
"cancer_study_identifier: mmrf_2020",
"genetic_alteration_type: CLINICAL",
"datatype: TIMELINE",
paste("data_filename: ", outputFile)
), metaFile
)
close(metaFile)
print('Completed')
|
c97f2877c8c0da69e420e259156b0afcca80ebfd
|
0329874372a50960f4d0c5cd4055fd2c076f40bd
|
/man/setReduce.Rd
|
0c07965a11dcb3fee6919599eb0c2f2a0e231771
|
[] |
no_license
|
bomeara/doRedis
|
470f7b3e877edeef575db168c6d1d462955eb552
|
5d2725e91c566df97d4473b6501e9a05d41abdfe
|
refs/heads/master
| 2021-08-19T11:49:37.497437
| 2017-11-26T03:21:45
| 2017-11-26T03:21:45
| 112,051,830
| 0
| 0
| null | 2017-11-26T03:06:18
| 2017-11-26T03:06:18
| null |
UTF-8
|
R
| false
| true
| 1,523
|
rd
|
setReduce.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doRedis.R
\name{setReduce}
\alias{setReduce}
\title{Set two-level distributed reduction}
\usage{
setReduce(fun = NULL)
}
\arguments{
\item{fun}{a function of two arguments, set to NULL to disable combining, or
leave missing to implicitly set the gather function formally identical to the
\code{.combine} function but with an empty environment.}
}
\value{
\code{fun} is invisibly returned, or TRUE is returned when
\code{fun} is missing (in which case the \code{.combine} function is used).
}
\description{
Instruct doRedis to perform the \code{.combine} reduction per task on each
worker before returning results, cf. \code{\link{foreach}}.
Combined results are then processed through
the specified function \code{fun} for two levels of reduction
functions. This option only applies when the \code{chunkSize} option is greater than
one, and automatically sets \code{.multicombine=FALSE}.
}
\details{
This approach can improve performance when the \code{.combine} function is
expensive to compute, and when the function emits significantly less data than
it consumes.
}
\note{
This value is overridden by setting the 'reduce' option in the
foreach loop (see the examples).
}
\examples{
\dontrun{
setChunkSize(3)
setReduce(list)
foreach(j=1:10, .combine=c) \%dopar\% j
# Same effect as:
foreach(j=1:10, .combine=c,
.options.redis=list(chunksize=3, reduce=list)) \%dopar\% j
}
}
\seealso{
\code{\link{foreach}}, \code{\link{setChunkSize}}
}
|
3adee09747fdca7b1ed87981a4563ec8df3206a3
|
56dd625b8daa7d50aaef1e457e74e2801cb39a95
|
/demo.R
|
17f9d0d84e1c7b1999687dafc86ee9a44da3b4b0
|
[] |
no_license
|
moomoofarm1/eSNN-2
|
2fbc0912aa5393a2ac42fc12606dbeaf17dec442
|
1f2436485c15a22c20b6003ca94e3e53bc68ae44
|
refs/heads/master
| 2021-05-31T10:51:36.646641
| 2016-05-03T11:01:00
| 2016-05-03T11:01:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,889
|
r
|
demo.R
|
# Clear the workspace and load the eSNN implementation.
# NOTE(review): rm(list = ls()) in a sourced script wipes the caller's
# workspace — consider removing it.
rm(list = ls())
source('main.R')
########################
#
# Demo of the ESNN
#
########################
# Run one eSNN train/test cycle on a named benchmark dataset.
#
# datasetName: one of 's' (spiral), 'i' (iris), 'w' (wine), 'bc' (breast cancer).
# esnn: unused; kept so existing callers passing two arguments keep working.
# Returns the test-results object from ESNN$test(), or NULL for an unknown name.
doTheTest <- function(datasetName, esnn){
  availableDataSets <- c('s', 'i', 'w', 'bc')  # spiral / iris / wine / breast cancer
  if (!(datasetName %in% availableDataSets)) {
    # BUG FIX: the old cat(paste('... Availale are ', availableDataSets))
    # pasted the message element-wise against the vector (printing it four
    # times) and misspelled "Available".
    cat('\nDataset unknown. Available are: ',
        paste(availableDataSets, collapse = ', '), '\n')
    return()
  }
  # Default population encoder; the iris branch builds its own with a range
  # matched to the iris feature values.
  encoder <- SpikeEncoding$new(nbFields = 20, beta = 1.5, iMin = -1, iMax = 1)
  if (datasetName == 'i') {          # iris
    irisData <- read.csv(file = "data/iris.data", header = FALSE, sep = " ")
    baseIrisData <- as.data.frame(c(irisData[, 1:4], irisData[5]))
    shuffledIrisData <- baseIrisData[sample(nrow(baseIrisData)), ]
    irisTrainSet <- shuffledIrisData[1:100, ]
    irisTestSet <- shuffledIrisData[101:150, ]
    # Iris features fall in [0, 8], so use a matching encoder range.
    enc <- SpikeEncoding$new(nbFields = 20, beta = 1.5, iMin = 0, iMax = 8)
    esnnIris <- ESNN$new(encoder = enc, m = 0.9, c = 0.7, s = 0.6)
    esnnIris$train(irisTrainSet)
    return(esnnIris$test(irisTestSet))
  } else if (datasetName == 's') {   # spiral
    data <- read.csv(file = "data/spiral.data", header = FALSE, sep = " ")
    baseData <- as.data.frame(c(data[, 1:6], data[21]))
    shuffledData <- baseData[sample(nrow(baseData)), ]
    trainSet <- shuffledData[1:300, ]
    testSet <- shuffledData[301:400, ]
    # NOTE(review): this branch used to overwrite the (unused) `esnn`
    # argument; a local name avoids shadowing the parameter.
    esnnSpiral <- ESNN$new(encoder = encoder, m = 0.9, c = 0.7, s = 0.6)
    esnnSpiral$train(trainSet)
    return(esnnSpiral$test(testSet))
  } else if (datasetName == 'w') {   # wine
    data <- read.csv(file = "data/wine.data", header = FALSE, sep = ",")
    baseWineData <- as.data.frame(c(data[, 2:14], data[1]))
    shuffledWineData <- baseWineData[sample(nrow(baseWineData)), ]
    wineTrainSet <- shuffledWineData[1:118, ]
    wineTestSet <- shuffledWineData[119:178, ]
    esnnWine <- ESNN$new(encoder = encoder, m = 0.9, c = 0.7, s = 0.6)
    esnnWine$train(wineTrainSet)
    return(esnnWine$test(wineTestSet))
  } else if (datasetName == 'bc') {  # breast cancer
    data <- read.csv(file = "data/wdbc.data", header = FALSE, sep = ",")
    baseBCData <- as.data.frame(c(data[, 3:32], data[2]))
    shuffledBCData <- baseBCData[sample(nrow(baseBCData)), ]
    bcTrainSet <- shuffledBCData[1:379, ]
    bcTestSet <- shuffledBCData[380:569, ]   # was misspelled "wintTestSet"
    esnnBC <- ESNN$new(encoder = encoder, m = 0.9, c = 0.7, s = 0.6)
    esnnBC$train(bcTrainSet)
    return(esnnBC$test(bcTestSet))
  }
}
# Drive the demo on the iris and spiral datasets.
# NOTE(review): warnings are blanket-suppressed; consider handling the
# specific conditions instead.
cat('\nTesting on Iris dataset\n')
i <- suppressWarnings(doTheTest('i'))
cat(paste('Iris dataset done. Use: \n\t`i$stats` for overall stats \n\t`i$precision`, `i$accuracy` or `i$recall` \nto view the results.\n'))
cat('\nTesting on Spiral dataset\n')
s <- suppressWarnings(doTheTest('s'))
cat(paste('Spiral dataset done. Use: \n\t`s$stats` for overall stats \n\t`s$precision`, `s$accuracy` or `s$recall` \nto view the results.\n'))
|
456330bcea7c67913d76ff606a05cf08e552d877
|
e7600a22232d22511f3a77ce1da5f89d6a5a6440
|
/src/allen/wgcna/initial_clustering_tom_filtered_phenotype_separate.R
|
4bcb343a73f1c3e70d59de0aaf8425bd3635bdbe
|
[] |
no_license
|
SuLab/expression_map
|
aba55c8d21e65fa4b000144126352e8df373e83a
|
72e266417f114c4f2a1094419622aabd4f40765e
|
refs/heads/master
| 2021-10-11T21:19:24.791487
| 2019-01-29T23:23:34
| 2019-01-29T23:23:34
| 110,739,279
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,378
|
r
|
initial_clustering_tom_filtered_phenotype_separate.R
|
# Build per-phenotype expression sets from the Allen TPM matrix for
# per-phenotype module detection from a precomputed TOM (below).
library(WGCNA)
enableWGCNAThreads()

# NOTE(review): hard-coded cluster path; breaks on any other machine.
setwd('/gpfs/group/su/lhgioia/map')
options(stringsAsFactors = FALSE)

tpm.dat <- read.table('data/allen/tpm_matrix.csv', sep = ',', header = TRUE)
var.genes <- readRDS('results/allen/variable_genes_log_4k.RDS')
allen.meta <- read.table('data/allen/allen_meta.tsv', sep = '\t', header = TRUE)

# Restrict to the profiled samples (rows) and pre-selected variable genes (cols).
tpm.dat <- tpm.dat[as.character(allen.meta$rnaseq_profile_id), var.genes]

# Log-transform with a data-driven pseudocount derived from the order of
# magnitude of the smallest non-zero TPM value.
mag.cnst <- as.integer(log(min(as.vector(tpm.dat)[as.vector(tpm.dat) != 0])))
dec.cnst <- exp(mag.cnst)
tpm.dat <- log(tpm.dat + dec.cnst) - mag.cnst

beta <- 2  # soft-threshold power; appears unused in this script

# One multi-set entry per phenotype source, in the list(data = <samples x genes>)
# format WGCNA's multi-set functions expect.
# Generalized: the list is sized from the data instead of a hard-coded 3.
sources <- unique(allen.meta$sources)
expr.dat <- vector(mode = 'list', length = length(sources))
cnt <- 1
for (i in sources) {
  dat.slice <- tpm.dat[allen.meta$sources == i, ]
  expr.dat[[cnt]] <- list(data = dat.slice)
  cnt <- cnt + 1
}
names(expr.dat) <- sources
# Precomputed scaled topological-overlap matrices, one slice per phenotype
# (indexed as TOM.all[i, , ]).
TOM.all <- readRDS('data/allen/wgcna/scaled_TOM_pheno.RDS')
unmerged.labels.pheno <- list()
unmerged.colors.pheno <- list()
set.trees <- list()
# NOTE(review): hard-coded 3 assumes exactly three phenotype sources —
# confirm against dim(TOM.all).
for(i in 1:3){
# Average-linkage gene clustering on TOM dissimilarity (1 - TOM).
setTree <- hclust(as.dist(1-TOM.all[i,,]),method='average')
set.trees[[i]] <- setTree
# Dynamic tree cut to obtain the initial (unmerged) module labels.
unmerged.labels <- cutreeDynamic(setTree,distM=1-TOM.all[i,,],deepSplit=2,cutHeight=0.995,minClusterSize=30,pamRespectsDendro=F)
unmerged.labels.pheno[[i]] <- unmerged.labels
# Convert numeric labels to WGCNA's conventional color labels.
unmerged.colors <- labels2colors(unmerged.labels)
unmerged.colors.pheno[[i]] <- unmerged.colors
}
# Merge modules whose eigengenes are highly correlated, separately per phenotype.
merging.results <- list()
for (i in seq_along(expr.dat)) {   # generalized from hard-coded 1:3
  unmerged.mes <- moduleEigengenes(expr.dat[[i]]$data, colors = unmerged.colors.pheno[[i]])
  mes <- unmerged.mes$eigengenes
  # BUG FIX: rownames(expr.dat[[i]]) is always NULL (it is the list wrapper);
  # the sample IDs live on the $data element.
  rownames(mes) <- rownames(expr.dat[[i]]$data)
  # Eigengene dissimilarity and tree (tree kept for inspection only).
  me.diss <- 1 - cor(mes)
  me.tree <- hclust(as.dist(me.diss), method = 'average')
  me.diss.thresh <- 0.25  # merge modules with eigengene correlation >= 0.75
  merge <- mergeCloseModules(expr.dat[[i]]$data, unmerged.colors.pheno[[i]],
                             cutHeight = me.diss.thresh, verbose = 3)
  merged.colors <- merge$colors
  merged.mes <- merge$newMEs
  rownames(merge$newMEs) <- rownames(expr.dat[[i]]$data)  # BUG FIX, as above
  # Map merged color labels back to numeric labels (grey -> 0).
  color.order <- c('grey', standardColors(50))
  module.labels <- match(merged.colors, color.order) - 1
  merge$labels <- module.labels
  merging.results[[i]] <- merge
}
saveRDS(merging.results, 'results/allen/wgcna/merged_results_separate_phenotype.RDS')
saveRDS(set.trees, 'results/allen/wgcna/gene_trees_separate_phenotype.RDS')
|
4427f4f72227319e0a85ccda66dc908a77f4f630
|
5fbd7b46fe555db6271ddd423ff74039e989f4a2
|
/Course3/Week4/Quiz.R
|
1ede1652dc77ba741bcf17ba04bc16be169bfef8
|
[] |
no_license
|
TD0401/datasciencecoursera
|
b3efa4cbf15a20fb287c8dfbffee013a0dfabcea
|
0c369e98f9db1a830cae9df1ca3a7ca1f69ed7bc
|
refs/heads/master
| 2023-02-12T02:54:43.759691
| 2021-01-03T18:44:06
| 2021-01-03T18:44:06
| 263,995,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,157
|
r
|
Quiz.R
|
# Quiz problem 1: download the housing-survey CSV (cached under data/) and
# split the variable names on the "wgtp" prefix.
problem1 <- function(){
  fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
  if (!file.exists("data")) {
    dir.create("data")
  }
  if (!file.exists("data/Idaho.csv")) {
    download.file(fileUrl, "data/Idaho.csv", method = "curl")
  }
  idaho <- read.csv("data/Idaho.csv")
  strsplit(names(idaho), "wgtp")
}
# Quiz problems 2 & 3: mean GDP across ranked countries, and country names
# beginning with "United". Downloads are cached under data/.
problem23 <- function(){
file1 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
if(!file.exists("data")){
dir.create("data")
}
if(!file.exists("data/GDP.csv")){
download.file(file1 ,"data/GDP.csv",method = "curl")
}
# Skip the 5-line preamble; only the first 231 rows are data.
gdp <- read.csv("data/GDP.csv",skip=5, header = FALSE,nrows =231)
colnames(gdp) <- c("countrycode","ranking","x1","name","economy","x2","x3","x4","x5","x6")
# Drop aggregate/blank rows that have no ranking.
gdp <- gdp[!is.na(gdp$ranking),]
# Strip thousands separators so GDP values convert cleanly to numeric.
gdp$economy <- gsub(",","",gdp$economy)
res <- mean(as.numeric(gdp$economy), na.rm=TRUE)
print(res)
gdp$name[grep("^United" , gdp$name)]
}
# Quiz problem 4: merge the GDP ranking with the country metadata and count
# countries whose special notes mention "Fiscal year end: June".
problem4 <- function(){
  file1 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
  file2 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
  if (!file.exists("data")) {
    dir.create("data")
  }
  if (!file.exists("data/GDP.csv")) {
    download.file(file1, "data/GDP.csv", method = "curl")
  }
  if (!file.exists("data/Country.csv")) {
    download.file(file2, "data/Country.csv", method = "curl")
  }
  # Skip the 5-line preamble; only the first 231 rows are data.
  gdp <- read.csv("data/GDP.csv", skip = 5, header = FALSE, nrows = 231)
  colnames(gdp) <- c("countrycode","ranking","x1","name","economy","x2","x3","x4","x5","x6")
  gdp <- gdp[!is.na(gdp$ranking),]
  country <- read.csv("data/Country.csv")
  # Inner join on the country code.
  res <- merge(country, gdp, by.x = "CountryCode", by.y = "countrycode", all = FALSE)
  # BUG FIX: the original filtered and grepped on `country` (the unmerged
  # table), so the merge had no effect on the answer. Use the merged result.
  res <- res[!is.na(res$Special.Notes),]
  length(grep("Fiscal year end: June", res$Special.Notes))
}
# Quiz problem 5: count AMZN trading days observed in 2012, and how many of
# them were Mondays.
problem5 <- function(){
library(quantmod)
# Daily price series for Amazon; auto.assign=FALSE returns it directly.
amzn = getSymbols("AMZN",auto.assign=FALSE)
sampleTimes = index(amzn)
library(lubridate)
my_years <- year(ymd(sampleTimes))
print(length(my_years[my_years==2012]))
# Restrict to 2012 dates, then tabulate weekday names.
my_days <- weekdays(ymd(sampleTimes[grep("^2012",as.character(sampleTimes))]))
print(length(my_days[my_days=="Monday"]))
}
|
c78b13d54fe075b482f8dc384aae5e8db224c5e7
|
2b9ce821f8453b8626f8563979859b9cc91f81b6
|
/BreastCancerModel.R
|
023bcc6410c4cabce5eb4f04e707e9296e37c5f2
|
[
"Apache-2.0"
] |
permissive
|
lawhitmi/BreastCancerPrediction
|
9841ba0317c60b7066c42ab7314cf69f6d692faa
|
4c267e1d2474be342462cadaf63b09d85823571a
|
refs/heads/main
| 2023-02-13T08:25:52.376540
| 2021-01-10T19:21:06
| 2021-01-10T19:21:06
| 320,080,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,038
|
r
|
BreastCancerModel.R
|
# Breast Cancer Prediction
# Trains SVM and neural-network classifiers on the WDBC data and compares
# their held-out test accuracy.
library(ggplot2)
library(pROC)
# Load Data
data = read.csv('Data/wdbc.data', header=FALSE)
summary(data)
dim(data) #569 x 32
# Add in column names from the documentation
colnames(data) = c('ID', 'Diag', 'radius', 'texture','perimeter', 'area', 'smoothness', 'compactness', 'concavity',
'concave_points', 'symmetry', 'fractal_dim', 'se_radius', 'se_texture','se_perim', 'se_area', 'se_smooth',
'se_compact', 'se_concavity', 'se_concpts', 'se_symm', 'se_fractdim', 'max_rad', 'max_text', 'max_perim',
'max_area', 'max_smooth', 'max_compact', 'max_concavity', 'max_concpts', 'max_symm', 'max_fracdim')
# Remove ID column
data = subset(data, select=-ID)
table(data$Diag) #B: 357 M: 212
sum(is.na(data)) #0 -> no missing data
sum(duplicated(data)) #0 -> no duplicate data
# Test/Train Split
# Fixed seed so the split is reproducible.
set.seed(2)
numObs = dim(data)[1]
train.idx = sample(1:numObs,0.8*numObs) # Reserve 20% of data for testing (CV used throughout, no need for validation set)
train = data[train.idx,]
test = data[-train.idx,]
# EDA
# We can check the relation between Diag and variables
# Overlaid density histograms of each feature, split by diagnosis.
for (i in colnames(train)[-1]) {
p1 <- ggplot(data=train, mapping = aes(x = !!rlang::sym(i), fill = Diag))
p1 <- p1 + geom_histogram(aes(y = ..density..), alpha = 0.6, bins = 30) + ggtitle(paste0('Distribution of ', i, ' by Diagnosis'))
print(p1)
}
# Look for correlation between variables
pairs(train[,1:11])
pairs(train[,c(1,12:21)])
pairs(train[,c(1,22:31)])
summary(train)
# PCA
# Scaled PCA on the features; plot the first two PCs colored by diagnosis.
pr.out<-prcomp(train[,-1],scale.=TRUE)
plot(pr.out$x[,1:2], col=as.numeric(train[,1]))
##############################################
# SVM Classifier
##############################################
library(e1071) #For SVM
# Function for Evaluating SVM Performance
# Evaluate a classifier on a labelled data set: print the confusion matrix
# against data$Diag and return list(accuracy, predictions).
eval_perf <- function(clf, data) {
  ypred <- predict(clf, data)
  conf <- table(ypred, truth = data$Diag)
  print(conf)
  # Accuracy = sum of the diagonal of the proportion table.
  acc <- sum(diag(prop.table(conf)))
  list(acc, ypred)
}
# Function for Pretty Plot of Confusion Matrix
# Render a confusion matrix as a tiled heatmap with the counts overlaid.
# pred / truth: predicted and actual class labels; title: plot title.
plot_conf <- function(pred, truth, title) {
  conf_mat <- as.data.frame(table(predicted = pred, truth = truth))
  base <- ggplot(data = conf_mat, mapping = aes(x = truth, y = predicted))
  tiles <- geom_tile(aes(fill = Freq))
  counts <- geom_text(aes(label = sprintf("%1.0f", Freq)), vjust = 0.5)
  # Log fill scale keeps small off-diagonal counts visible next to the
  # large diagonal ones.
  fill_scale <- scale_fill_gradient(low = "white",
                                    high = "green",
                                    trans = "log",
                                    guide = FALSE)
  base + tiles + counts + fill_scale +
    xlim(rev(levels(conf_mat$truth))) +
    ggtitle(title)
}
# TRAIN
######## Model trained for obtaining plot to demonstrate SVM Method ####################
# For the document example plot, we can train the model as following:
# Two-feature model so the decision boundary can be drawn in 2D.
svm.hpo = tune(svm, Diag~max_concpts+max_perim, data=train,
ranges=list(cost=c(0.1,0.5,1,2,5,10,20,50),
kernel=c('radial','linear','polynomial')),
scale=TRUE)
# The best model for plotting
svm.hpo$best.model # radial and cost 2
# Trying some plots of the svmfit
plot(svm.hpo$best.model, train, max_concpts~max_perim)
########################################################################################
# 10 fold CV with HPO on 'Cost' and 'Kernel'
# (svm.hpo is intentionally reassigned to the full-feature model here.)
svm.hpo = tune(svm, Diag~., data=train, ranges=list(cost=c(0.1,0.5,1,2,5,10,20,50), kernel=c('radial','linear','polynomial')), scale=TRUE)
summary(svm.hpo) # C=2, radial basis function
perf.hpo = eval_perf(svm.hpo$best.model, train)
perf.hpo[1] #98.9%
plot_conf(perf.hpo[[2]], train$Diag, 'SVM Training Confusion Matrix')
# Check if scale=FALSE improves performance (as this change can cause 'max iterations reached' error)
svm.noscale = tune(svm, Diag~., data=train, kernel='radial', scale=FALSE)
summary(svm.noscale) # error: 0.36 vs. ~0.03 for scaled -> Stick with scale=TRUE
# 10 fold CV with HPO on 'Cost' and 'Kernel' and PCA preprocessing
# First 15 principal components as features; PCs are already scaled, hence scale=FALSE.
train_pca = cbind(as.data.frame(train$Diag),as.data.frame(pr.out$x[,1:15]))
colnames(train_pca)[1] = 'Diag'
svm.pca = tune(svm, Diag~., data=train_pca,
ranges=list(cost=c(0.1,0.5,1,2,5,10,20,50), kernel=c('radial','linear','polynomial')), scale=FALSE)
summary(svm.pca) # C=0.1, linear
perf.pca = eval_perf(svm.pca$best.model, train_pca)
perf.pca[1] # 98.6%
# Vary the # of PCs used
# Collect (number of PCs, training accuracy) pairs for several PC counts.
pca.res = cbind(rep(NA,10), rep(NA,10))
j=1
for (i in c(5,10,12,15,17,20,22,25,27,30)) {
train_pca = cbind(as.data.frame(train$Diag),as.data.frame(pr.out$x[,1:i]))
colnames(train_pca)[1]='Diag'
pcasvm = svm(Diag~.,data=train_pca, cost=0.1,kernel='linear',scale=FALSE)
perf.pcasvm = eval_perf(pcasvm, train_pca)
pca.res[j,]=c(i, perf.pcasvm[[1]])
j=j+1
}
res.df=as.data.frame(pca.res)
colnames(res.df) = c("# PCs","Accuracy")
plot(res.df, main="Model Performance at Varying Numbers of PCs")
# Feature Selection Using Logistic Regression
#####################################################################
library(MASS) # for the stepAIC method
# glm needs a 0/1 response, so convert the diagnosis factor to numeric.
trainlm = train
trainlm$Diag = as.numeric(trainlm$Diag)-1
testlm = test
testlm$Diag = as.numeric(testlm$Diag)-1
# LM TRAIN
# NOTE(review): naming this `lm` shadows stats::lm within this script.
lm = glm(Diag~.,data=trainlm, family='binomial')
# glm.fit: algorithm did not converge
# glm.fit: fitted probabilities numerically 0 or 1 occurred
# Training data could be separable, or probabilities of some points are indistinguishable from 0.
# didn't further investigate this as the StepAIC method below still gives some results.
summary(lm)
lm.probs = predict(lm,type="response")
lm.pred = rep(0,length(lm.probs))
# Threshold predicted probabilities at 0.5.
lm.pred[lm.probs>.5]=1
table(lm.pred, trainlm$Diag)
sum(diag(prop.table(table(lm.pred, truth=trainlm$Diag)))) # 100% Accuracy
mod.select = stepAIC(lm) #this iteratively determines the best model using the AIC value
# The svm below uses only the variables which were identified as significant using the stepAIC method in the LogReg section
# This svm achieves 98.6% training accuracy with just 16 variables(/30)
svm.out = tune(svm, Diag~max_concpts+max_perim+max_smooth+fractal_dim+smoothness+radius+symmetry+max_area
+concave_points+max_concavity+compactness+se_concpts+max_symm+se_fractdim+se_radius+max_text, data=train,
ranges=list(cost=c(0.1,0.5,1,2,5,10,20,50), kernel=c('radial','linear','polynomial')), scale=TRUE)
summary(svm.out)
summary(svm.out$best.model) #C=1, linear
perf.out = eval_perf(svm.out$best.model,train)
perf.out[1] #98.6
# TEST
# Evaluate the selected models on the held-out 20% test split.
# Select model using all features
best.svm = svm(Diag~., data=train, prob=TRUE, cost=2, scale=TRUE, kernel='radial')
test.hpo=eval_perf(best.svm, test)
test.hpo[1] #Acc: 98.2%
plot_conf(test.hpo[[2]], test$Diag, 'SVM Test Confusion Matrix')
#Identify which points were misclassified
test.hpo[[2]][test.hpo[[2]]!=test$Diag]
#Attempt to visualize the misclassified points (test set projected onto its
#own first two principal components)
pr.out.test<-prcomp(test[,-1],scale.=TRUE)
plot(pr.out.test$x[,1:2], col=as.numeric(test[,1]), pch=as.numeric(test.hpo[[2]]),main="Misclassified Points - SVM")
text(pr.out.test$x["69",1]-0.35,pr.out.test$x["69",2]-0.25, label=69, col='darkblue')
text(pr.out.test$x["74",1]-0.35,pr.out.test$x["74",2]-0.25, label=74, col='darkblue')
legend(-12, 9, legend=c("True Benign","True Malignant","False Malignant", "False Benign"), col=c("black","red","black","red"), pch=c(1,2,2,1))
# ROC curve
test.pred = predict(best.svm, test, prob=TRUE)
test.predprob = attr(test.pred, 'probabilities')[,1]
roc.obj1 = roc(test$Diag, test.predprob)
ggroc(roc.obj1)
auc(roc.obj1)
# Also test the best performing dimension reduction model
best.dimred = svm(Diag~max_concpts+max_perim+max_smooth+fractal_dim+smoothness+radius+symmetry+max_area
+concave_points+max_concavity+compactness+se_concpts+max_symm+se_fractdim+se_radius+max_text,
data=train, prob=TRUE, cost=1, scale=TRUE, kernel='linear')
test.dimred=eval_perf(best.dimred, test)
test.dimred[1] #Acc: 98.2%
plot_conf(test.dimred[[2]], test$Diag, 'SVM (DimRed) Test Confusion Matrix')
#Identify which points were misclassified
test.dimred[[2]][test.dimred[[2]]!=test$Diag]
#Attempt to visualize the misclassified points
pr.out.test<-prcomp(test[,-1],scale.=TRUE)
plot(pr.out.test$x[,1:2], col=as.numeric(test[,1]), pch=as.numeric(test.dimred[[2]]),main="Misclassified Points - SVM")
text(pr.out.test$x["69",1]-0.35,pr.out.test$x["69",2]-0.25, label=69, col='darkblue')
text(pr.out.test$x["74",1]-0.35,pr.out.test$x["74",2]-0.25, label=74, col='darkblue')
legend(-12, 9, legend=c("True Benign","True Malignant","False Malignant", "False Benign"), col=c("black","red","black","red"), pch=c(1,2,2,1))
# ROC curve
dimred.pred = predict(best.dimred, test, prob=TRUE)
# BUG FIX: probabilities were read from test.pred (the all-feature model),
# so roc.obj2/auc simply duplicated roc.obj1. Use the dim-reduced model's
# own predictions.
dimred.predprob = attr(dimred.pred, 'probabilities')[,1]
roc.obj2 = roc(test$Diag, dimred.predprob)
ggroc(roc.obj2)
auc(roc.obj2)
####################################################################
# Neural Network
####################################################################
library(tensorflow)
# NOTE(review): installing TensorFlow on every run is wasteful; guard this
# or run it once interactively.
install_tensorflow(method='conda', envname='r-reticulate')
library(keras)
library(tensorflow)
library(dplyr)
library(tfdatasets) # needed for 'feature_spec' function
# Split into feature matrices / label vectors for keras.
x_train = train[,-1]
y_train = train[,1]
x_test = test[,-1]
y_test = test[,1]
x_train = as.matrix(x_train)
dimnames(x_train) <- NULL
x_test = as.matrix(x_test)
dimnames(x_test) <- NULL
# Normalize Independent variables
x_train = normalize(x_train, axis=2)
x_test = normalize(x_test, axis=2)
# One-hot Encode Target Variable
# Keep the 0/1 integer labels too, for confusion tables later.
y_test_vec = as.numeric(y_test)-1
y_train_vec = as.numeric(y_train)-1
y_train = to_categorical(as.numeric(y_train)-1)
y_test = to_categorical(as.numeric(y_test)-1)
y_train = as.matrix(y_train)
y_test = as.matrix(y_test)
# Progress callback: one dot per epoch, newline every 80 epochs.
print_dot_callback <- callback_lambda(
on_epoch_end = function(epoch, logs) {
if (epoch %% 80 == 0) cat("\n")
cat(".")
}
)
# Build, compile, and fit a dense feed-forward network.
# layers: integer vector; one relu hidden layer per element, with that many units.
# Reads x_train / y_train from the enclosing environment (NOTE(review):
# consider passing them as arguments).
# Returns list(model, training score, fit history).
train_model <- function(layers){
# Initialize Model
model <- keras_model_sequential()
# Add layers from function input
start = TRUE
for(i in layers) {
if (start==TRUE) {
# First hidden layer fixes the 30-feature input shape.
model %>% layer_dense(units = i, activation = 'relu', input_shape = c(30))
prev_layer = i
start = FALSE
} else {
model %>% layer_dense(units = i, activation = 'relu', input_shape = c(prev_layer))
prev_layer = i
}
}
#Add last layer
# Two-class softmax output.
model %>% layer_dense(units = 2, activation = 'softmax')
model %>% compile(
loss = 'binary_crossentropy',
optimizer = 'adam',
#optimizer = optimizer_adam(lr=0.01),
metrics = 'accuracy'
)
# Fit the model and
# Store the fitting history in `history`
history <- model %>% fit(
x_train,
y_train,
epochs = 400,
batch_size = 5,
validation_split = 0.2,
verbose=0,
callbacks = list(print_dot_callback)
)
train_score <- model %>% evaluate(x_train, y_train, batch_size=5)
return(list(model,train_score,history))
}
# TRAIN
# NOTE(review): the notes below conclude 64/32/16 is the chosen setup, but
# c(64,32,32) is used here — confirm which architecture is intended.
layers = c(64,32,32)
res = train_model(layers)
plot(res[[3]])
# Hand-recorded validation accuracies from architecture experiments:
#relu:
#64/32/16 : 94.3
#64/32/32 : 93.2
#64/32 : 92.8
#64/64/32: 88.6,93.9
#Epochs 300:
#64/32/16 : 94.9
#Epochs 400: (batch=5, lr=default from Adam, (64/32/16), binary_crossentropy)
#32/32/16 : 95.6
#64/32/16 : 96.0 #Choose this setup
#Epochs 500:
#64/32/16 : 95.6
#sigmoid/400epochs:
#64/32/16 : 91.7
#64/32/32 : 92.3
classes <- res[[1]] %>% predict_classes(x_train, batch_size=5)
table(y_train_vec, classes)
# TEST
score <- res[[1]] %>% evaluate(x_test, y_test, batch_size = 5) # 98.2%
classes <- res[[1]] %>% predict_classes(x_test, batch_size=5)
table(y_test_vec, classes)
plot_conf(classes, y_test_vec, "Neural Network Test Confusion Matrix")
# Identify Misclassified Points
test[classes!=y_test_vec,] #15, 482
# PCA projection of the test set (pr.out.test computed in the SVM section).
plot(pr.out.test$x[,1:2], col=as.numeric(test[,1]), pch=as.numeric(classes)+1, main="Misclassified Points - NN")
text(pr.out.test$x["15",1]-0.35,pr.out.test$x["15",2]-0.25, col='darkblue', label=15)
text(pr.out.test$x["482",1]-0.45,pr.out.test$x["482",2]-0.35, col='darkblue', label=482)
legend(-12, 9, legend=c("True Benign","True Malignant","False Malignant", "False Benign"), col=c("black","red","black","red"), pch=c(1,2,2,1))
|
d5aa73214c426b2816061360d8e5d17161ff8859
|
40d361233b16f1427816d231d84f0678ca0916c7
|
/plot4.R
|
df7e4cec61f0ab926ed1bc10ade882b1ed147e58
|
[] |
no_license
|
russ-macbeth/ExData_Plotting1
|
9d096cbef5d3f3a4b9e686780b1f673036bc2c57
|
388d2eb917c3a62aecbcc52eaa0b26c3e1d78e12
|
refs/heads/master
| 2020-02-26T16:12:27.086632
| 2015-06-07T07:11:28
| 2015-06-07T07:11:28
| 36,980,154
| 0
| 0
| null | 2015-06-06T13:00:13
| 2015-06-06T13:00:12
| null |
UTF-8
|
R
| false
| false
| 1,785
|
r
|
plot4.R
|
## Plot 4 - script to produce the fourth (2x2 panel) plot of the Exploratory
## Data Analysis course project 1
## (fixed: the old header incorrectly said "the first plot")
## Load libraries
library(data.table)
## Download and unzip the data file
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destination <- "file.zip"
download.file(url, destination, method="curl")
# Read directly from inside the zip archive; fields are ';'-separated.
data <- read.table(unz(destination, "household_power_consumption.txt"), sep = ";", header=TRUE)
## Convert date and time fields, create date time field
data <- cbind(datetime = paste(data$Date,data$Time),data)
data$datetime <- strptime(data$datetime, format = "%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
## Subset data to only dates 1/2/2007 and 2/2/2007
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
df <- subset(data, data$Date == date1 | data$Date == date2)
## Converts last columns [ , 4:10] to numerics (non-numeric entries become NA)
for (i in 4:ncol(df)){
  df[,i] <- as.numeric(as.character(df[,i]))
}
## Create Plots
## Set parameters for 2x2 matrix
png(file = "plot4.png")
par(mfrow = c(2, 2))
## Plot 1, top left
plot(df$datetime, df$Global_active_power, type = "l", ylab = "Global Active Power", xlab= "")
## Plot 2, top right
## BUG FIX: removed a stray empty argument (", ,") from this call.
with(df, plot(df$datetime, df$Voltage, type = "l", ylab = "Voltage", xlab = "datetime"))
## Plot 3, bottom left
with(df, plot(df$datetime, df$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab= ""))
lines(df$datetime, df$Sub_metering_2, col = "red")
lines(df$datetime, df$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n", legend = colnames(df[,8:10]))
## Plot 4, bottom right
plot(df$datetime, df$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
300c8ab1c102be63a1837ad52677d3b86d7c2be8
|
c9b97474d53e4539301edff547a1e713171ac39d
|
/R/modeldb_metrics_summary.R
|
e9546166cb9c16ffc5d0e94a2af931c06d921088
|
[] |
no_license
|
botchkoAI/VertaProjectService
|
ae04865cbc8ea745c3241a92ee0dc13c4cad250f
|
0f69f3389a1b2d9c03ce852755e232ac285b6d86
|
refs/heads/main
| 2023-06-02T02:06:25.903333
| 2021-06-16T22:21:48
| 2021-06-16T22:21:48
| 377,641,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,308
|
r
|
modeldb_metrics_summary.R
|
# modeldb/ProjectService.proto
#
# No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
#
# The version of the OpenAPI document: version not set
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title ModeldbMetricsSummary
#'
#' @description ModeldbMetricsSummary Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field key character [optional]
#'
#' @field min_value numeric [optional]
#'
#' @field max_value numeric [optional]
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ModeldbMetricsSummary <- R6::R6Class(
  'ModeldbMetricsSummary',
  public = list(
    `key` = NULL,
    `min_value` = NULL,
    `max_value` = NULL,

    # Create a new ModeldbMetricsSummary. All fields are optional; when
    # supplied each must be a length-1 scalar of the documented type.
    initialize = function(
      `key`=NULL, `min_value`=NULL, `max_value`=NULL, ...
    ) {
      local.optional.var <- list(...)
      if (!is.null(`key`)) {
        stopifnot(is.character(`key`), length(`key`) == 1)
        self$`key` <- `key`
      }
      if (!is.null(`min_value`)) {
        stopifnot(is.numeric(`min_value`), length(`min_value`) == 1)
        self$`min_value` <- `min_value`
      }
      if (!is.null(`max_value`)) {
        stopifnot(is.numeric(`max_value`), length(`max_value`) == 1)
        self$`max_value` <- `max_value`
      }
    },

    # Return the non-NULL fields as a named list suitable for
    # jsonlite::toJSON().
    toJSON = function() {
      ModeldbMetricsSummaryObject <- list()
      if (!is.null(self$`key`)) {
        ModeldbMetricsSummaryObject[['key']] <-
          self$`key`
      }
      if (!is.null(self$`min_value`)) {
        ModeldbMetricsSummaryObject[['min_value']] <-
          self$`min_value`
      }
      if (!is.null(self$`max_value`)) {
        ModeldbMetricsSummaryObject[['max_value']] <-
          self$`max_value`
      }
      ModeldbMetricsSummaryObject
    },

    # Populate this object from a JSON string; absent fields are left as-is.
    fromJSON = function(ModeldbMetricsSummaryJson) {
      ModeldbMetricsSummaryObject <- jsonlite::fromJSON(ModeldbMetricsSummaryJson)
      if (!is.null(ModeldbMetricsSummaryObject$`key`)) {
        self$`key` <- ModeldbMetricsSummaryObject$`key`
      }
      if (!is.null(ModeldbMetricsSummaryObject$`min_value`)) {
        self$`min_value` <- ModeldbMetricsSummaryObject$`min_value`
      }
      if (!is.null(ModeldbMetricsSummaryObject$`max_value`)) {
        self$`max_value` <- ModeldbMetricsSummaryObject$`max_value`
      }
      self
    },

    # Hand-build a JSON string from the non-NULL fields.
    # BUG FIX: the generated code used sprintf("%d", ...) for the numeric
    # fields, which is an error in R for double values
    # ("invalid format '%d'; use format %f, %e, %g or %a for numeric
    # objects"). "%s" formats both integer and double values correctly and
    # emits them unquoted, as JSON numbers require.
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`key`)) {
          sprintf('"key": "%s"', self$`key`)
        },
        if (!is.null(self$`min_value`)) {
          sprintf('"min_value": %s', self$`min_value`)
        },
        if (!is.null(self$`max_value`)) {
          sprintf('"max_value": %s', self$`max_value`)
        }
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },

    # Populate this object from a JSON string without type validation.
    fromJSONString = function(ModeldbMetricsSummaryJson) {
      ModeldbMetricsSummaryObject <- jsonlite::fromJSON(ModeldbMetricsSummaryJson)
      self$`key` <- ModeldbMetricsSummaryObject$`key`
      self$`min_value` <- ModeldbMetricsSummaryObject$`min_value`
      self$`max_value` <- ModeldbMetricsSummaryObject$`max_value`
      self
    }
  )
)
|
780018b05403f328441e35231d0a75dd1551249a
|
0ca11666bce33a12e0e33a972d53438d0dc3674c
|
/man/spss.format.concat.Rd
|
cd71f5e8d5c18f04b077dbf7ed9e377fb644c582
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
alixlahuec/syntaxr
|
8305ac297632eda8c42325f0363c517f1581d47d
|
252646cb70546f5f949bebec84482dff9f442cfa
|
refs/heads/master
| 2022-11-26T20:50:47.540924
| 2020-08-04T21:33:20
| 2020-08-04T21:33:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 564
|
rd
|
spss.format.concat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spss.format.concat.R
\name{spss.format.concat}
\alias{spss.format.concat}
\title{Generate SPSS 'concat()' syntax for two (sets of) variables.}
\usage{
spss.format.concat(var1, var2)
}
\arguments{
\item{var1}{the first argument for concat().}
\item{var2}{the second argument for concat().}
}
\description{
Generate SPSS 'concat()' syntax for two (sets of) variables.
}
\examples{
spss.format.concat(c("dob", "income"), c("dob_f", "income_f"))
spss.format.concat("income", "income_f")
}
|
f97d7b87df0d87c4305354a1df12f9d0710a79fb
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8789_2/rinput.R
|
de45378b9f4dabe617a9eac9330465bdc607f339
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
## Unroot the phylogenetic tree stored in 8789_2.txt and write the result
## to 8789_2_unrooted.txt (both in Newick format).
library(ape)

phylo_tree <- read.tree("8789_2.txt")
write.tree(unroot(phylo_tree), file = "8789_2_unrooted.txt")
|
957075dad6eb4a99dfe187ccfe2d94076bd179c4
|
6593fa55f4216c069a8aa53d2475838b9a3db1c7
|
/munge/analisis_mobike3.R
|
e2e9d944146ba3fb3580315de2d08a34c31751cc
|
[] |
no_license
|
ecastillomon/ecoVIZ
|
d6eef354fd80ddcdf266c507e8a61872b9c2cce3
|
6d30494dd4f878b53bf046aff29ec67ba736451a
|
refs/heads/master
| 2020-08-21T22:00:42.174077
| 2020-07-17T00:42:46
| 2020-07-17T00:42:46
| 216,253,783
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,420
|
r
|
analisis_mobike3.R
|
## Mobike CDMX trip analysis: read trips, snap origins/destinations to the
## nearest street intersection, export shapefiles/pickles, build a comparable
## Ecobici table, animate daily flows, and cluster intersections with HDBSCAN.
## NOTE(review): this script also calls openxlsx, ggplot2, tidyr, lubridate,
## gganimate (transition_time/animate/anim_save), jsonlite and reticulate
## without library() calls for them — it relies on those packages being
## attached/installed by an earlier step of the project.
library(dbscan)
library(dplyr)
library(sf)

# mobike=openxlsx::read.xlsx("data/Viajes CDMX con Colonias_Mobike.xlsx")
## Read the trip spreadsheet; convert the Excel serial values in the two
## timestamp columns to POSIX datetimes.
dat=mobike=openxlsx::read.xlsx("data/Viajes CDMX con Colonias_Mobike.xlsx",detectDates = TRUE) %>%
  mutate_at(c("start_time_local","end_time_local"), function(x)openxlsx::convertToDateTime(x) )

## Trip origins as an sf point layer (EPSG:4326).
## NOTE(review): coords are given as c("lat_ini", "lon_ini"); sf treats the
## first name as x (longitude) — confirm the columns really hold lon first.
df_salidas=dat%>%
  st_as_sf(coords = c("lat_ini", "lon_ini")) %>% # set coordinates
  # select(-matches("coordenadas")) %>%
  st_set_crs(4326)
## Trip destinations as an sf point layer (EPSG:4326).
df_llegadas=dat%>%
  st_as_sf(coords = c("lat_fin", "lon_fin")) %>% # set coordinates
  select(-matches("coordenadas")) %>%
  st_set_crs(4326)
## Quick visual check of the origin points.
df_salidas %>%
  ggplot() +scale_color_steps2()+
  theme_light()+
  geom_sf(alpha=.5)

##Polígono de control para no agarrar errores de captura
## (bounding polygon used later to discard mis-captured coordinates)
a_poly = st_polygon(list(rbind(c(-99.5, 20.5), c(-98.5, 20.5), c(-98.5, 18.5),c(-99.5, 18.5) ,c(-99.5, 20.5))))
a = st_sfc(a_poly) %>% st_set_crs(4326) %>% st_as_sf(x =data.frame(pais= "mexico") )

##Create sf object with intersections of any kind of road
df_vial=readRDS("data/vialidades_cdmx_inegi.RDS")
## Self-intersect the road network and keep only crossings of two
## different streets.
sel = st_intersection(df_vial, df_vial) %>% filter(NOMVIAL!=NOMVIAL.1)
## Rename the duplicated ".1" columns to "_calle_2", tag the originals as
## "_calle_1", build composite intersection keys, and keep only point
## geometries (actual crossings).
df_cruces=sel %>% rename_at(vars(matches(".1$")), function(x){gsub(".1$","_calle_2",x)}) %>%
  rename_at(vars(-matches("_calle_2"), -geometry), function(x){paste0(x,"_calle_1")}) %>%
  tidyr::unite("NOM_INTER", c(NOMVIAL_calle_1,NOMVIAL_calle_2), remove=FALSE, sep="--") %>%
  tidyr::unite("CVE_VIAL_INTER", c(CVE_VIAL_calle_1,CVE_VIAL_calle_2), remove=FALSE, sep="--") %>%
  tidyr::unite("CVE_SEG_INTER", c(CVE_SEG_calle_1,CVE_SEG_calle_2), remove=FALSE, sep="--") %>%
  tidyr::unite("TIPO_INTER", c(TIPOVIAL_calle_1,SENTIDO_calle_1,TIPOVIAL_calle_2,SENTIDO_calle_2), remove=FALSE, sep="--") %>%
  select(matches("INTER"), CVE_ENT=CVE_ENT_calle_1) %>% filter(grepl("POINT", st_geometry_type(geometry))) %>%
  st_cast("POINT")
# saveRDS(df_cruces,"data/df_cruces.RDS")
# reticulate::py_save_object(df_cruces,"data/df_cruces.pickle")

## Rebuild the control polygon, this time projected to the road network CRS.
a_poly = st_polygon(list(rbind(c(-99.5, 20.5), c(-98.5, 20.5), c(-98.5, 18.5),c(-99.5, 18.5) ,c(-99.5, 20.5))))
a = st_sfc(a_poly) %>% st_set_crs(4326) %>% st_as_sf(x =data.frame(pais= "mexico") ) %>% st_transform(st_crs(df_vial))

## Snap each trip origin to its nearest intersection and drop points
## falling outside the control polygon (pais is NA after the spatial join).
df_salidas_cruces=df_salidas %>%
  st_transform(st_crs(df_vial)) %>%
  st_join(df_cruces, st_nearest_feature ) %>% st_join(a) %>% filter(!is.na(pais))
# saveRDS(df_salidas_cruces,"data/df_salidas_cruces.RDS")
# reticulate::py_save_object(df_salidas_cruces,"data/df_salidas_cruces.pickle")
## Same snapping for trip destinations.
df_llegadas_cruces=df_llegadas %>%
  st_transform(st_crs(df_vial)) %>%
  st_join(df_cruces, st_nearest_feature ) %>% st_join(a) %>% filter(!is.na(pais))

## Export the snapped layers and the intersection layer as shapefiles.
df_salidas_cruces %>% st_write("data/shapefiles/df_salidas_cruces.shp")
df_llegadas_cruces %>% st_write("data/shapefiles/df_llegadas_cruces.shp")
df_cruces %>% st_write("data/shapefiles/df_cruces.shp")

## Flat (non-spatial) table joining each trip's origin and destination
## intersection attributes, keyed by bike, duration and distance.
mat_simplificada=dat %>% distinct(bike_id, duration,distance) %>%
  # head(1) %>%
  left_join(df_salidas_cruces %>% select(bike_id,duration, distance,start_time_local,
                                         end_time_local,duration,
                                         12:16) %>% sf::st_drop_geometry(),
            by=c("bike_id","duration","distance")) %>%
  left_join(df_llegadas_cruces %>% select(bike_id,duration, distance,start_time_local,
                                          end_time_local,duration,
                                          12:16) %>% sf::st_drop_geometry(),
            by=c("bike_id","duration","distance"), suffix=c("_salidas","_llegadas"))
## NOTE(review): the save below is duplicated — the second call is redundant.
reticulate::py_save_object(mat_simplificada,"data/df_mobike_cruces.pickle")
reticulate::py_save_object(mat_simplificada,"data/df_mobike_cruces.pickle")

## Visual check of the snapped destinations.
df_llegadas_cruces %>%
  ggplot(aes( )) +scale_color_steps2()+
  theme_light()+
  geom_sf(alpha=.5)

## Ecobici trips (May-June 2019), restricted to the Mobike time window.
ecobici=read.csv("data/2019-06.csv",stringsAsFactors = F) %>%
  bind_rows({read.csv("data/2019-05.csv",stringsAsFactors = F) }) %>%
  mutate(hora_retiro=as.POSIXct(paste0(Fecha_Retiro," ",Hora_Retiro),
                                tryFormats = c("%d/%m/%Y %H:%M:%OS")),
         hora_arribo=as.POSIXct(paste0(Fecha_Arribo," ",Hora_Arribo),
                                tryFormats = c("%d/%m/%Y %H:%M:%OS"))) %>%
  filter(hora_retiro<=max(dat$start_time_local) & hora_retiro>=min(dat$start_time_local))
## Daily departures and arrivals per Ecobici station.
## NOTE(review): the arrivals branch also dates trips by hora_retiro
## (departure time) rather than hora_arribo — confirm this is intentional.
df_ecobici=ecobici %>%
  mutate(dia_viaje=as.Date(hora_retiro)) %>%
  group_by(estacion=Ciclo_Estacion_Retiro,dia_viaje) %>%
  summarise(n_salidas=n()) %>% ungroup() %>%
  bind_rows({
    ecobici %>%
      mutate(dia_viaje=as.Date(hora_retiro)) %>%
      group_by(estacion=Ciclo_Estacion_Arribo,dia_viaje) %>%
      summarise(n_llegadas=n()) %>% ungroup()
  }) %>% group_by(estacion,dia_viaje) %>% summarise(n_salidas=sum(n_salidas,na.rm = T),
                                                    n_llegadas=sum(n_llegadas,na.rm = T)) %>% ungroup()
## Ecobici station locations, projected to the road network CRS.
estaciones_ecobici=read.csv("data/estaciones.csv",stringsAsFactors = F)%>%
  st_as_sf(coords = c("location.lon", "location.lat"), remove=F) %>%
  st_set_crs(4326) %>%
  st_transform(st_crs(df_vial))
## Join daily counts onto stations and clip to the control polygon.
df_eco=estaciones_ecobici %>% left_join(df_ecobici, by=c("id"="estacion")) %>%
  mutate(tipo="ecobici") %>% mutate(viajes=n_salidas+n_llegadas) %>%
  st_join(a) %>% filter(!is.na(pais))
df_eco %>%
  st_write("data/shapefiles/df_ecobici.shp",overwrite=T, append=F)
## Drop geometry and persist the Ecobici table for the Python side.
df_eco %>% select(id,name,address, location.lat,
                  location.lon, dia_viaje, n_salidas,n_llegadas, tipo, viajes) %>%
  mutate(dia_viaje) %>% filter(!is.na(dia_viaje)) %>%
  st_drop_geometry() ->df_pickle
reticulate::py_save_object(df_pickle ,"data/df_ecobici.pickle")

## Animated map of departures per intersection in 15-minute bins.
## NOTE(review): the lambda floors start_time_local instead of its
## argument x — verify the intended binning for both *_time_local columns.
df_salidas_cruces %>%
  # filter()
  mutate_at(vars(matches("time_local")), function(x){
    as.POSIXct(x) %>%
      lubridate::floor_date(start_time_local,unit ="15 minutes")
  }) %>%
  group_by(CVE_VIAL_INTER,start_time_local) %>% summarise(n_salidas=n()) %>% ungroup() %>%
  # tidyr::pivot_longer(c(n_salidas,n_llegadas)) %>% st_sf() %>%
  ggplot(aes(color=n_salidas, )) +scale_color_steps2()+
  theme_light()+
  geom_sf(alpha=.5)+
  transition_time(start_time_local,range = c(as.POSIXct("2019-06-11 07:00:18"),
                                             as.POSIXct( "2019-06-11 23:00:18"))) +
  labs(title = 'Time: {frame_time}', x = '', y = '') +
  ease_aes('linear')->g1
animate(g1, renderer = gifski_renderer())
anim_save("output/git_test.gif")

## Animated map of the raw departure points for the same day.
df_salidas_cruces %>%
  # filter()
  mutate_at(vars(matches("time_local")), function(x){
    as.POSIXct(x) %>%
      lubridate::floor_date(start_time_local,unit ="15 minutes")
  }) %>%
  # mutate(dummy=TRUE) %>%
  ggplot(aes(color=pais )) +
  # geom_point()+
  # scale_color_steps2()+
  theme_light()+
  geom_sf(alpha=.5)+
  transition_time(start_time_local,range = c(as.POSIXct("2019-06-11 07:00:18"),
                                             as.POSIXct( "2019-06-11 23:00:18"))) +
  labs(title = 'Time: {frame_time}', x = '', y = '') +
  ease_aes('linear')->g2
animate(g2, renderer = gifski_renderer())
anim_save("output/daily_trips.gif")

##Plots varios
# mexico %>%
#   # filter(!is.na(CVE_INTER)) %>%
#   group_by(CVE_VIAL_INTER) %>% summarise(n_salidas=n()) %>% ungroup() %>%
#   filter(n_salidas>2) %>%
#   select(n_salidas) %>% plot()
# df_salidas %>%
#   mutate(dia_viaje=as.Date(start_time_local)) %>%
#   group_by(CVE_VIAL_INTER,dia_viaje) %>% summarise(n_salidas=n()) %>%
#   ungroup() %>%
#   filter(n_salidas>=2) %>%
#   ggplot(aes(color=n_salidas)) +scale_color_steps2()+
#   theme_light()+
#   geom_sf(alpha=.5)+facet_wrap(dia_viaje~.)

## HDBSCAN clustering of locations by pairwise distance.
## NOTE(review): `df_diarias` is not defined anywhere in this script — it is
## presumably created by an earlier step; confirm before running standalone.
dist=sf::st_distance(df_diarias)
dbmodel1=hdbscan(dist, minPts = 5)
saveRDS(dbmodel1,"data/dbmodel1_ecoVIZ.RDS")
# dbmodel1=readRDS("data/dbmodel1_ecoVIZ.RDS")
df_clust=df_diarias %>% mutate(cluster=dbmodel1$cluster)
df_clust %>% ggplot(aes(color=factor(cluster))) +
  theme(legend.position = "none")+ geom_sf()

## Attach cluster labels back to the snapped origins and export for Python.
df_salidas_cruces %>%
  left_join(
    {df_clust%>% select(CVE_VIAL_INTER,cluster) %>% st_drop_geometry() }, by=c("CVE_VIAL_INTER")) %>%
  # st_drop_geometry() %>%
  reticulate::py_save_object("data/df_clusters.pickle")
## Spatial join of clusters onto raw origins; export both tables as JSON.
df_salidas %>%
  st_join(
    {df_clust%>% select(-CVE_VIAL_INTER) %>%
        st_set_crs( st_crs(df_vial) )}) %>%
  st_drop_geometry() %>%
  jsonlite::toJSON() %>%
  jsonlite::write_json("data/df_clusters.json")
df_vial %>%
  st_drop_geometry() %>%
  jsonlite::toJSON() %>%
  jsonlite::write_json("data/df_cruces.json")
|
927309085bc59183ffd9e73d4bc003056ae753d6
|
df8310cc55b81543af506f9b5b7b10dd2c3584b2
|
/R/data.R
|
0c1b95f5e22b8f0df1c89407b986b6c319288b32
|
[] |
no_license
|
fionarhuang/treeAGG
|
0ea35361a226dee561d60d5e0e0e9274e426c3d2
|
ca762c5db517bb6f50e816786cbfe853117fd095
|
refs/heads/master
| 2020-03-30T19:55:32.782546
| 2018-10-04T10:37:34
| 2018-10-04T10:37:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,004
|
r
|
data.R
|
#' A simulated phylogenetic tree with 10 tips and 9 internal nodes
#'
#' A random phylo object created using the function \code{\link[ape]{rtree}}
#' @importFrom ape rtree
#' @format A phylo object with 10 tips and 9 internal nodes:
#' \describe{
#' \item{Tip labels}{t1, t2, ..., t10.}
#' \item{Node labels}{Node_11, Node_12, ..., Node_19}
#' }
#'
#'
"tinyTree"
#' A simulated phylogenetic tree with 50 tips and 49 internal nodes
#'
#' A random phylo object created using the function \code{\link[ape]{rtree}}
#' @importFrom ape rtree
#' @format A phylo object with 50 tips and 49 internal nodes:
#' \describe{
#' \item{Tip labels}{t1, t2, ..., t50.}
#' \item{Node labels}{NULL}
#' }
"exTree"
#' A simulated phylogenetic tree with 800 tips and 799 internal nodes
#'
#' A random phylo object created using the function \code{\link[ape]{rtree}}
#' @importFrom ape rtree
#' @format A phylo object with 800 tips and 799 internal nodes:
#' \describe{
#' \item{Tip labels}{t1, t2, ..., t800.}
#' \item{Node labels}{NULL}
#' }
"bigTree"
#' AML-sim data
#'
#' A semi-simulated count table generated by computationally "spiking in" small
#' percentages of AML blast cells into healthy BMMCs (created by Lukas Weber)
#'
#' @format A data frame of cell counts collected from healthy and
#' cytogenetically normal (CN) samples:
#' \describe{
#' \item{cluster}{The cluster labels}
#' \item{p_vals}{The nominal p values}
#' \item{p_adj}{The adjusted p values}
#' \item{n_cells}{The number of cells in the clusters}
#' \item{n_spikein}{The number of spike-in cells}
#' \item{prop_spikein}{The proportion of spike-in cells}
#' \item{true}{Whether the cluster is truly differential abundant}
#' \item{healthy_H1}{The counts from the first healthy sample}
#' \item{healthy_H2}{The counts from the second healthy sample}
#' \item{healthy_H3}{The counts from the third healthy sample}
#' \item{healthy_H4}{The counts from the fourth healthy sample}
#' \item{healthy_H5}{The counts from the fifth healthy sample}
#' \item{CN_H1}{The counts from the first CN sample}
#' \item{CN_H2}{The counts from the second CN sample}
#' \item{CN_H3}{The counts from the third CN sample}
#' \item{CN_H4}{The counts from the fourth CN sample}
#' \item{CN_H5}{The counts from the fifth CN sample}
#' }
"res_table"
#' Count table of the AML-sim data
#'
#' A semi-simulated count table generated by computationally "spiking in" small
#' percentages of AML blast cells into healthy BMMCs (created by Lukas Weber)
#'
#' @format A data frame of cell counts collected from healthy and
#' cytogenetically normal (CN) samples:
#' \describe{
#' \item{healthy_H1}{The counts from the first healthy sample}
#' \item{healthy_H2}{The counts from the second healthy sample}
#' \item{healthy_H3}{The counts from the third healthy sample}
#' \item{healthy_H4}{The counts from the fourth healthy sample}
#' \item{healthy_H5}{The counts from the fifth healthy sample}
#' \item{CN_H1}{The counts from the first CN sample}
#' \item{CN_H2}{The counts from the second CN sample}
#' \item{CN_H3}{The counts from the third CN sample}
#' \item{CN_H4}{The counts from the fourth CN sample}
#' \item{CN_H5}{The counts from the fifth CN sample}
#' }
"cytofCount"
#' The information about clusters of the AML-sim data
#'
#' A data frame provides information about clusters. Each cluster corresponds to
#' a row in the \code{\link{cytofCount}}.
#'
#' @format A data frame of cluster information:
#' \describe{
#' \item{cluster}{The name of cluster}
#' \item{n_cells}{The number of cells in the cluster}
#' \item{n_spikein}{The number of spike-in cells in the cluster}
#' \item{prop_spikein}{The proportion of spike-in cells in the cluster}
#' \item{truth}{whether the cluster is differential abundant between the
#' healthy sample and the CN sample}
#' }
"cytofCluster"
#' The information about samples of the AML-sim data
#'
#' A data frame provides information about samples. Each sample corresponds to
#' a column in the \code{\link{cytofCount}}.
#'
#' @format 5 samples are collected from 5 healthy subjects. Each sample is
#' equally split into two subsamples, one of which spike in CN cells to
#' simulate samples from patients.
#' \describe{
#' \item{group}{The group that a sample collected from: healthy or CN.}
#' \item{patient}{The patient that the sample collected from. }
#' }
"cytofSample"
#' The true signal in the AML-sim data
#'
#' This data provides the differential abundance information of clusters in the
#' AML-sim data
#'
#' @format A data frame of two columns:
#' \describe{
#' \item{cluster}{The cluster label}
#' \item{true}{Whether the cluster is truly differential abundant}
#' }
"cytofTruth"
#' The hierarchical tree of the cells in the AML-sim data
#'
#' A hclust object which records the hierarchical structure of the cells in the
#' AML-sim data.
#'
#' @format A hclust object with 900 leaves. Each leaf is a cell cluster.
"cytofTree"
|
fb2a0dc753f2bc79453c59bdb0dd809f0dce9d39
|
bed5d80eca97175a1702a2a924e407f880cbe502
|
/02_functions.R
|
fde663c4c82e7b25540c0c84c87ccc97ee08d1f2
|
[] |
no_license
|
Asiwome/phosp_report
|
3a020d8dae085789da06b26ed9757db6f72bed7a
|
d8489cd78d631871d92ba08dc2a9b96d13dda3f9
|
refs/heads/main
| 2023-06-12T00:54:06.280792
| 2021-07-05T07:40:05
| 2021-07-05T07:40:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,022
|
r
|
02_functions.R
|
# PHOSP-COVID analysis: FUNCTIONS
# Any bespoke function is placed here.
# Centre for Medical Informatics, Usher Institute, University of Edinburgh 2020
# Functions require library(tidyverse), requires() nor :: not currently written in.
# 1. ggplot templates / extraction functions
# 2. Table defaults
# 3. Prognostic scoring
# ggplot templates -------------------------------------------------------------------------
# Lisa's lovely Lancet-style ggplot template (Lisa Norman) ---------------------------------------------
# Lancet-style ggplot constructor (Lisa Norman's template).
#
# Behaves exactly like ggplot2::ggplot() but comes pre-loaded with the
# journal styling: Blues brewer palettes, zero y-axis expansion, classic
# theme, Merriweather 10pt type, no panel grid, outside-placed strips, and
# a horizontal legend inset at the top-right with its title on top.
#
# Args:
#   ... forwarded unchanged to ggplot2::ggplot() (data, mapping, ...).
# Returns: a ggplot object ready for geom layers.
ggplot_lancet <- function(...) {
  base_plot <- ggplot2::ggplot(...)

  journal_theme <- theme(
    # Give the axis titles a little breathing room from the tick labels.
    axis.title.x = element_text(margin = margin(10, 0, 0, 0, "pt")),
    axis.title.y = element_text(margin = margin(0, 10, 0, 0, "pt")),
    axis.text = element_text(size = 10),
    axis.ticks.length = unit(5, "pt"),
    # No grid lines at all.
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    # Facet strips: no box, placed outside, left-aligned labels.
    strip.background = element_blank(),
    strip.placement = "outside",
    strip.text.x = element_text(hjust = 0,
                                face = "bold",
                                size = 10),
    strip.text.y = element_text(hjust = 0,
                                size = 10),
    text = element_text(
      family = "Merriweather",
      size = 10,
      face = "plain"
    ),
    # Horizontal legend tucked into the top-right corner of the panel.
    legend.position = c(1, 1),
    legend.justification = c(1, 1),
    legend.direction = "horizontal",
    legend.key.size = unit(10, "pt")
  )

  base_plot +
    scale_fill_brewer(palette = "Blues") +
    scale_colour_brewer(palette = "Blues") +
    scale_y_continuous(expand = c(0, 0)) +
    theme_classic() +
    journal_theme +
    guides(fill = guide_legend(title.position = "top"))
}
# Table defaults ---------------------------------------------------------------------------
# This makes table resize or continue over multiple pages in all output types
# PDF powered by kableExtra, Word by flextable
# Render a data frame as a table suited to the current knitr output format:
# PDF via kableExtra, HTML via kableExtra, otherwise Word via flextable.
#
# Args:
#   x             data frame to render.
#   caption       table caption (used in the LaTeX and HTML branches).
#   row.names     passed through to knitr::kable().
#   longtable     allow the table to break across pages.
#                 NOTE(review): in the LaTeX branch `longtable` is commented
#                 out below, so the argument currently has no effect there.
#   latex_options kableExtra styling options for the LaTeX branch.
#   font_size     font size for the LaTeX branch only.
#   ...           forwarded to knitr::kable() (LaTeX/HTML branches).
#
# NOTE(review): the hard-coded `align` vector covers up to 11 columns
# (first two left-aligned, the rest right-aligned); wider tables will
# need a longer vector. is_latex_output()/is_html_output() come from
# knitr, which must be attached by the calling document.
mytable = function(x, caption = "", row.names = FALSE, longtable = TRUE,
                   latex_options = c("hold_position"), font_size = 7.0, ...){
  # if not latex or html then else is Word
  if (is_latex_output()) {
    knitr::kable(x, row.names = row.names, align = c("l", "l", "r", "r", "r", "r", "r", "r", "r", "r", "r"),
                 booktabs = TRUE, caption = caption, #longtable = longtable,
                 linesep = "", ...) %>%
      kableExtra::kable_styling(font_size = font_size,
                                latex_options = latex_options)
  } else if(is_html_output()) {
    knitr::kable(x, row.names = row.names, align = c("l", "l", "r", "r", "r", "r", "r", "r", "r", "r", "r"),
                 booktabs = TRUE, caption = caption, longtable = longtable,
                 linesep = "", ...) %>%
      kableExtra::kable_styling(latex_options = c("scale_down", "hold_position"))
  } else {
    # Word output: autofit, then pin the first column width and header height.
    flextable::flextable(x) %>%
      flextable::autofit() %>%
      flextable::width(j = 1, width = 1.5) %>%
      flextable::height(i = 1, height = 0.5, part = "header")
  }
}
|
d441cf05506701982e6d71fac8242f19ba6b577a
|
434cd962c588c9e6c66ad986d25ce2871ed5f8b3
|
/R/EDA/ex01_import_and_tidy.R
|
7c4db446552b8ced9ea3912b8c83b52ae3470e52
|
[] |
no_license
|
yuexiaqianying/PPSN_TSP_DL
|
bfd76a760fcdcd7f728326bcf65c31140b61fd2c
|
239e1c50b1aa4be001b353119f971483ab8d2b8c
|
refs/heads/master
| 2023-03-17T16:04:44.154652
| 2020-06-15T12:17:02
| 2020-06-15T12:17:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 944
|
r
|
ex01_import_and_tidy.R
|
## IMPORT AND TIDYING
## ===
## Load both benchmark result sets, derive instance metadata from the file
## paths, drop redundant columns, and write one wide feature table.
library(tidyverse)
library(reshape2)
library(re)

load("data/raw/evolved-500/all-simple.Rda")
simple <- data
load("data/raw/evolved-500/all-sophisticated.Rda")
soph <- data
features <- rbind(simple, soph)

## Parse metadata out of the directory name (the two algorithms and the
## mutation operator) and the file name (instance size n, replication id).
dir_meta <- re::strParse(dirname(as.character(features$path)), split = "---",
                         which = 1:3, types = "ccc",
                         names = c("A1", "A2", "mutation"), append = FALSE)
file_meta <- re::strParse(basename(as.character(features$path)), ext = ".tsp",
                          split = "-", which = 1:2, types = "ii",
                          names = c("n", "repl"), append = FALSE)
dir_meta$type <- sprintf("easy for %s", dir_meta$A1)

features <- cbind(dir_meta, file_meta, features)
features <- select(features, -A1, -A2, -target)
## Drop raw cost columns; keep everything else.
features <- select(features, colnames(features)[!grepl("costs$", colnames(features))])
## Performance ratio, oriented so that >1 always means "hard for the
## favoured solver".
features$ratio <- ifelse(features$type == "easy for eax",
                         features$EAX.PQR10 / features$LKH.PQR10,
                         features$LKH.PQR10 / features$EAX.PQR10)

write.table(features, file = "data/all_features_wide.csv", quote = TRUE,
            row.names = FALSE, col.names = TRUE)
|
9935d754467cc5ab420f4da8fab449f003ab5c42
|
cafa52c05f020af31985cfd1b8e2c676ea6e3baa
|
/lib/Visualization/insertSize.r
|
850cfbdfaac2cfc0f8b0daedafed3b504875ac83
|
[
"Apache-2.0"
] |
permissive
|
shengqh/ngsperl
|
cd83cb158392bd809de5cbbeacbcfec2c6592cf6
|
9e418f5c4acff6de6f1f5e0f6eac7ead71661dc1
|
refs/heads/master
| 2023-07-10T22:51:46.530101
| 2023-06-30T14:53:50
| 2023-06-30T14:53:50
| 13,927,559
| 10
| 9
|
Apache-2.0
| 2018-09-07T15:52:27
| 2013-10-28T14:07:29
|
Perl
|
UTF-8
|
R
| false
| false
| 733
|
r
|
insertSize.r
|
## Plot the insert-size density of every BAM file listed in parSampleFile1
## (tab-separated: <bam path> <sample label>), one overlaid curve per sample,
## written to the PNG path in outFile. Both variables are supplied by the
## pipeline wrapper.
options(bitmapType='cairo')
library(Rsamtools)
library(ggplot2)

sample_files <- read.table(parSampleFile1, sep = "\t", header = FALSE,
                           stringsAsFactors = FALSE)

## One density data frame per BAM: absolute insert sizes below 1000 bp,
## evaluated on a fixed 0..1000 grid so the curves are comparable.
per_file_density <- apply(sample_files, 1, function(row) {
  bam <- scanBam(row[1], index = row[1], param = ScanBamParam(what = c("isize")))
  insert_sizes <- abs(as.numeric(bam[[1]][[1]]))
  insert_sizes <- insert_sizes[!is.na(insert_sizes) & insert_sizes < 1000]
  dens <- density(insert_sizes, from = 0, to = 1000, n = 250)
  data.frame(File = row[2], InsertSize = dens$x, Density = dens$y)
})
density_table <- do.call(rbind, per_file_density)

png(file = outFile, width = 2000, height = 2000, res = 300)
size_plot <- ggplot(density_table,
                    aes(InsertSize, Density, group = File, color = File)) +
  geom_line() +
  theme_bw() +
  scale_x_continuous(breaks = seq(0, 1000, 100))
print(size_plot)
dev.off()
|
71aff7f7c6bc7a0edef09bbeea63b9380ad52972
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/agricolae/examples/waller.test.Rd.R
|
f0e42e31d8f6a76f33a6153f2cf09dfad29f015e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
waller.test.Rd.R
|
library(agricolae)
### Name: waller.test
### Title: Multiple comparisons, Waller-Duncan
### Aliases: waller.test
### Keywords: htest

### ** Examples

## Fit a one-way ANOVA on the sweetpotato data and run the
## Waller-Duncan multiple-comparison test on the virus factor.
data(sweetpotato)
virus_model <- aov(yield ~ virus, data = sweetpotato)
out <- waller.test(virus_model, "virus", group = TRUE)

#startgraph
## 2x2 panel: three error-bar variants plus the group plot.
par(mfrow = c(2, 2))
# variation: SE is error standard
# variation: range is Max - Min
bar.err(out$means, variation = "SD", horiz = TRUE, xlim = c(0, 45),
        bar = FALSE, col = colors()[25], space = 2,
        main = "Standard deviation", las = 1)
bar.err(out$means, variation = "SE", horiz = FALSE, ylim = c(0, 45),
        bar = FALSE, col = colors()[15], space = 2, main = "SE", las = 1)
bar.err(out$means, variation = "range", ylim = c(0, 45), bar = FALSE,
        col = "green", space = 3, main = "Range = Max - Min", las = 1)
bar.group(out$groups, horiz = FALSE, ylim = c(0, 45), density = 8,
          col = "red", main = "Groups", las = 1)
#endgraph

# Old version HSD.test()
## Equivalent call supplying the error df, MSE and F statistic explicitly.
err_df <- df.residual(virus_model)
MSerror <- deviance(virus_model) / err_df
Fc <- anova(virus_model)["virus", 4]
out <- with(sweetpotato, waller.test(yield, virus, err_df, MSerror, Fc, group = TRUE))
print(out)
|
a11c05eaafcf08c6208211b3c6af6a00d71f05d1
|
138996e20519b168c3630950f802ac6bb83c0a77
|
/man/reach_loads.Rd
|
82054b864c9cf21f0b7a75e121e593f12f6de511
|
[] |
no_license
|
rodlammers/REMvisualizer
|
09f01d5de4e87d4b5bc26a7f5458885d1256bbc4
|
fc5c6ca2fafe3b2ffa5916e64583ce97c4b1d4d4
|
refs/heads/master
| 2021-06-04T01:54:10.863690
| 2020-01-22T20:41:10
| 2020-01-22T20:41:10
| 142,908,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,256
|
rd
|
reach_loads.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Visualization_Functions.R
\name{reach_loads}
\alias{reach_loads}
\title{Plots mass sediment loading rates by reach, with uncertainty}
\usage{
reach_loads(path = "", custom_sgn = NULL, MC_path = NULL, n_MC = 0,
units = "ton", prob = c(0.05, 0.95), print = FALSE, type = "sed",
use_files = TRUE)
}
\arguments{
\item{path}{Path to folder with model outputs}
\item{custom_sgn}{Specifies the direction each reach should be plotted (`-1`
is left, `1` is right)}
\item{MC_path}{Path to "MC Outputs" folder (only if different than `path`)}
\item{n_MC}{Number of Monte Carlo simulations}
\item{units}{Character specifying units to be used in plot ("kg", "ton", or
"1000 ton")}
\item{prob}{Numeric vector of percentiles of Monte Carlo results to plot in
addition to the median (defaults to 0.05 and 0.95)}
\item{print}{Should the plot be printed to a file (defaults to `FALSE`)}
\item{type}{Whether sediment loading (`type = "sed"`, default) or pollutant
loading (`type = "p"`) should be plotted}
\item{use_files}{Logical. Should results files that have been loaded be used
(defaults to `TRUE`)}
}
\description{
Plots mass sediment loading rates by reach, with uncertainty
}
|
fb980e127e2fc6aa357cf64e90485dd8156e5c8a
|
487a6800844292ac12e6dad629ba93ad0ec78495
|
/man/USSeerBG.Rd
|
192802f7084bc22732e25b6f99e234981df0e584
|
[] |
no_license
|
cran/micromapST
|
18fd7f344d741c9c013a0272cf6b089a728976e8
|
71552b1333593ddc04ad04a344b790e58e7e6d58
|
refs/heads/master
| 2023-08-21T23:41:51.451836
| 2023-08-10T11:50:02
| 2023-08-10T13:36:11
| 17,697,489
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,209
|
rd
|
USSeerBG.Rd
|
\name{USSeerBG}
\alias{USSeerBG}
\docType{data}
\title{USSeerBG border group datasets to support use with U.S. 20 Seer areas/registries}
\description{
The \var{micromapST} function has the ability to generate linked micromaps for
any geographical area. To specify the geographical area, the \option{bordGrp}
call argument is used to specify the border group dataset for the geographical area.
The \var{USSeerBG} border group dataset supports creating linked micromaps for the
20 Seer registries in the U. S. When the \option{bordGrp} call argument is set
to \var{USSeerBG}, the appropriate name table (county names and abbreviations) and
the 20 sub-areas (Seer registries) boundary data is loaded in \var{micromapST}.
The user's data is then linked to the boundary data via the Seer registry's name,
abbreviated, alias match or ID based on the table below.
The 20 U. S. Seer registries are the accepted registries as of January 2010
funded by NCI.
}
\details{
The \var{USSeerBG} border group dataset contains the following data.frames:
\describe{
\item{areaParms}{ - contains specific parameters for the border group
}
\item{areaNamesAbbrsIDs}{ - containing the names, abbreviations, numerical
identifier and alias matching string for each of the 20 Seer registries.
}
\item{areaVisBorders}{ - the boundary point lists for each area.
}
\item{L2VisBorders}{ - the boundaries for an intermediate level. For Seer
registry border group, L2VisBorders contains the boundaries for the 51
states and DC in the U. S to help provide a geographical reference of
the registries to the states.
}
\item{RegVisBorders}{ - the boundaries for the 4 U. S. Census regions in
the U. S in support of the region feature.
}
\item{L3VisBorders}{ - the boundary of the U. S.
}
}
The Seer Registries border group contains 20 Seer Registry sub-areas.
Each registry has a row in the ]var{areaNamesAbbrsIDs} data.frame and
a set of polygons in the areaVisBorders data.frame datasets.
Regions are defined in this border group as the 4 census regions
in the U. S. The regions feature is enable. The four census regions
are: NorthEast, South, MidWest, and West. The states and Seer registries in each
region are:
\tabular{lll}{
state\tab Seer Registries\tab region\cr
Alabama\tab <none>\tab South\cr
Alaska\tab Alaska Natives\tab West\cr
Arizona\tab Arizona Natives\tab West\cr
Arkansas\tab <none>\tab South\cr
California\tab California-LA, \tab West\cr
\tab California-Other, \tab \cr
\tab California-SF, \tab \cr
\tab California-SJ \tab \cr
Colorado\tab <none>\tab West\cr
Connecticut\tab Connecticut\tab NorthEast\cr
Delaware\tab <none>\tab South\cr
District of Columbia\tab <none>\tab South\cr
Florida\tab <none>\tab South\cr
Georgia\tab Georgia-Atlanta, \tab South\cr
\tab Georgia-Other, \tab \cr
\tab Georgia-Rural\tab \cr
Hawaii\tab Hawaii\tab West\cr
Idaho\tab <none>\tab West\cr
Illinois\tab <none>\tab MidWest\cr
Indiana\tab <none>\tab MidWest\cr
Iowa\tab Iowa\tab MidWest\cr
Kansas\tab <none>\tab MidWest\cr
Kentucky\tab Kentucky\tab South\cr
Louisiana\tab Louisiana\tab South\cr
Maine\tab <none>\tab NorthEast\cr
Maryland\tab <none>\tab South\cr
Massachusetts\tab <none>\tab NorthEast\cr
Michigan\tab Michigan-Detroit\tab MidWest\cr
Minnesota\tab <none>\tab MidWest\cr
Mississippi\tab <none>\tab South\cr
Missouri\tab <none>\tab MidWest\cr
Montana\tab <none>\tab West\cr
Nebraska\tab <none>\tab MidWest\cr
Nevada\tab <none>\tab West\cr
New Hampshire\tab <none>\tab NorthEast\cr
New Jersey\tab New Jersey\tab NorthEast\cr
New Mexico\tab New Mexico\tab West\cr
New York\tab <none>\tab NorthEast\cr
North Carolina\tab <none>\tab South\cr
North Dakota\tab <none>\tab MidWest\cr
Ohio\tab <none>\tab MidWest\cr
Oklahoma\tab Oklahoma-Cherokee\tab South\cr
Oregon\tab <none>\tab West\cr
Pennsylvania\tab <none>\tab NorthEast\cr
Rhode Island\tab <none>\tab NorthEast\cr
South Carolina\tab <none>\tab South\cr
South Dakota\tab <none>\tab MidWest\cr
Tennessee\tab <none>\tab South\cr
Texas\tab <none>\tab South\cr
Utah\tab Utah\tab West\cr
Vermont\tab <none>\tab NorthEast\cr
Virginia\tab <none>\tab South\cr
Washington\tab Washington-Seattle\tab West\cr
West Virginia\tab <none>\tab South\cr
Wisconsin\tab <none>\tab MidWest\cr
Wyoming\tab <none>\tab West\cr
}
The L3VisBorders dataset contains the outline of the United States.
The details on each of these data.frame structures can be found in
the "bordGrp" section of this document. The \var{areaNamesAbbrsIDs}
data.frame provides the linkages to the boundary data for each
sub-area (registry) using the fullname, abbreviation, and numerical
identifier for each country to the \var{<statsDFrame>} data based
on the setting of the \option{rowNames} call argument.
A column or the data.frame row.names must match one of the types
of names in the \var{areaNamesAbbrsIDs} data.frame name table.
If the data row does not match a value in the name table, an warning
is issued and the data is ignored. If no data is present for a sub-area (registry)
in the name table, the sub-area (registry) is mapped but not colored.
The following are a list of the names, abbreviations, alias and IDs for each country in the
\var{USSeerBG} border group.
\tabular{llllll}{
Name\tab ab\tab alias string\tab id\tab counties\tab region\cr
Alaska Natives\tab AK-NAT\tab ALASKA NATIVES\tab 18\tab all\tab West\cr
Arizona Natives\tab AZ-NAT\tab ARIZONA NATIVES\tab 20\tab all\tab West\cr
California-LA\tab CA-LA\tab LOS ANGELES\tab 4\tab Los Angeles\tab West\cr
California-SF\tab CA-SF\tab SAN FRANCISCO\tab 2\tab Alameda, \tab West\cr
\tab \tab \tab \tab Contra Costa,\tab \cr
\tab \tab \tab \tab Marin, \tab \cr
\tab \tab \tab \tab San Francisco,\tab \cr
\tab \tab \tab \tab San Mateo\tab \cr
California-SJ\tab CA-SJ\tab SAN JOSE\tab 3\tab Monterey, \tab West\cr
\tab \tab \tab \tab San Benito,\tab \cr
\tab \tab \tab \tab Santa Clara,\tab \cr
\tab \tab \tab \tab Santa Cruz\tab \cr
California-Other\tab CA-OTH\tab CALIFORNIA EXCLUDING\tab 5\tab all other counties\tab West\cr
Connecticut\tab CT\tab CONNECTICUT\tab 1\tab all\tab NorthEast\cr
Georgia-Atlanta\tab GA-ATL\tab ATLANTA\tab 6\tab Clayton, Cobb, DeKalb,\tab South\cr
\tab \tab \tab \tab Fulton, Gwinnett\tab \cr
Georgia-Rural\tab GA-RUR\tab RURAL GEORGIA\tab 8\tab Glascock, Greene, Hancock,\tab South\cr
\tab \tab \tab \tab Jasper, Jefferson, Morgan,\tab \cr
\tab \tab \tab \tab Putnam, Taliaferro, Warren,\tab \cr
\tab \tab \tab \tab Washington\tab \cr
Georgia-Other\tab GA-OTH\tab GREATER GEORGIA\tab 7\tab all other counties\tab South\cr
Hawaii\tab HI\tab HAWAII\tab 9\tab all\tab West\cr
Iowa\tab IA\tab IOWA\tab 10\tab all\tab MidWest\cr
Kentucky\tab KY\tab KENTUCKY\tab 14\tab all\tab South\cr
Michigan-Detroit\tab MI-DET\tab DETROIT\tab 15\tab Macomb, \tab MidWest\cr
\tab \tab \tab \tab Oakland, \tab \cr
\tab \tab \tab \tab Wayne\tab \cr
New Jersey\tab NJ\tab NEW JERSEY\tab 11\tab all\tab NorthEast\cr
New Mexico\tab NM\tab NEW MEXICO\tab 12\tab all\tab West\cr
Oklahoma-Cherokee\tab OK-CHE\tab OKLAHOMA\tab 19\tab Adair, \tab South\cr
\tab \tab \tab \tab Cherokee, \tab \cr
\tab \tab \tab \tab Craig, \tab \cr
\tab \tab \tab \tab Delaware, \tab \cr
\tab \tab \tab \tab Mayes, \tab \cr
\tab \tab \tab \tab McIntosh, \tab \cr
\tab \tab \tab \tab Muskogee, \tab \cr
\tab \tab \tab \tab Nowata,\tab \cr
\tab \tab \tab \tab Ottawa, \tab \cr
\tab \tab \tab \tab Rogers, \tab \cr
\tab \tab \tab \tab Sequoyah, \tab \cr
\tab \tab \tab \tab Tulsa, \tab \cr
\tab \tab \tab \tab Wagoner, \tab \cr
\tab \tab \tab \tab Washington\tab \cr
Utah\tab UT\tab UTAH\tab 16\tab all\tab West\cr
Washington-Seattle\tab WA-SEA\tab SEATTLE\tab 17\tab Clallam, \tab West\cr
\tab \tab \tab \tab Grays Harbor,\tab \cr
\tab \tab \tab \tab Island,\tab \cr
\tab \tab \tab \tab Jefferson,\tab \cr
\tab \tab \tab \tab King, \tab \cr
\tab \tab \tab \tab Kitsap, \tab \cr
\tab \tab \tab \tab Mason, \tab \cr
\tab \tab \tab \tab Pierce, \tab \cr
\tab \tab \tab \tab San Juan,\tab \cr
\tab \tab \tab \tab Skagit, \tab \cr
\tab \tab \tab \tab Snohomish, \tab \cr
\tab \tab \tab \tab Thurston,\tab \cr
\tab \tab \tab \tab Whatcom \tab \cr
}
The \option{rowNames} = \var{alias} and the \option{regions} = \var{TRUE}
features are enabled in the \var{USSeerBG} border group.
The \var{alias} option is designed to allow the package to match the
registry labels created by the Seer Stat website when exporting
Seer data for analysis. The alias match is a "contains" match, so
the registry field in the user data must "contain" the "alias" values
listed in the above table. To help generalize the match, the user's
registry value is stripped of any punctuation, control characters and
multiple spaces (blanks, tabs, cr, lf) are reduced to a single blank
and the string is converted to all upper case. Then the wild card match
is performed.
The \option{dataRegionOnly} call parameter (when set to \var{TRUE})
instructs the package to only map the
regions with Seer registers with data. The regions used are the four census
regions: NorthEast, South, MidWest and West. The RegVisBorders data.frame
contains the outline of each of these regions. For example: if Seer
registry data is provided for the only the New Mexico, Utah and California
Registries in the West region, then only the states and regional boundary
for the West region are drawn.
The \var{USSeerBG} border group does not contain or support an alternate set
of abbreviations. If \option{rowNames} is set to \var{alt_ab}, an warning
is generated and the standard Seer registry abbreviations are used.
The following steps should be used to export data for \var{micromapST}'s use from the SEER*Stat Website:
\enumerate{
\item Log on to the SEER*Stat website.
\item Create the matrix of results you want in SEER*Stat.
\item Click on Matrix, Export, Results as Text File
(if you created multiple matrices of results, make sure that the
one you want to export is highlighted)
\item In the Matrix Export Options window, click on:
\enumerate{
\item Output variables as Labels without quotes
\item Remove all thousands separators
\item Output variable names before data
\item Preserve matrix columns & rename fields
\item Leave defaults clicked for Line delimiter, Missing Character,
and Field delimiter
}
\item Change names and locations of text and dictionary files from defaults
to the appropriate name and directory location.
}
To read the resulting text file into R use the \code{read.delim} function with \option{header} = \var{TRUE}.
Follow the \code{read.delim} call with a \code{str} function to verify the data was read correctly.
\preformatted{
dataT <- read.delim("c:/datadir/seerstat.txt", header=TRUE)
str(dataT)
}
}
\source{NCI}
\references{United States National Cancer Institute Seer Website at www.seer.cancer.gov;
Seer Software at seer.cancer.gov/seerstat.; United States Census Bureau,
Geography Division. "Census Regions and Divisions of the United States" (PDF).
Retrieved 2013-01-10.
}
\keyword{datasets}
|
e7eaabb31b52eb08ee988bc8489ef08665d2cc4e
|
12b0aa3c8c9b600253a3850f9b0a0466402f30e1
|
/R/plotPrior.R
|
c10673d210c9b1dcd64da2d849ef445bc9e9b165
|
[] |
no_license
|
bbanbury/phyloextra
|
561ef779c459fc08bcb087069e6977aafd9a6440
|
ebebead690e4317be84d9d7165a2d801951c7c55
|
refs/heads/master
| 2021-01-10T20:50:51.583643
| 2014-11-25T19:36:26
| 2014-11-25T19:36:26
| 5,588,766
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,090
|
r
|
plotPrior.R
|
#' Plot Prior Distribution
#'
#' Draws the density of a prior distribution and (optionally) overlays line
#' segments at a fixed set of quantiles (1\%, 5\%, 25\%, 50\%, 75\%, 95\%, 99\%).
#' @param priorFn Prior shape; either "fixed", "uniform", "normal", "lognormal", "gamma", or "exponential"
#' @param priorVariables Variables needed to describe the shape of the distribution. uniform distributions=c(min, max); normal distributions=c(mean, sd); lognormal distributions=c(mean, sd); gamma distributions=c(shape, scale); exponential distributions=c(rate)
#' @param plotQuants If TRUE, plots line segments at the quantiles
#' @param plotLegend If TRUE, plots legend box with quantile values
#' @export
#' @return A data.frame with columns \code{quant} (probability level) and
#'   \code{quant.value} (corresponding quantile of the prior); the plot is
#'   drawn as a side effect. For \code{priorFn = "fixed"} the string
#'   "can't plot fixed prior" is returned instead of a data.frame.
#' @examples
#' plotPrior(priorFn="exponential", priorVariables=c(10))
#' plotPrior("normal", c(1,2))
#' plotPrior("gamma", c(2, .2), plotQuants=FALSE, plotLegend=FALSE)
plotPrior <- function(priorFn, priorVariables, plotQuants=TRUE, plotLegend=TRUE){
# Validate the distribution name; match.arg also allows unambiguous
# partial matches such as "exp".
priorFn <- match.arg(arg=priorFn,choices=c("fixed", "uniform", "normal", "lognormal", "gamma", "exponential"),several.ok=FALSE)
x <- NA
# Fixed probability levels whose quantiles are reported/drawn.
quant <- c(0.01, 0.05, 0.25, .50, 0.75, 0.95, 0.99)
quant.value <- NULL
mm <- NULL
# A fixed (point-mass) prior has no density to draw.
if(priorFn == "fixed")
return("can't plot fixed prior")
else if (priorFn == "uniform"){
# NOTE(review): these locals shadow base::min / base::max inside this branch.
min <- priorVariables[1]
max <- priorVariables[2]
x <- runif(1000, min, max)
# type="n" means curve() only sets up the axes; the density itself is the
# shaded rectangle drawn below. (dunif(x) here uses the default unit
# uniform, which is fine since nothing is actually drawn.)
curve(dunif(x), xlim=c(min-(.3*(max-min)), max+(.3*(max-min))), ylim=c(0, 1.5), type="n", xlab="", ylab=paste("Density (parameters: min=", min, "; max =", max, ")", sep=""))
rect(min, 0, max, 1, col=rgb(0, 0, 0, .2))
for (i in 1:length(quant)){
quant.value[i] <- qunif(quant[i], min, max)
if (plotQuants){
segments(quant.value[i], 0, quant.value[i], 1)
# Median is redrawn each iteration with a heavier line (harmless but redundant).
segments(qunif(.5, min, max), 0, qunif(.5, min, max), 1, lwd=2)
}
}
}
else if(priorFn == "normal"){
mean <- priorVariables[1]
stdev <- priorVariables[2]
# Random draws define the plotting range via min(x)/max(x).
x <- rnorm(1000, mean, stdev)
poly <- curve(dnorm(x, mean, stdev), from=min(x), to=max(x), xlab="", ylab=paste("Density (parameters: mean=", mean, "; stdev =",stdev, ")", sep=""))
# Close the polygon along the baseline so it can be shaded.
poly$x <- c(min(poly$x),poly$x, max(poly$x))
poly$y <- c(min(poly$y), poly$y, min(poly$y))
polygon(poly, col=rgb(0, 0, 0, 0.3))
for(i in 1:length(quant)) {
quant.value[i] <- qnorm(quant[i], mean, stdev)
if(plotQuants){
# Segment height is the density at the quantile.
mm[i] <- dnorm(quant.value[i], mean, stdev)
segments(quant.value[i], min(poly$y), quant.value[i], mm[i])
segments(qnorm(.5, mean, stdev), min(poly$y), qnorm(.5, mean, stdev), dnorm(qnorm(.5, mean, stdev), mean, stdev), lwd=2)
}
}
}
else if(priorFn == "lognormal"){ #messed up quant lines and polygon
# NOTE(review): per the author's note above, the polygon closure and
# quantile segments in this branch may render incorrectly; the curve is
# drawn from 0 to the 99th percentile.
mean <- priorVariables[1]
stdev <- priorVariables[2]
x <- rlnorm(1000, mean, stdev)
poly <- curve(dlnorm(x, mean, stdev), from=0, to=qlnorm(0.99, mean, stdev), xlab="", ylab=paste("Density (parameters: mean=", mean, "; stdev =",stdev, ")", sep=""))
poly$x <- c(poly$x, max(poly$x))
poly$y<-c(poly$y, min(poly$y))
polygon(poly, col=rgb(0, 0, 0, 0.3))
for (i in 1:length(quant)){
quant.value[i] <- qlnorm(quant[i], mean, stdev)
if(plotQuants){
mm[i] <- dlnorm(quant.value[i], mean, stdev)
segments(quant.value[i], min(poly$y), quant.value[i], mm[i])
segments(qlnorm(.5, mean, stdev), min(poly$y), qlnorm(.5, mean, stdev), dlnorm(qlnorm(.5, mean, stdev), mean, stdev), lwd=2)
}
}
}
else if(priorFn == "gamma"){
shape <- priorVariables[1]
scale <- priorVariables[2]
# NOTE(review): the SECOND positional argument of rgamma/dgamma/qgamma is
# `rate`, not `scale`. As written, the value documented as "scale" is
# passed as a rate (scale = 1/rate) -- confirm which parameterization
# the caller intends.
x <- rgamma(1000, shape, scale)
poly <- curve(dgamma(x, shape, scale), from=0, to=qgamma(0.99, shape, scale), xlab="", ylab=paste("Density (parameters: shape=", shape, "; scale =",scale, ")", sep=""))
poly$x <- c(0, poly$x, max(poly$x), 0)
poly$y <- c(0, poly$y, min(poly$y), 0)
polygon(poly, col=rgb(0, 0, 0, 0.3))
for (i in 1:length(quant)) {
quant.value[i] <- qgamma(quant[i], shape, scale)
if (plotQuants){
mm[i] <- dgamma(quant.value[i], shape, scale)
segments(quant.value[i], min(poly$y), quant.value[i], mm[i])
segments(qgamma(.5, shape, scale), min(poly$y), qgamma(.5, shape, scale), dgamma(qgamma(.5, shape, scale), shape, scale), lwd=2)
}
}
}
else if (priorFn == "exponential"){
rate <- priorVariables[1]
x <- rexp(1000, rate)
poly <- curve(dexp(x, rate), from=0, to=qexp(0.99, rate), xlab="", ylab=paste("Density (parameters: rate=", rate, ")", sep=""))
poly$x <- c(poly$x, 0)
poly$y <- c(poly$y, min(poly$y))
polygon(poly, col=rgb(0, 0, 0, 0.3))
for(i in 1:length(quant)) {
quant.value[i] <- qexp(quant[i], rate)
if(plotQuants){
mm[i] <- dexp(quant.value[i], rate)
segments(quant.value[i], min(poly$y), quant.value[i], mm[i])
segments(qexp(.5, rate), min(poly$y), qexp(.5, rate), dexp(qexp(.5, rate), rate), lwd=2)
}
}
}
# Table of (probability, quantile) pairs computed above -- this is the
# function's return value; the plot itself is a side effect.
results <- data.frame(cbind(quant, quant.value))
if(plotLegend)
legend("topright", legend=paste(c(quant, signif(quant.value, digits=3))), title="Quantiles", ncol=2, bty="n")
return(results)
}
|
b9aa0c45a314e52fe076caf6f5b4885c2a86011f
|
e3a65156525b264ae3a86a255804cd5e34f545fe
|
/Sensitivity Analysis and Omitted Variable Bias Diagnosis/Sensitivity analysis.r
|
f7a9c21a0e62fb9bcc52a0bb1619ceff95eb42b7
|
[] |
no_license
|
jasonqiangguo/R-Causal-Inference
|
7c147871e7e5e6dcab36eee994cc07d496207097
|
143b4d7a963ef5e8b41bfc5a68c36633b26c7473
|
refs/heads/master
| 2020-04-29T15:30:32.445649
| 2015-10-20T23:13:25
| 2015-10-20T23:13:25
| 17,455,131
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,146
|
r
|
Sensitivity analysis.r
|
#########################################################
## Weighted survey regression and sensitivity analysis ##
#########################################################
# NOTE(review): read.dta() comes from the 'foreign' package, which is never
# loaded in this visible chunk -- this line errors unless library(foreign)
# was run earlier in the session.
data<- read.dta("mydata.dta")
# 'survey' is loaded here; prefer library() over require() for a hard
# dependency (require() returns FALSE instead of erroring on failure).
require(survey)
## create a dataset with abducted == 0 and found == 1
datanonabd <- data[which(data$abd==0 & data$found == 1),]
## sensitivity analysis of the relation between abd and vote05
# NOTE(review): 'data2' is never defined in this file as shown -- presumably
# it should be 'data' (or 'datanonabd'); confirm before running.
# scale() standardizes treatment and outcome to mean 0 / sd 1.
data2$abd_1 <- scale(data2$abd)
data2$vote05_1 <- scale(data2$vote05)
# One (alpha, delta) loading pair per row of data2, each drawn N(0, 1);
# these control how strongly the simulated confounder tracks treatment
# and outcome respectively.
alpha <- rnorm(nrow(data2), 0, 1) ## Generate alphas from a normal distribution
delta <- rnorm(nrow(data2), 0, 1) ## Generate deltas from a normal distribution
# Right-hand-side covariates, pasted into model formula strings below.
X <- "age + age2 + age3 + fthr_ed + mthr_ed + hh_size96 + hh_wealth96"
out<- lm(paste0("vote05_1~abd_1+", X), data2) ## regression without unobserved confounder U
## Simulate a binary unobserved confounder U whose latent index loads on the
## standardized treatment (abd_1) and outcome (vote05_1) with weights x and y.
## The inverse-logit of the noisy linear index gives each row's Bernoulli
## success probability. Relies on the global data frame `data2`.
genConfound <- function(x, y) {
  noise <- rnorm(nrow(data2), 0, 1)
  latent <- x * data2$abd_1 + y * data2$vote05_1 + noise
  prob <- exp(latent) / (1 + exp(latent))
  rbinom(nrow(data2), 1, prob)
}
## generate the estimated TE including the unobserved confounding covariate U
# For each simulated (alpha, delta) pair: draw a confounder U, record its
# correlations with treatment and outcome, and the treatment coefficient
# from a regression that additionally controls for U.
results <- NULL
for (i in seq_len(length(alpha))) {
U <- genConfound(alpha[i], delta[i])
corD <- cor(U, data2$abd_1, use="pairwise.complete.obs")
corY <- cor(U, data2$vote05_1, use="pairwise.complete.obs")
estTE <- coef(lm(paste0("vote05_1 ~ abd_1 + U +", X), data2))["abd_1"]
names(estTE) <- NULL
res <- c(estTE = estTE, corD = corD, corY = corY)
# NOTE(review): growing 'results' with rbind() in a loop is O(n^2); fine for
# small simulations but preallocate for large n.
results <- rbind(results, res)
}
head(results)
# Figure 1: color/shape-code each simulated pair by whether controlling for U
# would shrink the naive estimate below half (red/triangle), inflate it above
# 1.5x (blue/diamond), or leave it in between (green/circle).
pdf('sensitivity1.pdf')
color <- ifelse(results[, "estTE"] <= 0.5 * coef(out)["abd_1"], "red", NA)
color <- ifelse(is.na(color) & results[, "estTE"] >= 1.5 * coef(out)["abd_1"],"blue", color)
color <- ifelse(is.na(color), "green", color)
pch <- ifelse(results[, "estTE"] <= 0.5 * coef(out)["abd_1"], 2, NA)
pch <- ifelse(is.na(pch) & results[, "estTE"] >= 1.5 * coef(out)["abd_1"],5, pch)
pch <- ifelse(is.na(pch), 1, pch)
plot(results[, "corD"], results[, "corY"], col = color, pch = pch, xlab = "correlation with D",
ylab = "correlation with voting", xlim = c(0, 1), ylim = c(0, 1), main = "FIGURE 1. Impact of Relaxing the Assumption of Unconfoundedness")
mtext(side = 2, text = "(Increase in R-squared)", line = 2)
mtext(side = 1, text = "(Increase in R-squared)", line = 4)
legend("topright",legend = c("Set of correlation pairs that does not overturn the result","Set of correlation pairs that overturns the result","Influence of pre-war covariates on voting and abduction"), pch=c(1,2,3), col = c("green","red","black"), cex=0.8)
# Benchmark points: influence of observed pre-war covariates, read from an
# external file and overlaid with hand-placed labels.
points_observed <- read.csv("V2V-fig1.csv")
points_observed <- points_observed[1:9,]
points(points_observed$RwP, points_observed$RyP, pch = "+", col = "black", cex = 1)
text(0.18319667, 0.34574641, label = "Year and location", cex=1, pos=4, col="Black", lwd=10)
text(0.15589219, 0.28866525, label = "Year of birth", cex=1, pos=4, col="Black", lwd=10)
text(0.03053662, 0.10892849, label = "Location of birth", cex=1, pos=4, col="Black", lwd=10)
text(0.00255776, 0.07770063, label = "HH Size", cex=1, pos=4, col="Black", lwd=10)
text(0.00981271, 0.01160790, label = "Cattle", cex=1, pos=4, col="Black", lwd=10)
dev.off()
## sensitivity analysis of the relation between abd and comm_mobil
# Same simulation design as above, with the community-mobilizer outcome.
data2$comm_mobil_1 <- scale(data2$comm_mobil)
alpha <- rnorm(nrow(data2), 0, 1) ## Generate alphas from a normal distribution
delta <- rnorm(nrow(data2), 0, 1) ## Generate deltas from a normal distribution
X <- "age + age2 + age3 + fthr_ed + mthr_ed + hh_size96 + hh_wealth96"
# NOTE(review): the response is written as "data2$comm_mobil_1" inside the
# formula string even though data2 is also passed as the data argument;
# plain "comm_mobil_1" would be the consistent form (cf. 'out' above).
out2<- lm(paste0("data2$comm_mobil_1~abd_1+", X), data2) ## regression without unobserved confounder U
## use the properties of logit transformation to generate the unobserved confounder U
## Simulate a binary unobserved confounder for the comm_mobil analysis:
## identical in structure to genConfound() but the latent index loads on
## comm_mobil_1 instead of vote05_1. Relies on the global data frame `data2`.
genConfound2 <- function(x, y) {
  noise <- rnorm(nrow(data2), 0, 1)
  latent <- x * data2$abd_1 + y * data2$comm_mobil_1 + noise
  prob <- exp(latent) / (1 + exp(latent))
  rbinom(nrow(data2), 1, prob)
}
## generate the estimated TE including the unobserved confounding covariate U
# Same loop as for figure 1, accumulating into 'results1'.
results1 <- NULL
for (i in seq_len(length(alpha))) {
U <- genConfound2(alpha[i], delta[i])
corD <- cor(U, data2$abd_1, use="pairwise.complete.obs")
corY <- cor(U, data2$comm_mobil_1, use="pairwise.complete.obs")
estTE <- coef(lm(paste0("comm_mobil_1 ~ abd_1 + U +", X), data2))["abd_1"]
names(estTE) <- NULL
res <- c(estTE = estTE, corD = corD, corY = corY)
results1 <- rbind(results1, res)
}
head(results1)
pdf('sensitivity2.pdf')
color <- ifelse(results1[, "estTE"] <= 0.5 * coef(out2)["abd_1"], "red", NA)
color <- ifelse(is.na(color) & results1[, "estTE"] >= 1.5 * coef(out2)["abd_1"],"blue", color) ##why?
color <- ifelse(is.na(color), "green", color)
pch <- ifelse(results1[, "estTE"] <= 0.5 * coef(out2)["abd_1"], 2, NA)
pch <- ifelse(is.na(pch) & results1[, "estTE"] >= 1.5 * coef(out2)["abd_1"],5, pch)
pch <- ifelse(is.na(pch), 1, pch)
plot(results1[, "corD"], results1[, "corY"], col = color, pch = pch, xlab = "correlation with D",
ylab = "correlation with community mobilizer", xlim = c(0, 1), ylim = c(0, 0.6), main = "FIGURE 2. Impact of Relaxing the Assumption of Unconfoundedness")
mtext(side = 2, text = "(Increase in R-squared)", line = 2)
mtext(side = 1, text = "(Increase in R-squared)", line = 4)
# NOTE(review): this legend text and the benchmark file below ("voting and
# abduction", V2V-fig1.csv) look copied from figure 1 -- confirm they are the
# intended benchmarks for the comm_mobil outcome.
legend("topright",legend = c("Set of correlation pairs that does not overturn the result","Set of correlation pairs that overturns the result","Influence of pre-war covariates on voting and abduction"), pch=c(1,2,3), col = c("green","red","black"), cex=0.8)
points_observed <- read.csv("V2V-fig1.csv")
points_observed <- points_observed[1:9,]
points(points_observed$RwP, points_observed$RyP, pch = "+", col = "black", cex = 1)
text(0.18319667, 0.34574641, label = "Year and location", cex=1, pos=4, col="Black", lwd=10)
text(0.15589219, 0.28866525, label = "Year of birth", cex=1, pos=4, col="Black", lwd=10)
text(0.03053662, 0.10892849, label = "Location of birth", cex=1, pos=4, col="Black", lwd=10)
text(0.00255776, 0.07770063, label = "HH Size", cex=1, pos=4, col="Black", lwd=10)
text(0.00981271, 0.01160790, label = "Cattle", cex=1, pos=4, col="Black", lwd=10)
dev.off()
|
d83090a374c989ee5afd674e2a1a1fb7f1c7e53f
|
3e77c3f49c1c827cf934eee5bffd4ed31ba7376c
|
/R_files/Bivariada_quali_Daniel_Claudiano_Cabral_Pinto.R
|
d216373c44cdaab1580a4668cffa1b7ea4d50a71
|
[] |
no_license
|
DanielClaudiano/Algorithms_from_University
|
4bb7597c1fa843d242a41d1a34455023a9f5725a
|
51d426a9e355523da32a2415f47f46595eed6f66
|
refs/heads/main
| 2023-05-12T11:33:53.083078
| 2021-06-11T00:20:35
| 2021-06-11T00:20:35
| 375,507,787
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,688
|
r
|
Bivariada_quali_Daniel_Claudiano_Cabral_Pinto.R
|
#rm(list = ls())
# Load data
df = read.csv2(file="C:/Users/Daniel/Downloads/DATABASE_TRABALHO_ESTAT1.csv",header = TRUE, sep=",")
# NOTE(review): read.csv2() defaults to dec = "," -- combined with sep = ","
# the field separator and decimal mark collide; confirm the file parses as
# intended (read.csv() may be what was meant).
# Chart: emergency aid (AUX_EMERG) vs. opinion on the economy (ECONOMIA)
# NOTE(review): install.packages() inside a script re-installs on every run;
# better left as a one-time setup step outside the script.
install.packages("dplyr")
require(dplyr)
# Build a summary via group_by() counting the answers of respondents that
# did / did not receive the emergency aid.
aux_eco = df %>% group_by(AUX_EMERG)
# Print the per-group answer counts (ECONOMIA codes 1-4).
aux_eco %>% summarise(
Sim_irrestritamente = sum(ECONOMIA == 1),
Sim_com_restricoes = sum(ECONOMIA == 2),
Nao = sum(ECONOMIA == 3),
Nao_sei_opinar = sum(ECONOMIA == 4),
Total = sum(Sim_irrestritamente,Sim_com_restricoes,Nao,Nao_sei_opinar)
)
#?group_by()
# Same summary captured as a data.frame for plotting below.
df_aux_eco = data.frame(aux_eco %>% summarise(
Sim_irrestritamente = sum(ECONOMIA == 1),
Sim_com_restricoes = sum(ECONOMIA == 2),
Nao = sum(ECONOMIA == 3),
Nao_sei_opinar = sum(ECONOMIA == 4),
Total = sum(Sim_irrestritamente,Sim_com_restricoes,Nao,Nao_sei_opinar)
)
)
# Flatten to a single vector, dropping the AUX_EMERG column; the two groups'
# counts end up interleaved (per the original note: odd positions are
# beneficiaries, even positions non-beneficiaries).
df_aux_eco_g = c(df_aux_eco$Sim_irrestritamente,df_aux_eco$Sim_com_restricoes,df_aux_eco$Nao,df_aux_eco$Nao_sei_opinar)
# NOTE(review): the odd positions (said above to be beneficiaries) are stored
# in df_aux_eco_no and the even ones in df_aux_eco_yes -- the names look
# swapped relative to the comment; verify against the AUX_EMERG ordering
# produced by group_by().
# beneficiaries
df_aux_eco_no = df_aux_eco_g[seq(1,8,2)]
# non-beneficiaries
df_aux_eco_yes = df_aux_eco_g[seq(2,8,2)]
# Comparative chart of ECONOMIA responses for families that did / did not
# receive the emergency aid (AUX_EMERG), side by side.
par(mfrow = c(1, 2))
# Bug fix: `lb` (legend labels) was previously defined only AFTER the first
# legend() call that uses it, so the first panel errored in a fresh session.
# Define labels and shared fill colors up front.
lb <- c("Sim, irrestritamente", "Sim, com Restrições", "Não", "Não sei")
fill_cols <- c("grey98", "grey86", "red", "grey71")
my_bar_yes <- barplot(df_aux_eco_yes,
                      col = fill_cols,
                      ylab = "Frequência",
                      xlab = "Família Recebeu Auxílo Emergencial",
                      xlim = c(0, 5),
                      ylim = c(0, 8),
                      axes = FALSE)
# Frequency labels just above each bar.
text(x = my_bar_yes, y = df_aux_eco_yes + 0.25, df_aux_eco_yes)
legend("topleft", legend = lb, fill = fill_cols)
my_bar_no <- barplot(df_aux_eco_no,
                     col = fill_cols,
                     ylab = "Frequência",
                     xlab = "Família Não Recebeu Auxílo Emergencial",
                     xlim = c(0, 5),
                     ylim = c(0, 8),
                     axes = FALSE)
text(x = my_bar_no, y = df_aux_eco_no + 0.25, df_aux_eco_no)
#text(0.5,0.5,"First title",cex=2,font=2)
title("Você é a favor de um Estado atuante na Economia de Mercado", line = -3, outer = TRUE)
#?par()
|
f0c6785a2a46f712e1164a4631ff120faa9e64b2
|
822b56e3d70664da86db8c8ced7ce14b3c0cd866
|
/Farquhar_solver.R
|
33abcfff0776a499ddaeb249108953217aa8d548
|
[] |
no_license
|
blumsteinm/scaling_farquhar
|
ea7f0c8082249c5e685109a7eb5509ba46fcdee7
|
645fcabf3a143c8b83d6d3e6d61d23de34d33dd4
|
refs/heads/master
| 2020-03-08T11:33:13.046814
| 2018-06-05T17:17:08
| 2018-06-05T17:17:08
| 128,101,447
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,554
|
r
|
Farquhar_solver.R
|
####################################################
# Farquhar_solver #
# OEB203 project code, spring 2018 #
####################################################
##### The system of equations #####
### Equation 1: Farquhar Model - take minimum of light-limited vs. temperature limited
## CO2 limited
# A1 = (Vcmax * (Ci-comp)) / (Ci + Kc * (1 + O/K0)) - Rd # units: umol CO2 / m2 / sec
# (Vcmax * (Ci - comp)) / (Ci + K1*(1+K2)) - Rd
# Rd = gamma * vcmax; gamma = 0.015
# vcmax in this context is at 25 C
# CO2-limited assimilation
A1 <- function(Ci, Tv, Vmo = 35, gamma = 0.015, Tvlo = 277.85) {
  # Rubisco (CO2)-limited assimilation (umol CO2 / m2 / s) as a function of
  # intercellular CO2 (Ci, umol/mol) and leaf temperature (Tv, Kelvin).
  # Bug fix: Vcmax() defined in this file has NO default for its required
  # Tvlo argument, so the original call Vcmax(Vmo = Vmo, Tv = Tv) errored
  # with "argument Tvlo is missing". Tvlo is now a backward-compatible
  # parameter defaulting to the low-temperature threshold (277.85 K) used
  # elsewhere in this file.
  VCmax_fixedT <- Vcmax(Vmo = Vmo, Tvlo = Tvlo, Tv = Tv)
  Rd <- gamma * VCmax_fixedT  # dark respiration, proportional to Vcmax
  A <- (VCmax_fixedT * (Ci - comp(Tv))) / (Ci + K1(Tv) * (1 + K2(Tv))) - Rd
  return(A)
}
## Light-limited
# A2 = alpha * PAR * ((Ci - comp) / (Ci + 2 * comp)) - Rd
# alpha = quantum yield = 0.04
A2 <- function(Ci, Tv, PAR = 1000, Vmo = 35, alpha = 0.04, gamma = 0.015,
               Tvlo = 277.85) {
  # Light-limited assimilation (umol CO2 / m2 / s): quantum yield (alpha)
  # times incident PAR, scaled by the CO2-dependence term, minus dark
  # respiration.
  # Bug fix: Vcmax() has no default for its required Tvlo argument, so the
  # original call Vcmax(Vmo = Vmo, Tv = Tv) errored. Tvlo is now a
  # backward-compatible parameter defaulting to 277.85 K, matching the rest
  # of this file.
  VCmax_fixedT <- Vcmax(Vmo = Vmo, Tvlo = Tvlo, Tv = Tv)
  Rd <- gamma * VCmax_fixedT
  A <- alpha * PAR * ((Ci - comp(Tv)) / (Ci + 2 * comp(Tv))) - Rd
  return(A)
}
### Equation 2: Stomatal conductance Model (Leunning); Medvigy
# gsw = ((M * A) / ((Cs - comp) * (1 + (el - es)/Do))) + b # open stomata
# b # closed stomata
# b <- function (Tv) {}
# Stomatal conductance (Leuning form): gsw = M*A / ((Ca - comp)*(1 + D/Do)) + b
# when stomata are open, and the residual conductance b when closed.
# NOTE(review): `el` and `ea` are FUNCTIONS elsewhere in this file, not
# numeric values -- as written `(el - ea)` would error ("non-numeric argument
# to binary operator") unless numeric globals named el/ea exist at call time.
# Likely el(Tv) and an ambient-humidity ea value were intended; confirm.
gsw <- function(Amin, M = 9, Do = 1.3, b = 2, Ca, Tv, open = TRUE) {
if(open == TRUE) {
gsw <- ((M * Amin) / ((Ca - comp(Tv)) * (1 + (el - ea)/Do))) + b
}
else {
gsw <- b
}
# The if/else assignment above is the function's (invisible) return value.
}
## Equation 3:
# A = (-gsw / 1.6) * (Ci - Ca)
## Equation 4: Water flux (not necessary?)
# Phi = gsw * (el - ea)
##### The calculated variables #####
#### Parameters we can calculate or get trait values for:
## Vcmax is from Medvigy
# Vcmax = (Vmo * exp(3000*(1/288.15 -1/Tv))) / ((1+exp(.4(Tvlo - Tv))) * (1 + exp(0.4 * (Tv - 318.15))))
# Temperature response of maximum carboxylation rate (Medvigy form):
# an Arrhenius-type exponential referenced to 288.15 K, damped by logistic
# cutoffs below Tvlo and above 318.15 K.
#   Vmo  baseline rate at the 288.15 K reference (umol/m2/s)
#   Tvlo low-temperature threshold (Kelvin)
#   Tv   leaf temperature (Kelvin); vectorized
Vcmax <- function(Vmo, Tvlo, Tv) {
  arrhenius <- exp(3000 * (1 / 288.15 - 1 / Tv))
  low_cutoff <- 1 + exp(0.4 * (Tvlo - Tv))
  high_cutoff <- 1 + exp(0.4 * (Tv - 318.15))
  Vmo * arrhenius / (low_cutoff * high_cutoff)
}
##### VCmax equation with Paul
# Scratch exploration of the Vcmax temperature response: defines top-level
# Tv/C1/C2/Vmo/num/denom globals and draws a quick base plot. Safe to remove
# once the Vcmax() function above is trusted (these globals otherwise leak
# into the session).
Tv <- 270:320
C1 <- 1 # 2.12e-5 #missing in original coding of this equation
C2 <- 3000
Vmo <- 23
num<- Vmo * (C1 *exp(C2 * (1/288.15 -1/Tv)))
denom<-1/((1 + exp(.4 * (277.15 - Tv))) * (1 + exp(0.4 * (Tv - 318.15))))
plot(Tv, num*denom)
# Vcmax <- function(Tv) {
# C1 <- 1
# C2 <- 3000
# Vmo <- 23
# num<- Vmo * (C1 *exp(C2 * (1/288.15 -1/Tv)))
# denom<-1/((1 + exp(.4 * (277.15 - Tv))) * (1 + exp(0.4 * (Tv - 318.15))))
# Vc <- num * denom
# return(Vc)
# }
#
# Vcmax <- function (Vmo, Tvlo, Tv) { #Vmo, Tvlo are constants
# (Vmo * (exp(3000 * (1/288.15 -1/Tv)))) /
# ((1 + exp(.4 * (Tv - Tvlo))) * (1 + exp(0.4 * (Tv - 318.15))))
# }
# Vmo = 6.3 umol/m2/sec (Medvigy 2009, table 3)
# Vmo = Vc at 25C or 15C #Vcmax should be ~40 at 25 degrees
# Tvlo = Low temperature threshold (Kelvin) = 4.7 C = 277.85 Kelvin
# Tv = leaf temperature (Kelvin)
## new Vcmax equation (for Vcmax at 25c):
# Kattge 2007, Plant, Cell and Environment
# Vcmax = Vmo * exp((36380 * (Tv - 298))/(298*8.314*Tv))
# Vmo is between 58 and 92, for different connifers (eqn 9)
# Vcmax <- function (Vmo, Tv) {
# Vmo * exp((36380 * (Tv - 298)) / (298 * 8.314 * Tv))
# }
# Compensation point equation from Medgivy (b.7)
# comp = 21.2 * exp(5000 * ((1/288.15) - (1/Tv))) #CO2 compensation point where photosynthesis = respiration
comp <- function(Tv) { 21.2 * exp(5000 * ((1/288.15) - (1/Tv))) } #units: umol/mol
# Temperature-adjusted Michaelis constant for carboxylation (base value 150
# at the 288.15 K reference).
K1 <- function(Tv) {
  ref_diff <- 1 / 288.15 - 1 / Tv
  150 * exp(6000 * ref_diff)
}
# Temperature-adjusted oxygenation term (base value 0.836 at the 288.15 K
# reference; note the negative activation factor).
K2 <- function(Tv) {
  ref_diff <- 1 / 288.15 - 1 / Tv
  0.836 * exp(-1400 * ref_diff)
}
# alpha = 0.06 # mol CO2 / mol photons ## quantum efficiency
# Rd = gamma * Vcmax # ?? gamma = 0.015?
# el # saturated leaf concentration - 100
# ea = atmospheric water concentration - meterology?
# Saturation vapor pressure equation: el = a * exp(b*T/(T+c))
# T is temperature (degrees C) of the *leaf*
# a = 0.611 kPa
# b = 17.502
# c = 240.97 degrees C
# Helper: Kelvin -> degrees Celsius, used by the saturation vapor pressure
# curve below.
convertTv <- function(Tv) {
  Tv - 273.15
}

# Saturation vapor pressure (kPa) at leaf temperature Tv (Kelvin), Tetens
# form a * exp(b * Tc / (Tc + c)) with a = 0.611 kPa, b = 17.502,
# c = 240.97 C (constants noted earlier in this file).
el <- function(Tv) {
  temp_c <- convertTv(Tv)
  0.611 * exp((17.502 * temp_c) / (temp_c + 240.97))
}
# Ambient (atmospheric) vapor pressure from relative humidity (%) and
# temperature (Kelvin).
# NOTE(review): the standard relation is ea = el(Tv) * relHum / 100
# (since RH = ea/es). The division form below inverts that and looks like a
# bug despite the in-line note claiming a deliberate change -- re-check
# against Campbell & Norman p. 42 before trusting downstream ea values.
ea <- function (relHum, Tv) {
relHum /(100 * el(Tv)) # changed from multiplication to divide; Campbell and Norman page 42
}
# PAR - photon flux density, 500-2000
# J = 0 - 250 (put alpha*PAR instead of J)
# Ca = [atm CO2]
##### The Farquhar Solver Function #####
# Solve the coupled Farquhar photosynthesis / stomatal conductance system for
# each row (timestep) of input.df. For the chosen stomatal state, the model
# equations are rearranged into a quadratic in Ci whose coefficients (aa, bb,
# cc) are built per row; polyroot() supplies Ci, from which assimilation
# (CO2- and light-limited) and then gsw are computed.
# input.df must carry columns: Tv, Ca, PAR, comp, el, ea (see the dummy/real
# data sections below for how they are derived).
# Returns a data.frame with columns AA1, AA2 (the two assimilation limits),
# Ci.A1, Ci.A2 (their Ci roots), A.min, min.eq (which limit binds), gsw.
# NOTE(review): `stomata` defaults to the full c('open','closed') vector and
# is compared with `==` rather than match.arg(); calling without an explicit
# stomata argument produces vector comparisons and neither branch behaves as
# a scalar choice -- always pass 'open' or 'closed'.
farquhar_solver <- function (input.df, stomata = c('open', 'closed')) {
## input.df needs to have columns for Tv and Ca and relhum. the rest can be calculated...
## defaults that we'd like to avoid including in the giant function call
gamma <- 0.015
Vmo <- 23 #92 #35
b <- 2
alpha <- 0.04
Do <- 1.3
M <- 9 #stomatal slope; unitless; Raczka et al 2016, Biogeosciences; (also Heroult 2013, Plant Cell & Environment)
Tvlo <- 277.85
## Functions used to calculate other inputs from input.df
input.df$Vcmax <- Vcmax(Vmo = Vmo, Tvlo = Tvlo, Tv = input.df$Tv )
# calculated in function
Rd <- gamma * input.df$Vcmax
k1.dat <- K1(input.df$Tv)
k2.dat <- K2(input.df$Tv)
X = k1.dat * (1 + k2.dat)
Y <- alpha * input.df$PAR
# FF groups the conductance denominator terms shared by the open-stomata
# coefficients below.
FF <- ((input.df$Ca - input.df$comp) * (1 + ((input.df$el - input.df$ea)/Do)))
# relevant functions
# NOTE(review): these local A1/A2 shadow the file-level versions and read
# input.df$Vcmax from the enclosing environment; their Vmo/gamma arguments
# are effectively unused for the Vcmax part.
A1<- function(Ci, Tv, Vmo = 35, gamma = 0.015) {
Rd <- gamma * input.df$Vcmax
A <- (input.df$Vcmax * (Ci - comp(Tv))) / (Ci + K1(Tv)*(1+K2(Tv))) - Rd
return(A)
}
A2 <- function(Ci, Tv, PAR, Vmo, alpha = 0.04, gamma = 0.015) {
Rd <- gamma * input.df$Vcmax
A <- (alpha * PAR * ((Ci - comp(Tv)) / (Ci + 2 * comp(Tv)))) - Rd
return(A)
}
### stomata closed ###
if(stomata == 'closed') {
### CO2 limited case, coefficients for polyroot function
aa <- (b * X * input.df$Ca/1.6) + (input.df$Vcmax * input.df$comp) + (Rd * X)
bb <- (Rd) + (b * input.df$Ca / 1.6) - (b * X / 1.6) - (input.df$Vcmax)
cc <- (-b / 1.6)
z <- data.frame(aa = aa, bb = bb, cc = cc) # where aa + bb*c1 + cc*c1^2
#solve the polynomial
roots <- apply(z, 1, polyroot)
# NOTE(review): roots[1] inspects only the first root of the first row;
# imaginary roots in other rows would pass undetected.
if(round(Im(roots[1]), 10) != 0) {
stop("quadratic roots are imaginary")
}
#coerce into non-imaginary components
roots.num <- Re(roots)
#extract the non-negative value
Ci.extract.A1 <- apply(roots.num, 2, max)
# calculate A
AA1 <- A1(Ci.extract.A1, input.df$Tv, Vmo = Vmo, gamma = gamma)
### Light limited case, coefficients for polyroot function
aa <- ((b * 2 * input.df$comp * input.df$Ca / 1.6) + (Rd * 2 * input.df$comp) + (Y * input.df$comp))
bb <- (Rd + (b * input.df$Ca / 1.6) - (b * 2 * input.df$comp / 1.6) - Y)
cc <- (-b / 1.6)
# define polynomial roots for each data point
z <- data.frame(aa = aa, bb = bb, cc = cc) # where aa + bb*c1 + cc*c1^2
#solve the polynomial
roots <- apply(z, 1, polyroot)
if(round(Im(roots[1]), 10) != 0) {
stop("quadratic roots are imaginary")
}
#coerce into non-imaginary components
roots.num <- Re(roots)
# extract the non-negative value
Ci.extract.A2 <- apply(roots.num, 2, max)
# calculate A2
AA2 <- A2(Ci.extract.A2, Tv = input.df$Tv, PAR = input.df$PAR, Vmo = Vmo, alpha = alpha) # only works if PAR has values 6 orders of magnitude higher
### Build output data frame
# pick minimum for each time point:
A.df <- data.frame (AA1 = AA1, AA2 = AA2, Ci.A1 = Ci.extract.A1, Ci.A2 = Ci.extract.A2)
A.df$A.min <- apply(A.df[,1:2], 1, min)
A.df$min.eq <- apply(A.df[,1:2], 1, which.min)
## Solve for gsw ##
# stomata closed, gsw = b
A.df$gsw <- rep(b, dim(A.df)[1])
}
#### Stomata Open ###
if(stomata == 'open') {
# NOTE(review): the author-flagged block below is known to be incorrect;
# the CO2-limited open-stomata coefficients need re-derivation before the
# 'open' branch results are trusted.
############ vvvvv THIS IS WRONG!!!!!!! vvvvv ************
### CO2 limited coefficients
aa <- ((b * X * input.df$Ca) + (1.6 * FF * input.df$Vcmax * input.df$comp) + (1.6 * FF * Rd * X) +
(M * input.df$Vcmax * input.df$comp * input.df$Ca) + (M * Rd * X * input.df$Ca))
bb <- ((b * input.df$Ca) - (b * X) - (1.6 * FF * input.df$Vcmax) + (1.6 * FF * Rd) + (M * input.df$Vcmax * input.df$Ca) +
(M * input.df$Vcmax * input.df$comp) + (Rd * M * input.df$Ca) + (M * Rd * X))
cc <- ((-b * FF) + (M * input.df$Vcmax) - (M * Rd))
############ ^^^^^ THIS IS WRONG!!!!!!! ^^^^^ ************
# define polynomial roots for each data point
z <- data.frame(aa = aa, bb = bb, cc = cc) # where aa + bb*c1 + cc*c1^2
#solve the polynomial
roots <- apply(z, 1, polyroot)
if(round(Im(roots[1]), 10) != 0) {
stop("quadratic roots are imaginary")
}
#coerce into non-imaginary components
roots.num <- Re(roots)
#extract the non-negative value
Ci.extract.A1 <- apply(roots.num, 2, max)
#calculate A1
AA1 <- A1(Ci.extract.A1, input.df$Tv, Vmo = Vmo, gamma = gamma)
### Light-limited coefficients
aa <- ((b * FF * input.df$Ca * 2 * input.df$comp) + (1.6 * FF * Y * input.df$comp) + (1.6 * FF * Rd * 2 * input.df$comp) -
(M * input.df$Ca * Y * input.df$comp) - (M * Rd * 2 * input.df$comp * input.df$Ca))
bb <- ((b * FF * input.df$Ca) - (b * FF * 2 * input.df$comp) - (1.6 * FF * Y) + (1.6 * FF * Rd) + (M * Y * input.df$Ca) +
(M * Y * input.df$comp) - (M * Rd * input.df$Ca) + (M * Rd * 2 * input.df$comp))
cc <- ((-b * FF) - (M * Y) + (M * Rd))
# define polynomial roots for each data point
z <- data.frame(aa = aa, bb = bb, cc = cc) # where aa + bb*c1 + cc*c1^2
#solve the polynomial
roots <- apply(z, 1, polyroot)
if(round(Im(roots[1]), 10) != 0) {
stop("quadratic roots are imaginary")
}
# coerce into non-imaginary components
roots.num <- Re(roots)
# extract the non-negative value
Ci.extract.A2 <- apply(roots.num, 2, max)
# calculate A2
AA2 <- A2(Ci.extract.A2, Tv = input.df$Tv, PAR = input.df$PAR, Vmo = Vmo, alpha = alpha) # only works if PAR has values 6 orders of magnitude higher
### build output data frame
# pick minimum for each time point:
A.df <- data.frame (AA1 = AA1, AA2 = AA2, Ci.A1 = Ci.extract.A1, Ci.A2 = Ci.extract.A2)
A.df$A.min <- apply(A.df[,1:2], 1, min)
A.df$min.eq <- apply(A.df[,1:2], 1, which.min)
### solve for gsw ###
# Leuning conductance using the binding assimilation limit.
gsw.solve <- ((M * A.df$A.min)/((input.df$Ca - input.df$comp) * (1 + ((input.df$el - input.df$ea)/Do)))) + b
A.df$gsw <- gsw.solve
}
return(A.df)
}
### GSW troubleshooting
M<-9
input.df <- dummy
A.min <- closed.sol$A.min
Do <- 1.3
((M * A.min)/((input.df$Ca - input.df$comp) * (1 + ((input.df$el - input.df$ea)/Do))))
##### The Data #####
### Dummy data ###
dummy <- data.frame(time = c(0:23))
dummy$Tv <- c(seq(280,300, length.out = 12), seq(300, 280, length.out = 12))
dummy$relHum <- c(rep(70, 3), 80, 80, 90, 100, 100, 90, 80, 70, 60, 50, 40, 50, 60, rep(70, 8))
dummy$Ca <- rep(400, 24)
dummy$comp <- comp(Tv = dummy$Tv)
dummy$Vcmax <- Vcmax(Vmo = 6.3, Tv = dummy$Tv, Tvlo = 277.85)
dummy$el <- el(Tv = dummy$Tv)
dummy$ea <- ea(relHum = dummy$relHum, Tv = dummy$Tv)
dummy$PAR <- c(rep(0, 5), seq(0, 0.002, length.out = 7), seq(0.002, 0, length.out = 7), rep(0,5))
# more realistic dummy PAR:
dummy$PAR <- c(rep(0, 5), seq(0, 2000, length.out = 7), seq(2000, 0, length.out = 7), rep(0, 5))
### Real data ###
dat <- read.csv('Aggregated_Climate_Data.csv', header=TRUE)
dat$Tv <- dat$Air_Temp_K
dat$relhum <- dat$Relative_Humidity_Percent
dat$Ca <- dat$Atmospheric_CO2
dat$comp <- comp(Tv = dat$Tv)
dat$Vcmax <- Vcmax(Vmo = 35, Tv = dat$Tv)
dat$el <- el(Tv = dat$Tv)
dat$ea <- ea(relHum = dat$relhum, Tv = dat$Tv)
dat$PAR <- dat$Par_moles_m2_s * 1000000 ### only keep this until push updated to git
##### Applying the farquhar_solver function #####
farquhar_solver(input.df = dummy, stomata = 'closed')
farquhar_solver(input.df = dummy, stomata = 'open')
|
56d7d6089a5a5f7ee0b97233da8859e79ce08809
|
64c60c76b22f6131ea683ee1b85abc49b1aefc96
|
/Lista2/z1.R
|
e164ef446e313adf7fbd73e4a21fd4b9f4067600
|
[] |
no_license
|
majsylw/statistical-methods-in-biology
|
870166e16269186449f645428f81e03513e01246
|
5f74b634fe6e57fd97093c03d149220d5ca27deb
|
refs/heads/master
| 2020-12-10T00:16:20.443423
| 2020-01-12T20:46:21
| 2020-01-12T20:46:21
| 233,455,417
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,271
|
r
|
z1.R
|
# Zadanie1
set.seed(10) # sianie nasionka
n1 = 5
n2 = 10
s1 = 1
s2 = 5
u1 = 0
u2 = 0
Welch <- function(a, n1, n2, u1, u2, s1, s2) {
  # Monte Carlo estimate of the rejection rate of the two-sided Welch
  # two-sample t-test at significance level `a`, for samples of sizes
  # n1 and n2 drawn from N(u1, s1) and N(u2, s2).
  # Returns the fraction of 10000 simulated experiments rejecting H0.
  sym_num <- 10000
  # One experiment per column; draws happen in the same order as the
  # original implementation, so seeded results are reproducible.
  x_samp <- matrix(rnorm(n1 * sym_num, u1, s1), n1, sym_num)
  y_samp <- matrix(rnorm(n2 * sym_num, u2, s2), n2, sym_num)
  x_mean <- apply(x_samp, 2, mean)
  x_var <- apply(x_samp, 2, var)  # sample variance, divisor n1 - 1
  y_mean <- apply(y_samp, 2, mean)
  y_var <- apply(y_samp, 2, var)
  # Welch statistic and Satterthwaite degrees of freedom, per experiment.
  z_stat <- (x_mean - y_mean) / sqrt(x_var / n1 + y_var / n2)
  dof <- (x_var / n1 + y_var / n2)^2 /
    ((x_var^2 / n1^2) / (n1 - 1) + (y_var^2 / n2^2) / (n2 - 1))
  # Two-sided rejection against the per-experiment t critical value.
  crit <- qt(1 - a / 2, dof)
  sum(z_stat < -crit | z_stat > crit) / sym_num
}
przedzial <- function(p, N, alfa) {
  # Wald (normal-approximation) confidence interval for a proportion p
  # estimated from N trials, at confidence level 1 - alfa.
  # Returns c(lower, upper).
  half_width <- qnorm(1 - alfa / 2) * sqrt(p * (1 - p) / N)
  c(p - half_width, p + half_width)
}
# Run the simulation at two significance levels and wrap each empirical
# rejection rate in its normal-approximation confidence interval
# (10000 = number of simulated experiments inside Welch()).
Welch(0.05, n1, n2, u1, u2, s1, s2)
przedzial(Welch(0.05, n1, n2, u1, u2, s1, s2), 10000, 0.05)
Welch(0.1, n1, n2, u1, u2, s1, s2)
przedzial(Welch(0.1, n1, n2, u1, u2, s1, s2), 10000, 0.05)
# NOTE(review): `wek`, `spr2`, `Zw` and `bladteo` are not defined anywhere
# in this file — this power-curve plot presumably depends on another
# script; confirm before running.
plot(wek, spr2, type = "p", col = "black", ylab = "Moc testu",
     xlab = "Parametr przesuniecia")
lines(wek, Zw, col = "green", type = "l")
# Error bars drawn as capped arrows around the green curve.
arrows(wek, Zw - bladteo, wek, Zw + bladteo,
       length = 0.05, col = "green", angle = 90, code = 3)
|
b2477a00c379efbd2cd1ae70e00b6e0edf961e45
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610383365-test.R
|
1456c22a4d3a07261e9bb1dc2e73221a5221d516
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 316
|
r
|
1610383365-test.R
|
# Auto-generated libFuzzer reproduction case for grattan::IncomeTax.
# Degenerate input: empty rate/threshold schedules plus an income vector
# of NaN, denormal, and extreme values, to exercise edge-case handling.
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(-2.87893513927628e-63, 8.74079907220133e-313, NaN, NaN, NaN, NaN, NaN, 2.23764539047744e-310, NaN, NaN, NaN, NaN, -5.17495827121042e+245, NaN, NaN, 1.39769620676628e-309, 0, 0, 0, 0, 0))
# Invoke the target with the fuzzed argument list and print the result's
# structure so crashes or malformed output surface in the fuzzer log.
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
b93269d9b14e37f551b54a49c6cb41be346ab697
|
de9d448132f90f073d29add688de2fcf72527a89
|
/man/scoreStem.Rd
|
a159d5e9239ee3168a1610c5ced21960c438f94a
|
[
"MIT"
] |
permissive
|
NMikolajewicz/scMiko
|
33f137e9e3a6318fb0386506ac4666a3822463f0
|
bd00724889db265817fc54d0d50b14647d32438d
|
refs/heads/master
| 2023-06-09T05:51:30.199131
| 2023-06-04T20:23:51
| 2023-06-04T20:23:51
| 249,496,034
| 20
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,197
|
rd
|
scoreStem.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_functions.R
\name{scoreStem}
\alias{scoreStem}
\title{Calculate stem index}
\usage{
scoreStem(
object,
method = c("CCAT", "StemSC", "CytoTRACE"),
min.pct = 0,
assay = DefaultAssay(object),
batch = NULL,
verbose = T
)
}
\arguments{
\item{object}{Seurat object}
\item{method}{method used to calculate stem index. One of "CCAT", "StemSC", or "CytoTrace"}
\item{min.pct}{minimum expressing fraction. Values of 0.005-0.01 recommended if data set is large.}
\item{assay}{assay used for computing stem index. DefaultAssay(object) is taken if not specified.}
\item{batch}{meta data feature containing batch memberships. Only used for CytoTrace algorithm.}
\item{verbose}{print progress. Default is T.}
}
\value{
numeric vector
}
\description{
Calculate stem index at single-cell resolution using CCAT, StemSC or CytoTrace algorithms.
}
\examples{
ccat.score <- scoreStem(object = seurat.object, method = "CCAT")
}
\seealso{
\code{\link{CompCCAT}} for computing CCAT scores, \code{\link{StemSC}} for StemSC scores, and \code{\link{CytoTRACE}} for CytoTrace scores.
}
\author{
Nicholas Mikolajewicz
}
|
a1297a9777817d31417241c877ba6546e7a44e27
|
a71fcfc790356fd50c355c50a4f6dd6442c51d54
|
/R/predict.spacious.R
|
4a8e31260dc410e0149b6fbfaa4e8c12c1c3ca91
|
[] |
no_license
|
jarad/spacious
|
75a550abe0e22a227bfc78974576515f900e5cc1
|
5f63a204664ee12c7d1f5dcced3b935f2233593b
|
refs/heads/master
| 2016-09-06T11:33:57.882695
| 2014-03-04T23:50:52
| 2014-03-04T23:50:52
| 3,471,933
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,456
|
r
|
predict.spacious.R
|
# predict values at new sites using fit from spacious block composite model
"predict.spacious" <- function(object, newdata, newS, newB, D,
opts = list(type="block"),
interval = "none", level = 0.95,
nthreads = 1, gpu=FALSE,
engine, # use the C or R implementation?
...) {
if (missing(newS)) {
stop("Must specify locations newS to make predictions at.")
}
if (!object$convergence) {
warning("Predictions being made with fit that did not converge!")
}
nFit <- nrow(object$S)
nNew <- nrow(newS)
# build model matrix for the new data
if (missing(newdata)) {
if (length(object$beta) > 1) {
# newdata missing, yet we have covariates...
stop("newdata must be specified for this fit.")
}
# use intercept only
X <- matrix(1, nrow=nNew, ncol=1)
} else {
if (is.data.frame(newdata)) {
terms <- delete.response(terms(object))
mf <- model.frame(terms, newdata)
X <- model.matrix(terms, mf)
} else if (is.matrix(newdata)) {
# assume newdata is a matrix of covariates
X <- cbind(1, newdata)
} else {
stop(paste0("Unable to handle newdata with class ", class(newdata)))
}
}
# ensure this is a matrix
newS <- matrix(newS, ncol=2)
# locations for all points
S <- rbind(object$S, newS)
if (missing(engine)) {
engine <- object$engine
}
if (object$lik_form == "block" & opts$type == "block") {
# number of blocks
nB <- length(object$grid)
# figure out newB the location is in
if (!missing(newB)) {
# make sure we have these blocks defined in the neighbors
uniqueBlocks <- unique( as.vector( object$neighbors ) )
sapply(newB, function(b) {
if (sum(uniqueBlocks == b) == 0) {
stop(paste0("No neighbors defined for block ",b,". Unable to make predictions."))
}
})
} else {
# user did not define blocks, so find some
if (is.null(object$grid)) {
# we don't have polygons, so find the closest point and use that block
stop("No polygons defining blocks. Specify newB to make predictions.")
}
# figure out newBs contain these new locations
newB <- rep(NA, nNew)
for (b in 1:nB) {
in_poly <- point.in.polygon(newS[,1], newS[,2],
object$grid@polygons[[b]]@Polygons[[1]]@coords[,1],
object$grid@polygons[[b]]@Polygons[[1]]@coords[,2]) >= 1
newB[in_poly] <- b
}
if (sum(is.na(newB)) > 0) {
warning("Unable to find blocks for all prediction locations. Assigning locations to blocks using block centroids.")
which.naB <- which( is.na(newB) ) # which obs are missing blocks?
N.naB <- length(which.naB) # how many of these are there?
# get centers of blocks
block.centers <- coordinates(object$grid)
# compute distances between block centers and obs with missing blocks
D.centers <- matrix( (rdist(rbind(newS[which.naB,], block.centers)))[1:N.naB, N.naB + 1:nrow(block.centers)], nrow=N.naB )
# find block for each missing
foundB <- apply(D.centers, 1, function(row) {
which.min(row)
})
# updated new blocks
newB[which.naB] <- foundB
}
}
} else {
nB <- 0
}
if (opts$type=="local") engine <- "R" # C doesn't support local kriging (yet)
if (engine == "C") {
do_sd <- interval=="prediction"
local <- opts$type=="local"
if (missing(newB)) newB <- rep(1, nNew)
preds <- .C(spacious_predict,
# new site information
n_0=as.integer(nNew), y_0=as.double(rep(NA, nNew)), newS=as.double(newS), newB=as.integer(newB-1),
newX=as.double(X), do_sd=as.logical(do_sd), sd=as.double(rep(NA, nNew)),
local=as.logical(local), Nlocal=as.integer(opts$num),
# data used in fit
y=as.double(object$y), X=as.double(object$X), S=as.double(object$S), B=as.integer(object$B-1),
neighbors=as.integer(object$neighbors-1), n=as.integer(length(object$y)), p=as.integer(ncol(object$X)),
nblocks=as.integer(object$nblocks), npairs=as.integer(nrow(object$neighbors)),
# type of fit
lik_form=as.character(object$lik_form), cov=as.character(object$cov),
# fitted values
beta=as.double(object$beta), theta=as.double(object$theta),
# parallelization options
nthreads=as.integer(nthreads), gpu=as.logical(gpu),
NAOK=TRUE
)
y_0 <- preds$y_0
if (do_sd) {
sd.pred <- preds$sd
tile <- qnorm((1-level)/2, lower.tail=FALSE)
return(data.frame(y=y_0, sd=sd.pred, lwr=y_0-tile*sd.pred, upr=y_0+tile*sd.pred))
} else {
return(data.frame(y=y_0))
}
} else if (engine == "R") {
if (missing(D)) {
# compute distance matrix
D <- rdist(S)
diag(D) <- 0
}
# vector to hold predictions
y_0 <- rep(NA, nNew)
if (interval == "prediction") {
# vector to hold prediction std devs
sd.pred <- rep(NA, nNew)
}
# spatial params in terms of model structure
theta <- object$theta
if ( (nB == 0 && missing(newB)) || (opts$type == "all") ) {
# use all data to perform predictions and perform traditional kriging
n <- nrow(S)
# compute covariance matrix
Sigma <- compute_cov(object$cov, theta, D)
# get the predictions
y_0[1:nNew] <- X %*% object$beta + Sigma[nFit+1:nNew,1:nFit] %*%
chol2inv(chol(Sigma[1:nFit,1:nFit])) %*% object$resids
if (interval == "prediction") {
invSigma <- chol2inv(chol(Sigma))
sd.pred <- sqrt(diag( chol2inv(chol( invSigma[nFit+1:nNew,nFit+1:nNew] )) ))
}
} else {
if (opts$type == "block") {
# predict when we have blocks
# block memberships for all points
B <- c(object$B, newB)
# predict for each unique block
y_0[1:nNew] <- X %*% object$beta
# unique blocks where we have points to predict
uB <- unique(newB)
for (b in uB) {
# neighbors of this block
neighbors <- as.vector(object$neighbors[which( rowSums( object$neighbors==b ) == 1 ),])
neighbors <- neighbors[neighbors != b]
# what new points are in this block?
in.new <- which(newB == b)+nFit
n.in.new <- length(in.new)
# what observed points are in this block?
in.obs <- which(object$B == b)
n.in.obs <- length(in.obs)
# new and observed in this block
in.b <- c(in.new, in.obs)
n.in.b <- n.in.new + n.in.obs
A_0 <- matrix(0, nrow=n.in.new, ncol=n.in.new)
b_0 <- rep(0, n.in.new)
if (interval == "prediction") {
J_0 <- matrix(0, nrow=n.in.new, ncol=n.in.new)
B_0 <- matrix(0, nrow=n.in.new, ncol=n.in.new+n.in.obs)
}
for (n1 in neighbors) {
in.n1 <- which(object$B == n1)
n.in.n1 <- length(in.n1)
# take points so that we have Y = (Y_{b,new}, Y_{b,obs}, Y_{n1})
in.pair.n1 <- c(in.b, in.n1)
# covariance for b and n1
Sigma.n1 <- compute_cov(object$cov, theta, D[in.pair.n1,in.pair.n1])
invSigma.n1 <- chol2inv(chol(Sigma.n1))
A_0 <- A_0 + invSigma.n1[1:n.in.new,1:n.in.new]
b_0 <- b_0 +
matrix(invSigma.n1[1:n.in.new,n.in.new+1:n.in.obs],nrow=n.in.new,ncol=n.in.obs) %*% object$resids[in.obs] +
matrix(invSigma.n1[1:n.in.new,n.in.new+n.in.obs+1:n.in.n1],nrow=n.in.new,ncol=n.in.n1) %*% object$resids[in.n1]
if (interval == "prediction") {
B_0 <- B_0 + cbind(
matrix(invSigma.n1[1:n.in.new,1:n.in.new],nrow=n.in.new,ncol=n.in.new),
matrix(invSigma.n1[1:n.in.new,n.in.new+1:n.in.obs],nrow=n.in.new,ncol=n.in.obs)
)
for (n2 in neighbors) {
in.n2 <- which(object$B == n2)
n.in.n2 <- length(in.n2)
# take points so that we have Y = (Y_{b,new}, Y_{b,obs}, Y_{n2})
in.pair.n2 <- c(in.b, in.n2)
# points for Y = (Y_{n1}, Y_{n2})
in.n1n2 <- c(in.n1, in.n2)
# covariance for b and n2
Sigma.n2 <- compute_cov(object$cov, theta, D[in.pair.n2,in.pair.n2])
invSigma.n2 <- chol2inv(chol(Sigma.n2))
# covariance for n1 and n2
Sigma.n1n2 <- compute_cov(object$cov, theta, D[in.n1n2,in.n1n2])
J_0 <- J_0 + invSigma.n1[1:n.in.new,n.in.new+n.in.obs+1:n.in.n1] %*%
Sigma.n1n2[1:n.in.n1,n.in.n1+1:n.in.n2] %*% invSigma.n2[n.in.new+n.in.obs+1:n.in.n2,1:n.in.new]
}
}
}
# complete predictions
b_0 <- -b_0
y_0[in.new-nFit] <- y_0[in.new-nFit] + chol2inv(chol(A_0)) %*% b_0
if (interval == "prediction") {
# complete prediction variances
H_0 <- -A_0
Sigma <- compute_cov(object$cov, theta, D[in.b,in.b])
J_0 <- J_0 + B_0 %*% Sigma %*% t(B_0)
for (n1 in neighbors) {
in.n1 <- which(object$B == n1)
n.in.n1 <- length(in.n1)
# take points so that we have Y = (Y_{b,new}, Y_{b,obs}, Y_{n1})
in.pair.n1 <- c(in.b, in.n1)
# covariance for b and n1
Sigma.n1 <- compute_cov(object$cov, theta, D[in.pair.n1,in.pair.n1])
invSigma.n1 <- chol2inv(chol(Sigma.n1))
J_0 <- J_0 + 2 * B_0 %*% Sigma.n1[1:n.in.b,n.in.b+1:n.in.n1] %*% invSigma.n1[n.in.b+1:n.in.n1,1:n.in.new]
}
sd.pred[in.new-nFit] <- sqrt(diag( chol2inv(chol(H_0 %*% chol2inv(chol(J_0)) %*% H_0)) ))
}
}
} else if (opts$type == "local") {
nLocal <- opts$num
if (is.null(opts$num)) {
nLocal <- min(100, nFit) # default number of local points to use
warning(paste0("Number of points to use in local kriging not specified. Using ",nLocal," points"))
}
for (i in 1:nNew) {
# use closest nLocal points for kriging
pred.index <- i + nFit
# the closest nLocal points to Snew[i,]
which.local <- sort( D[pred.index, 1:nFit], decreasing=FALSE, index.return=TRUE )$ix[1:nLocal]
# compute covariance matrix
Sigma <- compute_cov(object$cov, theta, D[c(which.local, pred.index), c(which.local, pred.index)])
# get the predictions
y_0[i] <- X[i,] %*% object$beta + Sigma[nLocal+1,1:nLocal] %*%
chol2inv(chol(Sigma[1:nLocal,1:nLocal])) %*% object$resids[which.local]
if (interval == "prediction") {
invSigma <- chol2inv(chol(Sigma))
sd.pred[i] <- sqrt( 1/invSigma[nLocal+1,nLocal+1] )
}
}
} else {
stop(paste0("Unknown prediction type: ",opts$type))
}
} # end prediction
if (interval == "prediction") {
tile <- qnorm((1-level)/2, lower.tail=FALSE)
return(data.frame(y=y_0, sd=sd.pred, lwr=y_0-tile*sd.pred, upr=y_0+tile*sd.pred))
} else {
return(data.frame(y=y_0))
}
} else {
error(paste0("Unknown engine: ",engine,"\n"))
}
}
|
95fc4b73126ae070912799929ce097ec57d07fd9
|
fc06b4acacaee65445d4b633f664339da4feac16
|
/unemployment_analysis.R
|
4777c16e007fd7719747179c080f64b3c50ccb7c
|
[] |
no_license
|
cxpeng9223/CS341Final
|
ebecc2d0b96fe69dfee2689a18d47d3b038117e2
|
f4f4fcf1a21c015be45ceca436b46f675af95910
|
refs/heads/master
| 2020-03-19T15:19:34.561599
| 2018-06-15T09:25:22
| 2018-06-15T09:25:22
| 136,666,563
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
unemployment_analysis.R
|
#Post hoc unemployment analysis
#This file is to generate USA Map plots of the location bias in Gallup data
library(raster)
setwd("~/Box Sync/Data_for_341/Vowpal_wabbit_files")
results_sim <- fread("predictor_ECON_sim.txt")
results_sim$V1 <- ifelse(results_sim$V1 > 0, 1, 0)
#Actual
df.test = getData("file_for_match_vowpal_wabbit_ECON11.tsv","predictor_ECON11_actual.txt")
df.test$Prediction_b <- ifelse(df.test$Predicted > 0, 1, 0)
df.test$Actual_b <- ifelse(df.test$Actual > 0, 1, 0)
mean(results_sim$V1) # Simulated Employment
mean(df.test$Actual_b) # Actual Employment via Gallup
mean(df.test$Prediction_b) #Predicted Employment via Gallup
usa <- map("usa")
gg1 <- ggplot() + geom_polygon(data = usa, aes(x=long, y = lat, group = group)) +
coord_fixed(1.3)
map("world", c("hawaii"), boundary = TRUE, col=8, add = TRUE, fill=TRUE )
map("world", c("USA:Alaska"), boundary = TRUE, col='orange', add = TRUE, fill=TRUE )
#Plot 1000 points
g1k <- gallup[200000:201000,]
gg2 <- gg1 +
geom_point(data = gallup, aes(x = long, y = lat), color = "yellow", size = 1)
ggsave(plot = gg2, filename = "Full_map.png", width = 10, height = 7)
gg1 +
geom_point(data = sim[0:1000,], aes(x = V8, y = V7), color = "yellow", size = 1)
|
1e8c13f6b6ac3dd58296b94e56a1c8f5dbf682f1
|
cee086cf984e907a217ac0992cf9073a5a3e46a5
|
/tests/testthat.R
|
bd6bd489b3246ecc585b2256bd0fc3a6e1921159
|
[] |
no_license
|
cran/ROCaggregator
|
1c3e3ba71ef7a791d527f3b344b30a37c20c88ac
|
dbba3d5f06a68aa43699269e0cee22ab139e2c58
|
refs/heads/master
| 2023-07-08T02:51:09.893467
| 2021-08-10T08:10:14
| 2021-08-10T08:10:14
| 394,796,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
testthat.R
|
# Test runner for the ROCaggregator package (executed by R CMD check).
library(testthat)
library(mockery)        # mocking helpers used by the test suite
library(mockr)          # mocking within the package namespace
library(ROCaggregator)  # package under test
# Discover and run every test file under tests/testthat/.
test_check("ROCaggregator")
|
a93db9937b2af742c1f6ba3e584aa784dcbc892b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/evolqg/examples/KrzSubspace.Rd.R
|
83e6ba1211ee3a3302319a82322128df5b594d35
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,053
|
r
|
KrzSubspace.Rd.R
|
library(evolqg)
### Name: KrzSubspace
### Title: Krzanowski common subspaces analysis
### Aliases: KrzSubspace
### ** Examples
data(dentus)
dentus.matrices = dlply(dentus, .(species), function(x) cov(x[-5]))
KrzSubspace(dentus.matrices, k = 2)
# The method in Aguirre et al. 2014 can de implemented usign this function as follows:
#Random input data with dimensions traits x traits x populations x MCMCsamples:
cov.matrices = aperm(aaply(1:10, 1, function(x) laply(RandomMatrix(6, 40,
variance = runif(6,1, 10)),
identity)),
c(3, 4, 1, 2))
library(magrittr)
Hs = alply(cov.matrices, 4, function(x) alply(x, 3)) %>% llply(function(x) KrzSubspace(x, 3)$H)
avgH = Reduce("+", Hs)/length(Hs)
avgH.vec <- eigen(avgH)$vectors
MCMC.H.val = laply(Hs, function(mat) diag(t(avgH.vec) %*% mat %*% avgH.vec))
# confidence intervals for variation in shared subspace directions
library(coda)
HPDinterval(as.mcmc(MCMC.H.val))
|
90ef8308c430a861bfd7b7bb0447867066a0d166
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/photobiology/examples/expanse.Rd.R
|
70d3cfa52b8a88dd37377d5ebc6e2fba8c598edf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 292
|
r
|
expanse.Rd.R
|
library(photobiology)
### Name: spread
### Title: Expanse
### Aliases: spread wl_expanse expanse expanse.default expanse.numeric
### expanse.waveband expanse.generic_spct expanse.generic_mspct
### ** Examples
expanse(10:20)
expanse(sun.spct)
wl_expanse(sun.spct)
expanse(sun.spct)
|
94186100629a8113c4ee41153b1782115f3ffdd6
|
f0b08a058676e4b1ce0c1436bfe022aee9989326
|
/target/res/iPatBSA.r
|
4d65c6df5d25480ba1c98419551728c21eae2dad
|
[] |
no_license
|
Poissonfish/iPat
|
1535c0bc7055a572edd8e035d6f946fdefd149f9
|
7e6c3acbb7bcc5e44385c875f6dc5c2fe312674f
|
refs/heads/master
| 2022-08-10T17:44:04.145533
| 2020-09-09T15:57:18
| 2020-09-09T15:57:18
| 59,730,270
| 17
| 2
| null | 2019-04-20T02:51:11
| 2016-05-26T07:40:57
|
Java
|
UTF-8
|
R
| false
| false
| 3,024
|
r
|
iPatBSA.r
|
# Input arguments
args = commandArgs(trailingOnly=TRUE)
GM.path = args[1]
GD.path = args[2]
W = as.numeric(args[3])
project = args[4]
wd = args[5]
lib = args[6]
arg_length = 6
# GM.path= "/Users/Poissonfish/Dropbox/MeetingSlides/iPat/Demo_data/BSA/data.map"
# GD.path= "/Users/Poissonfish/Dropbox/MeetingSlides/iPat/Demo_data/BSA/data.bsa"
# wd="/Users/Poissonfish/Desktop/test/seg"
# lib = "/Users/Poissonfish/git/iPat/libs/"
# W = 50000
# project = "project 1"
tryCatch({
# Load libraries
print("Initializing iPat")
setwd(lib)
list.of.packages <- c("data.table", "magrittr", "bigmemory", "biganalytics", "MASS", "gplots", "compiler", "scatterplot3d", "R.utils", "snpMatrix")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages, repos="http://cran.rstudio.com/")
library(data.table)
library(magrittr)
library(bigmemory)
library(biganalytics)
library(compiler) #this library is already installed in R
library(MASS) # required for ginv
library(multtest)
library(gplots)
library(scatterplot3d)
library(R.utils)
source("./Function_GAPIT.R")
setwd(wd)
# Load Data
print("Loading data")
GM = fread(GM.path, head=TRUE); names(GM)[1] = "SNP"; setkey(GM, SNP)
GD = fread(GD.path, head=TRUE); names(GD)[1] = "SNP"; setkey(GD, SNP)
data = merge(GM, GD)
data = data[order(Chromosome, Position)]
m = nrow(data)
adjust = 1e-10
# Compute G
print("Computing G Statistics")
data[, sum := LowA + LowB + HighA + HighB]
data[, LA.hat := ((LowA+LowB)*(LowA+HighA)/sum)+adjust]
data[, LB.hat := ((LowB+LowA)*(LowB+HighB)/sum)+adjust]
data[, HA.hat := ((HighA+HighB)*(HighA+LowA)/sum)+adjust]
data[, HB.hat := ((HighB+HighA)*(HighB+LowB)/sum)+adjust]
data[, G_Stat := 2*(LowA*log((LowA/LA.hat)+adjust) +
LowB*log((LowB/LB.hat)+adjust) +
HighA*log((HighA/HA.hat)+adjust) +
HighB*log((HighB/HB.hat)+adjust))]
# Compute G'
print("Computing G-Prime Statistics")
gp = vector(mode = "numeric", length = m)
for (i in 1:m){
focal = data[i, Position]
chr = data[i, Chromosome]
subset = data[Chromosome==chr & Position < focal+(W/2) & Position > focal-(W/2)]
subset[, D := (1-(abs((focal-Position)/(W/2)))^3)^3]
Sw = sum(subset$D)
subset[,k := D/Sw]
gp[i] = t(matrix(subset$k)) %*% matrix(subset$G_Stat)
}
data[, G_Prime:= gp]
data[, P:= pchisq(G_Prime, df = 3, lower.tail = F)]
print("Plotting")
GAPIT.Manhattan(GI.MP= data.frame(Chromosom = data$Chromosome,
Position = data$Position,
P = data$P), name.of.trait = project)
GAPIT.QQ(data$P, name.of.trait = project)
write.table(x = data[,c("SNP", "Chromosome", "Position", "G_Stat", "G_Prime", "P")], file = sprintf("BSA_%s.txt", project), sep = "\t", quote = F, row.names = F)
print("Done")
print(warnings())
},error = function(e){
stop(e)
})
|
c1e686456aee208625b77e2313c98703bd1e4ad9
|
00057700ab765bec2038840bfc5462ee06c3b3c5
|
/rankhospital(correcion).R
|
4d954655b842807b3abed29e1cf81e087a3daeda
|
[] |
no_license
|
ChristianPerezCastillo/Programacion_Actuarial_III
|
e912e67a1616d5655da9117b64d133b9567d7415
|
36fb00c332a5aa652e8ffca3400c7ba714458f6f
|
refs/heads/master
| 2021-01-09T09:38:53.525422
| 2017-06-02T17:23:41
| 2017-06-02T17:23:41
| 81,164,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,398
|
r
|
rankhospital(correcion).R
|
rankhospital <- function(estado, resultado, num = "mejor") {
  # Return the name of the hospital ranked `num` within state `estado`
  # for 30-day mortality on outcome `resultado`:
  #   "ataque"   -> heart attack   (CSV column 11)
  #   "falla"    -> heart failure  (CSV column 17)
  #   "neumonia" -> pneumonia      (CSV column 23)
  # `num` may be "mejor" (best), "peor" (worst) or an integer rank.
  # NOTE(review): setwd() inside a function is a side effect kept from the
  # original; the CSV is expected to live in this course directory.
  setwd("~/GitHub/Programacion_Actuarial_III/Caso 2")
  outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Validate the state abbreviation against the data set.
  if (!estado %in% outcome$State) {
    stop("Estado invalido")
  }
  # Map the outcome name to its column index; unknown names are rejected.
  Col <- switch(resultado,
    ataque = 11,
    falla = 17,
    neumonia = 23,
    stop("Resultado inválido")
  )
  # Keep hospital name (2), state (7) and the mortality column; restrict to
  # the requested state and to hospitals with available data; order by
  # mortality with ties broken alphabetically by hospital name.
  uCol <- outcome[c(2, 7, Col)]
  tCol <- subset(uCol, uCol$State == estado & !uCol[[3]] == "Not Available")
  tColOr <- tCol[order(as.numeric(tCol[[3]]), tCol[[1]]), ]
  if (num == "peor") {
    # Worst hospital: highest mortality. as.numeric() added — which.max on
    # a character column is an error.
    tColOr$Hospital.Name[which.max(as.numeric(tColOr[[3]]))]
  } else if (num == "mejor") {
    # Best hospital: lowest mortality.
    tColOr$Hospital.Name[which.min(as.numeric(tColOr[[3]]))]
  } else {
    # Numeric rank: re-rank every hospital in the state. "Not Available"
    # becomes NA and sorts last, so an out-of-range rank returns NA.
    u <- subset(outcome, outcome$State == estado)
    rate <- suppressWarnings(as.numeric(u[, Col]))
    u[order(rate, u[, 2]), 2][num]
  }
}
rankhospital("TX", "falla", 4)
rankhospital("MD", "ataque", "peor")
rankhospital("MN", "ataque", 5000)
|
20a48cf5995dc4ce27c61fbc47b9768dc51a86a8
|
722594cdf49f51ff0af655cd34b6079bd43fdaa4
|
/R/filters.R
|
3a73426c01cefff1b4c0b219e07090e3b38b0ffa
|
[
"MIT"
] |
permissive
|
mdlincoln/fugger
|
c1b2071c8176cd48e17211c51156e5b15e017c75
|
5bdc03704a0ffd05a0e3cef008e6cf849ea01535
|
refs/heads/master
| 2020-12-25T00:50:50.267661
| 2016-04-19T20:34:19
| 2016-04-19T20:34:19
| 56,629,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,412
|
r
|
filters.R
|
#' Grayscale filter
#'
#' Rasterize a plot, convert the raster to grayscale with
#' \code{imager::grayscale()}, and re-plot the result.
#'
#' @param p An object or expression that evaluates as a plot.
#' @return Invisibly, the grayscale \code{imager} raster that was plotted.
#' @export
fug_gs <- function(p) {
  raster_plot <- rasterize(p)
  gs_raster <- imager::grayscale(raster_plot)
  # Close the device opened while rasterizing; silently ignore the error
  # if no device is open.
  try(dev.off(), silent = TRUE)
  plot(gs_raster, xaxt = "n", yaxt = "n", ann = FALSE)
  invisible(gs_raster)
}
#' Black and white filter
#'
#' Rasterize a plot, threshold its grayscale version to pure black and
#' white, and re-plot the result.
#'
#' @param p An object or expression that evaluates as a plot.
#' @param threshold Set a threshold for black vs. white. Defaults to
#'   "auto". See \code{\link[imager]{threshold}}.
#' @return Invisibly, the thresholded \code{imager} raster that was plotted.
#' @export
fug_bw <- function(p, threshold = "auto") {
  raster_plot <- rasterize(p)
  bw_raster <- imager::threshold(imager::grayscale(raster_plot), thr = threshold)
  # Close the device opened while rasterizing; ignore if none is open.
  try(dev.off(), silent = TRUE)
  plot(bw_raster, xaxt = "n", yaxt = "n", ann = FALSE)
  invisible(bw_raster)
}
#' Run an arbitrary function from imager
#'
#' Rasterize a plot, pass the raster through any \code{imager} function
#' that accepts the image via an \code{im} argument, and plot the result.
#'
#' @param p Expression that evaluates to a plot
#' @param imager_fun imager function; must accept the image as an
#'   argument named \code{im}.
#' @param ... Arguments to pass to \code{imager_fun}
#' @return Invisibly, the filtered \code{imager} raster that was plotted.
#' @export
fug_fun <- function(p, imager_fun, ...) {
  # Stop if imager_fun does not take "im" as an argument
  stopifnot("im" %in% names(formals(imager_fun)))
  raster_plot <- rasterize(p)
  fugged_raster <- imager_fun(im = raster_plot, ...)
  # Close the device opened while rasterizing; ignore if none is open.
  try(dev.off(), silent = TRUE)
  plot(fugged_raster, xaxt = "n", yaxt = "n", ann = FALSE)
  invisible(fugged_raster)
}
|
99f07b45868b8eea2b348ab3bf6a9c19cc4025c8
|
94eac0978570c5c33e8bbc1f588b81fae98bcca7
|
/R/useful.R
|
f60666ca710b18e34611b53c8277d289ae6b1e58
|
[
"MIT"
] |
permissive
|
fdrennan/initr
|
ca8c4ba7ecdfdcd831e3528d47ca1a80758a1f78
|
1d4b73076e33ff2edf5096d5ecd21f596adbd28f
|
refs/heads/master
| 2022-12-22T10:08:24.990650
| 2020-09-28T16:25:43
| 2020-09-28T16:25:43
| 298,953,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
useful.R
|
#' glue_message
#'
#' Interpolate a glue string and emit it as a \code{message()}.
#'
#' @importFrom glue glue
#' @param string A string, possibly containing \code{\{\}} glue placeholders.
#' @param ... Additional arguments passed on to \code{\link[glue]{glue}}.
#' @return Whatever \code{message()} returns (invisible \code{NULL});
#'   called for its side effect of printing the interpolated string.
#' @export glue_message
glue_message <- function(string, ...) message(glue(string, ...))
|
73734d94f215f02a2150bc4127a17163efe9b583
|
0cc53564d31bf69182a1c17ef188139d78519852
|
/macierz_odwrotna.R
|
ea225647a9c077bd752eae543eb4df2133a97c4b
|
[] |
no_license
|
KrzysztofKsiazek/Operacje_na_macierzach_w_R
|
efb9fc312f97b8b1ad3384b2b55900fef0f6e20d
|
bb179bd9202ecf69276ee0e97ea13c1304341fc8
|
refs/heads/master
| 2021-04-13T12:16:07.633480
| 2020-03-28T14:53:25
| 2020-03-28T14:53:25
| 249,162,008
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,373
|
r
|
macierz_odwrotna.R
|
funkcja <- function(A)
{
  # Append a LaTeX document to 'macierz_odwrotna.tex' showing a square
  # matrix A and its inverse, and print both to the console. Non-matrix
  # input is reported; a non-square matrix produces a document that only
  # states the error. The caller is expected to create/remove the file.
  #
  # Local helper: render matrix M as "$$\begin{pmatrix} ... \end{pmatrix}$$"
  # with entries rounded up to two decimals (ceiling(x * 100) / 100, as in
  # the original output).
  as_pmatrix <- function(M) {
    tex <- '$$\\begin{pmatrix}'
    for (i in seq_len(nrow(M))) {
      for (j in seq_len(ncol(M))) {
        delim <- if (j < ncol(M)) '&' else '\\\\'
        tex <- paste(tex, ceiling(M[i, j] * 100) / 100, delim)
      }
    }
    paste(tex, '\\end{pmatrix}$$')
  }
  # class(A) != 'matrix' is unreliable since R 4.0 (class(matrix) returns
  # c("matrix", "array"), a length-2 condition) — use is.matrix() instead.
  if (!is.matrix(A)) {
    return(print('A must be a Matrix'))
  }
  # LaTeX preamble and title (appended to the existing file).
  cat('\\documentclass[10pt,a4paper]{article}
\\usepackage[utf8]{inputenc}
\\usepackage{amsmath}
\\usepackage{amsfonts}
\\usepackage{amssymb}
\\usepackage[T1]{fontenc}
\\begin{document}
\\begin{LARGE}
\\begin{center}\\textbf{Inverse Matrix}\\\\\\end{center}
\\end{LARGE}', file = 'macierz_odwrotna.tex', append = TRUE)
  if (nrow(A) != ncol(A)) {
    write('Matrix must be square', file = 'macierz_odwrotna.tex', append = TRUE)
    cat('\\end{document}', file = 'macierz_odwrotna.tex', append = TRUE)
    return(print("Matrix must be square"))
  }
  # Compute the inverse once; the original called solve(A) inside a double
  # loop, i.e. O(n^2) matrix inversions.
  inv_A <- solve(A)
  write("Matrix", file = 'macierz_odwrotna.tex', append = TRUE)
  write(as_pmatrix(A), 'macierz_odwrotna.tex', append = TRUE)
  write("Inverse Matrix", file = 'macierz_odwrotna.tex', append = TRUE)
  write(as_pmatrix(inv_A), 'macierz_odwrotna.tex', append = TRUE)
  cat('\\end{document}', file = 'macierz_odwrotna.tex', append = TRUE)
  print("Matrix:")
  print(A)
  print("Inverse Matrix:")
  print(inv_A)
}
#wpisz macierz!!!
A <- matrix(data=c(2,1,5,3),nrow =2,ncol=2,byrow=TRUE)
setwd("C:/Users/Krzysiek/desktop/github/macierz odwrotna")
file.create('macierz_odwrotna.tex')
funkcja(A)
file.show('macierz_odwrotna.tex')
file.remove('macierz_odwrotna.tex')
|
bcf91e1d9b07f1e58cae97a0395f376be8039818
|
62275a1dde11833b7f9148cc720875c591d0fa1d
|
/jumpin2.R
|
e0266ebb7892d5346b502a6ff16ac1a53a8c4dfe
|
[] |
no_license
|
jimhester/dashboard
|
4256cde1b94728156a4aff2b780d47131023dccf
|
11571007c517362a74b77f8c6fbb58bd621774f5
|
refs/heads/master
| 2020-03-13T14:45:12.541273
| 2018-04-26T16:25:51
| 2018-04-26T16:25:51
| 131,164,697
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
jumpin2.R
|
library(dashboard)

# Names of the rOpenSci repositories whose GitHub statistics we collect.
repo_names <- c(
  "alm", "AntWeb", "bmc", "bold", "clifro", "dependencies",
  "ecoengine", "ecoretriever", "elastic", "elife", "floras",
  "fulltext", "geonames", "gistr", "jekyll-knitr", "mocker",
  "neotoma", "plotly", "rAltmetric", "rAvis", "rbhl", "rbison",
  "rcrossref", "rdatacite", "rdryad", "rebird", "rentrez", "reol",
  "reproducibility-guide", "rfigshare", "rfishbase", "rfisheries",
  "rflybase", "rgauges", "rgbif", "rglobi", "rhindawi",
  "rinat", "RMendeley", "rmetadata", "RNeXML", "rnoaa", "rnpn",
  "traits", "rplos", "rsnps", "rspringer", "rvertnet", "rWBclimate",
  "solr", "spocc", "taxize", "togeojson", "treeBASE", "ucipp", "testdat",
  "git2r", "rdat", "EML", "aRxiv", "datapackage", "dvn", "gender", "ggit",
  "gigadb", "historydata", "ICES", "mdextract", "ots", "paleobioDB",
  "pangaear", "prism", "rDat", "rebi", "rnbn", "rOBIS", "rorcid",
  "RSelenium", "sheetseeR", "USAboundaries", "zenodo"
)

# Prefix each repository name with the "ropensci" GitHub organisation.
repo_slugs <- add_github(repo_names, "ropensci")

message("Now querying the GitHub API \n")

# Fetch per-repository statistics, dropping repositories that returned nothing.
stats_list <- Filter(Negate(is.null), lapply(repo_slugs, github_stats))

# Finally generate a static html page from the collected statistics.
generate_html(stats_list)
|
288ad3b4dff187df56f35564c3cdf30b987223f3
|
c78d8465d47ae60f6f5aa0fbb2dad8f479fa6ad7
|
/R/swaleFunctions.R
|
20302e1de0c879192e773c4bf47396e4e453166d
|
[] |
no_license
|
wdweeda/swale
|
9ca563a537c59963752e2cc2054b242de8a04f14
|
e293937273e3de7f7f75ad4196ad2dc92411bfb0
|
refs/heads/master
| 2023-04-05T07:02:10.007323
| 2023-03-20T12:18:11
| 2023-03-20T12:18:11
| 224,168,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,233
|
r
|
swaleFunctions.R
|
#############################################
# Basis function FUNCTIONS #
# simultaneous waveform and amplitude #
# Copyright(c) 2009 Wouter D. Weeda #
# University of Amsterdam #
#############################################
#CONTAINS
#estimate.f
#average.f
#split.f.window
#split.f.fix
#estimate.ab
#model
#rss
#aic
estimate.f <-
function(swaledat)
#estimate the waveform (polynomial basis coefficients) by least squares,
#holding the current amplitude (a) and latency (b) estimates fixed
{
	# inherits() is the robust class test: class() can return a vector,
	# which makes a scalar comparison inside if() unreliable.
	if(!inherits(swaledat,'swale.internal')) stop('Input must be of class \'swale.internal\'')
	#set estimation objects
	Y <- .eeg.data.data(.swale.internal.eeg.data(swaledat))   # single-trial data matrix
	TP <- .basis.matrix(.swale.internal.basis(swaledat))      # basis functions
	dTP <- .basis.deriv(.swale.internal.basis(swaledat))      # basis derivatives (latency shift term)
	a <- .swale.internal.amps(swaledat)
	b <- .swale.internal.lats(swaledat)
	#build the design matrix via Kronecker products and solve the normal equations
	X <- TP%x%a + dTP%x%b
	f <- solve(t(X)%*%X)%*%t(X)%*%as.vector(Y)
	# one column of coefficients per waveform component
	.swale.internal.waves(swaledat) <- matrix(f,,ncol(a),byrow=TRUE)
	return(swaledat)
}
average.f <-
function(swaledat)
#calculate the summed/average waveform implied by the current per-trial
#amplitude (a) and latency (b) estimates, and project it back onto the basis
{
	if(!inherits(swaledat,'swale.internal')) stop('Input must be of class \'swale.internal\'')
	#set estimation objects
	TP <- .basis.matrix(.swale.internal.basis(swaledat))
	dTP <- .basis.deriv(.swale.internal.basis(swaledat))
	a <- .swale.internal.amps(swaledat)
	b <- .swale.internal.lats(swaledat)
	f <- .swale.internal.waves(swaledat)
	trials <- nrow(.eeg.data.data(.swale.internal.eeg.data(swaledat)))
	#reconstruct each trial's summed waveform from all components
	fhat <- matrix(0,trials,nrow(TP))
	for(i in seq_len(ncol(a))) {
		for(trial in seq_len(trials)) {
			fhat[trial,] <- fhat[trial,] + ((TP%*%f[,i])*(a[trial,i])+(dTP%*%f[,i])*(b[trial,i]))
		}
	}
	#average over trials and project back into basis-coefficient space
	fhatmean <- as.vector(apply(fhat,2,mean))
	.swale.internal.waves(swaledat) <- t(TP)%*%fhatmean
	return(swaledat)
}
split.f.window <-
function(swaledat,window,plot=FALSE)
#cut up the estimated waveform into separate component waveforms:
#finds the (absolute) extremum inside each search window (rows of `window`
#give [start, end] sample indices) and cuts at midpoints between extrema
{
	if(!inherits(swaledat,'swale.internal')) stop('Input must be of class \'swale.internal\'')
	#set estimation objects
	TP <- .basis.matrix(.swale.internal.basis(swaledat))
	f <- .swale.internal.waves(swaledat)
	ft <- TP%*%f
	#search for maxima within each window
	# NOTE(review): which.max() is relative to the slice, so the absolute
	# index would be window[i,1] + idx - 1; the +1 offset here is kept
	# as-is from the original -- confirm intended.
	nc <- nrow(window)
	mx <- numeric(nc)
	for(i in seq_len(nc)) mx[i] <- window[i,1] + which.max(abs(ft[window[i,1]:window[i,2]]))
	#estimate cutpoints as midpoints between successive extrema
	mx <- sort(mx)
	cutpoints <- numeric(nc-1)
	for(i in seq_len(nc-1)) cutpoints[i] <- round((mx[i]+mx[i+1])/2)
	#copy the waveform into one column per component
	fs <- matrix(ft,nrow(ft),nc)
	end <- length(ft)
	#flatten each component outside its own segment (hold boundary value):
	#first and last components
	fs[cutpoints[1]:end,1] <- fs[cutpoints[1],1]
	fs[1:cutpoints[length(cutpoints)],nc] <- fs[cutpoints[length(cutpoints)],nc]
	#components in between
	if(length(cutpoints)>1) {
		for(i in 2:(nc-1)) {
			fs[1:cutpoints[i-1],i] <- fs[cutpoints[i-1],i]
			fs[cutpoints[i]:end,i] <- fs[cutpoints[i],i]
		}
	}
	#project each component back onto the basis and store
	waves <- matrix(NA,ncol(TP),nc)
	for(i in seq_len(nc)) waves[,i] <- t(TP)%*%fs[,i]
	.swale.internal.waves(swaledat) <- waves
	if(plot) {
		#diagnostic plot: component waveforms, their sum, and the data mean
		sumwave <- apply(fs,1,sum)
		yl <- c(min(cbind(fs,sumwave)),max(cbind(fs,sumwave)))
		par(las=1)
		plot(sumwave,type='l',lty=2,xlab='samples',ylab='mV',ylim=yl,bty='n',col=gray(.5),lwd=3)
		lines(apply(.eeg.data.data(.swale.internal.eeg.data(swaledat)),2,mean),col=gray(.2),lwd=5)
		for(i in seq_len(ncol(fs))) lines(fs[,i],lwd=3,lty=1,col=i)
	}
	return(swaledat)
}
split.f.one <-
function(swaledat,window)
#compute (but do not apply) the cutpoints between component waveforms:
#finds the absolute extremum inside each search window and returns the
#midpoints between successive extrema
{
	if(!inherits(swaledat,'swale.internal')) stop('Input must be of class \'swale.internal\'')
	#set estimation objects
	TP <- .basis.matrix(.swale.internal.basis(swaledat))
	f <- .swale.internal.waves(swaledat)
	ft <- TP%*%f
	#search for maxima within each window
	# NOTE(review): same +1 index offset as split.f.window (which.max is
	# relative to the slice) -- kept as-is, confirm intended.
	nc <- nrow(window)
	mx <- numeric(nc)
	for(i in seq_len(nc)) mx[i] <- window[i,1] + which.max(abs(ft[window[i,1]:window[i,2]]))
	#estimate cutpoints as midpoints between successive extrema
	mx <- sort(mx)
	cutpoints <- numeric(nc-1)
	for(i in seq_len(nc-1)) cutpoints[i] <- round((mx[i]+mx[i+1])/2)
	return(cutpoints)
}
split.f.fix <-
function(swaledat,cutpoints)
#cut up the estimated waveform into component waveforms at caller-supplied
#cutpoints (same splitting logic as split.f.window, without the search)
{
	if(!inherits(swaledat,'swale.internal')) stop('Input must be of class \'swale.internal\'')
	#set estimation objects
	TP <- .basis.matrix(.swale.internal.basis(swaledat))
	f <- .swale.internal.waves(swaledat)
	ft <- TP%*%f
	nc <- length(cutpoints)+1
	#copy the waveform into one column per component
	fs <- matrix(ft,nrow(ft),nc)
	end <- length(ft)
	#flatten each component outside its own segment (hold boundary value):
	#first and last components
	fs[cutpoints[1]:end,1] <- fs[cutpoints[1],1]
	fs[1:cutpoints[length(cutpoints)],nc] <- fs[cutpoints[length(cutpoints)],nc]
	#components in between
	if(length(cutpoints)>1) {
		for(i in 2:(nc-1)) {
			fs[1:cutpoints[i-1],i] <- fs[cutpoints[i-1],i]
			fs[cutpoints[i]:end,i] <- fs[cutpoints[i],i]
		}
	}
	#project each component back onto the basis and store
	waves <- matrix(NA,ncol(TP),nc)
	for(i in seq_len(nc)) waves[,i] <- t(TP)%*%fs[,i]
	.swale.internal.waves(swaledat) <- waves
	return(swaledat)
}
estimate.ab <-
function(swaledat)
#estimate per-trial amplitude (a) and latency (b) parameters by least
#squares, holding the current waveform coefficients f fixed; amplitudes
#are rescaled so each component has mean amplitude 1
{
	if(!inherits(swaledat,'swale.internal')) stop('Input must be of class \'swale.internal\'')
	#set estimation objects
	Y <- .eeg.data.data(.swale.internal.eeg.data(swaledat))
	TP <- .basis.matrix(.swale.internal.basis(swaledat))
	dTP <- .basis.deriv(.swale.internal.basis(swaledat))
	f <- .swale.internal.waves(swaledat)
	I <- diag(nrow(Y))
	#design matrix: amplitude regressors followed by latency regressors
	X <- cbind( I%x%(TP%*%f) , I%x%(dTP%*%f) )
	ab <- as.vector(t(Y))%*%X%*%solve(t(X)%*%X)
	#rearrange the stacked coefficient vector to (trials x components) matrices
	nf <- ncol(f)
	ab1 <- matrix(ab,,nf,byrow=TRUE)
	.swale.internal.amps(swaledat) <- matrix(ab1[1:nrow(Y),],,nf)
	.swale.internal.lats(swaledat) <- matrix(ab1[(nrow(Y)+1):nrow(ab1),],,nf)
	#scale each component's amplitudes to mean 1 (absorbs scale into f)
	ma <- apply(.swale.internal.amps(swaledat),2,mean)
	for(i in seq_len(ncol(.swale.internal.amps(swaledat)))) .swale.internal.amps(swaledat)[,i] <- .swale.internal.amps(swaledat)[,i] / ma[i]
	return(swaledat)
}
model <-
function(swaledat)
#compute the model-implied data (fitted values) from the current waveform,
#amplitude and latency estimates, and store them in the model slot
{
	if(!inherits(swaledat,'swale.internal')) stop('Input must be of class \'swale.internal\'')
	#set estimation objects
	TP <- .basis.matrix(.swale.internal.basis(swaledat))
	dTP <- .basis.deriv(.swale.internal.basis(swaledat))
	a <- .swale.internal.amps(swaledat)
	b <- .swale.internal.lats(swaledat)
	f <- .swale.internal.waves(swaledat)
	#calculate model estimates (same design as estimate.f, applied forward)
	Y <- (TP%x%a + dTP%x%b)%*%as.vector(t(f))
	.swale.internal.model(swaledat) <- matrix(Y,nrow(a),nrow(TP),byrow=FALSE)
	return(swaledat)
}
rss <-
function(swaledat)
#compute the residual sum of squares (data vs model) and store it;
#accepts either a solution object (unwrapped first) or an internal object
{
	if(inherits(swaledat,'swale.solution')) swaledat <- .swale.solution.internal(swaledat) else if(!inherits(swaledat,'swale.internal')) stop('input must be of class internal or solution')
	#set estimation objects
	Y <- .eeg.data.data(.swale.internal.eeg.data(swaledat))
	yhat <- .swale.internal.model(swaledat)
	#rss = inner product of the residual vector with itself
	.swale.internal.rss(swaledat) <- as.vector(t(as.vector(Y)-as.vector(yhat))%*%((as.vector(Y)-as.vector(yhat))))
	return(swaledat)
}
aic <-
function(swaledat,correct=FALSE,adjustWave=1,adjustAmp=1,adjustLat=1,adjustN=0)
#compute model fit via AIC = 2k + n*log(rss/n); `correct=TRUE` adds the
#small-sample (AICc) correction; the adjust* arguments rescale the
#effective parameter and observation counts
{
	if(inherits(swaledat,'swale.solution')) swaledat <- .swale.solution.internal(swaledat) else if(!inherits(swaledat,'swale.internal')) stop('input must be of class internal or solution')
	#effective parameter count: waveform coefficients (scaled by adjustWave)
	#plus amplitude and latency parameters (divided by their adjust factors)
	k <- (.basis.num.funcs(.swale.internal.basis(swaledat))*ncol(.swale.internal.waves(swaledat))*adjustWave)+length(as.vector(.swale.internal.amps(swaledat)))/adjustAmp+length(as.vector(.swale.internal.lats(swaledat)))/adjustLat
	n <- length(as.vector(.eeg.data.data(.swale.internal.eeg.data(swaledat))))-adjustN
	rss <- .swale.internal.rss(swaledat)
	aicvalue <- try( 2*k + n*(log(rss/n)) )
	if(correct) aicvalue <- aicvalue + ( (2*k*(k+1)) / (n-k-1) )
	return(aicvalue)
}
bic <-
function(swaledat)
#compute model fit via BIC from the stored residual sum of squares
{
	if(inherits(swaledat,'swale.solution')) swaledat <- .swale.solution.internal(swaledat) else if(!inherits(swaledat,'swale.internal')) stop('input must be of class internal or solution')
	k <- .basis.num.funcs(.swale.internal.basis(swaledat))*ncol(.swale.internal.waves(swaledat))+length(as.vector(.swale.internal.amps(swaledat)))+length(as.vector(.swale.internal.lats(swaledat)))
	n <- length(as.vector(.eeg.data.data(.swale.internal.eeg.data(swaledat))))
	#residuals are only needed by the commented-out alternative formula below
	resids <- as.vector(.eeg.data.data(.swale.internal.eeg.data(swaledat)) - .swale.internal.model(swaledat))
	rss <- .swale.internal.rss(swaledat)
	#bicvalue = try( n*(log(var(resids)/n)) + k*log(n) )
	# NOTE(review): the textbook BIC is n*log(rss/n) + k*log(n); the minus
	# sign here flips the complexity penalty -- kept as-is, confirm intended.
	bicvalue <- try( n*(log(rss/n)) - k*log(n) )
	return(bicvalue)
}
Rsq <-
function(swaledat)
#compute the (uncentered) R-squared of the model: 1 - SSres/SStot
{
	if(inherits(swaledat,'swale.solution')) swaledat <- .swale.solution.internal(swaledat) else if(!inherits(swaledat,'swale.internal')) stop('input must be of class internal or solution')
	#set estimation objects
	Y <- .eeg.data.data(.swale.internal.eeg.data(swaledat))
	yhat <- .swale.internal.model(swaledat)
	#residual and total sums of squares (inner products)
	SSres <- as.vector(t(as.vector(Y)-as.vector(yhat))%*%((as.vector(Y)-as.vector(yhat))))
	# NOTE(review): SStot is not mean-centered, so this is an uncentered R^2.
	SStot <- as.vector(t(as.vector(Y))%*%((as.vector(Y))))
	Rsq <- 1 - (SSres/SStot)
	return(Rsq)
}
swalecor <-
function(swalesol)
#pairwise correlations between amplitude parameters and between latency
#parameters of a swale solution, restricted to non-discarded trials;
#returns estimates and p-values for both parameter sets
{
	ncors <- ncol(.swale.solution.waveform(swalesol))
	ampcor <- ampcor.p <- latcor <- latcor.p <- matrix(NA,ncors,ncors)
	valid <- which(.swale.solution.discard(swalesol)!=1)
	if(length(valid)<2) {warning('Not enough valid trials!')} else {
		amp <- .swale.solution.amplitude(swalesol)
		lat <- .swale.solution.latency(swalesol)
		for(row in seq_len(ncors)) {
			for(col in seq_len(ncors)) {
				#run each test once and reuse both its estimate and p-value
				#(the original called cor.test() twice per cell)
				amptest <- cor.test(amp[valid,row],amp[valid,col])
				ampcor[row,col] <- amptest$estimate
				ampcor.p[row,col] <- amptest$p.value
				lattest <- cor.test(lat[valid,row],lat[valid,col])
				latcor[row,col] <- lattest$estimate
				latcor.p[row,col] <- lattest$p.value
			}
		}
	}
	return(list(amplitude=list(estimate=ampcor,p.value=ampcor.p),latency=list(estimate=latcor,p.value=latcor.p)))
}
|
699f696b1304a4464d91dd0edcdcfa8b2d2c73d6
|
156bf3896664187be3e56b33f908a0d865f01b7d
|
/chigcrim/man/community_bounds.Rd
|
b5cbe239578295f20af37180cc61f676119fc975
|
[] |
no_license
|
shannon-wms/chicago-crime
|
76d317be53ea491266cf66c804b4c0e8a58f0edc
|
5c5bad65df41385f3a481d9b95654ed29fdf8bb6
|
refs/heads/main
| 2023-05-01T19:45:00.856379
| 2021-01-25T14:30:17
| 2021-01-25T14:30:17
| 316,237,665
| 0
| 1
| null | 2021-05-24T16:17:18
| 2020-11-26T13:21:57
|
R
|
UTF-8
|
R
| false
| true
| 591
|
rd
|
community_bounds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{community_bounds}
\alias{community_bounds}
\title{Community area boundaries
City of Chicago community area boundaries.}
\format{
An object of class \code{sf} (inherits from \code{data.frame}) with 77 rows and 10 columns.
}
\usage{
community_bounds
}
\description{
Community area boundaries
City of Chicago community area boundaries.
}
\references{
\url{https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6}
}
\keyword{datasets}
|
ddab9e2c3ba4b7ab55b20494cfc2ee52bf693298
|
b040a32bdf134e214179481ac5ef4a0de6f5aa40
|
/11_Stargazer/tabular.R
|
a9e673d31b6e1d856cfaaee621b7e526f1bf5b60
|
[] |
no_license
|
khbae/r-project-lecture
|
1de3bd032264dd9a59888dc1aba6451b82695efc
|
eb62c2ff90a80473c487e3ef808d912471a96e4a
|
refs/heads/main
| 2023-03-27T14:18:06.283030
| 2021-03-09T13:47:45
| 2021-03-09T13:47:45
| 320,273,518
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
tabular.R
|
library(dplyr)
library(stargazer)
# Demo: stargazer() returns the generated LaTeX table as a character vector.
out <- stargazer(iris)
is(out)  # inspect the class of the returned object
tabular <- function(stargazer.out, file.name) {
  # Strip the LaTeX preamble (first 6 lines) and the trailing line from
  # stargazer's output, keeping only the tabular body, and write it to
  # `file.name` (one element per line) so it can be \input into a document.
  body_lines <- head(stargazer.out[-1:-6], -1)
  cat(paste(body_lines, collapse = "\n"), sep = "\n", file = file.name)
}
# End-to-end example: render iris with stargazer and write the trimmed
# tabular body to fn.tex in the working directory.
iris %>%
  stargazer(.) %>%
  tabular(.,"fn.tex")
|
d59001cc674d2412d87e4fd4ed72fb2718f06127
|
e91c5f8da9291cb2dfb9436bd9934054df6940cf
|
/Today_Flat_Systems.R
|
675a38c38045f4f2f50976b09010d06d108a060c
|
[] |
no_license
|
melgj/UKHR
|
671ee8307f2146f44c753aef8ade03eda68e4021
|
5653ff75aeddc6657503bfe171b9e6a3ce493b0a
|
refs/heads/master
| 2020-03-27T14:34:17.238033
| 2019-04-29T18:38:28
| 2019-04-29T18:38:28
| 105,633,141
| 0
| 0
| null | 2017-10-03T09:27:40
| 2017-10-03T09:12:01
| null |
UTF-8
|
R
| false
| false
| 4,179
|
r
|
Today_Flat_Systems.R
|
# filter Flat turf races only
# NOTE(review): `ukhr_master_BF` is created upstream -- confirm it is loaded.
ukFlat <- filter(ukhr_master_BF, RaceType == "FLAT")
# Three-level distance banding: <8f Sprint, 8-13f Middle, >=14f Long.
# NOTE(review): Dist_Range and Dist_Range_Split are identical here, although
# `today` later gets a two-level Sprint/Route Dist_Range -- confirm intended.
ukFlat$Dist_Range <- as.factor(ifelse(ukFlat$Furlongs < 8, "Sprint",
                                      ifelse(ukFlat$Furlongs < 14,"Middle", "Long")))
ukFlat$Dist_Range_Split <- as.factor(ifelse(ukFlat$Furlongs < 8, "Sprint",
                                      ifelse(ukFlat$Furlongs < 14,"Middle", "Long")))
#remove jump specific fields
ukFlat <- select(ukFlat, -c(ChaseJumpingAbility, HunterChase, Beginner))
# Irish courses, used later to exclude Irish meetings via %in%.
# FIX: removed a duplicated "DOWN ROYAL" entry (harmless for %in%, but noise).
# NOTE(review): "BALLINROBE " and "FAIRYHOUSE " carry trailing spaces --
# presumably matching the raw feed's meeting names; confirm before trimming.
irishMeetings <- c("BALLINROBE ", "BELLEWSTOWN", "CLONMEL", "CORK", "CURRAGH", "DOWN ROYAL", "FAIRYHOUSE ",
                   "GALWAY", "GOWRAN PARK", "KILLARNEY", "LEOPARDSTOWN", "LIMERICK", "LISTOWEL", "NAAS", "PUNCHESTOWN",
                   "ROSCOMMON", "SLIGO", "TIPPERARY", "TRAMORE", "WEXFORD")
##################################################################################
# --- Today's system qualifiers ------------------------------------------------
# NOTE(review): `today` (today's runners), `softGround` and the CSV system
# files below are created elsewhere / expected in the working directory.

# Today's Flat runners with a two-level distance band (Sprint vs Route).
todayFlat <- filter(today, RaceType == "FLAT")
todayFlat$Dist_Range <- as.factor(ifelse(todayFlat$Furlongs < 8, "Sprint", "Route"))
# Also tag the full `today` frame with both distance bandings.
today$Dist_Range_Split <- as.factor(ifelse(today$Furlongs < 8, "Sprint",
                                            ifelse(today$Furlongs < 14,"Middle", "Long")))
today$Dist_Range <- as.factor(ifelse(today$Furlongs < 8, "Sprint", "Route"))
# System 1: trainer/meeting combinations with a Flat edge (from CSV).
trainersFlatUK <- read_csv("Flat_TFC.csv", col_names = T)
trQuals <- trainersFlatUK%>%
  left_join(todayFlat, by = c("Trainer","Meeting","RaceType"))%>%
  filter(!is.na(Time24Hour)) %>%
  arrange(Time24Hour, Meeting, Horse, Rating_Rank)
trQuals
if(nrow(trQuals) > 0) {
  trQuals$System_Name <- "Flat_TFC"
}
# System 2: sires whose progeny handle soft ground, matched on distance band.
softSiresFlat <- read_csv("Soft_Ground_Sires_Flat.csv", col_names = T)
todaySoftSiresFlatQ <- softSiresFlat %>%
  left_join(todayFlat, by = c("Sire", "Dist_Range")) %>%
  arrange(Time24Hour, Meeting, Horse) %>%
  filter(Going %in% softGround, NumberOfResults >= 1, !is.na(Time24Hour), RaceType == "FLAT")
todaySoftSiresFlatQ
if(nrow(todaySoftSiresFlatQ) > 0) {
  todaySoftSiresFlatQ$System_Name <- "Soft_Ground_Flat_Sires"
}
# System 3: profitable trainer/jockey combinations on the Flat.
trJkComboFlat <- read_csv("TrainerJockeyFlatCombo.csv", col_names = T)
trJkComboFlatQ <- trJkComboFlat%>%
  left_join(todayFlat, by = c("Trainer","Jockey")) %>%
  filter(RaceType == "FLAT", !is.na(Time24Hour)) %>%
  arrange(Time24Hour, Meeting, Horse)
trJkComboFlatQ
if(nrow(trJkComboFlatQ) > 0) {
  trJkComboFlatQ$System_Name <- "Flat_TJ_Combo"
}
# System 4: top-rated, bottom-weighted handicappers on soft ground
# (UK/GB meetings only -- Irish meetings excluded).
flat.TR.BW <- read_csv("TopRatedSoftGroundLowWeights.csv", col_names = T)
todayBWHQuals <- flat.TR.BW %>%
  left_join(today, by = c("Rev_Weight_Rank")) %>%
  filter(Going %in% softGround, Handicap == "HANDICAP", RaceType == "FLAT", Rating_Rank <= 3, !is.na(Time24Hour),
         !(Meeting %in% irishMeetings)) %>%
  arrange(Time24Hour, Meeting, Horse)
todayBWHQuals
if(nrow(todayBWHQuals) > 0) {
  todayBWHQuals$System_Name <- "Flat_Soft_Ground_Hcp_Bottom_Weights"
}
# System 5: trainer record by handicap status and three-level distance band,
# restricted to younger horses (Age <= 5).
flatTrDr <- read_csv("FlatTrainerDistanceRange.csv", col_names = T)
todayTrDrQuals <- flatTrDr %>%
  left_join(today, by = c("Trainer", "Dist_Range_Split", "Handicap")) %>%
  filter(Age <= 5, RaceType == "FLAT", !is.na(Time24Hour)) %>%
  arrange(Time24Hour, Meeting, Horse)
# todayTrDrQuals <- select(todayTrDrQ ,Time24Hour, Meeting, Horse, Runs, meanPL, totalPL, AE_Ratio, WinPercent, Winners,
#                      Exp_Wins, Trainer, Handicap, Dist_Range, Ratings_Range, Archie, BetFairSPForecastWinPrice, ValueOdds_BetfairFormat,
#                      Rating_Rank)
todayTrDrQuals
if(nrow(todayTrDrQuals) > 0) {
  todayTrDrQuals$System_Name <- "Flat_Trainer_Hcp_Distance_Range"
}
# System 6: sire record by going range and two-level distance band.
sireGngDist <- read_csv("SireGoingDistanceRange.csv", col_names = T)
todaySireGngDistQ <- sireGngDist %>%
  left_join(today, by = c("Sire", "Dist_Range", "Going_Range")) %>%
  filter(RaceType == "FLAT", !is.na(Time24Hour), NumberOfResults >= 1) %>%
  arrange(Time24Hour, Meeting, Horse)
todaySireGngDistQ
if(nrow(todaySireGngDistQ) > 0) {
  todaySireGngDistQ$System_Name <- "Flat_Sires_Going_Distance_Range"
}
# Combine all system qualifiers into a single frame (full joins keep every
# qualifier even when a horse appears under several systems).
allFlatQuals <- trQuals %>%
  full_join(todaySoftSiresFlatQ) %>%
  full_join(trJkComboFlatQ) %>%
  full_join(todayBWHQuals) %>%
  full_join(todayTrDrQuals) %>%
  full_join(todaySireGngDistQ)
allFlatQuals
b2a2726abd95150ed53a3260cb1ab22f1c9902cf
|
c3105f7d7a422ba31d5d3db002db0c1c71c7fbf6
|
/finalCode.R
|
f3e7e3f0774496cc34bd0bb5cd9f2b2729891caf
|
[] |
no_license
|
hjgoldma/Math189HW3
|
c033687c200fa6003ca879545ea27f0bbcb9277c
|
692a2befaeca5723abee216e589f996814308e50
|
refs/heads/master
| 2021-01-18T05:43:14.737489
| 2017-03-08T06:34:38
| 2017-03-08T06:34:38
| 84,281,787
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 7,974
|
r
|
finalCode.R
|
# Filename: finalCode
#Authors: Himanshu Makhija, Hanna Goldman, Amey Paranjape, Shagun Gupta
#Class: MATH 189 -- Winter, 2017
#Date: 3/7/2017
# Load the gauge calibration data (density, gain) from the working directory.
data <- read.table("gauge.txt", header=TRUE)
gain <- data$gain
density <- data$density
plot(x=gain, y=density)
# Least Squares Fit: density regressed on raw gain.
fit <- lm(density~gain, data)
fit
abline(fit,col="red")
# Residual diagnostics for the linear fit.
plot(fit$residuals)
abline(0, 0, col="red")
hist(fit$residuals)
qqnorm(fit$residuals)
qqline(fit$residuals, col="red")
# Quadratic Fit: add a squared-gain term.
fit.2 <- lm(density~gain+I(gain^2),data)
fit.2
# Log Transform: density regressed on log(gain).
logG <- log(gain)
fit.3 <- lm(density~logG, data)
fit.3
plot(fit.3$residuals)
abline(0, 0, col="red")
plot(logG,density)
abline(fit.3,col="red")
qqnorm(fit.3$residuals)
qqline(fit.3$residuals, col="red")
# R^2 Check: compare the three candidate models.
summary(fit)
summary(fit.2)
summary(fit.3)
#Quantile Regression
quantile_loss <- function(x, tau) {
  # Pinball (quantile) loss: tau * x for x >= 0, (tau - 1) * x for x < 0.
  below_zero <- as.numeric(x < 0)
  x * (tau - below_zero)
}
# Fitted values on the log scale.
# NOTE(review): 1.2980 and -0.2162 are hard-coded -- presumably the
# intercept/slope of the log-transform fit above; confirm against fit.3.
x<- 1.2980 - 0.2162*(logG)
# Plot the pinball loss at several quantile levels tau.
plot(x, quantile_loss(x, 0.2), type = "l", lty = 1, col = 1, ylab = "quantile loss")
lines(x, quantile_loss(x, 0.4), lty = 2, col = 2)
lines(x, quantile_loss(x, 0.5), lty = 3, col = 3, lwd = 5)
lines(x, quantile_loss(x, 0.6), lty = 4, col = 4)
lines(x, quantile_loss(x, 0.8), lty = 5, col = 5)
legend(x = 0.6, y = 0.10, legend = c(expression(paste(rho, "=", 0.2)),
                                     expression(paste(rho, "=", 0.4)),
                                     expression(paste(rho, "=", 0.5)),
                                     expression(paste(rho, "=", 0.6)),
                                     expression(paste(rho, "=", 0.8))),
       lty = c(1,2,3,4,5), lwd = c(1,1,5,1,1), col = c(1,2,3,4,5), cex=0.5)
# Empirical quantiles of the fitted values.
quantile(x, probs = seq(0.1, 0.9, by = 0.2))
# Confidence Interval: t critical value (df = 88, i.e. n - 2 for n = 90 obs).
qt(.95, df=88)
#Removing Outliers
#part3
# Separate the suspected outlier groups: density 0.508 and density 0.001.
# NOTE(review): `points` shadows graphics::points -- safe here but fragile.
points = data[data$density==0.508,]
dataout1 <- data[data$density != 0.508,]
dataout2 <- data[data$density != 0.001,]
pointx = data[data$density == 0.001,]
#part 3.1: repeat the analysis with the density == 0.508 group removed.
gain <- dataout1$gain
density <- dataout1$density
plot(x=gain, y=density)
# Least Squares Fit
fito1 <- lm(density~gain, dataout1)
fito1
abline(fito1,col="red")
# NOTE(review): this plots the FULL-data fit's residuals (`fit`), not
# `fito1` -- likely a copy-paste slip; confirm which was intended.
plot(fit$residuals)
abline(0, 0, col="red")
hist(fito1$residuals)
qqnorm(fito1$residuals)
qqline(fito1$residuals, col="red")
# Quadratic Fit
fito1.2 <- lm(density~gain+I(gain^2),dataout1)
fito1.2
# Log Transform
logG <- log(gain)
fito1.3 <- lm(density~logG, dataout1)
fito1.3
plot(fito1.3$residuals)
abline(0, 0, col="red")
plot(logG,density)
abline(fito1.3,col="red")
qqnorm(fito1.3$residuals)
qqline(fito1.3$residuals, col="red")
summary(fito1)
summary(fito1.2)
#Quantile Regression (redefines the same pinball loss helper as above)
quantile_loss <- function(x, tau) {
  ind <- 1*(x < 0)
  return (x*(tau - ind))
}
# Fitted values on the log scale (same hard-coded coefficients as above).
x <- 1.2980 - 0.2162*(logG)
# Plot the pinball loss at several quantile levels tau.
plot(x, quantile_loss(x, 0.2), type = "l", lty = 1, col = 1, ylab = "quantile loss")
lines(x, quantile_loss(x, 0.4), lty = 2, col = 2)
lines(x, quantile_loss(x, 0.5), lty = 3, col = 3, lwd = 5)
lines(x, quantile_loss(x, 0.6), lty = 4, col = 4)
# BUG FIX: removed stray mojibake bytes after the closing parenthesis
# (WINDOWS-1252 garbling) that made this line a syntax error.
lines(x, quantile_loss(x, 0.8), lty = 5, col = 5)
#part 3.2: repeat the analysis with the density == 0.001 group removed.
gain <- dataout2$gain
density <- dataout2$density
plot(x=gain, y=density)
# Least Squares Fit
fito2 <- lm(density~gain, dataout2)
fito2
abline(fito2,col="red")
# Residual diagnostics for the part-3.2 linear fit.
plot(fito2$residuals)
abline(0, 0, col="red")
hist(fito2$residuals)
qqnorm(fito2$residuals)
qqline(fito2$residuals, col="red")
# Quadratic Fit
# BUG FIX: part 3.2 analyses dataout2 (density == 0.001 removed); the
# original fitted these two models against dataout1, silently mixing
# variables drawn from two different row subsets.
fito2.2 <- lm(density~gain+I(gain^2),dataout2)
fito2.2
# Log Transform
logG <- log(gain)
fito2.3 <- lm(density~logG, dataout2)
fito2.3
# Residual diagnostics for the part-3.2 log-transform fit.
plot(fito2.3$residuals)
abline(0, 0, col="red")
plot(logG,density)
abline(fito2.3,col="red")
qqnorm(fito2.3$residuals)
qqline(fito2.3$residuals, col="red")
summary(fito2)
summary(fito2.2)
#Quantile Regression (third identical redefinition of the pinball loss)
quantile_loss <- function(x, tau) {
  ind <- 1*(x < 0)
  return (x*(tau - ind))
}
# Fitted values on the log scale (same hard-coded coefficients as above).
x<- 1.2980 - 0.2162*(logG)
plot(x, quantile_loss(x, 0.2), type = "l", lty = 1, col = 1, ylab = "quantile loss")
lines(x, quantile_loss(x, 0.4), lty = 2, col = 2)
lines(x, quantile_loss(x, 0.5), lty = 3, col = 3, lwd = 5)
lines(x, quantile_loss(x, 0.6), lty = 4, col = 4)
lines(x, quantile_loss(x, 0.8), lty = 5, col = 5)
####Read dataset again from txt file
dat <- read.table("gauge.txt",header = T)
summary(dat)
# Loading dataset (same file, whitespace-separated read)
gauge <- read.csv("gauge.txt", sep="")
head(gauge)
density <- gauge$density
gain <- gauge$gain
Y<-cbind(density)
X<-cbind(gain)
corr<-cor(Y,X)
# 9 Unique values of density for which observations have been recorded
obs.density <- unique(gauge$density)
# gain observations for each unique density stored as a row vector
obs.gain <- matrix(nrow = length(obs.density),ncol = nrow(gauge)/length(obs.density))
for (i in 1:length(obs.density)){
  obs.gain[i,] <- gauge$gain[which(gauge$density==unique(gauge$density)[i])]
}
# summary statistics for gain observations of each unique density
obs.mean <- numeric(length(obs.density))
obs.sd <- numeric(length(obs.density))
for (i in 1:length(obs.density)){
  obs.mean[i] <- mean(obs.gain[i,])
  obs.sd[i] <- sd(obs.gain[i,])
}
print(matrix(c(obs.density,obs.mean,obs.sd),nrow = 9,ncol=3,dimnames = list(1:9,c("Density","Mean(gain)"," Std.dev(gain)"))))
str(gauge)
# ggplot2 scatter plots of raw and transformed data.
library(ggplot2)
ggplot(gauge,aes(x = density, y=gain))+geom_point(col="blue",size=2)+labs(y="Gain",x="Density",title="Scatter plot of Observed Data (90 observations)")+geom_smooth(mapping = aes(x = density, y=gain),col='black',se=FALSE)+geom_smooth(method = "lm", se = FALSE,col='red')
##checking whether coeff is significant or not
model1=lm(density~gain,data=gauge)
plot(model1)
summary(model1)
confint(model1, level=0.95)
#scatterplot of log-transformed data
ggplot(gauge,aes(x = density, y=log(gain)))+geom_point(col="blue",size=2)+labs(y="Log(Gain)",x="Density",title="Scatter plot of Log-Transformed Observed Data (90 observations)")+geom_smooth(mapping = aes(x = density, y=log(gain)),col='black',se= FALSE)
ggplot(gauge,aes(x = log(density), y=gain))+geom_point(col="blue",size=2)+labs(y="Gain",x="Log(Density)",title="Scatter plot of Log-Transformed Observed Data (90 observations)")+geom_smooth(mapping = aes(x = log(density), y=gain),col='black',se= FALSE)
#linear regression for log-linear transformed data
ggplot(gauge,aes(x = density, y=log(gain)))+geom_point(col="blue",size=2)+labs(y="Log(Gain)",x="Density",title="Scatter plot of Log-Transformed Observed Data (90 observations)")+geom_smooth(method = "lm", se = FALSE,col='red')+geom_smooth(mapping = aes(x = density, y=log(gain)),col='black',se= FALSE)
#residuals for log-linear transformed data & Goodness of fit
model2=lm(density~log(gain))
plot(model2)
##checking whether coeff is significant or not
summary(model2)
confint(model2, level=0.95)
#correlation of log-linear model
fit.log <- lm(density~I(log(gain)) )
fit.linear <- lm( density~I(gain) )
# NOTE(review): install.packages() inside a script installs on every run --
# should normally be run once interactively instead.
install.packages("ellipse")
library(ellipse)
plotcorr(cor(gauge))
print("The correlation matrix of the given data is: ")
cor(gauge)
#residuals
res.linear <- as.numeric(residuals(fit.linear))
res.log <- as.numeric(residuals(fit.log))
# To check if residuals are nearly normal
# fortify() exposes standardized residuals as the .stdresid column.
a<-fortify(fit.linear)
sres.linear <- as.numeric(a$.stdresid)
#linear regression
# BUG FIX: the original passed `b` here, but `b` (fortify of fit.log) is
# not defined until two lines later; the standardized residuals plotted
# (sres.linear) come from `a`, the fortified linear fit.
ggplot(a, aes(sres.linear))+geom_histogram(binwidth = diff(range(sres.linear))/8)+labs(x="Standardized Residuals of linear Fit Model",y="Counts",title="Histogram of Residuals of linear Fit Model")+geom_smooth(aes(y=45*dnorm(sres.linear,mean=mean(sres.linear),sd=sd(sres.linear))),se = FALSE)
#log-linear regression
# Histogram of the log-linear fit's standardized residuals with a scaled
# normal density overlay (the factor 45 is an ad-hoc count scaling).
b<-fortify(fit.log)
sres.log <- as.numeric(b$.stdresid)
ggplot(b, aes(sres.log))+geom_histogram(binwidth = diff(range(sres.log))/8)+labs(x="Standardized Residuals of Log-linear Fit Model",y="Counts",title="Histogram of Residuals of Log-linear Fit Model")+geom_smooth(aes(y=45*dnorm(sres.log,mean=mean(sres.log),sd=sd(sres.log))),se = FALSE)
|
f2b69a59df8cb50cd6a1c2532ad62a4558d6139d
|
cd4a2798eed1c32779c205aedefbfca78d54b752
|
/R/g2l.proc.R
|
c4e0c24953c5be14378e8929002a8b81fd6f2259
|
[] |
no_license
|
cran/LPRelevance
|
2d0a2c7deb6e5bef991d5eca08a6aad80192786f
|
1e3fb4acc10d04bd07e6897c466b773440e81597
|
refs/heads/master
| 2022-06-03T14:48:47.471853
| 2022-05-18T05:20:02
| 2022-05-18T05:20:02
| 206,745,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,971
|
r
|
g2l.proc.R
|
## g2l.proc: driver for the "global-to-local" inference procedure.
##
## Runs either a "macro" analysis over all samples or, when target covariates
## X.target are supplied, a "micro" (per-target-case) analysis.  The actual
## fitting is delegated to g2l.infer() (single fit) or g2l.infer.boot()
## (bagged fit, used when nbag >= 2).  Optionally attaches ggplot2
## diagnostics to the returned lists.
##
## Key arguments (forwarded mostly verbatim to g2l.infer*()):
##   X, z            covariate matrix/vector and the test statistics.
##   X.target,
##   z.target        optional target cases; presence switches mode to 'micro'.
##   nbag            number of bootstrap bags; >= 2 selects g2l.infer.boot().
##   lp.reg.method   regression engine; for 'knn' a default k = sqrt(length(z))
##                   is injected when the caller did not pass one via `...`.
##   plot            if TRUE, build ggplot objects alongside the results.
## NOTE(review): the `nsample` argument is not forwarded -- the internal calls
## below hard-code nsample=length(z); confirm whether that is intentional.
##
## Returns a named list with a 'macro' or 'micro' component plus m.lp, the LP
## basis dimensions actually used (taken from the g2l.infer*() result).
g2l.proc <-
  function(X,z,X.target=NULL,z.target=NULL,m=c(4,6),alpha=.1,nbag=NULL,nsample=length(z),
           lp.reg.method='lm', null.scale='QQ',
           approx.method="direct",ngrid=2000,centering=TRUE, coef.smooth='BIC',
           fdr.method='locfdr', plot=TRUE, rel.null='custom',
           locfdr.df=10,fdr.th.fixed=NULL,parallel=FALSE,...){
    extraparms<-list(...)
    ## Default neighbourhood size for the knn regression engine.
    if(is.null(extraparms$k) & lp.reg.method=='knn'){
      extraparms$k<-sqrt(length(z))
    }
    X<-as.matrix(X)
    ## Macro scatter plots are only meaningful for a univariate covariate.
    if(ncol(X)>1){plot.macro=FALSE}else{plot.macro=TRUE}
    tasks='macro'
    if(!is.null(X.target)){
      ## Target cases supplied: switch to the per-case ('micro') analysis.
      x0=matrix(X.target,ncol=ncol(X))
      z0=z.target
      ntarget<-length(z0)
      tasks='micro'
    }
    out<-list()
    ##for ggplot variables, avoiding CRAN check notes:
    x<-tags<-score<-value<-variable<-ystart<-yend<-NULL
    ##determine whether use g2l.infer.boot
    iter.flag=0
    if(!is.null(nbag)){if(nbag>=2){iter.flag=1}}
    ## `tasks` is always length 1; the loop keeps macro/micro handling uniform.
    for(tid in 1:length(tasks)){
      #print(tid)
      t_name<-tasks[tid]
      if(t_name=='macro'){
        ####macro.ensemble results####
        ## Fit once or with bagging, without any test points (X.test=NULL).
        if(iter.flag==0){
          g2l_res<-g2l.infer(X,z,m=m,X.test=NULL,alpha=alpha,nsample=length(z), lp.reg.method=lp.reg.method,
                             fdr.curve.approx=approx.method,null.scale=null.scale,
                             ngrid=ngrid,centering=centering,fdr.method=fdr.method, locfdr.df=locfdr.df,coef.smooth=coef.smooth,
                             fdr.th.fixed=fdr.th.fixed,rel.null=rel.null,parallel=parallel,k=extraparms$k)
        }else{
          g2l_res<-g2l.infer.boot(X,z,m=m,X.test=NULL,alpha=alpha,B=nbag,nsample=length(z),
                                  lp.reg.method=lp.reg.method,fdr.curve.approx=approx.method,null.scale=null.scale,
                                  ngrid=ngrid,centering=centering, locfdr.df=locfdr.df,coef.smooth=coef.smooth,
                                  fdr.th.fixed=fdr.th.fixed,rel.null=rel.null,parallel=parallel,k=extraparms$k)
        }
        ## Collect per-observation fdr values and the rejection indicator.
        macro.out<-list()
        macro.out$result<-g2l_res$fdr.z
        macro.out$result$signal<-g2l_res$data$rej
        if(plot==TRUE & plot.macro==TRUE){
          nrej=sum(macro.out$result$signal)
          ## With the locfdr method a -log10 "discovery propensity score" is
          ## available per sample; otherwise only the signal flag is plotted.
          if(fdr.method=='locfdr'){
            d0<-data.frame(x=X,z=z,tags=factor(macro.out$result$signal,levels=c(0,1),ordered=TRUE),score=-log10(macro.out$result$prob_null))
          }else{
            d0<-data.frame(x=X,z=z,tags=factor(macro.out$result$signal,levels=c(0,1),ordered=TRUE))
          }
          ## Scatter of z against the covariate, discoveries highlighted red.
          p_dscv<-ggplot2::ggplot(data=d0,aes(x=x,y=z,color=tags))+geom_point(size=.8)+
            ylab('score')+xlab('Covariate X')+ggtitle(paste0('Discoveries: ', nrej))+
            scale_color_manual(values=c('lightblue','red'))+
            theme(text=element_text(size=13),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.text.x = element_text(size=16),
                  axis.text.y = element_text(size=14),
                  legend.position="none",
                  legend.title=element_blank())
          macro.out$plots<-list(signal_x=p_dscv)
          if(fdr.method=='locfdr'){
            ## Discovery propensity score colored over (x, z) ...
            p<-ggplot2::ggplot(data=d0,aes(x=x,y=z,color=score))+geom_point(size=.8)+scale_color_gradient2(low=rgb(0.4,0.4,0.4,.4),mid='red',high='yellow',midpoint=3)+
              ylab('Z-values')+xlab('Covariate X')+ggtitle('Discovery Propensity Scores by Sample')+
              theme(text=element_text(size=13),
                    panel.grid.major = element_blank(),
                    panel.grid.minor = element_blank(),
                    panel.background = element_blank(),
                    axis.text.x = element_text(size=16),
                    axis.text.y = element_text(size=14),
                    legend.position="right",
                    legend.title=element_blank())
            ## ... and as a function of the covariate alone.
            p1<-ggplot2::ggplot(data=d0,aes(x=x,y=score,color=score))+geom_point(size=.8)+scale_color_gradient2(low=rgb(0.4,0.4,0.4,.4),mid='red',high='yellow',midpoint=3)+
              ylab('score')+xlab('Covariate X')+ggtitle('Discovery Propensity Scores by Covariate')+
              theme(text=element_text(size=13),
                    panel.grid.major = element_blank(),
                    panel.grid.minor = element_blank(),
                    panel.background = element_blank(),
                    axis.text.x = element_text(size=16),
                    axis.text.y = element_text(size=14),
                    legend.position="none",
                    legend.title=element_blank())
            macro.out$plots$dps_xz<-p
            macro.out$plots$dps_x<-p1
          }
        }
        out[[tid]]<-macro.out
        names(out)[tid]<-'macro'
      }else if(t_name=='micro'&!is.null(X.target)){
        ####individual inference results####
        ## Same fit as macro, but evaluated at the target points X.test=x0.
        if(iter.flag==0){
          g2l_res<-g2l.infer(X,z,m=m,X.test=x0,alpha=alpha,nsample=length(z),
                             lp.reg.method=lp.reg.method,fdr.curve.approx=approx.method, null.scale=null.scale,
                             ngrid=ngrid,centering=centering,fdr.method=fdr.method, locfdr.df=locfdr.df,
                             fdr.th.fixed=fdr.th.fixed,rel.null=rel.null,parallel=parallel,k=extraparms$k)
        }else{
          g2l_res<-g2l.infer.boot(X,z,m=m,X.test=x0,alpha=alpha,nsample=length(z),
                                  lp.reg.method=lp.reg.method,B=nbag, fdr.curve.approx=approx.method,
                                  null.scale=null.scale,
                                  ngrid=ngrid,centering=centering,locfdr.df=locfdr.df,
                                  fdr.th.fixed=fdr.th.fixed,rel.null=rel.null,parallel=parallel,k=extraparms$k)
        }
        ## Pooled ("global") fdr curve from locfdr, interpolated over z; this
        ## serves as the covariate-free baseline for comparison plots.
        w <- locfdr(z,bre=200,df=10,nulltype=1,plot=0)
        fdrpool<-approxfun(z,w$fdr,rule=2,method='linear')
        micro.out<-list()
        micro.out$result<-data.frame(x=x0,z=z0,prob.null.est=NA,prob.null.sd=NA,signal=NA)
        micro.out$global<-data.frame(x=x0,z=z0,prob.null.est=fdrpool(z0))
        micro.out$result$signal=rep(0,ntarget)
        ## Per target case: estimated null probability (with an sd when the
        ## bagged variant was used) and the resulting discovery flag.
        for(i in 1:ntarget){
          if(iter.flag==0){
            micro.out$result$prob.null.est[i]=getNullProb(g2l_res,x0[i,],z0[i])
          }else{
            micro.out$result[i,c('prob.null.est','prob.null.sd')]=getNullProb(g2l_res,x0[i,],z0[i])
          }
          micro.out$result$signal[i]=(micro.out$result$prob.null.est[i]<g2l_res$lpfdr.th$ti[i])
        }
        if(plot==TRUE){
          ## One curve plot per target: customized (covariate-adjusted) fdr
          ## versus the pooled global fdr over the z grid.
          plots<-list()
          for(i in 1:ntarget){
            zgrid<-g2l_res$z.grid[i,]
            if(iter.flag==0){
              fdr.c<-g2l_res$lpfdr[i,]
            }else{
              ## Average the fdr curve over bootstrap bags.
              fdr.c<-apply(g2l_res$lpfdr[i,,],2,FUN=mean)
            }
            Dat.fdr<-data.frame(zgrid=zgrid,Global=fdrpool(zgrid),Customized=fdr.c)
            d0<-reshape2::melt(Dat.fdr,id.vars='zgrid')
            ## Rug ticks: z-values of the training samples sharing this
            ## target's covariate value (if any).
            z.ind<-which(apply(X,1,function(x) all(x==x0[i,])))
            if(length(z.ind)>0){
              segticks<-data.frame(x=z[z.ind],ystart=-.08,yend=-.03)
            }else{
              segticks<-NULL
            }
            if(ncol(x0)>1){
              y.label=paste0('Pr(Null|z,x=[',paste(x0[i,],collapse=','),'])')
            }else{
              y.label=paste0('Pr(Null|z,x=',x0[i,],')')
            }
            p_fdr<-ggplot2::ggplot(data=d0,aes(x=zgrid,y=value))+geom_line(size=.8,aes(linetype =variable,color=variable))+
              ylab(y.label)+xlab('Z')+ggtitle('')+
              scale_color_manual(breaks=c('Global','Customized'),values=c('blue','red'))+
              scale_linetype_manual(breaks=c('Global','Customized'),values=c(2,1))+
              scale_y_continuous(breaks=c(0,.25,.5,.75,1),limits=c(-.1,1.2))+
              geom_hline(yintercept=0)+geom_vline(xintercept=0)+geom_hline(yintercept=0.2,color='darkred',linetype=3)+
              theme(text=element_text(size=13),
                    panel.grid.major = element_blank(),
                    panel.grid.minor = element_blank(),
                    panel.background = element_blank(),
                    axis.text.x = element_text(size=16),
                    axis.text.y = element_text(size=14),
                    legend.position="right",
                    legend.title=element_blank())
            if(!is.null(segticks)){
              p_fdr<-p_fdr+ggplot2::geom_segment(data=segticks,aes(x=x,y=ystart,xend=x,yend=yend),color='lightblue')
            }
            plots[[i]]<-p_fdr
            names(plots)[i]<-paste0('fdr.',i)
          }
          micro.out$plots<-plots
        }
        out[[tid]]<-micro.out
        names(out)[tid]<-'micro'
      }
    }
    ## Expose the LP basis dimensions used by the (last) fit.
    m.lp<-g2l_res$lp.m
    out$m.lp=m.lp
    return(out)
  }
|
63042a07c922ed0531d45543c05881eeae556be5
|
46c1cdb91955b383ea6176d108f2d67fd0cd5a8d
|
/ISCAM2/R/iscambinom.R
|
53268a67baadce4d12174c185b5ed65ac4f1ed7c
|
[] |
no_license
|
shannonpileggi/SP--Pablo--RProgramming
|
0f1a6621847d013e0c445059894e5bad5a940086
|
ad60dc509b077dd9657f7e97f587809251e19c18
|
refs/heads/master
| 2018-10-22T19:21:27.375766
| 2018-07-21T07:01:43
| 2018-07-21T07:01:43
| 114,397,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 761
|
r
|
iscambinom.R
|
#' iscambinom Function
#'
#' Dispatcher for binomial calculations: computes a tail probability when
#' \code{prob} is passed a question mark, or an inverse cumulative probability
#' when \code{k} is passed a question mark, delegating to
#' \code{iscambinomprob} or \code{iscaminvbinom} respectively.
#' @param k number of successes (or ? to solve for it)
#' @param prob probability of interest (or ? to compute it)
#' @param n number of trials (zero or more)
#' @param pi probability of success on each trial
#' @param lower.tail logical; if TRUE, probabilities are P[X <= k], otherwise, P[X > k]
#' @keywords binomial
#' @export
#' @examples
#' iscambinom(?, .50, 20, lower.tail = TRUE)
#' iscambinom(10, ?, 20, lower.tail = TRUE)
iscambinom <- function(k, prob, n, pi, lower.tail) {
  # A "?" in the prob slot means: compute the tail probability for k.
  if (as.character(prob) == "?") {
    iscambinomprob(k, n, pi, lower.tail)
  }
  # A "?" in the k slot means: invert, i.e. solve for the number of successes.
  if (as.character(k) == "?") {
    iscaminvbinom(prob, n, pi, lower.tail)
  }
}
|
25be212940685117e8bcb473c7dc9d9acef0b073
|
1b97386c886c413e845954cadc86726dfb91ce12
|
/R/error_proc.R
|
24a992a163644b3f0d647b85d2669dfcf230a0b0
|
[] |
no_license
|
alveraboquet/krakenTools
|
be31c84ed91a05cec7d75df5196685674200e910
|
27945c44634dd41c5b47423aa1b486a2b41b314f
|
refs/heads/master
| 2023-03-23T14:58:25.424749
| 2021-03-05T13:05:25
| 2021-03-05T13:05:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,495
|
r
|
error_proc.R
|
#'@title Process the errors from check_long_df
#'@description Read the ERROR file produced for a single asset, summarise the
#'  duplicated and missing rows it flags, and interactively (via \code{menu})
#'  offer to delete the duplicates from the long tick-data frame.
#'@param curr_he_in name of the asset directory under the kraken root dir.
#'@param kraken_root_dir_in root directory containing one folder per asset.
#'@return a character message when no error file exists; otherwise the
#'  function is run for its side effects (console output and, in principle,
#'  rewriting the long data frame -- but see the dead-code note below).
NOT_USED_error_proc_fun <- function(curr_he_in,
                                    kraken_root_dir_in) {
  print("=======================")
  print(curr_he_in)
  # Build the path from the function argument.  (Bug fix: the original code
  # referenced the global `kraken_root_dir`, silently ignoring
  # `kraken_root_dir_in`.)
  curr_dat_prod_dir <- file.path(kraken_root_dir_in, curr_he_in, "data_products")
  error_file <- list.files(curr_dat_prod_dir, pattern = "ERROR")
  error_file_path <- file.path(curr_dat_prod_dir, error_file)
  # Bail out early when no ERROR file was produced for this asset.
  if (!file.exists(error_file_path)) {
    return("Error file doesn't exist")
  }
  #====================================
  # read in the error file
  error_df <- read_delim(error_file_path,
                         delim = ";")
  # add the group numbers: one group per distinct timestamp
  error_df <- error_df %>% mutate(group_no = as.integer(factor(unix_time)))
  #====================================
  # break it into the different sections
  curr_dups <- filter(error_df, flag == "DUPLICATED")
  curr_miss <- filter(error_df, flag == "NOT_IN_LONG")
  # show user how many different duplicate(s) and how many missing rows
  print(paste0("Asset ", curr_he_in, " has ", nrow(unique(curr_dups)),
               " duplicates with ", nrow(curr_dups), " rows in total. ",
               nrow(curr_miss), " rows that exist in the raw data are missing in the long_df"))
  #====================================
  # Interactively handle one duplicate group (rows sharing a timestamp):
  # show the rows, ask the user, and compute which rows would be removed.
  process_dup_bloc <- function(curr_group) {
    dups_sub <- filter(curr_dups, group_no == curr_group)
    print(paste0("Current duplicates for group ",
                 curr_group,
                 " are: "))
    print(dups_sub)
    action <- menu(c("Yes", "No"),
                   title = "Delete duplicates?")
    if (action == 1) {
      print("Delete the duplicates")
      # read in the long dataframe for this asset
      curr_long_filename <- list.files(curr_dat_prod_dir,
                                       pattern = "long_tick_data.csv")
      curr_long_df <- readr::read_delim(file.path(curr_dat_prod_dir,
                                                  curr_long_filename),
                                        delim = ";")
      # find the duplicates in the dataframe that match dups_sub
      # check that dups are in df and get the row numbers
      df_rows <- which(curr_long_df$price == dups_sub$price &
                         curr_long_df$volume == dups_sub$volume &
                         curr_long_df$unix_time == dups_sub$unix_time &
                         curr_long_df$buy_sell == dups_sub$buy_sell)
      # same number of rows in the dups and df
      stopifnot(all.equal(nrow(dups_sub), length(df_rows)))
      # keep only the first instance of the duplicates
      rows_to_remove <- df_rows[-1]
      return(rows_to_remove)
      # NOTE(review): everything below this return() is unreachable dead code
      # kept verbatim from the prototype -- the actual rewrite of the long
      # data frame on disk never runs.  Decide whether to delete it or move
      # the return() after the write before using this function.
      # remove the duplicates (leave one row)
      new_df <- curr_long_df[-rows_to_remove, ]
      readr::write_delim(new_df, file.path(curr_dat_prod_dir,
                                           "test_df.csv"),
                         delim = ";")
    } else if (action == 2) {
      print("Keep the duplicates")
    } else {
      stop("Incorrect duplicate selection")
    }
  }
  # process the duplicates - save or delete
  sapply(unique(error_df$group_no), process_dup_bloc)
  # process the missing rows
  # update the metadata file
}
|
0246b73c2ef6e1d3487623d11134b8a3218740ef
|
25cc04f448e132072f62cae1f027bb8dbbdfdc61
|
/inst/develo/combineDataSetsPrototype.R
|
8afdeab929e4754103c9856bbaa570cd051d324f
|
[] |
no_license
|
C3BI-pasteur-fr/UTechSCB-SCHNAPPs
|
c04c4aa8601b10dfd9d39dda9e0e449b89facb3d
|
a1ba962ad7d831876dba1458c9d8fd53b9660deb
|
refs/heads/master
| 2023-07-10T06:49:49.615792
| 2023-07-04T19:27:56
| 2023-07-04T19:27:56
| 181,477,991
| 28
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
combineDataSetsPrototype.R
|
## Prototype script: merge two SingleCellExperiment datasets (E13.5 and
## E16.5 samples) into a single object over their common genes.
## NOTE(review): file paths, object names and the output file are hard-coded;
## each .RData file is assumed to contain a list `scEx` whose `$scEx` element
## is the SingleCellExperiment -- verify before reuse.
cp = load("/Volumes/LaCie2022/scData/scEx.51e3d27df0e E13,5.RData")
cp
scEx2 = scEx$scEx
cp = load("/Volumes/LaCie2022/scData/scEx.51e7382b945 E16,5.RData")
cp
scEx= scEx$scEx
## Harmonize cell-level metadata: give both objects the union of all colData
## columns, filling columns missing on either side with NA, then stack rows.
allCols = union(colnames(colData(scEx2)), colnames(colData(scEx)))
colData(scEx2)[,setdiff(allCols,colnames(colData(scEx2)) )] = NA
colData(scEx)[,setdiff(allCols,colnames(colData(scEx)) )] = NA
newcDat = DataFrame(rbind(colData(scEx2),colData(scEx)))
## Restrict both count matrices to the shared genes and bind them column-wise
## (cells side by side), converting back to a sparse matrix at the end.
commGenes = intersect(rownames(scEx), rownames(scEx2))
mat = cbind(as.data.frame(assays(scEx)[["counts"]])[commGenes,], as.data.frame(assays(scEx2)[["counts"]])[commGenes,]) %>% as.matrix %>% as("CsparseMatrix")
mat
## Same union/NA-fill treatment for the gene-level metadata.
allCols = union(colnames(rowData(scEx2)), colnames(rowData(scEx)))
rowData(scEx2)[,setdiff(allCols,colnames(rowData(scEx2)) )] = NA
rowData(scEx)[,setdiff(allCols,colnames(rowData(scEx)) )] = NA
newrDat = DataFrame(rbind(rowData(scEx2),rowData(scEx)))
newrDat
## Assemble the combined object; metadata rows are re-indexed so they line up
## with the combined matrix's columns (cells) and the common gene set.
combscEx <- SingleCellExperiment(
  assay = list(counts = mat),
  colData = newcDat[colnames(mat),],
  rowData = newrDat[commGenes,]
)
save(file = "/Volumes/LaCie2022/scData/smd.e16.RData", list = c("combscEx"))
|
93bb25631759280ad8d8bf6dd3e6bdfdff4b1626
|
085c1f0d348b6be6eef1917e74cfde2247853d84
|
/man/random_coefmats2.Rd
|
44259fbd4a1ae49e01cc9ff41510fe5c2a72d3ba
|
[] |
no_license
|
saviviro/gmvarkit
|
657691acd9577c5aacefe01778bb53d8746613c2
|
ad17dd159d0dfa816dcdf9f3ff8be8d633e3b0ef
|
refs/heads/master
| 2023-07-11T19:48:43.851308
| 2023-06-26T08:18:28
| 2023-06-26T08:18:28
| 193,906,969
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,325
|
rd
|
random_coefmats2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateParams.R
\name{random_coefmats2}
\alias{random_coefmats2}
\title{Create random stationary VAR model \eqn{(dxd)} coefficient matrices \eqn{A}.}
\usage{
random_coefmats2(p, d, ar_scale = 1)
}
\arguments{
\item{p}{a positive integer specifying the autoregressive order of the model.}
\item{d}{the number of time series in the system.}
\item{ar_scale}{a positive real number. Larger values will typically result in larger AR coefficients.}
}
\value{
Returns \eqn{((pd^2)x1)} vector containing stationary vectorized coefficient
matrices \eqn{(vec(A_{1}),...,vec(A_{p}))}.
}
\description{
\code{random_coefmats2} generates random VAR model coefficient matrices.
}
\details{
The coefficient matrices are generated using the algorithm proposed by Ansley
and Kohn (1986) which forces stationarity. It's not clear in detail how \code{ar_scale}
affects the coefficient matrices. Read the cited article by Ansley and Kohn (1986) and
the source code for more information.
}
\references{
\itemize{
\item Ansley C.F., Kohn R. 1986. A note on reparameterizing a vector autoregressive
moving average model to enforce stationarity.
\emph{Journal of statistical computation and simulation}, \strong{24}:2, 99-106.
}
}
\keyword{internal}
|
dec3bcf131706039a4955eb25e8b004e603ec441
|
f382948bb9a0bfd84861a5a9cde044dc94c2f481
|
/cachematrix.R
|
af1e3297a5cd901ef4ff304a99b84c13a8de974d
|
[] |
no_license
|
Lelli0/ProgrammingAssignment2
|
cfd0e30df8c4d53604cd8f763a023f30385c8334
|
5cbe16185bde848ec4218707dd9559d45c57e4ce
|
refs/heads/master
| 2020-12-26T04:16:06.341297
| 2016-08-28T21:52:39
| 2016-08-28T21:52:39
| 66,757,567
| 0
| 0
| null | 2016-08-28T08:20:16
| 2016-08-28T08:20:16
| null |
UTF-8
|
R
| false
| false
| 1,081
|
r
|
cachematrix.R
|
## makeCacheMatrix: wrap an invertible matrix in a list of accessors that can
## also cache its inverse, so the inverse need only be computed once.
##
## Usage:  x <- matrix(rnorm(1:9), 3, 3)
##         z <- makeCacheMatrix(x)
##
## Returns a list with four functions:
##   set(y)        replace the stored matrix and invalidate the cache
##   get()         return the stored matrix
##   setinv(inverse) store a computed inverse in the cache
##   getinv()      return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setinv() is called
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  # Parameter renamed from `solve` (which shadowed base::solve) to `inverse`.
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: return the inverse of a "cache matrix" object produced by
## makeCacheMatrix (e.g. cacheSolve(z) for z from the example above).
## If the inverse has already been computed it is fetched from the cache
## (with a message); otherwise it is computed with solve(), stored in the
## cache for next time, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, remember it, and return it.
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
    return(inverse)
  }
  # Cache hit: reuse the previously computed inverse.
  message("getting cached data")
  cached
}
|
51876ecd373b0680e81394ad88ad0cde84e174c7
|
8407b09535288144309c207492d9bece0af3a8d0
|
/man/returnBaseEndPoint.Rd
|
8e14bb2f10cec62bdea25b0cc3e9be85cdd41157
|
[
"BSD-3-Clause"
] |
permissive
|
d3b-center/RDiseaseXpress
|
faefbef79269ea0791fc2bcc485a6989a667568f
|
1bc2fde12d6a3bc1c29823232f39de9efa43fc6b
|
refs/heads/master
| 2021-06-25T22:03:49.590134
| 2020-06-13T18:33:40
| 2020-06-13T18:33:40
| 99,952,653
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 331
|
rd
|
returnBaseEndPoint.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RDE_AggMergeVis.R
\name{returnBaseEndPoint}
\alias{returnBaseEndPoint}
\title{Get API base URL}
\usage{
returnBaseEndPoint()
}
\value{
character string representing the base url.
}
\description{
Returns the base URL of the API.
}
\examples{
returnBaseEndPoint()
}
|
87f431856e80d8ea8a65035378a348d700b82247
|
0c871ec77d671bdf60ba7e968d65f73536e76f92
|
/wincustomize.R
|
1a10d19970ca46f0329ad8ed67a73003c12b489f
|
[] |
no_license
|
kenkenlin/emailWebscrape
|
93e4efd9c87801284f7ceeb72fd967b9f9d9bc7c
|
8e0a18d048f9c8ec3b935fca7033fd978307ac80
|
refs/heads/master
| 2020-06-17T07:40:25.689679
| 2019-07-16T12:08:05
| 2019-07-16T12:08:05
| 195,848,711
| 0
| 1
| null | 2019-07-13T15:58:00
| 2019-07-08T16:22:43
|
Python
|
UTF-8
|
R
| false
| false
| 733
|
r
|
wincustomize.R
|
library(rvest)
library(dplyr)
##
## Scrape skin titles and rating CSS classes from wincustomize.com,
## following the "next page" (a.pnext) pagination link for up to 30 pages.
base_url <- "https://www.wincustomize.com"
page_url <- "https://www.wincustomize.com/explore/start_buttons"
underline1 <- NULL     # accumulated skin titles
underlineUrl1 <- NULL  # accumulated rating class attributes
for (a in 1:30) {
  # Read the current page once and scrape it.  (Bug fix: the original loop
  # referenced an undefined `MediaReport` object, so it errored on the first
  # iteration and never collected any data.)
  page <- read_html(page_url)
  underline <- page %>% html_nodes("#skincontainer h1 a") %>% html_text()
  underlineUrl <- page %>% html_nodes(".rating") %>% html_attr("class")
  underline1 <- c(underline1, underline)
  underlineUrl1 <- c(underlineUrl1, underlineUrl)
  # Advance to the next page via the pagination link.
  next_href <- page %>% html_nodes("a.pnext") %>% html_attr("href")
  page_url <- paste0(base_url, next_href[1])
}
##
## Assemble the results; the rating value is embedded in the class attribute,
## so pull out the two characters that encode it and convert to numeric.
thrmedia <- data.frame(
  title = underline1,
  link = underlineUrl1
)
thrmedia$link <- substr(thrmedia$link, start = 18, stop = 19)
View(thrmedia)
str(thrmedia$link)
thrmedia$link <- as.numeric(thrmedia$link)
|
1f56b429f4db6ed2a8615d22f5aa293f4738b808
|
7be7f8542482852dca98b5152b97123b1798fc90
|
/R/RemoteReference.R
|
728c45515e1e7563d460a982ee7d94304b1c5ddd
|
[
"MIT"
] |
permissive
|
jmanitz/riskmetric
|
6dee3e2be0b147b507266cf8dd91d7b059cef135
|
099873289b9fb2e2d84febf9f27b28073eea0970
|
refs/heads/master
| 2020-08-06T07:43:37.393604
| 2020-07-29T15:31:26
| 2020-07-29T15:31:26
| 212,895,679
| 0
| 0
|
NOASSERTION
| 2019-10-04T20:16:15
| 2019-10-04T20:16:15
| null |
UTF-8
|
R
| false
| false
| 4,649
|
r
|
RemoteReference.R
|
#
# Commenting out until we're ready to roll in source downloading
#
#
# # Removing these as exports while aiming for initial release - can revisit
# # once a solid foundation is built.
#
# #' Remote reference to package sources
# #'
# #' @param x package/repo name
# #' @param type "cran" or "github"
# #' @param version version tag to use for CRAN e.g. 0.7.1, release tag for github
# #' @param repos list with single CRAN-like repository to query sources from
# #'
# #' @import remotes
# RemoteReference <- function(x, type, version = "latest",
# repos = list(CRAN = "https://cloud.r-project.org")) {
#
# type <- match.arg(type, c("cran", "github"), several.ok = FALSE)
#
# if (type == "cran")
# ref <- remotes:::cran_remote(x, repos = repos, type = "source")
#
# if (type == "github")
# ref <- remotes:::github_remote(x)
#
# ref$version <- version
# ref_class <- c("RemoteReference", class(ref), "environment")
# ref <- as.environment(ref)
# class(ref) <- ref_class
#
# return(ref)
#
# }
#
# #' Download source code from RemoteReference
# #'
# #' @param remote_ref remote reference
# #' @param dest desitnation path for downloaded sources
# #' @param ... further parameters passed to methods
# #'
# download_pkg_sources <- function(remote_ref, dest, ...)
# UseMethod("download_pkg_sources")
#
#
# #' @import remotes rvest magrittr
# #' @importFrom xml2 read_html
# download_pkg_sources.cran_remote <- function(remote_ref, dest, unpack = FALSE,
# method = "auto", ...) {
#
# url_suffix_newest <- xml2::read_html(sprintf("%s/src/contrib/", remote_ref$repos[[1]])) %>%
# rvest::html_nodes("a") %>%
# rvest::html_attr("href") %>%
# grep(sprintf("^%s_.+.tar.gz$", remote_ref$name), ., value = TRUE)
#
# if (remote_ref$version == "latest") {
# url_suffix <- url_suffix_newest
# } else {
# # handle case where lastest is given explicitly
# if (remote_ref$version == stringr::str_extract(url_suffix_newest, "(?<=_).+(?=.tar.gz)"))
# url_suffix <- url_suffix_newest
# url_suffix <- sprintf("Archive/%s/%s_%s.tar.gz", remote_ref$name,
# remote_ref$name, remote_ref$version)
# }
#
# url_pkg_sources <- sprintf("%s/src/contrib/%s", remote_ref$repos, url_suffix)
# dir.create(dest, recursive = TRUE, showWarnings = FALSE)
# outfile <- sprintf("%s/%s", normalizePath(dest),
# stringr::str_extract(url_pkg_sources,
# sprintf("(?<=/)%s_.+\\.tar\\.gz",
# remote_ref$name)))
# utils::download.file(url_pkg_sources, outfile, method = method, ...)
# remote_ref$local_sources <- outfile
# remote_ref$tmplib <- dirname(outfile)
# if (unpack) {
# utils::untar(outfile, files = remote_ref$name, exdir = dirname(outfile))
# }
#
# }
#
#
#
# #' @import stringr
# #' @importFrom httr GET content
# download_pkg_sources.github_remote <- function(remote_ref, dest, unpack = FALSE,
# method = "auto", ...) {
#
# # resolve version: release (TODO: support tags)
# if (remote_ref$version == "latest") {
# url <- sprintf(
# "https://api.github.com/repos/%s/%s/releases/%s",
# remote_ref$username, remote_ref$repo, remote_ref$version)
# } else {
# url <- sprintf(
# "https://api.github.com/repos/%s/%s/releases/tags/%s",
# remote_ref$username, remote_ref$repo, remote_ref$version)
# }
# response <- httr::content(httr::GET(url), as = "parsed",
# type = "application/json")
#
# url_pkg_sources <- response$tarball_url
#
# dir.create(dest, recursive = TRUE, showWarnings = FALSE)
# version_tag <- basename(url_pkg_sources)
# outfile <- sprintf("%s/%s_%s.tar.gz", normalizePath(dest), remote_ref$repo,
# version_tag)
# utils::download.file(url_pkg_sources, outfile, method = method, ...)
# # need to get rid of intermedate dir in GH return .tar.gz
# ext_name <- utils::untar(outfile, exdir = dirname(outfile), list = TRUE)[1]
# pkg_dirname <- stringr::str_extract(outfile, sprintf(".+/%s", remote_ref$repo))
# utils::untar(outfile, exdir = dirname(outfile))
# file.rename(sprintf("%s/%s", dirname(outfile), ext_name), pkg_dirname)
# unlink(outfile)
# oldwd <- setwd(pkg_dirname)
# utils::tar(tarfile = outfile, files = list.files(), tar = "tar")
# setwd(oldwd)
# remote_ref$local_sources <- outfile
# remote_ref$tmplib <- dirname(outfile)
# remote_ref$name <- remote_ref$repo
# if (!unpack) {
# unlink(pkg_dirname, recursive = TRUE)
# }
#
# }
|
dddc20fa48342081638ad8269e4bcf1438d61460
|
87d71d69451feb49902c07c0696734da72d6046a
|
/man/fpl_get_player_detailed.Rd
|
ece1f0ba1a0516be5ff62050173ef2926f527b5a
|
[
"Apache-2.0"
] |
permissive
|
baklouti1994/Fantasyplr
|
31da309ae8899aec7ac0136cf0d66390e6374edf
|
fd715c2564b3b02cf6ce153a20a239b9530fd134
|
refs/heads/master
| 2020-04-11T05:31:44.368643
| 2018-12-12T22:57:05
| 2018-12-12T22:57:05
| 161,552,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 638
|
rd
|
fpl_get_player_detailed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/players.R
\name{fpl_get_player_detailed}
\alias{fpl_get_player_detailed}
\title{Retrieve detailed data for a player in the current FPL season}
\usage{
fpl_get_player_detailed(player_id)
}
\arguments{
\item{player_id}{\code{id} field from \code{\link{fpl_get_players}}.}
}
\value{
a tibble
}
\description{
Retrieve detailed data for a player in the current FPL season, obtained via the
\href{https://fantasy.premierleague.com/drf/bootstrap-static}{bootstrap-static JSON}.
}
\examples{
\dontrun{
fpl_get_player_detailed(player_id = 2)
}
}
|
2aada3889e210394233441e82b617a4af289b742
|
76a3033de0150c3c5323fc686ff472e5202415aa
|
/Programs/R/TimeSeries/time_series_analysis.R
|
a5df1b4eb2a5b875ab01b2e8b7caa0397dcc0577
|
[] |
no_license
|
chellawabac/BMTC
|
a0c6d9985c3d7e7a0eb4b43b11831a437ed936be
|
558be1aa26a58f1876b23db114a19ce95202e531
|
refs/heads/master
| 2021-01-13T04:28:26.125131
| 2017-05-12T07:15:21
| 2017-05-12T07:15:21
| 79,935,999
| 0
| 3
| null | 2017-05-12T07:01:00
| 2017-01-24T17:34:56
|
R
|
UTF-8
|
R
| false
| false
| 1,734
|
r
|
time_series_analysis.R
|
library(timeSeries)
library(readxl)
library(utils)
## Detrending diagnostics for the bus-bunching time series (sheet d3_d4).
## NOTE(review): assumes the sheet has columns `time` and `X2` -- verify
## against the workbook before reuse.
bunch_ts <- read_excel('segmented_bunching_data.xlsx', sheet = "d3_d4", col_names=TRUE)
plot(bunch_ts, type = 'o', main = "Before Detrending")
## Detrend method 1: residuals from a straight-line fit of X2 on time.
fit = lm(bunch_ts$X2~bunch_ts$time)
plot(bunch_ts$time, resid(fit), type = 'o', main = "Detrended using line fit")
## Detrend method 2: first differences.
first_diff <- c(0, diff(bunch_ts$X2))
plot(bunch_ts$time, first_diff, type = 'o', main = "First Difference")
## Detrend method 3: second differences (differences of the differences).
second_diff <- c(0, diff(first_diff))
plot(bunch_ts$time, second_diff, type = 'o', main = "Second Difference")
## Autocorrelation of the raw series and of each detrended version (48 lags).
acf(bunch_ts$X2, 48, main='acf: bunch_ts')
acf(resid(fit), 48, main='acf: detrended using line fit')
acf(first_diff, 48, main='acf: first differences')
acf(second_diff, main = 'acf: second differences')
## Partial autocorrelation for the same four series.
pacf(bunch_ts$X2, 48, main='pacf: bunch_ts')
pacf(resid(fit), 48, main='pacf: detrended using line fit')
pacf(first_diff, 48, main='pacf: first differences')
pacf(second_diff, main = 'pacf: second differences')
##Results: quadratic trend in data hence second differences works
#######################################################################
# #load all sheets of segmented bunching data of differences
# bunch_ts_1 <- read_excel('segmented_bunching_data.xlsx', sheet = "d1_d4", col_names=TRUE)
# #change the col names appropriately
# names(bunch_ts_1) <- c("time1", "y1")
# bunch_ts_2 <- read_excel('segmented_bunching_data.xlsx', sheet = "d2_d4", col_names=TRUE)
# #change the col names appropriately
# names(bunch_ts_2) <- c("time2", "y2")
# bunch_ts_3 <- read_excel('segmented_bunching_data.xlsx', sheet = "d3_d4", col_names=TRUE)
# #change the col names appropriately
# names(bunch_ts_3) <- c("time3", "y3")
#
# bunch_ts <- c(bunch_ts_1, bunch_ts_2, bunch_ts_3)
# print(bunch_ts)
|
df284c3d8c70ecc137937b9ed06e5740aee8d6ff
|
0853134802bde59234f5b0bd49735b9b39042cfb
|
/Rsite/source/Rd-man-files/mx.nd.random.negative.binomial.Rd
|
a901cf1f2047d26b8bb02343298156ca24806660
|
[] |
no_license
|
mli/new-docs
|
2e19847787cc84ced61319d36e9d72ba5e811e8a
|
5230b9c951fad5122e8f5219c4187ba18bfaf28f
|
refs/heads/master
| 2020-04-02T03:10:47.474992
| 2019-06-27T00:59:05
| 2019-06-27T00:59:05
| 153,949,703
| 13
| 15
| null | 2019-07-25T21:33:13
| 2018-10-20T21:24:57
|
R
|
UTF-8
|
R
| false
| true
| 1,270
|
rd
|
mx.nd.random.negative.binomial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxnet_generated.R
\name{mx.nd.random.negative.binomial}
\alias{mx.nd.random.negative.binomial}
\title{Draw random samples from a negative binomial distribution.}
\arguments{
\item{k}{int, optional, default='1'
Limit of unsuccessful experiments.}
\item{p}{float, optional, default=1
Failure probability in each experiment.}
\item{shape}{Shape(tuple), optional, default=[]
Shape of the output.}
\item{ctx}{string, optional, default=''
Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls.}
\item{dtype}{{'None', 'float16', 'float32', 'float64'},optional, default='None'
DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None).}
}
\value{
out The result mx.ndarray
}
\description{
Samples are distributed according to a negative binomial distribution parametrized by
*k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment).
Samples will always be returned as a floating point data type.
}
\details{
Example::
negative_binomial(k=3, p=0.4, shape=(2,2)) = [[ 4., 7.],
[ 2., 5.]]
Defined in src/operator/random/sample_op.cc:L163
}
|
66659747c990d8eae8f16394b3b5c570d88b56fd
|
2893762bd05f76e15b9c0bc61acdca323a355618
|
/man/peguy.Rd
|
790f22ede3c88b4328681620a618e8be4b5882df
|
[] |
no_license
|
ECO2ZTS/ClimClass
|
b184fdeb68f376185e1fe2d63aa1cd830bc1b340
|
b149ec6f4eb6a27a10379374570d46f66fca4395
|
refs/heads/master
| 2021-06-01T01:41:46.321867
| 2016-08-04T21:43:32
| 2016-08-04T21:43:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,308
|
rd
|
peguy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peguy.R
\name{peguy}
\alias{peguy}
\title{Peguy Climograph}
\usage{
peguy(data = NULL, TemperatureTriangleCoords = c(0, 23.4, 15),
PrecipitationTriangleCoords = c(0, 40, 200), ylab = "Precipitation[mm]",
xlab = "Mean Temperature [degC]", lambda.label = 1.75,
climate.label = c("Temperate", "Cool", "Arid", "Hot"), xyField = c("Tn",
"P"), pointsField = "month", StationsField = "station",
color.scale = "monthly", ...)
}
\arguments{
\item{data}{input dataset with climatological monthly weather data}
\item{TemperatureTriangleCoords}{Temperature coordinates for triangle vertices in the Peguy Climograph. Default coordinates are expressed in Celsius Degrees.}
\item{PrecipitationTriangleCoords}{Precipitation coordinates for triangle vertices in the Peguy Climograph. Default coordinates are expressed in millimeters.}
\item{xlab, ylab}{xy axis labels}
\item{lambda.label}{numeric value used to locate climate attribute labels}
\item{climate.label}{string vector containing climate attributes. Default is \code{c("Temperate", "Cool", "Arid", "Hot")}. Alternatively it can be translated into any other language.}
\item{xyField}{column names of \code{data} for the x and y variables used in the Peguy Climate Diagram.}
\item{pointsField}{column name of \code{data} containing the fields to be represented with different point colors. Default is \code{"month"}.}
\item{StationsField}{column name of \code{data} containing the fields with station ID names. Default is \code{"station"}.}
\item{color.scale}{character scale indicating a use of a specific color scale. Default is \code{"monthly"}.}
\item{...}{further arguments}
}
\description{
Representation of Peguy Climograph from monthly weather data (Mean Temperature, Precipitation)
}
\examples{
library(stringr)
data(Trent_climate)
TrentinoClimateDf <- do.call(rbind,clima_81_10)
names <- rownames(TrentinoClimateDf)
TrentinoClimateDf$station <- unlist(lapply(X=str_split(names,pattern="[.]"),FUN=function(x) {x[1]}))
data <- TrentinoClimateDf[TrentinoClimateDf$station \%in\% unique(TrentinoClimateDf$station)[1:3],]
p <- peguy(data=data)
}
\author{
Emanuele Cordano
}
\references{
Peguy, C.P. (1970) Precis de climatologie, ed. Masson, Paris.
}
|
b3053d29cbed68f12530811eca12d44aa9e079c8
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/sulphenone.R
|
dc293ce6cf7acd54587d83e5a1faf545a4ba6b3e
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
sulphenone.R
|
library("knitr")
library("rgl")
## Render the sulphenone R Markdown report straight to HTML with knitr.
## The commented lines below are the older two-step knit + pandoc pipeline,
## kept for reference.
#knit("sulphenone.Rmd")
#markdownToHTML('sulphenone.md', 'sulphenone.html', options=c("use_xhml"))
#system("pandoc -s sulphenone.html -o sulphenone.pdf")
knit2html('sulphenone.Rmd')
|
657f25e1f41a1c0014b8e12da2824595f5433991
|
0938510c4a6736c5043c31530627375ea742b7c1
|
/rprog%2Fdata%2FProgAssignment3-data/best.R
|
2b4b6a460f25dba3e14167fee1e3089d872ccedc
|
[] |
no_license
|
letyndr/datasciencecoursera
|
fdf7f9ba39ab127c6f02c7c0ff9b900029e8346c
|
f3e6ced8b6095a7b02b42a93d87648e9aa67082a
|
refs/heads/master
| 2020-05-25T02:34:52.500057
| 2017-10-05T20:15:20
| 2017-10-05T20:15:20
| 84,902,121
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,039
|
r
|
best.R
|
# Return the name of the hospital in `state` with the lowest 30-day death
# rate for `outcome` ("heart attack", "heart failure" or "pneumonia").
# Ties are broken alphabetically by hospital name.
# Raises an error for an unknown state or outcome.
# Call example: best("TX", "heart attack")
best <- function(state, outcome) {
  # Read outcome data; everything comes in as character, rates parsed below
  data_outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Check that state and outcome are valid
  if (!is.element(state, data_outcome[["State"]])) {
    stop("invalid state")
  }
  outcomes_names <- c("heart attack", "heart failure", "pneumonia")
  if (!is.element(outcome, outcomes_names)) {
    stop("invalid outcome")
  }
  hospital_index <- 2  # Hospital.Name column
  state_index <- 7     # State column
  # 30-day mortality rate columns:
  # 11: heart attack, 17: heart failure, 23: pneumonia
  outcomes <- c(11, 17, 23)
  names(outcomes) <- outcomes_names
  death_rate <- data_outcome[data_outcome[[state_index]] == state,
                             c(hospital_index, state_index, outcomes[[outcome]])]
  # Rates are character; "Not Available" coerces to NA (warning suppressed)
  death_rate[[3]] <- suppressWarnings(as.numeric(death_rate[[3]]))
  death_rate <- death_rate[!is.na(death_rate[[3]]), ]
  # Sort by rate, then by hospital name, so that ties between hospitals with
  # the same minimal rate resolve to the alphabetically first name
  death_rate <- death_rate[order(death_rate[[3]], death_rate[[1]]), ]
  # Return value: the best hospital's name
  death_rate[1, 1]
}
|
36c717dc446ebbab65af4458b6be394ce0bc5be0
|
f7cd9023746b8bdd29b90731ec389ad9ccf626d1
|
/paper_validation/selecting_potential_pathogenic_cases_FMR1_EHv2.5.5.R
|
b6c98a9252645e7f229a6ec7a366b0ffa901f71e
|
[] |
no_license
|
kibanez/analysing_STRs
|
1a498f121763d7cb411e3b66b25ec22eea608244
|
b922ab7d2b9668dc8add9e73f1d4e44a0aac1fd4
|
refs/heads/master
| 2022-02-25T05:28:43.480585
| 2022-02-04T10:29:30
| 2022-02-04T10:29:30
| 218,277,677
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,898
|
r
|
selecting_potential_pathogenic_cases_FMR1_EHv2.5.5.R
|
# Objective: from EHv2.5.5 summer batch select genomes with the following selection criteria (and compare with Arianna's numbers):
# both from GRCH37 and GRCH38:
# proband only
# specific disease == intellectual disability
# Arianna's numbers are the following:
# 5783 probands recruited under ID and also kabuki (!!)
# repeat size > 55
# 131 probands

# Record the run environment for reproducibility / provenance.
date ()
Sys.info ()[c("nodename", "user")]
commandArgs ()
rm (list = ls ())
R.version.string ## "R version 3.6.1 (2019-07-05)"

# Load packages, printing the installed version of each (versions used when
# this analysis was last run are noted in the trailing comments).
library(ggplot2); packageDescription ("ggplot2", fields = "Version") #"3.2.1"
library(grid); packageDescription ("grid", fields = "Version") #"3.6.2"
library(gridExtra); packageDescription ("gridExtra", fields = "Version") #"2.3"
library(reshape2); packageDescription ("reshape2", fields = "Version") #"1.4.3"
require(dplyr); packageDescription ("dplyr", fields = "Version") #"0.8.3"

# Set working dir
setwd("~/Documents/STRs/data/research/EH_2.5.5_research_August2019/EH_output_v2.5.5_research_August_2019/FMR1_GRCh37_GRCh38_encrypted/")

# Load clinically enriched data for FMR1 (one table per genome build).
table_b37 = read.csv("./research_genomes_86457_GRCh37_EHv2.5.5_FMR1.tsv",
                     sep = "\t",
                     stringsAsFactors = F,
                     header = T)
dim(table_b37)
# 11913  26

table_b38 = read.csv("./research_genomes_86457_GRCh38_EHv2.5.5_FMR1_CGG.tsv",
                     sep = "\t",
                     stringsAsFactors = F,
                     header = T)
dim(table_b38)
# 108939  26

# merge b37 and b38 tables (stacked, then de-duplicated)
merged_table = rbind(table_b37,
                     table_b38)
merged_table = unique(merged_table)
dim(merged_table)
# 120852  26

# Recode b37 and b38 chrX name so both builds use the same label
merged_table$chr = recode(merged_table$chr,
                          "X" = "chrX")

# 1- take only probands (== "N/A) [is.na() are genomes from Cancer programme]
fmr1_filtering = merged_table %>%
  filter(biological_relationship_to_proband %in% "N/A")
length(unique(fmr1_filtering$participant_id))
# 26530

# 2 - `specific disease` == ID OR kabuki
fmr1_filtering = fmr1_filtering %>%
  filter(grepl("[Ii]ntellectual disability",specific_disease) | grepl("[Kk]abuki",specific_disease))
length(unique(fmr1_filtering$participant_id))
# 5059

# 3 - repeat.size > 55 (potential premutation/full-mutation range for FMR1 CGG)
fmr1_filtering = fmr1_filtering %>%
  filter(repeat.size > 55)
length(unique(fmr1_filtering$participant_id))
# 131

# Arianna's work is ending with 131 probands.
# Let's see which 2 are different
ari_fmr1 = read.table("~/Documents/STRs/VALIDATION/for_ILM_pileup/list_131_FMR1_probands_Arianna.txt", stringsAsFactors = F)
ari_fmr1 = ari_fmr1$V1
length(ari_fmr1)
# 131

l_130_my_analysis = unique(fmr1_filtering$plate_key.x)
length(l_130_my_analysis)
# 130

# differences? (plate keys in Arianna's list that this analysis did not keep)
setdiff(ari_fmr1, l_130_my_analysis)
# "LP3000046-DNA_E01"

# let's see what they are
merged_table %>%
  filter(plate_key.x %in% "LP3000046-DNA_E01") %>%
  View()
|
bdbdf5c70cb72a3cee58c7ba92b5782f5d356f12
|
58691c7e1dd0c241de7ec2898ea66b5d2e5f5f4a
|
/man/compBagplot.Rd
|
ea4723ed1665154079ea3641b8a7a8dc9c4a3bc1
|
[] |
no_license
|
PSegaert/mrfDepth
|
b7fefd1bc3e897c9b095fac0e0f4c2cf9b3ad45a
|
a1118ddeef1997c72aedbc65dc83b48deda807e3
|
refs/heads/master
| 2021-01-18T22:23:13.435472
| 2018-10-12T08:55:47
| 2018-10-12T08:55:47
| 87,052,317
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,236
|
rd
|
compBagplot.Rd
|
\name{compBagplot}
\alias{compBagplot}
\title{Location estimate based on the halfspace depth}
\description{
Computes all elements of the bagplot, a generalisation
of the univariate boxplot to bivariate data. The bagplot can be
computed based on halfspace depth, projection depth or skewness-adjusted projection depth. To make the actual plot, the function \code{bagplot} needs to be called on the result of \code{compBagplot}.
}
\usage{
compBagplot(x, type = "hdepth", sizesubset = 500,
extra.directions = FALSE, options = NULL)
}
\arguments{
\item{x}{An \eqn{n} by 2 data matrix.}
\item{type}{Determines the depth function used to construct the bagplot: \code{"hdepth"} for halfspace depth, \code{"projdepth"} for projection depth and \code{"sprojdepth"} for skewness-adjusted projection depth. \cr
Defaults to \code{"hdepth"}.
}
\item{sizesubset}{When computing the bagplot based on halfspace depth,
the size of the subset used to perform the main
computations. See Details for more information. \cr
Defaults to \eqn{500}.
}
\item{extra.directions}{Logical indicating whether additional directions should
be considered in the computation of the fence for the
bagplot based on projection depth or skewness-adjusted
projection depth. If set to \code{TRUE} an additional
250 equispaced directions
are added to the directions defined by the points in
\code{x} themselves and the center. If \code{FALSE}
only directions determined by the points in \code{x} are
considered. \cr
Defaults to \code{FALSE}.
}
\item{options}{A list of options to pass to the \code{projdepth} or \code{sprojdepth} function.
In addition the following option may be specified:
\itemize{
\item{ \code{max.iter} \cr
The maximum number of iterations in the bisection algorithm used
to compute the depth contour corresponding to the cutoff. See
\code{depthContour} for more information. \cr
Defaults to \eqn{100}.
}
}
}
}
\details{
The bagplot has been proposed by Rousseeuw et al. (1999) as a generalisation of the boxplot to bivariate data. It is constructed based on halfspace depth. In the original format the deepest point is indicated by a "+" and is contained in the bag which is defined as the depth region containing the 50\% observations with largest depth. The fence is obtained by inflating the bag (relative to the deepest point) by a factor of three. The loop is the convex hull of the observations of \code{x} inside the fence. Observations outside the fence are flagged as outliers and plotted with a red star. This function only computes all the components constituting the bagplot. The bagplot itself can be drawn using the \code{bagplot} function.
The bagplot may also be defined using other depth functions. When using projection depth or skewness-adjusted projection depth the bagplot is build as follows. The center corresponds to the observation with largest depth. The bag is constructed as the convex hull of the fifty percent points with largest depth. Outliers are identified as points with a depth smaller than a cutoff value, see \code{projdepth} and \code{sprojdepth} for the precise definition.
The loop is computed as the convex hull of the non-outlying points. The fence is approximated by the convex hull of those points that lie on rays from the center through the vertices of the bag and have a depth that equals the cutoff depth. For a better approximation the user can set the input parameter \code{extra.directions} to \code{TRUE} such that an additional 250 equally spaced directions on the circle are considered.
The computation of the bagplot based on halfspace depth can be time
consuming. Therefore it is possible to limit the bulk of the computations
to a random subset of the data. Computations of the halfspace median and
the bag are then based on this random subset. The number of points in this
subset can be controlled by the optional argument \code{sizesubset}.
It is first checked whether the data is found to lie on a line. If so, the routine will give a warning, giving back the dimension of the subspace (being 1) together with the normal vector to that line.
}
\value{
A list with components: \cr
\item{center}{Center of the data. \cr
When \code{type = "hdepth"}, this corresponds with the
Tukey median.
In other cases this point corresponds to the point
with maximal depth.
}
\item{chull}{When \code{type = "hdepth"}, these are the vertices of
the region with maximal halfspace depth.
In other cases this is a null vector.
}
\item{bag}{The coordinates of the vertices of the bag.}
\item{fence}{The coordinates of the vertices of the fence.}
\item{datatype}{An \code{(nx3)} matrix. The first two columns correspond with \code{x}. The third column indicates the
position of each observation of \code{x} in the bagplot: 2 for observations in
the bag, 1 for the observations in the fence and 3 for
outliers. \cr
Note that points may not be in the same order as in \code{x}.
}
\item{flag}{A vector of length \code{n} which is 0 for outliers and 1 for regular observations of \code{x}.}
\item{depth}{The depth of the observations of \code{x}.}
\item{dimension}{If the data are lying in a lower dimensional subspace, the dimension of this subspace.}
\item{hyperplane}{If the data are lying in a lower dimensional subspace, a direction orthogonal to this subspace.}
\item{type}{Same as the input parameter \code{type}.}
}
\seealso{
\code{\link{bagplot}}, \code{\link{hdepth}}, \code{\link{projdepth}}, \code{\link{sprojdepth}}.
}
\references{
Rousseeuw P.J., Ruts I., Tukey J.W. (1999). The bagplot: A bivariate boxplot. \emph{The American Statistician}, \bold{53}, 382--387.
Hubert M., Van der Veeken S. (2008). Outlier detection for skewed data. \emph{Journal of Chemometrics}, \bold{22}, 235--246.
Hubert M., Rousseeuw P.J., Segaert, P. (2015). Rejoinder to 'Multivariate functional outlier detection'. \emph{Statistical Methods & Applications}, \bold{24}, 269--277.
}
\author{P. Segaert based on Fortran code by P.J. Rousseeuw, I. Ruts and A. Struyf.}
\examples{
data(bloodfat)
Result <- compBagplot(bloodfat)
bagplot(Result)
# The sizesubset argument may be used to control the
# computation time when computing the bagplot based on
# halfspace depth. However results may be unreliable when
# choosing a small subset for the main computations.
system.time(Result1 <- compBagplot(bloodfat))
system.time(Result2 <- compBagplot(bloodfat, sizesubset = 100))
bagplot(Result1)
bagplot(Result2)
# When using the projection depth or skewness-adjusted
# projection depth to compute the bagplot a list of options
# may be passed down to the (adj)outlyingness routines.
options <- list(type = "Rotation",
ndir = 50,
stand = "unimcd",
h = floor(nrow(bloodfat)*3/4))
Result <- compBagplot(bloodfat,
type = "projdepth", options = options)
bagplot(Result)
# The fence is computed using the depthContour function.
# To get a smoother fence, one may opt to consider extra
# directions.
options <- list(ndir = 500,
seed = 36)
Result <- compBagplot(bloodfat,
type = "sprojdepth", options = options)
bagplot(Result, plot.fence = TRUE)
options <- list(ndir = 500,
seed = 36)
Result <- compBagplot(bloodfat,
type = "sprojdepth", options = options,
extra.directions = TRUE)
bagplot(Result, plot.fence = TRUE)
}
\keyword{multivariate}
|
e11e501421fcfa8323dbd19cc17f0b5021c9624f
|
5ff2a3097691efac719ac50e3be4eca3627e860e
|
/R/crossref.R
|
2bc9cc5116e082cae1396257aef73fc5ff2d1c53
|
[] |
no_license
|
AlgoSkyNet/rj
|
c0f991041446aee9a00a97a451ab1b09091b7c0e
|
6e000df51ea1ceab28c4e6d78b8ad40d9acb5e7e
|
refs/heads/master
| 2021-04-24T04:23:29.281090
| 2020-03-09T03:05:13
| 2020-03-09T03:05:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,780
|
r
|
crossref.R
|
## code to create XML file to submit an issue
## code hevaily based on jss tools for DOI submission
##
## install.packages("jss", repos="http://R-Forge.R-project.org")
##
## Main function is rj:::upload_dois_from_issue()
## EIC will need login credentials in SOPS-editor.md
## Constants
# Constants ----

# DOI prefix registered for the R Foundation at Crossref.
doi_prefix = "10.32614"
# Base URL under which published R Journal articles live.
rjournal_url = "https://journal.r-project.org/archive"
# Depositor contact details embedded in every Crossref submission.
depositor = list(
  depositor_name = "R Foundation",
  depositor_email = "R-foundation@r-project.org"
)
# Journal-level metadata common to every deposit.
journal_metadata = list(
  journal_title = "The R Journal",
  journal_abbreviated_title = "The R Journal",
  coden = NULL, # JSS is "JSSOBK", we don't have one?
  issn = "2073-4859" # 1548-7660"
)
doi_batch_id_tpl = "<doi_batch_id>{{ID}}</doi_batch_id>\n"

# Append a <doi_batch_id> element (a timestamp-derived unique batch ID) to
# `file`.
#
# Bug fix: the previous implementation assembled the ID from POSIXlt fields,
# but POSIXlt stores `year` as years since 1900 and `mon` as 0-11, so the ID
# came out as e.g. "120 11 ..." instead of "2020 12 ...".  format() yields the
# intended zero-padded YYYYMMDDHHMMSS string directly.
print_doi_batch_id <- function(file) {
  ID <- format(Sys.time(), "%Y%m%d%H%M%S")
  txt <- whisker.render(doi_batch_id_tpl, list(ID = ID))
  cat(txt, file = file, append = TRUE)
}
timestamp_tpl = "<timestamp>{{TIMESTAMP}}</timestamp>\n"

# Append a <timestamp> element (YYYYMMDDHHMMSS, used by Crossref for deposit
# versioning) to `file`.
#
# Bug fix: same POSIXlt defect as print_doi_batch_id -- `$year` is years
# since 1900 and `$mon` is 0-based, so the old hand-rolled padding produced
# a wrong timestamp.  format() gives the correct zero-padded value.
print_timestamp <- function(file) {
  TIMESTAMP <- format(Sys.time(), "%Y%m%d%H%M%S")
  txt <- whisker.render(timestamp_tpl, list(TIMESTAMP = TIMESTAMP))
  cat(txt, file = file, append = TRUE)
}
depositor_tpl = "
<depositor>
<depositor_name>{{depositor_name}}</depositor_name>
<email_address>{{depositor_email}}</email_address>
</depositor>
"

# Render the depositor contact block (file-level `depositor` constant) and
# append it to `file`.
print_depositor = function(file) {
  rendered <- whisker.render(depositor_tpl, depositor)
  cat(rendered, file = file, append = TRUE)
}
journal_metadata_tpl = "
<journal_metadata language=\"en\">
<full_title>{{journal_title}}</full_title>
<abbrev_title>{{journal_abbreviated_title}}</abbrev_title>
{{#issn}}<issn media_type=\"electronic\">{{issn}}</issn>{{/issn}}
{{#coden}}<coden>{{coden}}</coden>{{/coden}}
</journal_metadata>
"

# Render the journal-level metadata block (file-level `journal_metadata`
# constant) and append it to `file`.
print_journal_metadata = function(file) {
  rendered <- whisker.render(journal_metadata_tpl, journal_metadata)
  cat(rendered, file = file, append = TRUE)
}
## jv no {{sf}}{{volume}} here
journal_issue_tpl = "
<journal_issue>
<publication_date media_type=\"online\">
<year>{{year}}</year>
</publication_date>
<journal_volume>
<volume>{{volume}}</volume>
</journal_volume>
<issue>{{issue}}</issue>
</journal_issue>
"

# Render the <journal_issue> block (year / volume / issue number) for the
# processed issue `x` and append it to `file`.
print_journal_issue = function(x, file) {
  fields <- list(
    year = x$year,
    volume = x$volume,
    number = x$number,
    issue = x$issue
  )
  cat(whisker.render(journal_issue_tpl, fields), file = file, append = TRUE)
}
## from c("a1", "a2", ...) create list(family, given)

# Split full author names ("Given [Middle ...] Family") on whitespace and
# return a list of list(family =, given =) records, one per input name.
# The last token is taken as the family name; everything before it is the
# given name.
#
# Bug fix: the given name was built with paste(..., sep = " "), which does
# NOT collapse a vector -- a multi-part given name ("Jean Paul Smith") came
# back as c("Jean", "Paul") instead of "Jean Paul".  paste(..., collapse =
# " ") joins the parts into a single string.
author_list = function(auts) {
  token_lists <- strsplit(auts, "\\s+")
  lapply(token_lists, function(tokens) {
    list(
      family = tokens[length(tokens)],
      given = paste(tokens[-length(tokens)], collapse = " ")
    )
  })
}
author_tpl = "
<person_name sequence=\"{{author_order}}\" contributor_role=\"author\">
<given_name>{{given}}</given_name>
<surname>{{family}}</surname>
{{#suffix}}<suffix>{{suffix}}</suffix>{{/suffix}}
{{#affiliation}}<affiliation>{{affiliation}}</affiliation>{{/affiliation}}
{{#ORCID}}<ORCID>{{ORCID}}</ORCID>{{/ORCID}}
</person_name>
"

# Render one <person_name> element per author of article `x` (names are
# HTML-escaped via tth::tth) and return them concatenated, newline-separated
# with a leading newline (same output shape as the original paste loop).
#
# Bug fixes:
#  - the template closed the suffix element with "</suffix}" (brace instead
#    of ">"), producing malformed XML whenever an author had a suffix;
#  - the 1:length(...) loop errored on an article with zero authors;
#  - the string was grown with paste() inside the loop.
get_authors <- function(x) {
  rendered <- vapply(seq_along(x$authors), function(i) {
    aut <- x$authors[[i]]
    fields <- list(
      given = tth::tth(aut$given),
      family = tth::tth(aut$family),
      suffix = if (is.null(aut$suffix)) NULL else tth::tth(aut$suffix),
      affiliation = aut$affiliation,
      ORCID = aut$ORCID,
      # Crossref requires the first author flagged "first", the rest "additional"
      author_order = if (i == 1) "first" else "additional"
    )
    whisker.render(author_tpl, fields)
  }, character(1))
  paste(c("", rendered), collapse = "\n")
}
## our DOI is just the slug
doi_tpl = "{{doi_prefix}}/{{slug}}"

# Build the full DOI string ("<prefix>/<slug>") for article `x`.
get_doi <- function(x) {
  fields <- list(doi_prefix = doi_prefix, slug = x$slug)
  whisker.render(doi_tpl, fields)
}
url_tpl = "{{rjournal_url}}/{{year}}/{{slug}}/index.html"

# Build the landing-page URL for article `x` on the R Journal archive site.
get_url <- function(x) {
  fields <- list(
    rjournal_url = rjournal_url,
    year = x$year,
    slug = x$slug
  )
  whisker.render(url_tpl, fields)
}
journal_article_tpl = "
<journal_article publication_type=\"full_text\">
<titles>
<title>{{title}}</title>
</titles>
<contributors>
{{{authors}}}
</contributors>
<publication_date media_type=\"online\">
<year>{{year}}</year>
</publication_date>
<pages>
<first_page>{{first_page}}</first_page>
</pages>
<publisher_item>
<identifier id_type=\"doi\">{{DOI}}</identifier>
</publisher_item>
<doi_data>
<doi>{{DOI}}</doi>
<resource>{{url}}</resource>\
</doi_data>
</journal_article>
"

# Render the <journal_article> element for article `x` (title, contributors,
# publication year, first page, DOI and landing URL) and append it to `file`.
print_journal_article <- function(x, file) {
  fields <- list(
    title = x$title,
    authors = get_authors(x),
    year = x$year,
    first_page = x$pages[1],
    last_page = x$pages[2],
    DOI = get_doi(x),
    url = get_url(x)
  )
  cat(whisker.render(journal_article_tpl, fields), file = file, append = TRUE)
}
## Could add citations
## by areading.bib file and creating from there
## <citation_list> / a citation list for the content being registered
## <citation key="key-10.9876/S0003695199034166-1">
##   <issn>0027-8424</issn>
##   <journal_title>Proc. Natl. Acad. Sci. U.S.A.</journal_title>
##   <author>West</author>
##   <volume>98</volume>
##   <issue>20</issue>
##   <first_page>11024</first_page>
##   <cYear>2001</cYear>
## </citation>
## <citation key="key-10.9876/S0003695199034166-2">
##   <journal_title>Space Sci. Rev.</journal_title>
##   <author>Heber</author>
##   <volume>97</volume>
##   <first_page>309</first_page>
##   <cYear>2001</cYear>
## </citation>

# Append a single <citation> element for `cit` to `file`.
# Still a stub: rendering depends on the citation type (article, book, ...).
print_citation = function(cit, file) {
  ## how to do this for different types...
}

# Append the <citation_list> element wrapping all citations of article `art`
# to `file`.
#
# Bug fixes: cat() takes `file=`, not `out=` (the unmatched `out=` argument
# fell into `...` and was *printed* instead of redirecting output), and R's
# logical constant is TRUE, not `true` (an undefined symbol that errored at
# runtime).
print_citations = function(art, file) {
  cat("<citation_list>\n", file = file, append = TRUE)
  for (cit in art$citations) {
    print_citation(cit, file)
  }
  cat("</citation_list>\n", file = file, append = TRUE)
}
# Fixed XML headers for a Crossref deposit batch (schema 4.4.0).
# `doi_batch_header_old` is retained for reference; `doi_batch_header`
# (identical but with a trailing newline) is the one used below.
doi_batch_header_old = '<doi_batch xmlns="http://www.crossref.org/schema/4.4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="4.4.0" xsi:schemaLocation="http://www.crossref.org/schema/4.4.0 http://www.crossref.org/schemas/crossref4.4.0.xsd">'
doi_batch_header = '<doi_batch xmlns="http://www.crossref.org/schema/4.4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="4.4.0" xsi:schemaLocation="http://www.crossref.org/schema/4.4.0 http://www.crossref.org/schemas/crossref4.4.0.xsd">\n'

# Write a complete Crossref deposit XML document for processed issue `x`
# (as returned by process_config_yml_component) to the file `out`.
# Note: the first cat() has no append=TRUE, so it truncates/creates `out`;
# every later cat() appends.  One <journal> element is emitted per article.
newDeposit = function(x, out="") {
  cat(doi_batch_header, file=out)
  cat("<head>\n", file=out, append=TRUE)
  # Batch ID, timestamp and depositor contact make up the <head> element.
  print_doi_batch_id(out)
  print_timestamp(out)
  print_depositor(out)
  cat("<registrant>CrossRef</registrant> \n </head>\n
<body>\n", file=out, append=TRUE)
  ## this generates xml for all bib items stored in a bib file
  for(i in 1:length(x$articles)) {
    cat("<journal>\n", file=out, append=TRUE)
    print_journal_metadata(out)
    print_journal_issue(x, out)
    print_journal_article(x$articles[[i]], out)
    cat("</journal>\n\n\n", file=out, append=TRUE)
  }
  cat("
</body>
</doi_batch>
", file=out, append=TRUE)
  ## test via
  ## http://test.crossref.org, see http://help.crossref.org/verifying_your_xml
  ## make real deposits with https://doi.crossref.org/servlet/deposit
  #system(paste("curl -F \'operation=doMDUpload\' -F \'login_id=fast\' -F \'login_passwd=fast_827\' -F \'fname=@", out, "\' ", "https://test.crossref.org/servlet/deposit", sep=""))
  # system(paste("curl -F \'operation=doMDUpload\' -F \'login_id=fast\' -F \'login_passwd=fast_827\' -F \'fname=@", out, "\' ", "https://doi.crossref.org/servlet/deposit", sep=""))
}
## we need to get into format for crossref
## require(yaml)
## cfg = yaml.load_file("path/to/_config.yml")
## i = cfg[[ind]]
##

# Convert one issue entry `i` from the website's _config.yml into the list
# shape consumed by newDeposit(): top-level year/volume/issue plus a named
# list of per-article records (slug, year, pages, title, parsed authors).
# Only articles whose slug starts with "RJ" are kept.
#
# Bug fix: the article loop used 1:length(articles), which iterates over
# c(1, 0) and errors when an issue has no "RJ-" articles; iterating over the
# filtered list directly handles the empty case.
process_config_yml_component <- function(i) {
  kept <- Filter(function(a) !is.null(a$slug) && grepl("^RJ", a$slug), i$articles)
  out <- list(
    year = i$year,
    volume = i$volume,
    issue = i$num,
    articles = list()
  )
  for (a in kept) {
    out$articles[[a$slug]] <- list(
      slug = a$slug,
      year = i$year,
      pages = a$pages,
      title = a$title, # strip italics? ...
      authors = author_list(a$author)
    )
  }
  out
}
# Read the issues list from the website repository's _config.yml.
# Must be called from the RJournal/articles directory; otherwise warns and
# returns NULL.
load_issues <- function() {
  in_articles_dir <- grepl("/RJournal/articles$", getwd())
  if (!in_articles_dir) {
    warning("Not in proper directory: start in RJournal/articles")
    return()
  }
  yaml.load_file("../rjournal.github.io/_config.yml")$issues
}
## Filter out a specific volume and number:
## issues = load_issues()
## issue = find_volume_num(issues, 9, 2)
## create_doi_from_issue(issue)

# Return the single issue in `issues` matching `volume` and `num`, or NULL
# (with a warning) when zero or several issues match.
#
# Bug fix: the failure branch ended with a bare `return`, which evaluates to
# the `return` *function object* rather than returning anything -- callers
# received a function instead of a NULL sentinel.
find_volume_num <- function(issues, volume, num) {
  matches <- Filter(function(i) i$volume == volume && i$num == num, issues)
  if (length(matches) == 1) {
    matches[[1]]
  } else {
    warning("Not uniquely identified")
    NULL
  }
}
## system(paste("curl -F \'operation=doMDUpload\' -F \'login_id=fast\' -F \'login_passwd=fast_827\' -F \'fname=@", out, "\' ", "https://test.crossref.org/servlet/deposit", sep=""))
## system(paste("curl -F \'operation=doMDUpload\' -F \'login_id=fast\' -F \'login_passwd=fast_827\' -F \'fname=@", out, "\' ", "https://doi.crossref.org/servlet/deposit", sep=""))

## upload DOIs from an issue
## issues = rj:::load_issues()
## issue = rj:::find_volume_num(issues, 9, 2)
## rj:::upload_dois_from_issue(pissue, passwd="", url="test")
##
##
## Issues: in the 9-2 volume, an empty given_name is generated,
## causing an error. See
## https://doi.crossref.org/servlet/submissionAdmin?sf=detail&submissionID=1442846045
## to check on a submission, where the ID is shown in the queue.

# Generate the deposit XML for `issue` into `out` and upload it to Crossref
# via curl.  `url` selects the "test" or "live" endpoint; after submission
# the queue page is opened in a browser so progress can be monitored.
#
# Bug fix: the curl command was executed twice back-to-back (a stray
# system(cmd) before `resp = system(cmd)`), submitting the same deposit
# twice; the batch is now uploaded exactly once.
upload_dois_from_issue <- function(issue,
                                   login="rfou", passwd="see_EIC_instructions",
                                   out=sprintf("/tmp/DOI_%s_%s.xml", issue$volume, issue$num),
                                   url="test" # or live
                                   ) {
  # Build the deposit XML on disk first.
  pissue = process_config_yml_component(issue)
  newDeposit(pissue, out)
  ## test via
  ## http://test.crossref.org, see http://help.crossref.org/verifying_your_xml
  ## make real deposits with https://doi.crossref.org/servlet/deposit
  q_url = list(
    "test"="https://test.crossref.org/servlet/submissionAdmin?sf=showQ",
    "live"="https://doi.crossref.org/servlet/submissionAdmin?sf=showQ"
  )[[url]]
  u_url = list(
    "test"="https://test.crossref.org/servlet/deposit",
    "live"="https://doi.crossref.org/servlet/deposit"
  )[[url]]
  cmd = sprintf("curl -F 'operation=doMDUpload' -F 'login_id=%s' -F 'login_passwd=%s' -F 'fname=@%s' %s", login, passwd, out, u_url)
  resp = system(cmd)
  print(resp)
  # Open the submission queue so the EIC can watch the batch being processed.
  browseURL(q_url)
}
## create a DOI from the an issue component of the config$issues
##
## require(yaml)
## cfg = yaml.load_file("../rjournal.github.io/_config.yml")
## issue = cfg$issue[[length(cfg$issue)]] # last one
##
## or
##
## issues = load_issues()
## issue = find_volume_num(issues, 10, 2)
##
## then
##
## create_doi_from_issue(issue)

## Then, log onto https://doi.crossref.org
## upload this new xml file through upload; cross fingers

# Write the Crossref deposit XML for `issue` into `outdir` as
# "DOI-<volume>-<num>.xml".  Issues without a volume are skipped with a
# warning.
#
# Bug fix: the guard branch contained a bare `return` statement, which
# merely evaluates the `return` function object and does NOT exit the
# function -- execution fell through and tried to build a file name from a
# NULL volume.  return(invisible(NULL)) actually aborts the call.
create_doi_from_issue <- function(issue, outdir="/tmp/DOI_XML/") {
  if (is.null(issue$volume)) {
    warning("No volume for this issue")
    return(invisible(NULL))
  }
  out = paste0(outdir, "DOI-", issue$volume, "-", issue$num, ".xml")
  issue = process_config_yml_component(issue)
  newDeposit(issue, out)
}
## create all DOI entries for initial upload

# Write a DOI deposit XML file into `outdir` for every issue listed in the
# website's _config.yml (used for the initial bulk registration).
create_doi_xmls <- function(outdir="/tmp/DOI_XML/", config="../rjournal.github.io/_config.yml") {
  issues <- yaml.load_file(config)$issues
  ## identify volume, volnum
  dir.create(outdir)
  sapply(issues, create_doi_from_issue, outdir = outdir)
}
|
80781d4d5f7cc446fb0d6ef249b4ebd5473d5145
|
1d85ea0fd495bbb892175f20676ae38f61baa475
|
/man/miucs2latlon.Rd
|
678f8c89470718f18173400d1fe5291f268c95b6
|
[] |
no_license
|
steingod/R-mipolsat
|
e6a3ddedd31f0eaf26f6f56bb5b30219cc63968a
|
a19c0c34557cb81faa4f9297c44413af8e59488b
|
refs/heads/master
| 2021-01-19T20:29:57.560832
| 2013-05-28T20:33:58
| 2013-05-28T20:33:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,067
|
rd
|
miucs2latlon.Rd
|
\name{miucs2latlon}
\alias{miucs2latlon}
\title{Convert between UCS and geographical coordinates}
\description{
Coordinates within the Cartesian User Coordinate System, northings and
eastings are converted to geographical latitude and longitude using a
spherical Earth and an Earth radius of 6371. km. This is a very crude
approximation, for better accuracy and support of other Earth models use
e.g. PROJ.4 software (\url{http://www.remotesensing.org/proj}).
}
\usage{
miucs2latlon(northings, eastings)
}
\arguments{
\item{northings}{distance to the line through the North Pole and -180
- 180 degrees longitude in kilometers, negative southwards in the Atlantic}
\item{eastings}{distance from the Greenwich meridian in kilometers,
positive eastwards}
}
\value{Returns a \link{data.frame} with latitude (lat) and longitude (lon)
and metadata.
}
\seealso{\link{milatlon2ucs}}
\author{
Øystein Godøy (\email{o.godoy@met.no})
}
\note{
}
\examples{
geopos <- miucs2latlon(northings,eastings)
geopos$lat
geopos$lon
}
\keyword{}
|
0917232e140230269bf04a2d583d7b3b3892da1a
|
6902bceeb0dfdf1a0fe7fe2da9c92b9bf3bf1a65
|
/man/phnorm.Rd
|
680e7d0b9e905a5d4cc07d30445437d43e3db34c
|
[] |
no_license
|
cran/publipha
|
af504bc1af5bb3e7321dc7b32f03cfbf0a6f74fa
|
934b3c701d87a4eae80f92ec736c569dc3f92275
|
refs/heads/master
| 2023-04-15T04:50:57.508020
| 2023-04-04T15:10:06
| 2023-04-04T15:10:06
| 236,876,044
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,813
|
rd
|
phnorm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/densities-phnorm.R
\name{phnorm}
\alias{phnorm}
\alias{dphnorm}
\alias{rphnorm}
\alias{pphnorm}
\title{p-hacking Meta-analysis Model}
\usage{
dphnorm(x, theta, sigma, alpha = c(0, 0.025, 0.05, 1), eta, log = FALSE)
rphnorm(n, theta, sigma, alpha = c(0, 0.025, 0.05, 1), eta)
pphnorm(
q,
theta,
sigma,
alpha = c(0, 0.025, 0.05, 1),
eta,
lower.tail = TRUE,
log.p = FALSE
)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{theta}{vector of means.}
\item{sigma}{vector of study standard deviations.}
\item{alpha}{vector of thresholds for p-hacking.}
\item{eta}{vector of p-hacking probabilities, normalized to sum to 1.}
\item{log, log.p}{logical; If \code{TRUE}, probabilities are given as
\code{log(p)}.}
\item{n}{number of observations. If \code{length(n) > 1}, the length is taken
to be the number required.}
\item{lower.tail}{logical; If \code{TRUE} (default), the probabilities are
\eqn{P[X\leq x]} otherwise, \eqn{P[X\geq x]}.}
}
\value{
\code{dphnorm} gives the density, \code{pphnorm} gives the distribution
function, and \code{rphnorm} generates random deviates.
}
\description{
Density, distribution, and random variate generation for the p-hacking meta-
analysis model.
}
\details{
These functions assume one-sided selection on the effects. \code{alpha} contains
the selection thresholds and \code{eta} the vector of \emph{p}-hacking
probabilities. \code{theta} is the true effect, while \code{sigma} is the true
standard deviation before selection.
}
\examples{
rphnorm(100, theta = 0, sigma = 0.1, eta = c(1, 0.5, 0.1))
}
\references{
Moss, Jonas and De Bin, Riccardo. "Modelling publication
bias and p-hacking" Forthcoming (2019)
}
|
b7e087a4498360d6eb50a54fdc4ae1631ec00460
|
0f93f5aa525b40ad2fb69891f0edb510d2d771c4
|
/maricultura-app/scripts/footer.R
|
c7cb00425644cb4809bd6e528e9242ee10917d79
|
[] |
no_license
|
sandrafogg/maricultura-app_practice
|
e67d133f6f6e7f2c6f8145308ff71a14a31ded4c
|
17946efc461738945838546ed178a039251ef042
|
refs/heads/master
| 2023-03-15T09:36:20.336838
| 2020-10-13T00:29:12
| 2020-10-13T00:29:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
footer.R
|
# Page footer for the maricultura Shiny app: a light-blue, centered banner
# crediting Shiny/RStudio and linking to the app's source code on GitHub.
# NOTE(review): relies on shiny being attached (div, tags, img, icon, br are
# shiny UI helpers) -- confirm this script is sourced after library(shiny).
footer <- div( style = "background-color: #c5dbeb; padding: 15px; text-align: center;",
               tags$footer("Developed with",
                           tags$span(style = "font-family: 'Source Code Pro', monospace; font-size: 25px;" ,"Shiny"),
                           "from",
                           img(src = "https://rstudio.com/wp-content/uploads/2018/10/RStudio-Logo-Flat.png", height = "30px"),
                           ".",
                           br(),
                           "R version 3.6.1 (2019-07-05). Code on ", tags$a(href ="https://github.com/maricultura/maricultura-app", target="_blank", icon("github"),"GitHub."))
)
|
f5eb05357c99f0118061f757d04c278e674593b5
|
d843965659ffab99d2dfcf923da8de0450958c8b
|
/Predictive_Modeling/7_NonlinearRegressionModels/7_Answers3.R
|
0930ef43848bbe3c4bf3a00be36627f292f07168
|
[] |
no_license
|
LataniaReece/R_Scripts
|
160d4c8424fb1797a147952cbd85b5f7fd73b8e2
|
44dba10b69db94e82273fc9f52834ac11f283edd
|
refs/heads/master
| 2022-02-01T05:29:20.226590
| 2019-04-29T23:15:40
| 2019-04-29T23:15:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,882
|
r
|
7_Answers3.R
|
#7.3 Answers
# Fit and compare nonlinear regression models (KNN, neural network, averaged
# neural networks) on the tecator data: predict fat content from absorbance
# spectra.  Training/testing RMSE and R^2 are accumulated for each model.
library(caret)
set.seed(0)
data(tecator)
fat = endpoints[,2] # what we want to predict
absorp = data.frame(absorp)
# NOTE(review): nearZeroVar() screens for near-zero-variance predictors, not
# correlated ones as the original comment claimed ... none found ...
zero_cols = nearZeroVar( absorp )
zero_cols
# Split this data into training and testing sets (80/20):
#
training = createDataPartition( fat, p=0.8 )
absorp_training = absorp[training$Resample1,]
fat_training = fat[training$Resample1]
absorp_testing = absorp[-training$Resample1,]
fat_testing = fat[-training$Resample1]
# Build various nonlinear models and then compare performance:
#
# Note we use the default "trainControl" of bootstrap evaluations for each of the models below:
#
preProc_Arguments = c("center","scale")
#preProc_Arguments = c("center","scale","pca")
# A K-NN model (k chosen over a 10-point tuning grid):
#
set.seed(0)
knnModel = train(x=absorp_training, y=fat_training, method="knn", preProc=preProc_Arguments, tuneLength=10)
# predict on training/testing sets and record RMSE / R^2
knnPred = predict(knnModel, newdata=absorp_training)
knnPR = postResample(pred=knnPred, obs=fat_training)
rmses_training = c(knnPR[1])
r2s_training = c(knnPR[2])
methods = c("KNN")
knnPred = predict(knnModel, newdata=absorp_testing)
knnPR = postResample(pred=knnPred, obs=fat_testing)
rmses_testing = c(knnPR[1])
r2s_testing = c(knnPR[2])
# A (single) Neural Network model; linout=TRUE for regression output,
# MaxNWts sized for up to 10 hidden units over all predictors:
#
set.seed(0)
nnetModel = train(x=absorp_training, y=fat_training, method="nnet", preProc=preProc_Arguments,
                  linout=TRUE,trace=FALSE,MaxNWts=10 * (ncol(absorp_training)+1) + 10 + 1, maxit=500)
saveRDS(nnetModel, 'NN_ans_7.3.rds')
nnetPred = predict(nnetModel, newdata=absorp_training)
nnetPR = postResample(pred=nnetPred, obs=fat_training)
rmses_training = c(rmses_training,nnetPR[1])
r2s_training = c(r2s_training,nnetPR[2])
methods = c(methods,"NN")
nnetPred = predict(nnetModel, newdata=absorp_testing)
nnetPR = postResample(pred=nnetPred, obs=fat_testing)
rmses_testing = c(rmses_testing,nnetPR[1])
r2s_testing = c(r2s_testing,nnetPR[2])
# Averaged (model-averaged) Neural Network models:
#
set.seed(0)
avNNetModel = train(x=absorp_training, y=fat_training, method="avNNet", preProc=preProc_Arguments,
                    linout=TRUE,trace=FALSE,MaxNWts=10 * (ncol(absorp_training)+1) + 10 + 1, maxit=500)
saveRDS(avNNetModel, 'avNN_ans_7.3.rds')
avNNetPred = predict(avNNetModel, newdata=absorp_training)
avNNetPR = postResample(pred=avNNetPred, obs=fat_training)
rmses_training = c(rmses_training,avNNetPR[1])
r2s_training = c(r2s_training,avNNetPR[2])
methods = c(methods,"AvgNN")
avNNetPred = predict(avNNetModel, newdata=absorp_testing)
avNNetPR = postResample(pred=avNNetPred, obs=fat_testing)
rmses_testing = c(rmses_testing,avNNetPR[1])
r2s_testing = c(r2s_testing,avNNetPR[2])
# MARS model:
#
marsGrid = expand.grid(.degree=1:2, .nprune=2:38)
set.seed(0)
marsModel = train(x=absorp_training, y=fat_training, method="earth",
preProc=preProc_Arguments, tuneGrid=marsGrid)
saveRDS(marsModel, 'marsModel_ans_7.3.rds')
marsPred = predict(marsModel, newdata=absorp_training)
marsPR = postResample(pred=marsPred, obs=fat_training)
rmses_training = c(rmses_training,marsPR[1])
r2s_training = c(r2s_training,marsPR[2])
methods = c(methods,"MARS")
marsPred = predict(marsModel, newdata=absorp_testing)
marsPR = postResample(pred=marsPred, obs=fat_testing)
rmses_testing = c(rmses_testing,marsPR[1])
r2s_testing = c(r2s_testing,marsPR[2])
# Lets see what variables are most important in the MARS model:
varImp(marsModel)
# A Support Vector Machine (SVM):
#
set.seed(0)
svmModel = train(x=absorp_training, y=fat_training, method="svmRadial", preProc=preProc_Arguments,
tuneLength=20)
svmPred = predict(svmModel, newdata=absorp_training)
svmPR = postResample(pred=svmPred, obs=fat_training)
rmses_training = c(rmses_training,svmPR[1])
r2s_training = c(r2s_training,svmPR[2])
methods = c(methods,"SVM")
svmPred = predict(svmModel, newdata=absorp_testing)
svmPR = postResample(pred=svmPred, obs=fat_testing)
rmses_testing = c(rmses_testing,svmPR[1])
r2s_testing = c(r2s_testing,svmPR[2])
# Package the results up:
#
res_training = data.frame( rmse=rmses_training, r2=r2s_training )
rownames(res_training) = methods
training_order = order( -res_training$rmse )
res_training = res_training[ training_order, ] # Order the dataframe so that the best results are at the bottom:
print( "Final Training Results" )
print( res_training )
res_testing = data.frame( rmse=rmses_testing, r2=r2s_testing )
rownames(res_testing) = methods
res_testing = res_testing[ training_order, ] # Order the dataframe so that the best results for the training set are at the bottom:
print( "Final Testing Results" )
print( res_testing )
# EPage 82
resamp = resamples( list(knn=knnModel,svm=svmModel,mars=marsModel,nnet=nnetModel,avnnet=avNNetModel) )
print( summary(resamp) )
dotplot( resamp, metric="RMSE" )
print( summary(diff(resamp)) )
|
39e169953044b0a9023af09700e9d48dff46f0e6
|
afbcc28818986180ecfd94465071511940365ee5
|
/init.r
|
7ff71aed8ec2db3f1906e4aed4209f635fa1b686
|
[
"MIT"
] |
permissive
|
matthewgthomas/witch-stats
|
50784cf739f4f9623f113821f6a754cf18a81cad
|
2d32f9d64ec420da12a1b75c9823c6c85da03aad
|
refs/heads/master
| 2021-05-15T20:42:44.336077
| 2018-01-18T21:18:26
| 2018-01-18T21:18:26
| 107,792,387
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 668
|
r
|
init.r
|
##
## load Mosuo data
##
library(tidyverse)
source("init plots.r")
# where stuff is
data.dir = "data"
plots.dir = "plots"
models.dir = "models"
results.dir = "results"
# create folders if needed (except data folder: if that's missing, you can't do much more here)
if (!dir.exists(plots.dir)) # plots
dir.create(plots.dir)
if (!dir.exists(models.dir)) # models
dir.create(models.dir)
if (!dir.exists(results.dir)) # results
dir.create(results.dir)
load(file.path(data.dir, "mosuo.rdata"))
# split people into kids and adults
adult.age = 15
mosuo.children = subset(mosuo.people, Age < adult.age)
mosuo.adults = subset(mosuo.people, Age >= adult.age)
|
d0fd0e5b54a90df9794ee037980f3c3c6e4dc503
|
642015754f6334c883d87fc35d4ab83e3e1f2f5c
|
/metacell/scrdb/R/reduce.r
|
dd4f892bd47c7c865334822ff3df1198d79bc043
|
[] |
no_license
|
tanaylab/hematopoiesis2018
|
343d226e56842e0ccac088c9ced77a0e4a843e7e
|
5b83d8e5addbc827a39b85d12bdc5d03d5e97673
|
refs/heads/master
| 2020-07-17T14:08:29.587073
| 2018-06-19T07:18:38
| 2018-06-19T07:18:38
| 206,033,624
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 931
|
r
|
reduce.r
|
#' Dimensionality reduction for gene expression
#'
#' Reduce a single cell matrix over a gene set, or through various dimensionality reduction strategies.
#'
#' @param Object .
#' @param type .
#' @param d_n .
#'
#' @export
#'
setGeneric("tgscm_reduce",
function(.Object, type, d_n, ...) stnadrdGeneric("tgscm_reduce"))
setMethod("tgscm_reduce",
signature = "tgScMat",
definition =
function(.Object, type, d_n, ...) {
if(type == "on_genes") {
.tgscm_reduce_on_genes(.Object, d_n, ...)
} else if(type == "pca") {
.tgscm_reudce_pca(.Object, d_n, ...)
}
}
)
.tgscm_reduce_on_genes = function(scmat, d_n, genes)
{
if(length(setdiff(genes, rownames(scmat))) != 0) {
ndiff = length(setdiff(genes, rownames(scmat)))
message(ndiff, " genes out of ", length(genes), " missing in projection")
return(NA)
}
tgScMat(scmat[genes,], stat_type=scmat@stat_type)
}
.tgscm_reduce_pca = function(scmat, d_n)
{
#TBA
}
|
62a6d4736c236e3e04e4c79cfe1885bfa5067cab
|
154454a26186cc9f89245a9bd25295796999ae85
|
/04-26-2019.R
|
7c32cab846c8f836a24dce123dad536e73f5e4ea
|
[] |
no_license
|
ryanpeiffer/538_riddler
|
aeef9fba008b5ea97a555720deb447568e4497f8
|
368b3b8619ff936531a51695bf50a0985c53c83d
|
refs/heads/master
| 2023-09-02T17:30:32.793716
| 2023-08-18T15:32:43
| 2023-08-18T15:32:43
| 178,730,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,680
|
r
|
04-26-2019.R
|
# 538 Riddler - 04/26/2019
# Thanos, the all-powerful supervillain, can snap his fingers and destroy half of all the beings in the universe.
#
# But what if there were 63 Thanoses, each snapping his fingers one after the other?
# Out of 7.5 billion people on Earth, how many can we expect would survive?
#
# If there were N Thanoses, what would the survival fraction be?
population <- 7.5e9
n_thanos <- 63
prob_survival <- 0.5
#simplest answer: Thanos snaps only affect humans
remaining_pop1 <- floor(population * prob_survival^n_thanos)
#answer = 0
#slightly more nuanced answer: each Thanos snap destroys exactly 50% of all humans and exactly 50% of all thanii
snap_yo_finger <- function(population, n_thanos) {
pop <- population
thanos_count <- n_thanos
while(thanos_count > 0) {
thanos_count <- thanos_count - 1 #reduce thanos count by 1 since this one has now snapped
pop <- floor(pop * prob_survival)
thanos_count <- floor(thanos_count * prob_survival) #half of the unsnapped thanii also get obliterated
}
return(pop)
}
remaining_pop2 <- snap_yo_finger(population, n_thanos)
#answer = 117,187,500 ... a much nicer situation than the first!
#ultimate nuance: assume the universe of all beings is humans+thanii. Randomly simulate whether each living Thanos is
#destroyed by a given snap. Theoretically this means the number of humans shouldn't get cut exactly in half, but I'm
#going to keep it simple. You could also go down a rabbit hole of saying the # beings in the universe is likely infinite,
#so you should really simulate whether each human falls into the destroyed half.... but that's too much for me.
|
ee15bc42b8e8cbe1adf35662bd2aef7523fafa4c
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/base/db/R/derive.trait.R
|
7abc5b9cc6428fe3de720093c2a75f8554ccb4ca
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| false
| 2,450
|
r
|
derive.trait.R
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
##--------------------------------------------------------------------------------------------------#
##'
##' Performs an arithmetic function, FUN, over a series of traits and returns
##' the result as a derived trait.
##' Traits must be specified as either lists or single row data frames,
##' and must be either single data points or normally distributed.
##' In the event one or more input traits are normally distributed,
##' the resulting distribution is approximated by numerical simulation.
##' The output trait is effectively a copy of the first input trait with
##' modified mean, stat, and n.
##'
##' @name derive.trait
##' @title Performs an arithmetic function, FUN, over a series of traits and returns the result as a derived trait.
##' @param FUN arithmetic function
##' @param ... traits that will be supplied to FUN as input
##' @param input list of trait inputs. See examples
##' @param var.name name to use in output
##' @param sample.size number of random samples generated by rnorm for normally distributed trait input
##' @return a copy of the first input trait with mean, stat, and n reflecting the derived trait
##' @export
##' @examples
##' input <- list(x = data.frame(mean = 1, stat = 1, n = 1))
##' derive.trait(FUN = identity, input = input, var.name = 'x')
derive.trait <- function(FUN, ..., input = list(...), var.name = NA, sample.size = 10^6){
if(any(lapply(input, nrow) > 1)){
return(NULL)
}
input.samples <- lapply(input, take.samples, sample.size=sample.size)
output.samples <- do.call(FUN, input.samples)
output <- input[[1]]
output$mean <- mean(output.samples)
output$stat <- ifelse(length(output.samples) > 1, stats::sd(output.samples), NA)
output$n <- min(sapply(input, function(trait){trait$n}))
output$vname <- ifelse(is.na(var.name), output$vname, var.name)
return(output)
}
##==================================================================================================#
|
ea5fb5a607e49d2a0c3824d4c30b0e96ae6ee7af
|
0392e3ed22ce0e014c04634f2adb018325c0025b
|
/man/pipe.Rd
|
e0771bbd3503cdaf31661086c90d496c1866adee
|
[] |
no_license
|
swang87/fc
|
cff78468ac1cdb1ed2922f94a29fd2e4eb682c92
|
f031539b06b26016560f47303ebef28dec90808d
|
refs/heads/master
| 2021-03-27T14:53:00.290061
| 2019-10-20T22:53:12
| 2019-10-20T22:53:12
| 114,667,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,039
|
rd
|
pipe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.r
\name{\%>\%}
\alias{\%>\%}
\title{Forward-pipe Operator Using Standard Evaluation}
\usage{
lhs \%>\% rhs
}
\arguments{
\item{lhs}{the function that will be applied second to an input.}
\item{rhs}{the function that will be applied first to an input.}
}
\value{
The composed function lhs(rhs(x)).
}
\description{
The forward pipe operator behaves similar to the magrittr
pipe operator with two exceptions. First, it only supports standard
evaluation. If modified parameter values are needed
then the 'fc' function should be used. Second, it composes functions.
The return type of this operator is an R function.
}
\examples{
# Create a new code block in case the pipe operator is already
# defined.
{
# Make sure the example uses the correct pipe operator.
`\%>\%` <- fc::`\%>\%`
# Create a function that gets the 9th and 10th objects using the head
# and tail functions.
nine_and_ten <- fc(head, n=10) \%>\% fc(tail, n=2)
nine_and_ten(iris)
}
}
|
90982b28b5a5e6b7b13c0b85739653cba71d7705
|
4e867cca2e7e628d9966b4de1d19bc1a46014b6f
|
/Rscripts/2_cometh_sensitivity_specificity_10-30-2018.R
|
2414ee74bdfd4e327ece8e0a12d5302f658028eb
|
[] |
no_license
|
lissettegomez/coMethDMRPaper
|
34657bd51a027b3bca5cdf7d0b81be3938f8316c
|
912ea71c5f3cc8c4a606d1d639a9455da16b57c8
|
refs/heads/master
| 2020-04-26T19:15:43.013322
| 2019-06-07T17:52:25
| 2019-06-07T17:52:25
| 173,768,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,784
|
r
|
2_cometh_sensitivity_specificity_10-30-2018.R
|
source ("./functions/contiguous_regions_noCpGsortByLocation.R")
source ("./functions/ComethCpgEval_SeSp.R")
source ("./functions/CpGsInRegion.R")
library(psych)
library(bumphunter)
library(coMethDMR)
# compute sensitivity and specificity for cometh selected cpg probes
locationFile <- readRDS ("./data/cpg.locations.RDS")
minCorr_vec <- c(0.5, 0.8)
ncpgs_vec <- c(3, 5, 8)
nrep <- 10
r.drop <- c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,0.9)
results_all <- data.frame (matrix(nrow = 0, ncol = 9))
for (c in 1:2){
for (n in 1:3){
c1 <- minCorr_vec[c]
c2 <- ifelse (c1 == 0.5, 0.8, 1)
for (fold in 1:2){
for (rep in 1:nrep){
########## true positive cpgs in regions
corr <- c1*10
tp_lsdf <- readRDS (paste0("./data/DATA_simulated/RegionPheno_C",
corr, "_ncpg", ncpgs_vec[n], "_rep", rep, ".RDS") )
tp2_lsdf <- lapply (tp_lsdf, function(item)
t(subset (item, select = -c(means, age))) )
trueCpgs_lsvec <- lapply (tp2_lsdf, function(item) rownames (item))
########## all cpgs in the regions
temp <- readRDS (paste0("./data/DATA_simulated/RegionPheno_C",
corr, "_ncpg", ncpgs_vec[n], "_rep", rep, "_fold",fold, ".RDS"))
# take out means and age
temp2 <- lapply (temp, function(item)
t(subset (item, select = -c(means, age))) )
allCpgs_lsvec <- lapply (temp2, function (item) rownames (item) )
######### cpgs in coMeth regions
for (r in 1:9) {
rdrop <- 10*r.drop[r]
markRegions_lsdf <- lapply (temp2, MarkComethylatedCpGs,
rDropThresh_num = r.drop[r])
contRegions_lsdf <- lapply (markRegions_lsdf, FindComethylatedRegions, minCpGs_int = 3)
# test <- betaMatrix_ex4
# marks_df <- MarkComethylatedCpGs(t(test), rDropThresh_num = 0.6)
# FindComethylatedRegions(marks_df)
if(length(contRegions_lsdf) > 0) {
predCpgs_lsvec <- lapply(contRegions_lsdf, CpgsInRegion)
########## compute precision and recall for each simulation dataset
result_lsdf <- mapply (ComethCpgEval_SeSp, allCpgs_lsvec,
trueCpgs_lsvec,
predCpgs_lsvec, SIMPLIFY = FALSE)
results_df <- do.call (rbind, result_lsdf)
##### add region names
strings <- strsplit(row.names(results_df), '\\.')
first <- do.call (rbind, lapply (strings, function (x) x[[1]]))
results_df$region <- first
######### output
write.csv (results_df, paste0("results/resultsCoMeth_C", corr, "_ncpg", ncpgs_vec[n],
"_rep", rep, "_fold",fold, "_rdrop_", rdrop,".csv"), row.names = FALSE )
results2_df <- unique(subset(results_df, select = c(region, sensitivity, specificity)))
results2_df$c1 <- c1
results2_df$c2 <- c2
results2_df$ncpgs <- ncpgs_vec[n]
results2_df$rep <- rep
results2_df$fold <- fold
results2_df$rdrop <- rdrop
results_all <- rbind (results_all, results2_df)
}
}
}
}
}
}
|
e09d5cea5c9626149d220938736b5ed5a21f5c2e
|
665c32727f3920aaaa8e2535f66d8df09d55944a
|
/inst/doc/multivariate.R
|
d05d23f676c6ebabfce11b3b214b940c24e4bef4
|
[] |
no_license
|
cran/survivalAnalysis
|
5dd6ad5938641467b41655c955416422ca078d7a
|
ea726a3265120f159a15cede3191b892bc79bf73
|
refs/heads/master
| 2022-02-23T03:34:21.388556
| 2022-02-11T13:00:02
| 2022-02-11T13:00:02
| 147,193,710
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,538
|
r
|
multivariate.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- echo=FALSE, results='asis'----------------------------------------------
knitr::kable(head(survival::lung, 10))
## ---- warning=FALSE, message=FALSE--------------------------------------------
library(tidyverse)
library(tidytidbits)
library(survivalAnalysis)
## -----------------------------------------------------------------------------
covariate_names <- c(age="Age at Dx",
sex="Sex",
ph.ecog="ECOG Status",
wt.loss="Weight Loss (6 mo.)",
`sex:female`="Female",
`ph.ecog:0`="ECOG 0",
`ph.ecog:1`="ECOG 1",
`ph.ecog:2`="ECOG 2",
`ph.ecog:3`="ECOG 3")
survival::lung %>%
mutate(sex=rename_factor(sex, `1` = "male", `2` = "female")) %>%
analyse_multivariate(vars(time, status),
covariates = vars(age, sex, ph.ecog, wt.loss),
covariate_name_dict = covariate_names) ->
result
print(result)
## -----------------------------------------------------------------------------
survival::lung %>%
mutate(sex=rename_factor(sex, `1` = "male", `2` = "female"),
ph.ecog = as.factor(ph.ecog)) %>%
analyse_multivariate(vars(time, status),
covariates = vars(sex, ph.ecog),
covariate_name_dict=covariate_names,
reference_level_dict=c(ph.ecog="0"))
## -----------------------------------------------------------------------------
exp((75-45)*log(1.04))
## ---- fig.width=8, fig.height=5-----------------------------------------------
forest_plot(result)
## ---- fig.width=9.7, fig.height=1.7-------------------------------------------
forest_plot(result,
factor_labeller = covariate_names,
endpoint_labeller = c(time="OS"),
orderer = ~order(HR),
labels_displayed = c("endpoint", "factor", "n"),
ggtheme = ggplot2::theme_bw(base_size = 10),
relative_widths = c(1, 1.5, 1),
HR_x_breaks = c(0.25, 0.5, 0.75, 1, 1.5, 2))
## ---- fig.width=9.7, fig.height=1.7-------------------------------------------
df <- survival::lung %>% mutate(sex=rename_factor(sex, `1` = "male", `2` = "female"))
map(vars(age, sex, ph.ecog, wt.loss), function(by)
{
analyse_multivariate(df,
vars(time, status),
covariates = list(by), # covariates expects a list
covariate_name_dict = covariate_names)
}) %>%
forest_plot(factor_labeller = covariate_names,
endpoint_labeller = c(time="OS"),
orderer = ~order(HR),
labels_displayed = c("endpoint", "factor", "n"),
ggtheme = ggplot2::theme_bw(base_size = 10))
## ---- fig.width=9.7, fig.height=1.7-------------------------------------------
survival::lung %>%
mutate(kras=sample(c("WT", "G12C", "G12V", "G12D", "G12A"),
nrow(.),
replace = TRUE,
prob = c(0.6, 0.24, 0.16, 0.06, 0.04))
) %>%
analyse_survival(vars(time, status), by=kras) %>%
forest_plot(use_one_hot=TRUE,
endpoint_labeller = c(time="OS"),
orderer = ~order(HR),
labels_displayed = c("endpoint", "factor", "n"),
ggtheme = ggplot2::theme_bw(base_size = 10))
|
11a9755fd6d47f38c64474dd9d6346692e27163b
|
76cb61dc6f0d84ac84627a2beecdb3cc6f4a851f
|
/r_scripts/join_sentiments.R
|
3e4f016fce3bd53c361ecaa51b9b498f42cd2ed0
|
[] |
no_license
|
serrat839/INFO-474-group-six-lol
|
8b01a99c5c9fbc89b8ec5b9b4d9753084e987652
|
1f2af9822b70d8641e17ee838de0944b18f3940f
|
refs/heads/main
| 2023-01-10T09:06:30.524112
| 2020-11-17T01:50:22
| 2020-11-17T01:50:22
| 308,190,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 734
|
r
|
join_sentiments.R
|
library(tidytext)
library(stringr)
library(dplyr)
song_data <- read.csv(paste0("./clean_data/album_song_lyric_data.csv"))
lyrics <- song_data %>%
select(album_ids, album, artist, track_id, lyrics)
lyric_tokens <- lyrics %>%
unnest_tokens(word, lyrics)
data(stop_words)
write.csv(stop_words, "./clean_data/stop_words.csv")
# join the sentiment sets
bing_add <- lyric_tokens %>%
left_join(get_sentiments("bing")) %>%
rename(bing_sentiment = sentiment)
afinn_add <- bing_add %>%
left_join(get_sentiments("afinn")) %>%
rename(bing_value = value)
nrc_add <- afinn_add %>%
left_join(get_sentiments("nrc")) %>%
rename(nrc_sentiment = sentiment)
write.csv(nrc_add, "./clean_data/lyric_token_sentiment.csv")
|
e3790d571c825247ccbb7309af32bc48e1561942
|
93ea3056aeca80141c2380d732c520391c8aa1ea
|
/BMAPack/R/summaryBMA.R
|
49b38b43515679b5db6458e97298f264c5a7f89e
|
[] |
no_license
|
karlosmantilla/midterm
|
22e0fab6480c6dc3c7a17845ef2434df7a032bd1
|
b949ae1988b676d59874030d8ffdd2144d64a7d1
|
refs/heads/master
| 2020-12-11T07:20:50.143098
| 2014-03-21T05:32:51
| 2014-03-21T05:32:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,879
|
r
|
summaryBMA.R
|
#' Provides descriptive summary statistics for either the posterior expected values or the posterior probability that coefficients are non-zero
#'
#' Provides summary statistics for posterior expected values or posterior probability that coefficients are non-zero
#'
#' @param M a matrix of observations for the covariates.
#' @param Y a numeric vector whose length is equal to the number of rows of \code{M}.
#' @param gprior a user specified prior.
#' @param whichsumm a user specifed argument for which summary to produce, expected values ("Expected") or non-zero probability ("Non-Zero").
#'
#' @return An object of class list with elements
#' \item{PosteriorValues}{If whichsumm specified as "Expected", posterior expected value of each coefficient.}
#' \item{SummaryPosteriorValues}{If whichsumm specified as "Expected", summary statistics for expected values.}
#' \item{PosteriorNonZero}{If whichsumm specified as "Non-Zero", posterior probability that the coefficient is non-zero.}
#' \item{SummaryPosteriorNonZero}{If whichsumm specified as "Non-Zero", summary statistics for posterior non-zero probabilities.}
#' @author Dino Hadzic \email{dino.hadzic@@wustl.edu}
#' @examples
#'
#' M <- matrix(rnorm(30), ncol=3)
#' Y <- c(rnorm(10))
#' summaryBMA(M, Y, gprior=3, whichsumm="Expected")
#' @seealso \code{\link{fitBMA}}
#' @rdname summaryBMA
#' @aliases summaryBMA, ANY-method
#' @export
summaryBMA <- function(M, Y, gprior, whichsumm){ #Function takes as arguments matrix of observations M, vector of
#outcomes Y, user specified gprior, and whichsumm. The argument whichsumm can be specified as either "Expected",
#in which case it will provide summary statistics for posterior expected values for the coefficients, or "Non-Zero",
#in which case it will provide summary statistics for posterior non-zero probabilities for the coefficients.
BMAOutput <- fitBMA(M, Y, gprior) #First run fitBMA, and store output as BMAOutput.
#The if statement below states that when whichsumm is specified as "Expected", the function will output summary
#satistics for posterior expected values for the coefficients. The output is a list.
if(whichsumm == "Expected"){
PosteriorValues <- BMAOutput$PosteriorValues
SummaryPosteriorValues <- summary(BMAOutput$PosteriorValues)
return(list("PosteriorValues"=PosteriorValues, "SummaryPosteriorValues"=SummaryPosteriorValues))
}
#The if statement below states that when whichsumm is specified as "Non-Zero", the function will output summary
#satistics for posterior probabilities that coefficients are non-zero. The output is a list.
if(whichsumm == "Non-Zero"){
PosteriorNonZero <- BMAOutput$PosteriorNonZero
SummaryPosteriorNonZero <- summary(BMAOutput$PosteriorNonZero)
return(list("PosteriorNonZero"=PosteriorNonZero, "SummaryPosteriorNonZero"=SummaryPosteriorNonZero))
}
}
|
ad3d34f85fc2b0bb1d1ca5e9a97fae3d2c04670b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/genpathmox/R/class5_node.reg.R
|
28022775ca83e61835597a84fc5f8c98a5953248
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
class5_node.reg.R
|
#' node.reg class
#'
#' info.pls is a S4 class that contains element of the node class
#'
#' @name node.reg_class
#' @rdname node.reg_class
setClass("node.reg",representation(
id = "numeric",
elements = "numeric",
father = "numeric",
childs = "numeric",
info = "info.reg"
))
|
f33ba8d422e5a4b4c837bb79f0ce508793b08803
|
b4853df8052fea8a830cef05805fdbb5afe2f6f9
|
/R/statistics.R
|
38348838e82f4fab674b28890033058b9798cfb9
|
[
"Apache-2.0"
] |
permissive
|
SilentSpringInstitute/RStateCancerProfiles
|
a18b61c15f593fecd33fd977b93025084164b03e
|
64edbc7cad3e2be028389e2006e2314f37804753
|
refs/heads/master
| 2020-06-10T22:36:52.409978
| 2017-01-03T22:33:11
| 2017-01-03T22:56:10
| 75,855,593
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,006
|
r
|
statistics.R
|
query_args <- function(l) {
paste(names(l), l, sep="=", collapse = "&")
}
#' @import maps
statistic_form_values <- function(by, state, cancer, race, sex, age) {
ret <- list()
if(by == "county" && (is.null(state) || state == "all")) {
ret$state = "99"
}
else if(by == "county" && !is.null(state)) {
stopifnot(state %in% tolower(maps::state.fips$abb))
ret$state = maps::state.fips$fips[tolower(maps::state.fips$abb) == state][1]
}
else if(by == "state") {
ret$state = "00"
}
ret$cancer <- cancer_value(cancer)
ret$race <- race_value(race)
ret$sex <- sex_value(sex)
ret$age <- age_value(age)
return(ret)
}
incidence_table_url <- function(by, state, cancer, race, sex, age) {
vals <- statistic_form_values(by, state, cancer, race, sex, age)
args <- list(
stateFIPS = vals$state,
cancer = vals$cancer,
race = vals$race,
type = "incd",
sortVariableName = "rate",
sortOrder = "desc",
output = "1"
)
if(!(cancer %in% c("childhood (ages <20, all sites)", "childhood (ages <15, all sites)"))) {
args$age = vals$age
}
url <- paste0("https://statecancerprofiles.cancer.gov/incidencerates/index.php?", query_args(args))
return(url)
}
mortality_table_url <- function(by, state, cancer, race, sex, age) {
vals <- statistic_form_values(by, state, cancer, race, sex, age)
args <- paste0(c(vals$state, vals$cancer, vals$race, vals$sex, vals$age, "0", "1", "1", "6"), collapse = "&")
url <- paste0("https://statecancerprofiles.cancer.gov/cgi-bin/deathrates/data.pl/death.csv?", args)
return(url)
}
#' @importFrom utils download.file
download_csv <- function(url, dest = tempfile(), delete_rows = c(), skip = 0, ...) {
download.file(url, destfile = dest, mode="wb")
# readlines complains when downloading mortality data that the last line isn't complete
# so we manually add a final endline character to be safe
out <- file(dest, 'a')
write("\n", file = out, append = TRUE)
close(out)
raw <- readLines(dest)
# Pilcrows are used to indicate when data is not available.
# replace pilcrows with "p"s for easier text handling.
raw <- stringr::str_replace_all(raw, "\U00B6", "p")
if(length(delete_rows) > 0) {
raw <- raw[-delete_rows]
}
# Cut off the first lines
raw <- raw[skip:length(raw)]
# Find the first blank line, then cut off everything after
last_row <- min(which(raw == ""))
raw <- raw[1:last_row]
dat <- readr::read_csv(paste(raw, collapse="\n"), ...)
readr::write_csv(dat, dest)
return(dat)
}
#' Download cancer incidence or mortality data.
#'
#' @param statistic "incidence" or "mortality"
#' @param by "county" or "state"
#' @param state restrict data download to one state; specify using state abbreviation (e.g. "MA"). Leave as NULL to retrieve data for all states.
#' @param cancer download data for a specific cancer type, see details for available options
#' @param race download data for a specific race
#' @param sex download data for a specific gender
#' @param age download data for a specific age group
#' @details
#'
#' Cancers:
#' \enumerate{
#' \item all
#' \item bladder
#' \item brain & ONS
#' \item breast (female)
#' \item breast (female in situ)
#' \item cervix
#' \item colon & rectum
#' \item esophagus
#' \item kidney & renal pelvis
#' \item leukemia
#' \item liver & bile duct
#' \item lung & bronchus
#' \item non-hodgkin lymphoma
#' \item melanoma of the skin
#' \item oral cavity & pharynx
#' \item ovary
#' \item pancreas
#' \item prostate
#' \item stomach
#' \item thyroid
#' \item uterus"
#' }
#'
#' @examples
#' \dontrun{
#' # Download county level bladder cancer incidence data
#' dat <- cancer_statistics(statistic = "incidence", by = "county", cancer = "bladder")
#' }
#'
#' @export
cancer_statistics <- function(statistic = "incidence", by = "county", state = NULL, cancer = "all", race = "all", sex = "all", age = "all") {
statistic <- tolower(statistic)
by <- tolower(by)
cancer <- tolower(cancer)
race <- tolower(race)
sex <- tolower(sex)
age <- tolower(age)
if(!is.null(state)) { state <- tolower(state) }
if(cancer == "breast (female)") { sex = "females" }
if(statistic == "incidence") {
url <- incidence_table_url(by, state, cancer, race, sex, age)
skip = 10
delete_rows = c()
col_names <- c(
"county",
"fips",
"incidence_rate",
"incidence_rate_95_confint_lower",
"incidence_rate_95_confint_upper",
"average_annual_count",
"recent_trend_description",
"recent_trend",
"recent_trend_95_confint_lower",
"recent_trend_95_confint_upper"
)
value_columns = col_names[c(3:6, 8:10)]
}
else {
url <- mortality_table_url(by, state, cancer, race, sex, age)
skip = 13
value_column = "mortality_rate"
delete_rows <- c(13)
col_names <- c(
"county",
"fips",
"met_objective",
"mortality_rate",
"mortality_rate_95_confint_lower",
"mortality_rate_95_confint_upper",
"average_deaths_per_year",
"recent_trend",
"recent_5_year_trend",
"recent_trend_95_confint_lower",
"recent_trend_95_confint_upper"
)
value_columns = col_names[c(4:7, 9:11)]
}
dat <- download_csv(url,
delete_rows = delete_rows,
skip = skip,
col_names = col_names,
col_types = strrep("c", length(col_names)))
dat$statistic = statistic
dat$cancer = cancer
dat$sex = sex
dat$age = age
dat$race = race
for(value_column in value_columns) {
dat[[value_column]] <- stringr::str_replace_all(dat[[value_column]], "3 or fewer", "")
dat[[value_column]] <- stringr::str_replace_all(dat[[value_column]], "[p#* ,]", "")
dat[[value_column]][dat[[value_column]] == ""] <- NA
dat[[value_column]] <- as.numeric(dat[[value_column]])
}
dat$fips <- as.numeric(dat$fips)
return(dat)
}
|
a4a08740f928f6d4457c5aaf4d6c2cd0cf019ea3
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/4279_15/rinput.R
|
38eeb1030c97477b48ce5b7495b788ae3783c5a1
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("4279_15.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4279_15_unrooted.txt")
|
f071214cc345a5612b672c233db293297c11b3da
|
b8ee42e013830b212f437290aae9e871e040bcfb
|
/Source Code/R Part/DeepLearning.R
|
58b43773d9138e96b146c4fd64fc48895177ecaa
|
[] |
no_license
|
AlenUbuntu/handwritten-digit-recognition
|
3d89d4ae01140f372c54dbd20f675c567b618099
|
419f7fef48a224ece36ed7fa257fe9e645186baf
|
refs/heads/master
| 2016-09-14T09:26:00.811450
| 2016-05-12T22:16:56
| 2016-05-12T22:16:56
| 58,679,764
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,390
|
r
|
DeepLearning.R
|
# Handwritten-digit recognition with an h2o deep neural network:
# train on train-small.csv, predict test-small.csv, write DNN_pred.csv,
# and report classification accuracy.

# Fix: only install h2o when it is missing -- the original re-installed the
# package unconditionally on every run.
if (!requireNamespace("h2o", quietly = TRUE)) install.packages("h2o")
library(h2o)
# NOTE(review): hard-coded Windows path, and no h2o.init() call is visible --
# this script appears to assume an h2o cluster is already running.
setwd("E:/Number Recognition")
#import training data
mnistPath <- 'E:/Number Recognition/train-small.csv'
mnist.hex <- h2o.importFile(path = mnistPath, destination_frame = "mnist.hex")
train <- as.data.frame(mnist.hex)
train$label <- as.factor(train$label)  # digit label must be categorical for classification
train_h2o <- as.h2o(train)
#training: two hidden layers of 800 units, ReLU with dropout regularisation
model <- h2o.deeplearning(x = 2:785,   # pixel columns (28x28 image)
                          y = 1,       # label column
                          training_frame = train_h2o,
                          activation = "RectifierWithDropout",
                          input_dropout_ratio = 0.2,
                          hidden_dropout_ratios = c(0.5,0.5),
                          balance_classes = TRUE,
                          hidden = c(800,800),
                          epochs = 500)
#import test data
testorigi <- read.table(file = "E:/Number Recognition/test-small.csv", header=TRUE, sep=",")
test_h2o <- h2o.importFile(path = 'E:/Number Recognition/test-small.csv', destination_frame = "test_h2o")
yhat <- h2o.predict(model, test_h2o)
ImageId <- as.numeric(seq(1,1000))
# NOTE(review): this names only the first element of a numeric vector; the
# column name actually comes from as.data.frame(ImageId) below.
names(ImageId)[1] <- "ImageId"
predictions <- cbind(as.data.frame(ImageId),as.data.frame(yhat[,1]))
names(predictions)[2] <- "Label"
write.table(as.matrix(predictions), file="DNN_pred.csv", row.names=FALSE, sep=",")
# Share of test rows whose predicted label matches the true label.
accuracy <- sum(predictions$Label==testorigi$label)/nrow(testorigi)
accuracy
|
3cb8b14a1b06e934c58ce9a512efa5f6079a6f57
|
87f16d2eb5d898c9eabe604b7beefaa824bdde2e
|
/pp.sst/scripts/03.side.05.univariate.rk.histo.R
|
cbb430f0eb7fa1e542d74e346754667bc5e312ea
|
[
"MIT"
] |
permissive
|
jasa-acs/Multivariate-Postprocessing-Methods-for-High-Dimensional-Seasonal-Weather-Forecasts
|
733cd54f189ecaee995f221023dfea7d41a85ff1
|
f27b25ac475128ddf3020d1c4e77fb513fbc2d46
|
refs/heads/master
| 2023-02-09T02:53:43.656133
| 2021-01-04T16:52:32
| 2021-01-04T16:52:32
| 325,870,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,524
|
r
|
03.side.05.univariate.rk.histo.R
|
#############################################################################
############# side script 3.5 - univariate rank histograms ################
#############################################################################
# This script generates univariate rank histogram plots (not shown in the paper).
##### setting up ######
rm(list = ls())
options(max.print = 1e3)
library(pp.sst)
name_abbr = "Full"
save_dir = file.path('~','SST','Derived', name_abbr)
load(file = paste0(save_dir,"setup.RData"))
###########
na_loc = which(DT[,is.na(SST_bar) | is.na(Ens_bar) ]) # land locations
# number of simulations and of breaks in the histogram: choose n = 10*k-1 and nbreaks = 10*k
n=99
nbreaks = 21
means = DT[-na_loc,][year %in% validation_years & month %in% months,SST_hat]
sds = DT[-na_loc,][year %in% validation_years & month %in% months,SD_hat]
obs = DT[-na_loc,][year %in% validation_years & month %in% months,SST_bar]
fcs = trc(rnorm(n*length(means),mean = means,sd = sds))
rh_mat = matrix(c(obs,fcs),ncol = n+1)
rank_mat = t(apply(rh_mat, MARGIN = 1, rank, ties = 'random'))
pdf(file=paste0(plot_dir,"rkh.pdf"))
hist(rank_mat[,1],
breaks=seq(0, n+1, length.out = nbreaks),
main = 'univariate rank histogram',
xlab = "", ylab = "", axes=FALSE, col="gray80", border="gray60")
abline(a=dim(rank_mat)[1]/(nbreaks-1), b=0, lty=2, col="gray30")
dev.off()
### time and save ###
time_s35 = proc.time() - time_s35
save.image(file = paste0(save_dir,"setup.RData"))
|
befa70452de3d93aa49610ae8b1c3c7aed11372e
|
3e2ef5d8d37072c54e4fd4bc5f09e797bcabbb17
|
/src/script_01_load_tidy_data.R
|
42d14bdff3587d884b7fbb70a1464333b6d1297a
|
[] |
no_license
|
UBC-MDS/DSCI_522_Income_Prediction
|
8a0b5f86f578afce1ad30b93ca5aa7a5c2dee617
|
4d7d0f278e98759bfe03d1e7099a03c9c4342a58
|
refs/heads/master
| 2020-04-07T03:30:16.974167
| 2018-12-09T00:32:09
| 2018-12-09T00:32:09
| 158,018,782
| 0
| 9
| null | 2018-12-09T00:32:10
| 2018-11-17T19:44:23
|
R
|
UTF-8
|
R
| false
| false
| 3,443
|
r
|
script_01_load_tidy_data.R
|
#! /usr/bin/env Rscript
# script_01_load_tidy_data.R
# Olivia Lin, Mani Kohli Nov 22 2018
# This script takes in the original data set and returns a cleaned data set for our project.
# This script should be run under the root folder.
# Usage: Rscript src/script_01_load_tidy_data.R arg1 arg2 arg3
# Description: arg1 = input data file, arg2 = output data file for visualization, arg3 = output data file for machine learning
# Example: Rscript src/script_01_load_tidy_data.R https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data data/tidy_data_viz.csv data/tidy_data_ml.csv
# import libraries/packages
library(tidyverse)
# parse/define command line arguments here
args <- commandArgs(trailingOnly = TRUE)
input_file <- args[1]
output_file_viz <- args[2]
output_file_ml <- args[3]
# data <- read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", col_names = FALSE)
data <- read_csv(input_file, col_names = FALSE)
# Data Wrangling ------------------------------------------------------------
# The raw Adult census file has no header row; attach the column names.
colnames(data) <- c("age", "workclass", "fnlwgt", "education", "educationNum", "married",
                    "occupation", "relationship", "race", "sex", "capitalGain", "capitalLoss",
                    "hrPerWeek", "nativeCountry", "income")
# Fix: the fct_collapse()/fct_relevel() calls previously referenced the
# original frame from inside mutate() (data$married, data_cleaned_viz$education).
# That only worked because no rows are dropped anywhere in this pipeline;
# referring to the piped column directly is equivalent here and stays correct
# if a row-filtering step is ever added upstream.
data_cleaned_viz <- data %>%
  mutate(workclass=factor(workclass), education=factor(education), married=factor(married),
         occupation=factor(occupation), relationship=factor(relationship),
         race=factor(race), sex=factor(sex), nativeCountry=factor(nativeCountry),
         income=factor(income)) %>%
  select(-occupation, -relationship, -nativeCountry, -workclass, -fnlwgt) %>%
  mutate(married=fct_collapse(married,
                              married = c("Married-AF-spouse", "Married-civ-spouse", "Married-spouse-absent"),
                              notMarried = c("Divorced", "Never-married", "Separated", "Widowed"))) %>%
  mutate(education=fct_collapse(education,
                                nonHSgrad = c("10th", "11th", "12th", "1st-4th", "5th-6th", "7th-8th", "9th", "Preschool"),
                                HSgrad = "HS-grad",
                                someCollege = "Some-college",
                                assoc = c("Assoc-acdm", "Assoc-voc"),
                                bachelors = "Bachelors",
                                masters = "Masters",
                                profSchool = "Prof-school",
                                doctorate = "Doctorate"))
# Order the education factor from lowest to highest attainment.
data_cleaned_viz <- data_cleaned_viz %>%
  mutate(education=fct_relevel(education, "nonHSgrad", "HSgrad", "someCollege", "assoc", "bachelors", "masters", "profSchool"))
#' Note
#' For variable `married`, 1 means not married and 2 means married
#' For variable `education`, 1 means not high school graduate, 2 means high school graduate, 3 means some college,
#' 4 means associate, 5 means bachelor's degree, 6 means master's degree, 7 means prof school, and 8 means doctorate,.
#' For variable `race`, 1 means Amer-Indian-Eskimo, 2 means Asian-Pac-Islander, 3 means Black, 4 means Other, 5 means White.
#' For variable `sex`, 1 means female and 2 means male.
#' For variable `income`, 1 means <=50K and 2 means >50K.
# Machine-learning copy: encode every factor as its integer level code.
data_cleaned_ml <- data_cleaned_viz %>%
  mutate_if(is.factor, as.numeric)
#Write output file
write_csv(data_cleaned_viz, output_file_viz)
write_csv(data_cleaned_ml, output_file_ml)
|
63b6977e7ee65ce18e88445645b581d649855189
|
9e98d6c50c13b4f8baa09de949cf6092bb27ec9f
|
/煤炭/coal0807.r
|
dbd4f4b888d9b4fa0dd9f8de03d2b1e2b17fd33d
|
[] |
no_license
|
DanieljcFan/genial-flow-model
|
620e8d048a12db3ed6a6c5a1bbbe22a34efbbbf3
|
6d464c6c5ed50fb44cbeb101e4598b040d85dc69
|
refs/heads/master
| 2021-01-19T23:20:54.933071
| 2017-08-31T09:27:50
| 2017-08-31T09:27:50
| 101,262,811
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 9,805
|
r
|
coal0807.r
|
# Coal-mining industry financial model: setup and data load -----------------
library(ggplot2)
library(corrplot)
# NOTE(review): setwd() inside a script is fragile; prefer running from the
# project root with relative paths.
setwd("I:/work/genial-flow/煤炭/")
# Financial statements of coal-mining companies (one row per filing).
coal <- read.csv('煤炭开采行业财务数据.csv')
coal$报告期 <- as.POSIXlt(coal$报告期, format='%Y/%m/%d')  # report date
coal$year <- as.numeric(format(coal$报告期, format='%Y'))  # report year
str(coal)
# report_clean(): clean company quarterly filings and compute the per-quarter
# change of a financial indicator.  Filings that cover several quarters are
# filled in by spreading the cumulated value evenly over the quarters covered.
# Report dates are assumed to fall at quarter ends; every day-of-month is
# normalised to the 30th in the output.
#
# Args:
#   name  company name (coerced to factor)
#   time  report date, POSIXlt with year/month/day
#   y     financial indicator to difference (numeric)
# Returns: data.frame with columns name, time, y
report_clean <- function(name, time, y){
  library(lubridate) # date management
  # inherits() is the robust class test (POSIXlt has class c("POSIXlt","POSIXt")).
  if(!inherits(time, 'POSIXlt')) return('Wrong: required POSIXlt for time')
  day(time) <- 30
  name <- as.factor(name)
  #make diff################
  # Rows that continue the previous row's company AND year are differenced
  # against their predecessor; all other rows keep their raw value.
  diff_name <- c(FALSE, abs(diff(as.numeric(name))) < 0.5)
  year <- as.numeric(format(time, format='%Y'))
  diff_year <- c(FALSE, diff(year) < 0.5)
  index <- as.numeric(diff_name & diff_year)
  dy <- c(y[1], diff(y))
  y <- dy*index + y*(1-index)
  #seasonal average################
  # dseason = number of quarters each filing covers; spread y evenly over them.
  dmonth <- c(as.numeric(format(time, format='%Y%m')[1]),
              diff(as.numeric(format(time, format='%Y%m'))))
  month <- as.numeric(format(time,format='%m'))
  dseason <- dmonth/3*index + month*(1-index)/3
  y <- rep(y/dseason, dseason)
  name <- rep(name, dseason)
  time <- rep(time, dseason)
  seasonal_index <- rep(dseason, dseason)
  index4 <- which(seasonal_index == 4)
  index3 <- which(seasonal_index == 3)
  index2 <- which(seasonal_index == 2)
  # Back-date the duplicated rows so every filled-in quarter carries its own
  # date.  seq_along() (instead of 1:length(...)) keeps these subscripts safe
  # when a span length never occurs and which() returns an empty vector --
  # 1:0 would otherwise yield c(1, 0) and corrupt the index.
  index_1 <- c(index2[seq_along(index2) %% 2 == 1],
               index3[seq_along(index3) %% 3 == 2],
               index4[seq_along(index4) %% 4 == 3])
  index_2 <- c(index3[seq_along(index3) %% 3 == 1],
               index4[seq_along(index4) %% 4 == 2])
  index_3 <- c(index4[seq_along(index4) %% 4 == 1])
  time[index_1] <- time[index_1] - months(3)
  time[index_2] <- time[index_2] - months(6)
  time[index_3] <- time[index_3] - months(9)
  de_y <- data.frame(name, time, y)
  return(de_y)
}
# Per-quarter net income for every company (columns: name, date, net income).
netin <- report_clean(coal$公司名称, coal$报告期, coal$净利润)
# write.csv(netin, file = 'netin.csv')
#calculation about net income########
netin <- netin[order(netin$time),]
ave <- aggregate(netin$y, by=list(netin$time), mean)   # industry mean per quarter
total <- aggregate(netin$y, by=list(netin$time), sum)  # industry total per quarter
netin <- merge(ave, total, by='Group.1')
names(netin) <- c('time', 'ave', 'sum')
# write.csv(merge(ave, total, by='Group.1'),file = '净利润.csv')
#show average###########
coal <- coal[order(coal$报告期),]
# Number of filings per report date; NOTE(review): `num` is not used below.
num <- aggregate(coal$净利润, by=list(coal$报告期), length)
# Scatter plots of the industry mean and total net income over time.
ggplot(as.data.frame(ave),aes(Group.1,x)) +
  geom_point() + labs(x='', title = '行业净利润平均值')
ggplot(as.data.frame(total),aes(Group.1,x)) +
  geom_point() + labs(x='', title = '行业净利润总值')
#chain read in and clean########
# Industry-chain factors (upstream/downstream indicators), one row per date.
chain <- read.csv('产业链相关因子汇总.csv', header = T)
chain$时间 <- as.POSIXlt(chain$时间, format='%Y/%m/%d')
for(i in 2:ncol(chain)){
  # Force every factor column to numeric (values were read as factors).
  chain[,i] <- as.numeric(as.character(chain[,i]))
}
# Ave(): average every indicator of a data set by month or by quarter.
# The time stamp is taken from column `t` (default: first column) and must be
# POSIXlt carrying year/month information.  For each bucket the average is
# sum(values) / count(non-zero values), with the count floored at 1 so an
# all-zero bucket never divides by zero.
#
# Args:
#   dat   data frame; column t holds POSIXlt dates, the rest are numeric
#   t     index of the time column (default 1)
#   type  'season' (quarterly, via zoo::as.yearqtr) or 'month'
# Returns: data.frame with a 'time' column followed by the averaged columns.
Ave <- function(dat, t=1, type='season'){
  library(zoo)
  # Bucket label for every row.
  if(type == 'season'){bucket <- as.yearqtr(dat[,t])}
  if(type == 'month'){bucket <- format(dat[,t], format='%Y-%m')}
  # Move the time column to the front so columns 2..ncol are the indicators.
  dat <- cbind(dat[,t],dat[,-t])
  cols <- lapply(2:ncol(dat), function(j){
    totals <- aggregate(dat[,j], by=list(bucket), sum, na.rm = TRUE)[,2]
    counts <- aggregate(dat[,j], by=list(bucket),
                        FUN = function(x) max(c(1, sum(abs(x) > 0.1, na.rm = TRUE))))[,2]
    totals / counts
  })
  out <- as.data.frame(matrix(unlist(cols), nrow=length(cols[[1]])))
  out <- cbind(unique(bucket), out)
  names(out) <- c('time', names(dat)[-1])
  return(out)
}
chain_season <- Ave(chain)                 # quarterly factor averages
chain_month <- Ave(chain, type = 'month')  # monthly factor averages
#power coal######
# Thermal-coal price quotes (Qinhuangdao port, Datong blend Q5800K).
power_coal <- read.csv('动力煤价格指标.csv', header = T)
power_coal$时间 <- as.POSIXlt(power_coal$时间, format = '%Y/%m/%d')
# Monthly average price: sum of quotes per month divided by the number of
# non-zero quotes; the count is floored at 1 to avoid division by zero.
p_coal <-
  aggregate(power_coal$秦皇岛港.平仓价.大同优混.Q5800K.,
            by=list(format(power_coal$时间, format='%Y-%m')),
            sum)[,2] /
  aggregate(power_coal$秦皇岛港.平仓价.大同优混.Q5800K.,
            by=list(format(power_coal$时间, format='%Y-%m')),
            FUN = function(x) max(c(1,sum(abs(x) > 0.1))))[,2]
# One row per month: month label and its average price.
price_coal <- data.frame(time = unique(format(power_coal$时间, format='%Y-%m')),
                         price = p_coal)
# One-month-ahead pairing: month t's label with month t+1's price, so the
# regression downstream predicts next month's price from current factors.
# Fix: the original dropped label element `length(price_coal)` -- but
# price_coal is a 2-column data.frame, so that always removed the SECOND
# month and misaligned every later label with its price.  Drop the LAST
# month label (`length(p_coal)`) so the n-1 labels line up with p_coal[-1].
price_coal_d <- data.frame(time = unique(format(power_coal$时间, format='%Y-%m'))[-length(p_coal)],
                           price = p_coal[-1])
########
# Join next-month price with monthly chain factors; zeros mean "no data".
dat_coal <- merge(price_coal_d, chain_month, by = 'time')
dat_coal[dat_coal == 0] <- NA
# Quick scatter of every factor against time.
for(i in 2:ncol(dat_coal)){
  print(qplot(dat_coal$time, dat_coal[,i], geom = 'point', main = names(dat_coal)[i]))
}
# NOTE(review): `vari` is only defined further below (after the stepwise
# fit), so this loop only works on an interactive re-run of the script.
for(i in vari[-c(3,9,10,12,13,15,16)]){
  print(qplot(dat_coal$time, dat_coal[,i], geom = 'point', main = names(dat_coal)[i]))
}
dat_coal <- dat_coal[,-1]   # drop the time column before modelling
dat_coal_scale <- dat_coal
for(i in 1:ncol(dat_coal_scale)){
  dat_coal_scale[,i] <- scale(dat_coal_scale[,i])  # standardise each column
}
# corr <- cor(dat_coal_scale[,which(names(dat_coal_scale) %in% names(lm_s$model))])
# which(abs(corr) > 0.8)
# corrplot.mixed(corr, tl.pos = 'n', diag = 'n')
# Stepwise linear model of the standardised price (columns 19:23 excluded).
lm_s <- lm(price~., data = dat_coal_scale[,-c(19:23)])
lm_s <- step(lm_s, scope = list(criterion = "BIC"))
summary(lm_s)
vari <- which(names(dat_coal_scale) %in% names(lm_s$model))
# Refit after manually dropping hand-picked (presumably collinear) regressors.
lm_1 <- lm(price~., data = dat_coal_scale[,vari[-c(3,9,10,12,13,15,16)]])
summary(lm_1)
which(names(dat_coal_scale) %in% names(lm_1$model))
lm1 <- lm(price~., data = dat_coal)
lm_aic <- step(lm1)   # AIC-based stepwise selection on the raw scale
summary(lm_aic)
#######
# One-quarter-ahead target: pair quarter t with quarter t+1's mean net income.
y_for <- data.frame(as.character(ave$Group.1)[-nrow(ave)], ave$x[-1])
names(y_for) <- c('season', 'netincome')
dat_chain <- merge(y_for, chain_season, by.x = 'season', by.y = '时间')
dat_chain <- dat_chain[,-1]
dim(dat_chain)
# Univariate explanatory power (R^2) of each factor for net income.
explain_power <- data.frame(names(dat_chain),value=0)
for(i in 2:ncol(dat_chain)){
  lm1 <- lm(netincome~., data = dat_chain[,c(1,i)])
  explain_power[i,2] <- 1 - var(lm1$residuals)/var(dat_chain$netincome)
}
explain_power[order(explain_power$value, decreasing = T),]
# Regress on the 20 strongest factors, then prune with stepwise selection.
lm1 <- lm(netincome~., data =
            dat_chain[,c(1, head(order(explain_power$value, decreasing = T),n=20))])
lm_bic <- step(lm1, scope = list(criterion = "BIC"))
summary(lm_bic)
#0808#####
#model for coal price#####
# Reload the 08-08 revision of the industry-chain factors.
x <- read.csv('产业链因子0808.csv')
names(x)[1] <- '时间'
x$时间 <- as.POSIXlt(x$时间, format='%Y/%m/%d')
for(i in 2:ncol(x)){
  x[,i] <- as.numeric(x[,i])  # force all factor columns numeric
}
names(x)
str(x)
# Raw-coal output series; NOTE(review): the original comment said columns
# 12-14 but the code sums 12:15 -- confirm which is intended.
x$原煤产量 <- rowSums(x[,12:15])
x <- x[,-c(12:15)]
str(x)
# Drop column 3 (rebar steel price).
x <- x[,-3]
# Urea prices: average the five quote columns into one series.
x1 <- read.csv('煤炭数据5-尿素.csv')
x1$时间 <- as.POSIXlt(x1$时间, format='%Y/%m/%d')
x1$尿素平均价 <- rowMeans(x1[,2:6])
str(x1)
x <- Ave(x, type = 'month')
x1 <- Ave(x1, type = 'month')
x <- merge(x, x1[,c(1,7)], by='time')
# Join next-month price with the monthly factors.
dat_coal <- merge(price_coal_d, x, by = 'time')
# for(i in 2:ncol(dat_coal)){
#   print(qplot(dat_coal$time, dat_coal[,i], geom = 'point', main = names(dat_coal)[i]))
#
# }
dat_coal[dat_coal == 0] <- NA   # zeros mean "no observation"
dat_coal <- dat_coal[,-1]       # drop the time column
#complete observation
# Keep only rows with no missing values.
na1 <- apply(dat_coal, 1, function(x){sum(is.na(x)) < 0.1})
dat_coal <- dat_coal[na1,]
dat_coal_s <- dat_coal
for(i in 1:ncol(dat_coal_s)){
  dat_coal_s[,i] <- scale(dat_coal_s[,i])  # standardise each column
}
# corr <- cor(dat_coal_s)
# corrplot.mixed(corr, tl.pos = 'n', diag = 'n')
# Full model plus AIC- and BIC-style stepwise reductions.
lm0 <- lm(price~., data = dat_coal_s)
lm_a <- step(lm0)
lm_b <- step(lm0, scope = list(criterion = "BIC"))
summary(lm0)
summary(lm_a)
summary(lm_b)
#hard input
# Manually prune the BIC selection, refit on the raw (unscaled) data.
index <- which(names(dat_coal_s) %in% names(lm_b$model))
lm1 <- lm(price~., data = dat_coal[,index[-c(2,4,6,9)]])
summary(lm1)
# Dump the final model summary and coefficients to a text file.
sink('动力煤价格2.txt')
summary(lm1)
print(lm1$coefficients)
sink()
#model for netincome ####
# Rebuild the factor set at quarterly frequency for the net-income model.
x <- read.csv('产业链因子0808.csv')
names(x)[1] <- '时间'
x$时间 <- as.POSIXlt(x$时间, format='%Y/%m/%d')
for(i in 2:ncol(x)){
  x[,i] <- as.numeric(x[,i])  # force all factor columns numeric
}
names(x)
str(x)
# Raw-coal output series; NOTE(review): comment vs code mismatch (12-14 vs 12:15).
x$原煤产量 <- rowSums(x[,12:15])
x <- x[,-c(12:15)]
str(x)
# Drop column 3 (rebar steel price).
x <- x[,-3]
# Urea prices: average the five quote columns into one series.
x1 <- read.csv('煤炭数据5-尿素.csv')
x1$时间 <- as.POSIXlt(x1$时间, format='%Y/%m/%d')
x1$尿素平均价 <- rowMeans(x1[,2:6])
str(x1)
x <- Ave(x, type = 'season')
x1 <- Ave(x1, type = 'season')
x <- merge(x, x1[,c(1,7)], by='time')
# Convert the net-income time axis to zoo's year-quarter class for merging.
netin[,1] <- as.yearqtr(netin[,1])
p_coal <- read.csv('煤炭数据6.csv')
str(p_coal)
names(p_coal)[1] <- 'time'
p_coal$time <- as.POSIXlt(p_coal$time, format = '%Y/%m/%d')
p_coal <- Ave(p_coal)
# After testing, column 9 ("秦皇岛港.平仓价.山西优混.Q5500K.") was selected.
# p_coal <- merge(netin[,1:2], p_coal, by='time')
# p_coal <- p_coal[,-1]
#
# sst <- var(p_coal$ave)
# r_sq <- rep(0, ncol(p_coal)-1)
# for(i in 2:ncol(p_coal)){
#   r_sq[i-1] <- 1-var(lm(ave~., p_coal[,c(1,i)])$residuals)/sst
# }
# names(r_sq) <- names(p_coal)[-1]
#
# r_sq[order(r_sq, decreasing = T)]
p_coal <- p_coal[,c(1,9)]
x <- merge(x, p_coal, by='time' )
# Quarterly mean net income joined with all factors.
dat_netin <- merge(netin[,1:2], x, by='time')
# NOTE(review): `t` shadows base::t() for the rest of the session.
t <- dat_netin[,1]
dat_netin <- dat_netin[,-1]
# Rows with any missing value; NOTE(review): used only for the output frame
# below -- lm() itself relies on its default NA handling.
na1 <- apply(dat_netin,1, function(x){sum(is.na(x))>0.5})
lm0 <- lm(ave~., dat_netin)
lm_a <- step(lm0)
summary(lm0)
summary(lm_a)
# Dump the selected model and its coefficients to a text file.
sink('煤炭企业净利润.txt')
summary(lm_a)
print(lm_a$coefficients)
sink()
# Actual vs fitted values on the complete-case rows.
df <- data.frame(time=t[!na1],
                 actual=dat_netin$ave[!na1],
                 pred=lm_a$fitted.values)
# df <- melt(df, id='time')
# ggplot(df, aes(time, value, color=variable)) + geom_line()
write.csv(df, file = '煤炭行业模型拟合.csv')
# Out-of-sample prediction for 2017 Q2 using the selected regressors.
index <- which(names(x) %in% names(lm_a$model)[-1])
predict(lm_a,x[x$time == '2017 Q2',index])
|
3443a1c541dc0c6ac2112f8de9186853b13940b7
|
3fe1a27ecb52798609e99b5c7a2af1b95166c03d
|
/man/raster.kmeans.Rd
|
9ce4926e7ba3bc0ee45ffe5d4f0086bb01c396e0
|
[] |
no_license
|
mbedward/ecbtools
|
6cbbfbe3c18734b75c42f0bd292aeab69f7bee69
|
16c5df2857f83fadfaac35447e21388daa69bfdb
|
refs/heads/master
| 2023-02-16T19:29:39.773238
| 2021-01-17T20:26:25
| 2021-01-17T20:26:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,967
|
rd
|
raster.kmeans.Rd
|
\name{raster.kmeans}
\alias{raster.kmeans}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
K-means Classification on a RasterStack
}
\description{
This function performs a k-means unsupervised classification of a stack of rasters. Number of clusters can be specified, as well as number of iterations and starting sets. An optional geographic weighting system can be turned on that constrains clusters to a geographic area, by including coordinates in the clustering. All variables are normalized before clustering is performed.
}
\usage{
raster.kmeans(x, k = 12, iter.max = 100, nstart = 10, geo = T, geo.weight = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A RasterStack object, or a string pointing to the directory where all raster layers are stored. All rasters should have the same extent, resolution and coordinate system.
}
\item{k}{
Number of clusters to classify to.
}
\item{iter.max}{
Maximum number of iterations allowed
}
\item{nstart}{
Number of random sets to be chosen.
}
\item{geo}{
True/False - should geographic weighting be used?
}
\item{geo.weight}{
A weighting multiplier indicating the strength of geographic weighting relative to the other variables. A value of 1 gives equal weight.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A raster object, of the same extent and coordinates as the input data, containing the cluster classification.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Grant Williamson
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ kmeans }
\keyword{ classify }
\keyword{ cluster }
\keyword{ raster }% __ONLY ONE__ keyword per line
|
d44017ab86dd67a63e87d5ae718274fb9ef024cb
|
41178f6b6b2e386393a0d5bc2b8128a89ec185a5
|
/R/permutation_functions.R
|
3980215f2bd37650a3625cb9a430c45c37eee0fe
|
[] |
no_license
|
kferris10/Kwproj
|
0b374d518db772942d6df1bf31058cc808b0692f
|
55f634dd6bb5000f6037ff203701f45a11200ce7
|
refs/heads/master
| 2021-01-19T17:22:34.723221
| 2015-04-06T03:52:16
| 2015-04-06T03:52:16
| 31,190,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,534
|
r
|
permutation_functions.R
|
#' Difference in Densities
#'
#' Computes the difference between the 2-D pitch-location density when the
#' pitcher is not responsible for the men on base and the density when he is.
#' Densities are estimated with \code{\link[MASS]{kde2d}}.
#'
#' @param npoints the number of grid points in each direction; densities are
#'   evaluated at each grid point.
#' @param data the data.frame containing pitch location (\code{px},
#'   \code{pz}) and pitcher responsibility (\code{resp_pit}). Defaults to
#'   \code{pit_LvL}.
#' @return An \code{npoints x npoints} matrix: density("not") - density("resp").
dens_diff <- function(npoints, data = pit_LvL) {
  # Split pitches by responsibility label.
  responsible <- filter(data, resp_pit == "resp")
  not_responsible <- filter(data, resp_pit == "not")
  # Kernel density estimates on the same-sized grid.
  dens_resp <- MASS::kde2d(responsible$px, responsible$pz, n = npoints)
  dens_not <- MASS::kde2d(not_responsible$px, not_responsible$pz, n = npoints)
  # Observed difference in densities.
  dens_not$z - dens_resp$z
}
#' Permuted Differences
#'
#' Randomly re-labels pitcher responsibility \code{B} times and records the
#' density difference for every permutation.
#'
#' @param B the number of permutations to run.
#' @param npoints the number of grid points in each direction, passed to
#'   \code{\link[MASS]{kde2d}}.
#' @param data the data.frame containing \code{px}, \code{pz} and
#'   \code{resp_pit}. Defaults to \code{pit_LvL}.
#' @return An \code{npoints x npoints x B} array of permuted differences.
perm_diffs <- function(B, npoints, data = pit_LvL) {
  diffs <- array(NA, dim = c(npoints, npoints, B))
  for (b in 1:B) {
    # Shuffle the responsibility labels across all pitches.
    shuffled <- sample(data$resp_pit)
    d_resp <- filter(data, shuffled == "resp")
    d_not <- filter(data, shuffled == "not")
    dens_resp <- MASS::kde2d(d_resp$px, d_resp$pz, n = npoints)
    dens_not <- MASS::kde2d(d_not$px, d_not$pz, n = npoints)
    diffs[, , b] <- dens_not$z - dens_resp$z
  }
  diffs
}
#' Calculate Permuted P-values
#'
#' For each density grid point, finds the proportion of permuted density
#' differences whose magnitude exceeds the observed one (a two-sided
#' permutation p-value).
#'
#' @param obs_diff The observed difference in densities. Obtained from \code{\link{dens_diff}}.
#' @param perms An array of the permuted densities. Obtained from \code{\link{perm_diffs}}.
#' @return An \code{npoints x npoints} matrix of p-values.
calc_pvals <- function(obs_diff, perms) {
  # number of grid points
  npoints <- nrow(obs_diff)
  pvals <- matrix(NA, nrow = npoints, ncol = npoints)
  for (i in seq_len(npoints)) {
    for (j in seq_len(npoints)) {
      # Compare |permuted| against |observed| directly.  The original built a
      # throwaway one-column data.frame on every grid cell, which was pure
      # overhead; the result is identical.
      pvals[i, j] <- mean(abs(perms[i, j, ]) > abs(obs_diff[i, j]))
    }
  }
  pvals
}
#' Obtain p-values from Permutations
#'
#' Convenience wrapper around \code{\link{dens_diff}}, \code{\link{perm_diffs}}
#' and \code{\link{calc_pvals}} that yields a p-value at each grid point.
#'
#' @param B the number of permutations to run.
#' @param npoints the number of grid points in each direction.
#' @param data the data.frame containing pitch location and pitcher
#'   responsibility. Defaults to \code{pit_LvL}.
#' @param obs_diff Optional precomputed observed difference (from
#'   \code{\link{dens_diff}}); computed here when \code{NULL}.
#' @param perm_dens Optional precomputed permutation array (from
#'   \code{\link{perm_diffs}}); computed here when \code{NULL}.
#' @return An \code{npoints x npoints} matrix of permutation p-values.
perm_pvals <- function(B, npoints, data = pit_LvL,
                       obs_diff = NULL, perm_dens = NULL) {
  observed <- if (is.null(obs_diff)) dens_diff(npoints, data) else obs_diff
  permuted <- if (is.null(perm_dens)) perm_diffs(B, npoints, data) else perm_dens
  calc_pvals(observed, permuted)
}
#' Turns matrix of p-values into a \code{\link{tbl}} in long format.
#'
#' @param dat matrix of p-values. Each row/column combination corresponds to a
#'   horizontal/vertical location in the strikezone. Obtained from
#'   \code{\link{perm_pvals}}.
#' @param npoints number of grid points per axis; defaults to \code{nrow(dat)}.
#' @return A tbl with columns \code{row}, \code{col}, \code{pval}.
pvals_to_long <- function(dat, npoints = nrow(dat)) {
  # gather() is superseded by pivot_longer(), but it is kept deliberately:
  # it stacks the matrix column by column (column-major), which is exactly
  # the order the rep() index bookkeeping below assumes.
  dat %>%
    data.frame() %>%
    tbl_df() %>%
    gather(location, pval) %>%
    # seq_len() instead of 1:npoints so a degenerate 0-row matrix cannot
    # produce the c(1, 0) index sequence.
    mutate(col = rep(seq_len(npoints), each = npoints),
           row = rep(seq_len(npoints), npoints)) %>%
    select(row, col, pval)
}
|
c34341c6eed48567ec82fc136df2a9061b8198c0
|
f58a1c8b5043afb99cfcb386dbddc30fb43becf5
|
/man/gatherData.Rd
|
89e1a632a98c6a75fed509461413b6dd0c373000
|
[] |
no_license
|
MassBank/RMassBank
|
b00fe69f7c0026c13b7fe337522ceaa469ee8340
|
3b61006a1a4bac9c94e780ad82834a1dae9ce417
|
refs/heads/main
| 2023-08-16T18:02:42.074467
| 2023-05-04T09:51:53
| 2023-05-04T09:51:53
| 9,348,686
| 12
| 15
| null | 2023-07-28T09:20:16
| 2013-04-10T15:01:01
|
R
|
UTF-8
|
R
| false
| true
| 1,822
|
rd
|
gatherData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createMassBank.R
\name{gatherData}
\alias{gatherData}
\title{Retrieve annotation data}
\usage{
gatherData(id)
}
\arguments{
\item{id}{The compound ID.}
}
\value{
Returns a list of type \code{list(id= \var{compoundID}, ...,
'ACCESSION' = '', 'RECORD_TITLE' = '', )} etc. %% ...
}
\description{
Retrieves annotation data for a compound from the internet services CTS, Pubchem, Chemspider and
Cactvs, based on the SMILES code and name of the compounds stored in the
compound list.
}
\details{
Composes the "upper part" of a MassBank record filled with chemical data
about the compound: name, exact mass, structure, CAS no., links to PubChem,
KEGG, ChemSpider. The instrument type is also written into this block (even
if not strictly part of the chemical information). Additionally, index
fields are added at the start of the record, which will be removed later:
\code{id, dbcas, dbname} from the compound list, \code{dataused} to indicate
the used identifier for CTS search (\code{smiles} or \code{dbname}).
Additionally, the fields \code{ACCESSION} and \code{RECORD_TITLE} are
inserted empty and will be filled later on.
}
\examples{
# Gather data for compound ID 131
\dontrun{gatherData(131)}
}
\references{
Chemical Translation Service:
\url{http://uranus.fiehnlab.ucdavis.edu:8080/cts/homePage}
cactus Chemical Identifier Resolver:
\url{http://cactus.nci.nih.gov/chemical/structure}
MassBank record format:
\url{http://www.massbank.jp/manuals/MassBankRecord_en.pdf}
Pubchem REST:
\url{https://pubchem.ncbi.nlm.nih.gov/pug_rest/PUG_REST.html}
Chemspider InChI conversion:
\url{https://www.chemspider.com/InChI.asmx}
}
\seealso{
\code{\link{mbWorkflow}}
}
\author{
Michael Stravs
}
|
b2fdeaa0b5d9b28b9985f7660cb0dad0999eeca7
|
ef86e9a140f9c234e37849399ce5d8aeb7d727f1
|
/R/VisualizeVariable.R
|
d9a31b4558d5e6386ec019e8ccef86626f70ed47
|
[
"MIT"
] |
permissive
|
MartinLBarron/CodebookGenerator
|
926d7f47f3323a3393182926ac1e5462a4b394db
|
03d3f7cece9bac3595545e8fb28c346dceefcbc1
|
refs/heads/master
| 2021-04-06T10:44:03.499843
| 2019-03-31T02:22:07
| 2019-03-31T02:22:07
| 124,551,406
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,368
|
r
|
VisualizeVariable.R
|
# Build, as a string of R code, the plotting call that will visualise one
# variable of `codebookDF`: a bar chart for categorical / low-cardinality
# variables, otherwise a histogram with a normal-density overlay.
# Args: var = the variable's values; varName = its column name in codebookDF.
# Returns the code string (invisibly, since the function ends on an
# assignment in both branches).
VisualizeVariable_string <- function(var, varName){
  varClass <- class(var)
  numUnique <- length(unique(var))
  #if there are fewer than 10 levels or it's a character/factor, we'll use a bar
  if (numUnique<=10 | varClass %in% c("factor","character")){
    # Generated code factorises a copy of the column, then draws a bar chart.
    vis <- sprintf("codebookDF['%s']<-factor(codebookDF[['%s']])\n
ggplot(data=codebookDF)+
geom_bar(aes(%s))+
theme_minimal()",
paste0(varName,"_new"),
varName,
paste0(varName,"_new"))
  } else {
    #all other variables get a histogram
    # Generated code computes bin width, mean, sd and n so the normal density
    # can be scaled to the histogram counts before being overlaid in red.
    vis <- sprintf("var <- codebookDF[['%s']]\nbw=(max(var, na.rm=T)-min(var, na.rm=T))/30\nm <- mean(var, na.rm=T)\nsd <- sd(var, na.rm=T)\nn <- length(!is.na(var))\n
class(codebookDF[['%s']]) <- NULL
ggplot(data=codebookDF)+
geom_histogram(aes(%s),color='black',bins=30)+
theme_minimal()+
stat_function(fun = function(x, mean, sd, n, bw){
dnorm(x = x, mean = mean, sd = sd) * bw * n},
args = c(mean = m, sd = sd, n = n, bw = bw), color='red')
",varName, varName, varName)
  }
}
# NOTE(review): leftover interactive test inputs; consider removing them from
# the committed package source.
df <- iris
varName <- "Sepal.Width"
# Visualise one variable of a data frame: a bar chart for categorical /
# low-cardinality variables, otherwise a histogram with a normal-density
# overlay (red curve) -- the direct-execution counterpart of
# VisualizeVariable_string().
#
# Args:
#   df       data frame containing the variable
#   varName  name of the column to visualise (string)
# Returns: a ggplot object.
#
# Fix: the original body was a half-edited copy of the string template -- it
# referenced the undefined `codebookDF` and lowercase `varname`, left literal
# '%s' placeholders in the code, and used length(!is.na(var)) (which counts
# NAs too) for n.  This version operates on the `df` argument and resolves
# the column dynamically.
VisualizeVariable <- function(df, varName){
  values <- df[[varName]]
  varClass <- class(values)
  numUnique <- length(unique(values))
  if (numUnique <= 10 | varClass %in% c("factor", "character")) {
    # Bar chart of the factorised variable.
    df[[varName]] <- factor(values)
    ggplot(data = df) +
      geom_bar(aes(.data[[varName]])) +   # .data pronoun requires ggplot2 >= 3.0
      theme_minimal()
  } else {
    # Histogram plus a normal density scaled to the histogram counts.
    bw <- (max(values, na.rm = TRUE) - min(values, na.rm = TRUE)) / 30
    m <- mean(values, na.rm = TRUE)
    s <- sd(values, na.rm = TRUE)
    n <- sum(!is.na(values))   # count of non-missing observations
    ggplot(data = df) +
      geom_histogram(aes(.data[[varName]]), color = 'black', bins = 30) +
      theme_minimal() +
      stat_function(fun = function(x, mean, sd, n, bw) {
        dnorm(x = x, mean = mean, sd = sd) * bw * n
      }, args = c(mean = m, sd = s, n = n, bw = bw), color = 'red')
  }
}
|
62c563d62c644c33cb558fd2740538b83ecfb0aa
|
004752a3f8ab357fd3c5ca0332955f94bdd25957
|
/RScripts/Data processing/Prior to reconditioning 2020/Define fleets.R
|
63ae80a2c5b3d54d4314d14a20e0f5fd4640ecc4
|
[] |
no_license
|
pl202/abft-mse
|
29a6b7da92e7606a771a12fa8c3305474a2e4f37
|
7fb761b53aff271f0bd19b77659a16255b67b6b9
|
refs/heads/master
| 2022-09-20T07:09:04.432760
| 2022-08-02T22:45:22
| 2022-08-02T22:45:22
| 97,934,118
| 0
| 0
| null | 2017-07-21T10:04:33
| 2017-07-21T10:04:33
| null |
UTF-8
|
R
| false
| false
| 1,795
|
r
|
Define fleets.R
|
# Define fleets.r
# August 2016
# R Script for defining fleets for this operating model
Fleets<-new('list') # note you must put ALL flag types ahead of specific fleet types of same gear - sequential fleet assignment
Fleets$name<-c("LLOTH","LLJPN","BBold_E","BBold_SE","BBnew","PSMedRec","PSMedLOld","PSMedSOld","TPOld","TPnew","RRCan","RRUSA")
Fleets$gearTC<-c("LL", "LL", "BB", "BB", "BB", "PS" , "PS", "PS", "TP", "TP", "RR", "RR")
Fleets$flag<- c("ALL","JPN", "ALL", "ALL", "ALL", "ALL", "ALL", "ALL", "ALL", "ALL", "CAN", "USA")
Fleets$FyS<- c( 1994, 1960, 1960, 1960, 2009, 2009, 1960, 1960, 1960, 2009, 1988, 1988)
Fleets$FyE<- c( 2016, 2016, 2008, 2008, 2016, 2016, 2008, 2008, 2008, 2016, 2016, 2016)
Fleets$Loc<-new('list') # What areas?
Fleets$Loc[[1]]<-1:7 # LLOTH
Fleets$Loc[[2]]<-1:7 # LLJPN
Fleets$Loc[[3]]<-6 # BBold_E
Fleets$Loc[[4]]<-4 # BBold_SE
Fleets$Loc[[5]]<-1:7 # BBnew
Fleets$Loc[[6]]<-7 # PSMedRec
Fleets$Loc[[7]]<-7 # PSMedLOld
Fleets$Loc[[8]]<-7 # PSMedSOld
Fleets$Loc[[9]]<-1:7# TPOld
Fleets$Loc[[10]]<-1:7# TPnew
Fleets$Loc[[11]]<-1:7# RRCan
Fleets$Loc[[12]]<-1:7# RRUSA
Fleets$Q<-new('list') # What quarters of the year?
Fleets$Q[[1]]<-1:4 # LLOTH
Fleets$Q[[2]]<-1:4 # LLJPN
Fleets$Q[[3]]<-1:4 # BBold_E
Fleets$Q[[4]]<-1:4 # BBold_SE
Fleets$Q[[5]]<-1:4 # BBnew
Fleets$Q[[6]]<-1:4 # PSMedRec
Fleets$Q[[7]]<-2 # PSMedLOld
Fleets$Q[[8]]<-c(1,3,4) # PSMedSOld
Fleets$Q[[9]]<-1:4 # TPOld
Fleets$Q[[10]]<-1:4 # TPnew
Fleets$Q[[11]]<-1:4 # RRCan
Fleets$Q[[12]]<-1:4 # RRUSA
save(Fleets,file=paste(getwd(),"/Data/Processed/Conditioning/Fleets",sep=""))
|
6506aa30f7720a2386539e4d78eeff90d8796b21
|
3b4b9d957c2b6b7624e2ed1d0925667dc24b0150
|
/plot1.R
|
f04ff95223ce83ba443ae431ff478d94257ea9cb
|
[] |
no_license
|
santoshgsk/ExData_Plotting1
|
3993f5e66d1426e555c09cd0d74c67884928b580
|
dadb662fcd2dd344182056e8009dcc733f79acb0
|
refs/heads/master
| 2020-07-10T00:04:41.228795
| 2014-05-10T20:54:27
| 2014-05-10T20:54:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,217
|
r
|
plot1.R
|
# Reading only the subset data for Feb 1st, Feb 2nd of 2007
subsetData <- read.csv("household_power_consumption.txt", sep=";", skip=66637, nrow=2880, na.strings=c("?"), colClasses=c(rep("character",2), rep("numeric", 7)))
# To associate the column names, reading the header line into a temporary data frame
forColNames <- read.csv("household_power_consumption.txt", sep=";", header=T, nrow=1)
# Assigning the actual column names to the subset data
colnames(subsetData) <- colnames(forColNames)
# Pasting the date and time and reading them as Date structures
dateTime <- strptime(paste(subsetData[,1], subsetData[,2]), format="%d/%m/%Y %H:%M:%S")
# Appending the Date structures read into an additional column named "datatime"
subsetData$datetime <- dateTime
# Setting the global parameters before printing the plot
par(mfrow=c(1,1), mar=c(4,4,2,2))
# Open the png file device with the appropriate file name
png(filename="plot1.png")
# plotting the Plot No. 1 as illustrated in the Course Project Instructions
# It is a histogram plot of Global Active Power
hist(subsetData$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
# closing the file device
dev.off()
|
787222b528dda33260d662408abf2e09f3ba2ec6
|
54619814c48f9f41c427ced3da8159c47316e601
|
/exploring_FISH.R
|
5e49bb4db4743f668431248cd1433f2de081eb93
|
[] |
no_license
|
amarseg/RNA_seq
|
f40f11d97c3a64a18ba9e413bbdaec3453214c53
|
3d07eb8b77e0f03a6c8ef610798f3af3a434c03e
|
refs/heads/master
| 2021-05-04T10:56:34.146765
| 2017-08-14T09:39:11
| 2017-08-14T09:39:11
| 36,794,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
exploring_FISH.R
|
##Processing FISH QUANT data
library('plyr')
library('lattice')
par(mfrow = c(1,1))
probe_list <- read.delim('C:/Users/am4613/OneDrive/Summaries_as_timecourses/Probe_list.txt', header = T, strings = F)
setwd('C:/Users/am4613/Documents/FISH_QUANT/Results_mature/Collated_results/')
file_list <- dir()
file_list <- file_list[grep(file_list, pattern = 'Results')]
data <- list()
for(i in 1:length(file_list))
{
data[[i]] <- read.delim(file_list[i], header = T, sep = '\t')
}
data_df <- ldply(data, data.frame)
data_df$Channel <- as.factor(data_df$Channel)
data_df$Gene <- NA
data_df$Acession <- NA
for(i in 1:nrow(probe_list))
{
gene <- probe_list[i,]
data_df[data_df$Mix == gene$Mix & data_df$Channel == gene$Channel,]$Gene <- gene$Gene
data_df[data_df$Mix == gene$Mix & data_df$Channel == gene$Channel,]$Acession <- gene$Accession
}
medians <- aggregate(data_df$Spots ~ data_df$Channel*data_df$Mix, data_df, median)
medians_v <- medians[,3]
mypanel <- function(x,y,...)
{
panel.xyplot(x,y,...)
panel.text(30,33, labels = medians_v)
}
densityplot(~data_df$Spots|data_df$Gene)
write.table(data_df,'results_FQ.txt', sep = '\t')
write.table(medians, 'median_result_FQ.txt', sep = '\t')
|
87ce7d97da526dffce2ade55c506de18fd3d9cc3
|
3b9cf7794cd1a906f028536c399538b002fbcff0
|
/man/doF3Test.Rd
|
149a3a52544c4aa1996a25e8ec169544c94921d3
|
[] |
no_license
|
martinsikora/admixr
|
d46029ff534591e8eee625fbe05c1e559bd8b8a3
|
e14f63bc44c5b88be085ea2652174541cb2857ee
|
refs/heads/master
| 2020-12-30T14:56:37.288491
| 2015-05-15T17:34:40
| 2015-05-15T17:34:40
| 35,686,620
| 16
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 883
|
rd
|
doF3Test.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/fstats.R
\name{doF3Test}
\alias{doF3Test}
\title{F3 test}
\usage{
doF3Test(p, idxM, blockIdx)
}
\arguments{
\item{p}{allele counts array, dimensions SNPs x individuals x 2}
\item{idxM}{matrix of dimensions 1 x 3 with population labels for test
configuration f3(test;ref1,ref2)}
\item{blockIdx}{index vector of chromosome blocks for jackknife}
}
\value{
a data frame with f3 results
\itemize{
\item p1, p2, p3 .. test configuration
\item f3 .. estimate
\item se, Z .. standard error and Z score from block jackknife
\item nSites .. number of informative SNPs used in the test
}
}
\description{
Takes an array of allele counts input and does f3 test as in
Patterson et al. 2012 on specified input populations; corresponds
to "f3 outgroup" statistic if 'test' is an outgroup population
}
|
948bec1973acc19cdca565a85a83e353f1bcce85
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/drgee/man/drgee.Rd
|
dec78b882c05780d2408994516c3979c88f6cb61
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 13,870
|
rd
|
drgee.Rd
|
\encoding{latin1}
\name{drgee}
\alias{drgee}
\alias{coef.drgee}
\alias{vcov.drgee}
\alias{print.drgee}
\alias{summary.drgee}
\alias{print.summary.drgee}
%\alias{naiveVcov.drgee}
\alias{clusterRobustVcov.drgee}
\title{Doubly Robust Generalized Estimating Equations}
\description{
\code{drgee} is used to estimate an exposure-outcome effect adjusted
for additional covariates. The estimation is based on regression
models for the outcome, exposure or a combination of both.
For clustered data the models may
have cluster-specific intercepts.
}
\usage{
drgee(outcome, exposure,
oformula, eformula, iaformula = formula(~1),
olink = c("identity", "log", "logit"),
elink = c("identity", "log", "logit"),
data, subset = NULL, estimation.method = c("dr", "o", "e"),
cond = FALSE, clusterid, clusterid.vcov, rootFinder = findRoots,
intercept = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{outcome}{
The outcome as variable or as a character string naming a variable
in the \code{data} argument. If missing, the outcome is assumed
to be the response of \code{oformula}.
}
\item{exposure}{
The exposure as variable or as a character string naming a variable
in the \code{data} argument. If missing, the exposure is assumed
to be the response of \code{eformula}.
}
\item{oformula}{
An expression or formula for the outcome nuisance model.
}
\item{eformula}{
An expression or formula for the exposure nuisance model.
}
\item{iaformula}{
An expression or formula where the RHS should contain the variables
that "interact" (i.e. are supposed to be multiplied with) with the
exposure in the main model. "1" will always added. Default value is no
interactions, i.e. \code{iaformula = formula(~1)}.
}
\item{olink}{
A character string naming the link function in the outcome nuisance
model. Has to be \code{"identity"}, \code{"log"} or
\code{"logit"}.Default is \code{"identity"}.
}
\item{elink}{
A character string naming the link function in the exposure nuisance
model. Has to be \code{"identity"}, \code{"log"} or
\code{"logit"}. Default is \code{"identity"}.
}
\item{data}{
A data frame or environment containing the variables used.
If missing, variables are expected to be found in the
calling environment of the calling environment.
}
\item{subset}{
An optional vector defining a subset of the data to be used.
}
\item{estimation.method}{
A character string naming the desired estimation method. Choose
\code{"o"} for O-estimation,
\code{"e"} for E-estimation or
\code{"dr"} for DR-estimation. Default is \code{"dr"}.
}
\item{cond}{
A logical value indicating whether the nuisance models should have
cluster-specific intercepts. Requires a \code{clusterid} argument.
}
\item{rootFinder}{
A function to solve a system of non-linear equations. Default
is \code{findRoots}.
}
\item{clusterid}{
A cluster-defining variable or a character string naming a
cluster-defining variable in the \code{data} argument. If it is not
found in the \code{data} argument, it will be searched for in the
calling frame. If missing, each observation will be considered to be
a separate cluster. This argument is required when \code{cond = TRUE}.
}
\item{clusterid.vcov}{
A cluster-defining variable or a character string naming a
cluster-defining variable in the \code{data} argument to be used for
adding contributions from the same cluster. These clusters can be
different from the clusters defined by \code{clusterid}. However,
each cluster defined by \code{clusterid} needs to be contained in
exactly one cluster defined by \code{clusterid.vcov}. This variable
is useful when the clusters are hierarchical.
}
\item{intercept}{
A boolean to choose whether the nuisance parameters in doubly robust
conditional logistic regression should be fitted with a model with an
intercept. Only used for doubly robust condtional logistic regression.
}
\item{\dots}{
Further arguments to be passed to the function \code{rootFinder}.
}
}
\details{
\code{drgee} estimates the parameter \eqn{\beta}{beta} in a main
model \eqn{g\{E(Y|A,L)\}-g\{E(Y|A=0,L)\}=\beta^T \{A\cdot X(L)\}}{g\{E(Y|A,L)-g\{E(Y|A,L)\}=beta^T (A * X(L))\}},
where \eqn{Y} is the outcome of interest, \eqn{A} is the exposure of
interest, and \eqn{L} is a vector of covariates that we wish to
adjust for. \eqn{X(L)} is a vector valued function of \eqn{L}. Note that \eqn{A
\cdot X(L)}{A*X(L)} should be interpreted as a columnwise
multiplication and that \eqn{X(L)} will always contain a column of 1's.
Given a specification of an outcome nuisance model \eqn{g\{E(Y|A=0,L)=\gamma^T
V(L)}{g\{E(Y|A=0,L)=gamma^T V(L)} (where \eqn{V(L)} is a function of \eqn{L})
O-estimation is performed. Alternatively, leaving \eqn{g\{E(Y|A=0,L)}
unspecified and using an exposure nuisance model \eqn{h\{E(A|L)\}=\alpha^T
Z(L)}{h\{E(Y|L)\}=alpha^T Z(L)} (where \eqn{h} is a link
function and \eqn{Z(L)} is a function of \eqn{L}) E-estimation
is performed. When \eqn{g} is logit, the exposure nuisance
model is required be of the form
\eqn{logit\{E(A|Y=0,L)\}=\alpha^T Z(L)}{logit\{E(A|Y=0,L)\}=alpha^T Z(L)}.
In this case the exposure needs to binary.
Given both an outcome and an exposure nuisance model, DR-estimation can be
performed. DR-estimation gives a consistent estimate of the parameter
\eqn{\beta}{beta} when either the outcome nuisance model or
the exposure nuisance model
is correctly specified, not necessarily both.
Usage is best explained through an example. Suppose that we are
interested in the parameter vector \eqn{(\beta_0,
\beta_1)}{(beta_0,beta_1)} in a main model
\eqn{logit\{E(Y|A,L_1,L_2)\}-logit\{E(Y|A=0,L_1,L_2)\}=\beta_0 A + \beta_1
A \cdot L_1}{logit\{E(Y|A,L_1,L_2)-logit\{E(Y|A=0,L_1,L_2)\}=beta_0 A + beta_1 A *
L_1} where \eqn{L_1} and \eqn{L_2} are the covariates that we wish
to adjust for. To adjust for \eqn{L_1} and \eqn{L_2}, we can use an outcome
nuisance model \eqn{E(Y|A=0,L_1,L_2;\gamma_0, \gamma_1)=\gamma_0 + \gamma_1
L_1}{E(Y|A=0,L_1,L_2;gamma_0, gamma_1)=gamma_0 + gamma_1 L_1} or an
exposure nuisance model \eqn{logit\{E(A|Y=0,L_1,L_2)\}=\alpha_0+\alpha_1
L_1+\alpha_2 L_2}{logit\{E(A|Y=0,L_1,L_2)\}=alpha_0+alpha_1
L_1+alpha_2 L_2} to calculate estimates of \eqn{\beta_0}{beta_0} and \eqn{\beta_1}{beta_1}
in the main model. We specify the outcome nuisance model as \code{oformula=Y~L_1}
and \code{olink = "logit"}. The exposure nuisance model is specified as
\code{eformula = A~L_1+L_2} and \code{elink = "logit"}.
Since the outcome \eqn{Y} and the exposure \eqn{A} are
identified as the LHS of \code{oformula} and \code{eformla}
respectively and since the outcome link is specified in the
\code{olink} argument,
the only thing left to specify for the main model is the
(multiplicative) interactions \eqn{A\cdot X(L)=A\cdot
(1,L_1)^T}{A X(L)=A (1,L_1)^T}. This
is done by specifying \eqn{X(L)} as
\code{iaformula = ~L_1}, since \eqn{1} is always included in \eqn{X(L)}.
We can then perform O-estimation, E-estimation or DR-estimation by
setting \code{estimation.method} to \code{"o"},
\code{"e"} or \code{"dr"} respectively. O-estimation uses only the
outcome nuisance model, and E-estimation uses only the exposure
nuisance model. DR-estimation uses both nuisance models, and gives a
consistent estimate of \eqn{(\beta_0,\beta_1)}{(beta_0,beta_1)} if either nuisance model is correct, not necessarily both.
When \code{estimation.method = "o"}, the RHS of \code{eformula} will be
ignored. The \code{eformula} argument can also be replaced by an \code{exposure}
argument specifying what the exposure of interest is.
When \code{estimation.method = "e"}, the RHS of \code{oformula} will be
ignored. The \code{oformula} argument can also be replaced by an \code{outcome}
argument specifying what the outcome of interest is.
When \code{cond = TRUE} the nuisance models will be assumed to have
cluster-specific intercept. These intercepts will not estimated.
When E-estimation or DR-estimation is chosen with
\code{olink = "logit"}, the exposure link will be
changed to \code{"logit"}. Note that this choice
of outcome link does not work for DR-estimation
when \code{cond = TRUE}.
Robust variance for the estimated parameter is calculated
using the function \code{robVcov}. A cluster robust variance is calculated when
a character string naming a cluster variable is
supplied in the \code{clusterid} argument.
For E-estimation when \code{cond = FALSE} and \eqn{g} is the identity
or log link, see Robins et al. (1992).
For DR-estimation when \code{cond = TRUE} and \eqn{g} is the identity
or log link, see Robins (1999). For DR-estimation when
\eqn{g} is the logit link, see Tchetgen et al. (2010).
O-estimation can also be performed using the \code{gee} function.
}
\value{
\code{drgee} returns an object of class \code{drgee} containing:
\item{coefficients }{Estimates of the parameters in the main model.}
\item{vcov }{Robust variance for all main model parameters.}
\item{coefficients.all }{Estimates of all estimated parameters.}
\item{vcov.all }{Robust variance of the all parameter estimates.}
\item{optim.object }{An estimation object returned from the function specified
in the \code{rootFinder}, if this function is called for the
estimation of the main model parameters.}
\item{optim.object.o }{An estimation object returned from the function specified
in the \code{rootFinder}, if this function is called for the
estimation of the outcome nuisance parameters.}
\item{optim.object.e }{An estimation object returned from the function specified
in the \code{rootFinder}, if this function is called for the
estimation of the outcome nuisance parameters.}
\item{call }{The matched call.}
\item{estimation.method }{The value of the input argument \code{estimation.method}.}
\item{data}{The original data object, if given as an input argument}
\item{oformula}{The original oformula object, if given as an input argument}
\item{eformula}{The original eformula object, if given as an input argument}
\item{iaformula}{The original iaformula object, if given as an input argument}
The class methods \code{coef} and \code{vcov} can be used to extract
the estimated parameters and their covariance matrix from a
\code{drgee} object. \code{summary.drgee} produces a summary of the
calculations.
}
\seealso{
\code{\link{gee}} for O-estimation, \code{\link{findRoots}} for
nonlinear equation solving and \code{\link{robVcov}} for
estimation of variance.
}
\author{
Johan Zetterqvist, Arvid \enc{Sjölander}{Sjolander}
}
\examples{
## DR-estimation when
## the main model is
## E(Y|A,L1,L2)-E(Y|A=0,L1,L2)=beta0*A+beta1*A*L1
## and the outcome nuisance model is
## E(Y|A=0,L1,L2)=gamma0+gamma1*L1+gamma2*L2
## and the exposure nuisance model is
## E(A|Y=0,L1,L2)=expit(alpha0+alpha1*L1+alpha2*l2)
library(drgee)
expit<-function(x) exp(x)/(1+exp(x))
n<-5000
## nuisance
l1<-rnorm(n, mean = 0, sd = 1)
l2<-rnorm(n, mean = 0, sd = 1)
beta0<-1.5
beta1<-1
gamma0<--1
gamma1<--2
gamma2<-2
alpha0<-1
alpha1<-5
alpha2<-3
## Exposure generated from the exposure nuisance model
a<-rbinom(n,1,expit(alpha0 + alpha1*l1 + alpha2*l2))
## Outcome generated from the main model and the
## outcome nuisance model
y<-rnorm(n,
mean = beta0 * a + beta1 * a * l1 + gamma0 + gamma1 * l1 + gamma2 * l2,
sd = 1)
simdata<-data.frame(y,a,l1,l2)
## outcome nuisance model misspecified and
## exposure nuisance model correctly specified
## DR-estimation
dr.est <- drgee(oformula = formula(y~l1),
eformula = formula(a~l1+l2),
iaformula = formula(~l1),
olink = "identity", elink = "logit",
data = simdata, estimation.method = "dr")
summary(dr.est)
## O-estimation
o.est <- drgee(exposure = "a", oformula = formula(y~l1),
iaformula = formula(~l1), olink = "identity",
data = simdata, estimation.method = "o")
summary(o.est)
## E-estimation
e.est <- drgee(outcome = "y", eformula = formula(a~l1+l2),
iaformula = formula(~l1), elink="logit",
data = simdata, estimation.method = "e")
summary(e.est)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
\keyword{regression}
\keyword{cluster}
\references{
Orsini N., Belocco R., \enc{Sjölander}{Sjolander} A. (2013), Doubly
Robust Estimation in Generalized Linear Models, \emph{Stata Journal},
\bold{13}, 1, pp. 185--205
Robins J.M., Mark S.D., Newey W.K. (1992), Estimating Exposure
Effects by Modelling the Expectation of Exposure Conditional
on Confounders, \emph{Biometrics}, \bold{48}, pp. 479--495
Robins JM (1999), Robust Estimation in Sequentially Ignorable
Missing Data and Causal Inference Models, \emph{Proceedings of the
American Statistical Association Section on Bayesian Statistical
Science}, pp. 6--10
Tchetgen E.J.T., Robins J.M., Rotnitzky A. (2010), On Doubly Robust
Estimation in a Semiparametric Odds Ratio Model, \emph{Biometrika},
\bold{97}, 1, 171--180
Zetterqvist J., Vansteelandt S., Pawitan Y.,
\enc{Sjölander}{Sjolander} (2016), Doubly Robust Methods for Handling
Confounding by Cluster, \emph{Biostatistics}, \bold{17}, 2, 264--276
}
|
0937a027a05b2dc830e300d74114fbc3b16d91bb
|
9a5cd516300be561dc627ebb3fc07ead2707b502
|
/man/exportr.Rd
|
5c5fff4ea7b48ca8c07f04230d7e467de7ff93a7
|
[] |
no_license
|
cran/incadata
|
11535f59e08977e5cb0dca961ea16f5e723e4b2e
|
a80e811b5d22ae44d39231b5ed1994653dc01d27
|
refs/heads/master
| 2021-05-23T02:43:52.314286
| 2020-04-09T07:20:02
| 2020-04-09T07:20:02
| 82,063,922
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,353
|
rd
|
exportr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exportr.R
\name{exportr}
\alias{exportr}
\title{Dump script together with functions from required packages}
\usage{
exportr(
script = NULL,
packages,
recursive = TRUE,
outfile = "./generated_r_script.R",
force = FALSE
)
}
\arguments{
\item{script}{connection with script (file) to append to function definitions}
\item{packages}{name of packages (as character) to be explicitly included.}
\item{recursive}{argument passed to \code{\link{package_dependencies}}}
\item{outfile}{filename for dump file}
\item{force}{this function works only in interactive mode by default but
output can be forced by this argument set to \code{TRUE}}
}
\value{
nothing (function called for its side effects)
}
\description{
If a package is not installed on the computer/server intended to run a
final script, this function can take the script and export it together with all
objects (functions, methods et cetera) from specified R packages. It might
thereafter be possible to transfer the script and to run it even if all
packages are not installed by the host.
}
\details{
Some packages use
external dependencies and/or compiled code. This is not handled by the
function. Hence, there is no guarantee that the script will actually work!
}
|
d6a829dc170f3b26ebda2a98801a2b67a3c3cbc5
|
6baba64a7bdb5879768da2302a23608be28cf3ee
|
/FormerLabMembers/ChrisBarbour/somalogic_msdss_modeling/scripts/msdss/msdss_ratios_forest.R
|
b366d85c41f5507ca0c930464f7221eced724ac5
|
[] |
no_license
|
bielekovaLab/Bielekova-Lab-Code
|
8db78141b6bebb0bbc08ea923655a676479992fa
|
369db2455344660e4012b605a4dc47c653a5c588
|
refs/heads/master
| 2023-04-20T01:01:50.556717
| 2021-05-04T12:44:37
| 2021-05-04T12:44:37
| 261,584,751
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,075
|
r
|
msdss_ratios_forest.R
|
source("./scripts/0-functions.R")
source("./scripts/0-package-funs.R")
data <- read_csv("./data/CLEANED_soma_untreated_20190508.csv")
soma_dt <- readRDS("./data/soma_adjusted_ratios_20190626.rds")
data <- data %>%
filter(model_cohort=="training") %>%
select(sampleid,msdss) %>%
as.data.table()
soma_dt <- merge(data,soma_dt,by="sampleid",sort=TRUE)
soma_dt <- soma_dt[,-1]
all_rats <- names(soma_dt)
# INPUTS GO HERE
remove_outliers <- "FALSE"
var_path <- "./scripts/msdss/iter0_ratios.txt"
parseCommandArgs()
if(var_path != ""){
var_list <- readLines(var_path)
soma_dt <- subset(soma_dt,select=which(all_rats %in% c("msdss",var_list)))
}
# Selecting tuning parameter
soma_dt <- as.data.frame(soma_dt)
nvars <- floor(3*sqrt(dim(soma_dt)[2]))
if(nvars >= dim(soma_dt)[2]-1){nvars <- floor(sqrt(dim(soma_dt)[2]))}
hold <- DUMX
ncpus <- detectBatchCPUs()
mod <- ranger(data=soma_dt, num.trees=40000,mtry=nvars,importance='impurity',num.threads=ncpus,
seed=DUMZ,dependent.variable.name="msdss")
saveRDS(mod,"DUMY1")
|
49a71502be7e76daca6f88bd9e3dd2de88c184f2
|
35da33a02f5e37cc4c860059dc969597e97ef800
|
/DecidingTree/Rfile/DecidingTreeTest.R
|
dc2f7804547224e319b3392f793f0da0a2e91626
|
[] |
no_license
|
yuanqingye/R_Projects
|
f36c176b6295818defb1429859b9faa3dac8862d
|
dc460dc56cb6a36b4bd7aeffeb1fb70e6791b1ec
|
refs/heads/master
| 2020-03-11T06:46:14.832080
| 2018-05-18T06:33:05
| 2018-05-18T06:33:05
| 129,839,300
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,336
|
r
|
DecidingTreeTest.R
|
library(rpart.plot)
data("bodyfat", package = "TH.data")
dim(bodyfat)
attributes(bodyfat)
bodyfat[1:5,]
set.seed(1234)
ind <- sample(2, nrow(bodyfat), replace=TRUE, prob=c(0.7, 0.3))
bodyfat.train <- bodyfat[ind==1,]
bodyfat.test <- bodyfat[ind==2,]
library(rpart)
myFormula <- DEXfat ~ age + waistcirc + hipcirc + elbowbreadth + kneebreadth
bodyfat_rpart <- rpart(myFormula, data = bodyfat.train,control = rpart.control(minsplit = 10))
plot(bodyfat_rpart)
text(bodyfat_rpart, use.n=T)
prp(bodyfat_rpart)
opt <- which.min(bodyfat_rpart$cptable[,"xerror"])
cp <- bodyfat_rpart$cptable[opt, "CP"]
bodyfat_prune <- prune(bodyfat_rpart, cp = cp)
prp(bodyfat_rpart)
printcp(bodyfat_rpart)
DEXfat_pred <- predict(bodyfat_prune, newdata=bodyfat.test)
xlim <- range(bodyfat$DEXfat)
plot(DEXfat_pred ~ DEXfat, data=bodyfat.test, xlab="Observed",ylab="Predicted", ylim=xlim, xlim=xlim)
abline(a=0, b=1)
set.seed(4321)
ind <- sample(2, nrow(iris), replace=TRUE, prob=c(0.7, 0.3))
trainData <- iris[ind==1,]
testData <- iris[ind==2,]
library(party)
ct = ctree(Species~.,data=trainData)
table(predict(ct,testData),testData$Species)
library(randomForest)
# Species ~ .指的是Species与其他所有属性之间的等式
rf <- randomForest(Species ~ ., data=trainData, ntree=100, proximity=TRUE)
table(predict(rf,testData), testData$Species)
|
949a3c20a696a8ceeee67e4f41983112c8c5fb14
|
6782bf87aa03c99aab044004cdb4cc1577e5f0f1
|
/PhenStat/PhenStatSource/R/RRFramework.R
|
36d99ec2b26ce3c850befb7fa0288cefceaaf08a
|
[
"Apache-2.0"
] |
permissive
|
jmason-ebi/stats_working_group
|
177c1aebe0747bda41f4c807d37702e740272ec3
|
41bb896421927c1e0bbdfd35e5b1a6fcfe98d579
|
refs/heads/master
| 2021-01-17T22:07:19.380155
| 2014-01-29T15:43:42
| 2014-01-29T15:43:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,844
|
r
|
RRFramework.R
|
## Copyright © 2011-2013 EMBL - European Bioinformatics Institute
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##------------------------------------------------------------------------------
## RRFramework.R contains ... functions for refernce range plus analysis
##------------------------------------------------------------------------------
# Test for normal distirbution
# Gaussian distribution - the reference interval is calculated as th emean +-2 SD
# Non-Gaussian distribution - percentiles top 97.5 and
# bottom 2.5 percentiles are used to limit of the reference range.
# sufficient numbers of samples are required (minimum 50, ideal 120)
RRTest <- function(phenList, depVariable,
outputMessages=TRUE, rangeThreshold=60, naturalVariation=95)
{
x <- phenList$dataset
RR_left_male <- NA
RR_right_male <- NA
valuesInRange_male <- NA
allValues_male <- NA
percentageOut_male <- NA
RR_left_female <- NA
RR_right_female <- NA
valuesInRange_female <- NA
allValues_female <- NA
percentageOut_female <- NA
numberofgenders <- length(levels(x$Gender))
# Defined control subset
GenotypeSubset <- subset(x, x$Genotype==phenList$refGenotype)
controlSubset <- GenotypeSubset[,c(depVariable)]
controlSubset <- na.omit(controlSubset)
# Reference interval is defined by using percentiles
if (length(controlSubset)>=50){
percentiles<-unname(quantile(controlSubset, c(.025, .975)))
RR_left_all<-percentiles[1]
RR_right_all<-percentiles[2]
}
else {
# Reference interval is defined by suing Gaussian distribution
# shapiro.test(controlSubset) is needed?
RR_left_all <- mean(controlSubset) - sd(controlSubset)
RR_right_all <- mean(controlSubset) + sd(controlSubset)
}
if(numberofgenders==2){
GenotypeSubset_male <- subset(GenotypeSubset,
GenotypeSubset$Gender=="Male")
GenotypeSubset_female <- subset(GenotypeSubset,
GenotypeSubset$Gender=="Female")
controlSubset_male <- GenotypeSubset_male[,c(depVariable)]
controlSubset_male <- na.omit(controlSubset_male)
# Reference interval is defined by using percentiles
if (length(controlSubset_male)>=50){
percentiles<-unname(quantile(controlSubset_male, c(.025, .975)))
RR_left_male<-percentiles[1]
RR_right_male<-percentiles[2]
}
else {
# Reference interval is defined by suing Gaussian distribution
# shapiro.test(controlSubset) is needed?
RR_left_male <- mean(controlSubset_male) - sd(controlSubset_male)
RR_right_male <- mean(controlSubset_male) + sd(controlSubset_male)
}
controlSubset_female <- GenotypeSubset_female[,c(depVariable)]
controlSubset_female <- na.omit(controlSubset_female)
# Reference interval is defined by using percentiles
if (length(controlSubset_female)>=50){
percentiles<-unname(quantile(controlSubset_female, c(.025, .975)))
RR_left_female<-percentiles[1]
RR_right_female<-percentiles[2]
}
else {
# Reference interval is defined by suing Gaussian distribution
# shapiro.test(controlSubset) is needed?
RR_left_female <- mean(controlSubset_female) - sd(controlSubset_female)
RR_right_female <- mean(controlSubset_female) + sd(controlSubset_female)
}
}
# RR plus test - all together
GenotypeSubset <- subset(x, x$Genotype==phenList$testGenotype)
mutantSubset <- GenotypeSubset[,c(depVariable)]
mutantSubset <- na.omit(mutantSubset)
valuesInRange <- sum(sapply(mutantSubset, function(x) (x>=RR_left && x<=RR_right)))
allValues <- length(mutantSubset)
percentageOut_all<-round(100*(allValues-valuesInRange)/allValues)
# RR plus test - males/females
if(numberofgenders==2){
GenotypeSubset_male <- subset(GenotypeSubset,
GenotypeSubset$Gender=="Male")
GenotypeSubset_female <- subset(GenotypeSubset,
GenotypeSubset$Gender=="Female")
mutantSubset_male <- GenotypeSubset_male[,c(depVariable)]
mutantSubset_male <- na.omit(mutantSubset_male)
mutantSubset_female <- GenotypeSubset_female[,c(depVariable)]
mutantSubset_female <- na.omit(mutantSubset_female)
valuesInRange_male <- sum(sapply(mutantSubset_male, function(x) (x>=RR_left_male && x<=RR_right_male)))
allValues_male <- length(mutantSubset_male)
percentageOut_male<-round(100*(allValues_male-valuesInRange_male)/allValues_male)
valuesInRange_female <- sum(sapply(mutantSubset_female, function(x) (x>=RR_left_female && x<=RR_right_female)))
allValues_female <- length(mutantSubset_female)
percentageOut_female<-round(100*(allValues_female-valuesInRange_female)/allValues_female)
}
stat_all_list <- c(RR_left_all,RR_right_all,valuesInRange,allValues,percentageOut_all)
#names(stat_all_list) <- c("RR left", "RR right"," values in range", "all values", "out of range %")
stat_male_list <- c(RR_left_male,RR_right_male,valuesInRange_male,allValues_male,percentageOut_male)
#names(stat_male_list) <- c("RR left", "RR right"," values in range", "all values", "out of range %")
stat_female_list <- c(RR_left_female,RR_right_female,valuesInRange_female,allValues_female,percentageOut_female)
#names(stat_female_list) <- c("RR left", "RR right"," values in range", "all values", "out of range %")
estimateAll <- "Genotype effect is significant"
if (percentageOut_all<rangeThreshold)
estimateAll <- "Genotype effect is not significant"
estimateMale <- "Genotype effect is significant for males only"
if (percentageOut_male<rangeThreshold)
estimateMale <- "Genotype effect is not significant for males only"
estimateFemale <- "Genotype effect is significant for females only"
if (percentageOut_female<rangeThreshold)
estimateFemale <- "Genotype effect is not significant for females only"
model <- c(estimateAll, stat_all_list, estimateMale, stat_male_list, estimateFemale, stat_female_list)
names(model) <- c("RR estimate", "RR left", "RR right", "values in range", "all values", "out of range %",
"RR estimate males only", "RR left males only", "RR right males only"," values in range males only",
"all values males only", "out of range % males only",
"RR estimate females only", "RR left females only", "RR right females only"," values in range females only",
"all values females only", "out of range % females only")
keep_weight <- NA
keep_gender <- NA
keep_interaction <- NA
keep_batch <- NA
keep_equalvar <- NA
interactionTest <- NA
result <- new("PhenTestResult",list(model.dataset=x, model.output=model,
depVariable=depVariable,method="RR",model.effect.batch=keep_batch,
model.effect.variance=keep_equalvar,model.effect.interaction=keep_interaction,
model.output.interaction=interactionTest,model.effect.gender=keep_gender,
model.effect.weight=keep_weight,numberGenders=numberofgenders))
return(result)
}
##------------------------------------------------------------------------------
## Parse the RR model output and return the combined (all animals) summary
## statistics as a readable named vector.
parserOutputSummaryRR <- function(phenTestResult)
{
    # The first six entries of model.output hold the pooled RR statistics:
    # the textual estimate, the reference-range bounds, the in-range and
    # total counts, and the percentage outside the range.
    rrStats <- phenTestResult$model.output[1:6]
    names(rrStats) <- c("RR estimate", "RR left", "RR right",
                        "values in range", "all values", "out of range %")
    return(rrStats)
}
##------------------------------------------------------------------------------
|
0801d2ab0ad5a6e524623889f4756483e168bff4
|
3a42630716521b58a20d5a9445fd3eb1007188aa
|
/man/font.Rd
|
5cb60dd080dfa3ad5b96b2ef11bf10c3db0e4ae5
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
mslegrand/svgR
|
2a8addde6b1348db34dee3e5145af976008bf8f0
|
e781c9c0929a0892e4bc6e23e7194fb252833e8c
|
refs/heads/master
| 2020-05-22T01:22:16.991851
| 2020-01-18T03:16:30
| 2020-01-18T03:16:30
| 28,827,655
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,696
|
rd
|
font.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_ElePages.R
\name{font}
\alias{font}
\title{An element which specifies the font.}
\description{
The font element defines a font to be used for text layout.
}
\section{Available Attributes (Named Parameters)}{
\describe{
\item{\emph{Complementary Attributes}}{\code{\link[=FontElementHorizOriginXAttribute]{horiz.origin.xy}}, \code{\link[=FontElementVertOriginXAttribute]{vert.origin.xy}}}
\item{\emph{Core Attributes}}{\code{\link[=IDAttribute]{id}}, \code{\link[=XMLBaseAttribute]{xml.base}}, \code{\link[=XMLLangAttribute]{xml.lang}}, \code{\link[=XMLSpaceAttribute]{xml.space}}}
\item{\emph{Unclassified}}{\code{\link[=ClassAttribute]{class}}, \code{\link[=ExternalResourcesRequiredAttribute]{externalResourcesRequired}}, \code{\link[=FontElementHorizAdvXAttribute]{horiz.adv.x}}, \code{\link[=FontElementHorizOriginXAttribute]{horiz.origin.x}}, \code{\link[=FontElementHorizOriginYAttribute]{horiz.origin.y}}, \code{\link[=StyleAttribute]{style}}, \code{\link[=FontElementVertAdvYAttribute]{vert.adv.y}}, \code{\link[=FontElementVertOriginXAttribute]{vert.origin.x}}, \code{\link[=FontElementVertOriginYAttribute]{vert.origin.y}}}
}
}
\section{Available Content Elements (Unnamed Parameters)}{
\describe{
\item{\emph{Descriptive Elements}}{\code{\link[=desc]{desc}}, \code{\link[=metadata]{metadata}}, \code{\link[=title]{title}}}
\item{\emph{Non-structural Container Elements}}{\code{\link[=glyph]{glyph}}, \code{\link[=missing-glyph]{missing.glyph}}}
\item{\emph{Uncategorized Elements}}{\code{\link[=font-face]{font.face}}, \code{\link[=hkern]{hkern}}, \code{\link[=vkern]{vkern}}}
}
}
\keyword{element}
|
678e4b3d6ffe0ee66e9e169d70b4a50fb1c99003
|
5138db926647742e4b39a6b58f8c8a992b2578c3
|
/setting/simulation_targets.R
|
1dc62f0dba0e0fe9a20c0691cbc97a98cab096ed
|
[] |
no_license
|
helenecharlotte/grfCausalSearch
|
9863f4a1009832b9b8f1aa03bb2f2f9e2c820e5f
|
e14badd8f15dfc4e1067b2198de0f945b5cb3fd8
|
refs/heads/master
| 2023-01-20T04:59:30.175316
| 2023-01-10T10:55:42
| 2023-01-10T10:55:42
| 217,540,473
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,288
|
r
|
simulation_targets.R
|
### simulation_targets.R ---
#----------------------------------------------------------------------
## Author: Thomas Alexander Gerds
## Created: May 5 2022 (11:08)
## Version:
## Last-Updated: Jan 10 2023 (09:56)
## By: Thomas Alexander Gerds
## Update #: 200
#----------------------------------------------------------------------
##
### Commentary:
#
# effects on crude and net probabilities
# independent censoring
# varying sample size
# varying percentage of censored (before horizon)
# varying treatment effects
# ranking performance
#
### Change Log:
#----------------------------------------------------------------------
##
### Code:
# Load data.table: all parameter grids and per-repetition results are tables.
library(data.table)
# Number of cores handed to mclapply for the simulation repetitions
# (the commented values were used for local/smaller runs).
## MCCORES <- 1
## MCCORES <- 5
MCCORES <- 25
# Pin data.table to one thread so that parallelism comes from mclapply only.
setDTthreads(1)
# Fixed part of the data-generating mechanism: two event times (T1, T2),
# ten binary treatments A1..A10 with their marginal probabilities,
# five binary covariates X1..X5 with theirs, two normal covariates X6, X7,
# and a quadratic term in X6.
fixed <- list(event.times = c("T1","T2"),
              treatments = c("A1" = .4,"A2" = .3, "A3" = .3,"A4" = .4,"A5" = .5,"A6" = .2,"A7" = .7,"A8" = .8,"A9" = .9,"A10" = .1),
              binary.covariates = c("X1" = .1,"X2" = .2,"X3" = .3,"X4" = .4,"X5" = .5),
              normal.covariates = paste0("X",6:7),
              quadratic.covariates = "X6")
# targets pipeline node exposing the fixed setting to downstream targets as FIXED.
fixed_target <- tar_target(FIXED,fixed,deployment = "main")
# Each scenario is a list of four model formulas: event of interest T1,
# competing event T2, censoring time C, and the treatment A1.
# f(var, coef) pairs a variable with its regression coefficient
# (scale presumably defined by the simulateData/lava model — confirm there).
# basic setting: no covariate effects on censoring time
formula1 <- list(T1 ~ f(A4,.3) + f(A5,.7) + f(X1,1) + f(X2,.3) + f(X6,-.5),
                 T2 ~ f(A5, -.3)+ f(X1,-.1) + f(X2,.6) + f(X6,.1),
                 C ~ f(A1,0),
                 A1 ~ f(X1,-1) + f(X6,.7) + f(A7,.2))
# no other treatment effects on T1 or T2
formula_ranking <- list(T1 ~ f(X1,1) + f(X2,.3) + f(X6,.3),
                        T2 ~ f(X1,-.1) + f(X2,.6) + f(X6,.1),
                        C ~ f(A1,0),
                        A1 ~ f(X1,-1) + f(X6,.7) + f(A7,.2))
# covariate dependent censoring
formula_cens <- list(T1 ~ f(A4,.3) + f(A5,.7) + f(X1,1) + f(X2,.3) + f(X6,-.5),
                     T2 ~ f(A5, -.3)+ f(X1,-.1) + f(X2,.6) + f(X6,.1),
                     C ~ f(A1,.1)+f(A3,-0.3) + f(X2,-0.4)+f(X6,.1),
                     A1 ~ f(X1,-1) + f(X6,.7) + f(A7,.2))
# misspecified parametric models: T1 additionally depends on the quadratic
# term X6_2, which the parametric working models do not include
formula_misspecified = list(T1 ~ f(A4,.3) + f(A5,.7) + f(X1,1) + f(X2,.3) + f(X6,.3)+f(X6_2,.4),
                            T2 ~ f(A5, -.3)+ f(X1,-.1) + f(X2,.6) + f(X6,.1),
                            C ~ f(A1,0),
                            A1 ~ f(X1,-1) + f(X6,.7) + f(A7,.2))
# Lookup table so each simulation row can select its scenario by name
# (the `setting` column of the `varying` grid below).
formula_settings <- list("formula1" = formula1,
                         "formula_ranking" = formula_ranking,
                         "formula_misspecified" = formula_misspecified,
                         "formula_cens" = formula_cens)
# ---------------------------------------------------------------------
# Simulation parameter grids. Each CJ() call builds the cross product of
# the listed parameter values; one row = one simulation scenario.
# Columns: A*_T* are treatment effects on T1/T2, scale.censored controls
# the amount of censoring (-Inf = none), net toggles treating the
# competing risk as censoring, treat selects which treatment(s) to
# intervene on, horizon is the evaluation time.
# ---------------------------------------------------------------------
# crude treatment effect
varying_crude <- data.table::CJ(A1_T1 = 1.25,
                                A1_T2 = c(.8,1,1.25),
                                A2_T1 = 1,
                                A2_T2 = 1.25,
                                scale.censored = 1/40,
                                sample.size = c(500,1000,2000,5000),
                                horizon = 5,
                                setting = "formula1",
                                method = "causal_forest",
                                weighter = "ranger",
                                net = FALSE,
                                treat = "A1",
                                num.trees = 50)
# net effects
varying_net <- data.table::CJ(A1_T1 = c(.8,1,1.25),
                              A1_T2 = 0.8,
                              A2_T1 = 1,
                              A2_T2 = 1.25,
                              scale.censored = 1/40,
                              sample.size = 5000,
                              horizon = 5,
                              setting = "formula1",
                              method = "causal_forest",
                              weighter = "ranger",
                              net = c(FALSE,TRUE),
                              treat = "all",
                              num.trees = 50)
# effect censored
varying_censored <- data.table::CJ(A1_T1 = 1.25,
                                   A1_T2 = 1,
                                   A2_T1 = 1,
                                   A2_T2 = 1.25,
                                   scale.censored = c(-Inf,1/40,1/25),
                                   sample.size = c(500,1000,2000,5000),
                                   horizon = 2.5,
                                   setting = c("formula1","formula_cens"),
                                   method = "causal_forest",
                                   weighter = "ranger",
                                   net = FALSE,
                                   treat = "A1",
                                   num.trees = 50)
# misspecified parametric models: compare causal forest against the
# parametric competitors CSC and FGR under the misspecified scenario ...
varying_misspecified <- data.table::CJ(A1_T1 = 1.25,
                                       A1_T2 = 1,
                                       A2_T1 = 1,
                                       A2_T2 = 1.25,
                                       scale.censored = 1/40,
                                       ## sample.size = 5000,
                                       sample.size = c(500,1000,2000,5000),
                                       horizon = 5,
                                       setting = "formula_misspecified",
                                       method = c("causal_forest","CSC","FGR"),
                                       weighter = "ranger",
                                       net = FALSE,
                                       treat = "A1",
                                       num.trees = 50)
# ... and under the correctly specified scenario (formula1)
varying_notmisspecified <- data.table::CJ(A1_T1 = 1.25,
                                          A1_T2 = 1,
                                          A2_T1 = 1,
                                          A2_T2 = 1.25,
                                          scale.censored = 1/40,
                                          sample.size = c(500,1000,2000,5000),
                                          horizon = 5,
                                          setting = "formula1",
                                          method = c("causal_forest","CSC","FGR"),
                                          weighter = "ranger",
                                          net = FALSE,
                                          treat = "A1",
                                          num.trees = 50)
# ranking treatments: vary A2's effect on T2 to study how well the
# estimated effects rank all ten treatments
varying_ranking <- data.table::CJ(A1_T1 = 1.25,
                                  A1_T2 = 1,
                                  A2_T1 = 1,
                                  A2_T2 = c(.2,.8,1,1.25,2),
                                  scale.censored = c(-Inf,1/40),
                                  sample.size = c(500,1000,2000,5000),
                                  horizon = 5,
                                  setting = "formula_ranking",
                                  method = "causal_forest",
                                  weighter = "ranger",
                                  net = c(FALSE,TRUE),
                                  treat = "all",
                                  num.trees = 100)
# Stack all grids, tagging each with its theme for later summaries.
varying <- rbindlist(list(varying_crude[,theme := "crude_effect"],
                          varying_net[,theme := "net_effect"],
                          varying_censored[,theme := "censoring"],
                          varying_misspecified[,theme := "misspecified"],
                          varying_notmisspecified[,theme := "not_misspecified"],
                          varying_ranking[,theme := "ranking"]))
# Cap the forest size for the largest sample size.
varying[sample.size == 5000,num.trees := 50]
varying_target <- tar_target(VARYING,
                             varying,
                             deployment = "main")
# NOTE(review): the := assignments below modify `varying` by reference
# AFTER varying_target was defined above — whether VARYING sees the reps
# column depends on when targets evaluates the expression; confirm intended.
varying[,reps := 2002]
## varying[sample.size == 500,reps := 10000]
## varying[sample.size == 1000,reps := 5000]
## varying[sample.size == 2000,reps := 3000]
# REVIEW
#varying[net == 0&theme == "censoring"][,reps := 2002][net == 0&theme == "censoring" & sample.size == 5000,reps := 2002],
#varying[net == 0&scale.censored == -Inf&setting == "formula_cens"&theme == "censoring"][,reps := 2002],
# Fewer repetitions for the expensive n = 5000 scenarios.
varying[sample.size == 5000,reps := 1001]
## varying <- varying[!(sample.size == 5000&method%in%c("CSC","FGR"))]
## varying[sample.size != 5000,reps := 2002]
## varying <- varying[method != "FGR"]
## varying <- varying[sample.size != 5000]
# ---------------------------------------------------------------------
# Calculation of true ATE values
# ---------------------------------------------------------------------
# crude: real world with competing risks
# net: hypothetical world where competing risks are eliminated
# One truth target per unique combination of the parameters that actually
# change the true value (sample size, method etc. do not).
truth_varying <- tar_map(
    # truth is affected by % censored and true effect
    values = unique(varying[,.(scale.censored,A1_T1,A1_T2,A2_T1,A2_T2,setting,horizon,theme)]),
    tar_target(VARYING_TRUTH,{
        ## lavaModel;
        ## simulateData;
        # Plug the scenario's formulas into the fixed setting, then
        # approximate the true ATE by large-sample simulation
        # (B replicates of n = 100000 — see theTruth()).
        set <- FIXED
        set$formula.list = formula_settings[[setting]]
        y = theTruth(setting = set,
                     A1_T1 = A1_T1,
                     A1_T2 = A1_T2,
                     A2_T1 = A2_T1,
                     A2_T2 = A2_T2,
                     scale.censored = scale.censored,
                     horizon = horizon,
                     B = 50,
                     cores = MCCORES,
                     n = 100000)
        # Attach the scenario parameters so results can be merged later.
        y = cbind(y,
                  A1_T1 = A1_T1,
                  A1_T2 = A1_T2,
                  A2_T1 = A2_T1,
                  A2_T2 = A2_T2,
                  horizon = horizon,
                  theme = theme,
                  formula = setting)
        y},
        deployment = "main"))
# Stack the per-scenario truths into a single TRUTH table.
truth <- tar_combine(TRUTH, truth_varying)
# ---------------------------------------------------------------------
# Estimation of ATE
# ---------------------------------------------------------------------
# One target per row of `varying`; each target runs `reps` independent
# simulate-then-estimate repetitions in parallel.
estimates <- tar_map(
    # outer map generates data under varying parameters of the data generating model
    values = varying,
    unlist = FALSE,
    tar_target(ESTIMATES,{
        ## lavaModel;
        out = do.call("rbind",mclapply(1:reps,function(b){
            # Draw one dataset under this scenario's formulas/effects.
            set = FIXED
            set$formula.list = formula_settings[[setting]]
            simulated_data <- simulateData(setting = set,
                                           A1_T1 = A1_T1,
                                           A1_T2 = A1_T2,
                                           A2_T1 = A2_T1,
                                           A2_T2 = A2_T2,
                                           n = sample.size,
                                           scale.censored = scale.censored,
                                           keep.latent = FALSE)
            # Intervene on all ten treatments or on A1 only.
            if (treat == "all"){
                ff = Hist(time,event)~intervene(A1)+intervene(A2)+intervene(A3)+intervene(A4)+intervene(A5)+intervene(A6)+intervene(A7)+intervene(A8)+intervene(A9)+intervene(A10)+X1+X2+X3+X4+X5+X6+X7
            } else{
                ff = Hist(time,event)~intervene(A1)+A2+A3+A4+A5+A6+A7+A8+A9+A10+X1+X2+X3+X4+X5+X6+X7
            }
            # Censoring weights: Kaplan-Meier uses a reduced weight formula,
            # otherwise all treatments and covariates enter the weight model.
            if (weighter == "km"){
                x <- causalhunter(formula=ff,
                                  method = method,
                                  weighter=weighter,
                                  args.weight = list(num.trees = num.trees,alpha = 0.05,mtry = 17),
                                  fit.separate = TRUE,
                                  num.trees=num.trees,
                                  CR.as.censoring = net,
                                  data=simulated_data,
                                  times=horizon,
                                  formula.weight = Hist(time,event)~A1+A2)
            }else{
                x <- causalhunter(formula=ff,
                                  method = method,
                                  weighter=weighter,
                                  args.weight = list(num.trees = num.trees,alpha = 0.05,mtry = 17),
                                  fit.separate = TRUE,
                                  num.trees=num.trees,
                                  CR.as.censoring = net,
                                  data=simulated_data,
                                  times=horizon,
                                  formula.weight = Hist(time,event)~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+X1+X2+X3+X4+X5+X6+X7)
            }
            # Rank treatments by absolute estimated effect (largest = rank 1)
            # when all treatments were intervened on.
            if (treat == "all"){
                x[,rank := rank(-abs(ate))]
            }else{
                x[,rank := NA]
            }
            # Attach scenario parameters for downstream merging/summaries.
            x <- cbind(x,data.table(n = sample.size,
                                    net = as.numeric(net),
                                    scale.censored = scale.censored,
                                    num.trees = num.trees,
                                    method = method,
                                    A1_T1 = A1_T1,
                                    A1_T2 = A1_T2,
                                    A2_T1 = A2_T1,
                                    A2_T2 = A2_T2,
                                    horizon = horizon,
                                    formula = setting,
                                    weighter = weighter,
                                    theme = theme))
            x
        },mc.cores = MCCORES)) # end of repetitions
        gc()
        out
    },cue = tar_cue(mode = "never")) # end of parameter map
)
# combine
ate <- tar_combine(ESTIMATE_ATE,{
    estimates
})
# ---------------------------------------------------------------------
# Summarize performance of estimators against true parameter values
# ---------------------------------------------------------------------
results <- tar_target(RESULTS,
                      summarizePerformance(truth = TRUTH,
                                           estimate = ESTIMATE_ATE),
                      deployment = "main")
ranking <- tar_target(RANKING,
                      rankingPerformance(estimate = ESTIMATE_ATE),
                      deployment = "main")
# Merge estimates with the true ATE and the true censoring fraction, and
# coerce the grid parameters to factors for plotting.
plotframe <- tar_target(PLOTFRAME,{
    e <- ESTIMATE_ATE[intervene == "A1"]
    t <- TRUTH
    ## e <- tar_read("ESTIMATE_ATE")[intervene == "A1"]
    ## t <- tar_read("TRUTH")[intervene == "A1"]
    ## setnames(e,"time","horizon")
    setkeyv(e,c("net","formula","intervene","horizon","scale.censored","A1_T1","A1_T2","A2_T1","A2_T2"))
    setkeyv(t,c("net","formula","intervene","horizon","scale.censored","A1_T1","A1_T2","A2_T1","A2_T2"))
    # True ATE averaged over the truth replicates (cause 1, treatment A1).
    t.ate = t[cause == 1 & intervene == "A1",.(true.ate = mean(ate)),keyby = c("net","formula","horizon","A1_T1","A1_T2","A2_T1","A2_T2")]
    # Average fraction censored before the horizon, per censoring setting.
    t.cens = t[cause == 1 & intervene == "A1",.(censored.tau = mean(censored.tau)),keyby = c("formula","horizon","scale.censored")]
    e = merge(e,t.ate,by = c("net","formula","horizon","A1_T1","A1_T2","A2_T1","A2_T2"),all.y = FALSE)
    e = merge(e,t.cens,by = c("formula","horizon","scale.censored"),all.y = FALSE)
    e[,n:=factor(n)]
    e[,net:=factor(net)]
    e[,method:=factor(method,levels=c("causal_forest","CSC","FGR"),labels=c("Causal forest","CSC","FGR"))]
    e[,num.trees:=factor(num.trees)]
    e[,horizon:=factor(horizon)]
    e[,censored.tau:=factor(round(censored.tau,1))]
    e[,A1_T1:=factor(A1_T1)]
    e[,A1_T2:=factor(A1_T2)]
    e[,A2_T1:=factor(A2_T1)]
    e[,A2_T2:=factor(A2_T2)]
    e
})
######################################################################
### simulation_targets.R ends here
|
61cf1dd58e5d40d78c3d236249ac5e0bb2a00209
|
e0d76b276a1666f7ed7a3f3c03fc5470ff8166fc
|
/tests/testthat/test.misclassificationPenalties.R
|
9db2bdf1f464c98107d2b342136849e33a2365f2
|
[] |
no_license
|
damirpolat/llama
|
dee1808f400a62e0d47430c042228653af6569a1
|
a5e1d5d4ca591e5558bb610baef9cff36f792ef4
|
refs/heads/master
| 2020-06-23T17:31:32.277154
| 2019-07-28T00:13:03
| 2019-07-28T00:13:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,125
|
r
|
test.misclassificationPenalties.R
|
# Tests for misclassificationPenalties(). The fixtures dmeas, modelameas and
# modelbmeas are defined in the package's shared test helpers (not visible
# here); model "a" is expected to accumulate a total penalty of 10 on dmeas
# and model "b" a penalty of 0.
test_that("misclassificationPenalties returns penalties", {
    expect_equal(sum(misclassificationPenalties(dmeas, modelameas)), 10)
    expect_equal(sum(misclassificationPenalties(dmeas, modelbmeas)), 0)
})
# A data set without an explicit test split should still be scored.
test_that("misclassificationPenalties works without test split", {
    fold = data.frame(id=1:10, a=rep.int(1, 10), b=rep.int(0, 10))
    d = list(data=fold, performance=c("a", "b"), minimize=T, ids=c("id"))
    expect_equal(sum(misclassificationPenalties(d, modelameas)), 10)
    expect_equal(sum(misclassificationPenalties(d, modelbmeas)), 0)
})
# Flipping minimize to FALSE should swap which model is penalised.
test_that("misclassificationPenalties allows to maximise", {
    emeas = dmeas
    emeas$minimize = F
    expect_equal(sum(misclassificationPenalties(emeas, modelameas)), 0)
    expect_equal(sum(misclassificationPenalties(emeas, modelbmeas)), 10)
})
# A model without precomputed predictions must be evaluated on the splits.
test_that("misclassificationPenalties works for test splits", {
    model = function(data) {
        data.frame(id=data$data$id, algorithm="a", score=1, iteration=1)
    }
    class(model) = "llama.model"
    attr(model, "hasPredictions") = FALSE
    expect_equal(sum(misclassificationPenalties(dmeas, model)), 10)
})
# Overlapping/repeated test splits: each split is scored independently
# (3 splits x 2 instances x penalty 1 = 6).
test_that("misclassificationPenalties works for test splits that repeat", {
    i = 0
    model = function(data) {
        i <<- i + 1
        data.frame(id=data$data$id, algorithm="a", score=1, iteration=i)
    }
    class(model) = "llama.model"
    attr(model, "hasPredictions") = FALSE
    attr(model, "addCosts") = TRUE
    d = dmeas
    d$test = list(c(1,2), c(1,2), c(2,3))
    expect_equal(sum(misclassificationPenalties(d, model)), 6)
})
# NA predictions (first 5 ids) should contribute a zero penalty.
test_that("misclassificationPenalties works with NA predictions", {
    nasmeas = data.frame(algorithm=rep.int(NA, 5), score=0, iteration=1)
    asmeas = data.frame(algorithm=rep.int("a", 5), score=1, iteration=1)
    model = list(predictions=rbind(cbind(nasmeas, id=1:5), cbind(asmeas, id=6:10)))
    class(model) = "llama.model"
    attr(model, "hasPredictions") = TRUE
    meass = misclassificationPenalties(dmeas, model)
    expect_equal(length(meass), 10)
    expect_equal(length(meass[meass == 0]), 5)
    expect_equal(sum(meass), 5)
})
|
478ae004c267da20db3280346a79022d4e78901c
|
c94dda9aef39aa193f7660311d971508d0b3c940
|
/ch2/sqsolve.R
|
85c116dd4ddd1dd3cb93fa367c30cb6632320e60
|
[] |
no_license
|
ml-playground/nonlinear_opt_r
|
7cc123b8b1aff3a616b87a0ed5dc6ee5fd3a9fd9
|
67d209ed1df1f5d0b2f53fdbfd820d5a842178a3
|
refs/heads/master
| 2016-09-12T16:35:28.864114
| 2016-04-15T16:46:58
| 2016-04-15T16:46:58
| 56,057,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 122
|
r
|
sqsolve.R
|
# Minimise the test objective sq.f from a fixed starting point using the
# steepest-descent routine stdesc() and print the result.
x <- c(0.1, 0.8)      # starting parameter vector
source("sqfn.R")      # defines the objective sq.f and its gradient sq.g
source("steepdesc.R") # defines the optimiser stdesc()
# trace=1 prints progress during the iterations
asqsd <- stdesc(x, sq.f, sq.g, control=list(trace=1))
print(asqsd)
|
1d3987b4c253eb77b783cbf7fcce9facaa2de256
|
f7fe5eea9a5aa93d94b5682fc1ba5b1efb303c69
|
/man/conf_renderer.Rd
|
b1498fad662a1e8a1e5b1affbe7958b32ee31e7a
|
[
"MIT"
] |
permissive
|
JohnCoene/g2r
|
056080dbbec46b2b47210922dffac664aa355961
|
3062b102e3095c2e3e9be4c6b24b17bf28737598
|
refs/heads/master
| 2021-06-18T23:13:24.645610
| 2021-02-27T19:54:45
| 2021-02-27T19:54:45
| 182,956,100
| 160
| 12
|
NOASSERTION
| 2019-05-15T12:43:30
| 2019-04-23T07:12:19
|
R
|
UTF-8
|
R
| false
| true
| 424
|
rd
|
conf_renderer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/style.R
\name{conf_renderer}
\alias{conf_renderer}
\title{Renderer}
\usage{
conf_renderer(g2, renderer = c("svg", "canvas"))
}
\arguments{
\item{g2}{An object of class \code{g2r} as returned by \code{\link{g2r}}.}
\item{renderer}{Renderer, \code{canvas} or \code{svg}.}
}
\description{
Define renderer.
}
\note{
The "g" in g2r stands for svg.
}
|
7944ac341b1ae23858350d19b941f32c3b6ac839
|
c9dd079840c9e9d2158557a1fd67196643bdde3c
|
/binomial/tests/testthat/test-binomial.R
|
8e42b46d9ae050ba07637310e51547e0d6592769
|
[] |
no_license
|
stat133-sp19/hw-stat133-aditya234123
|
c5d3593ffd5cfc13a4f74a5fc61978497c1d79f4
|
0a3829d3c464fc94536eed40d482ad0691605c42
|
refs/heads/master
| 2020-04-28T03:51:35.310624
| 2019-04-26T22:46:33
| 2019-04-26T22:46:33
| 174,954,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,558
|
r
|
test-binomial.R
|
context("Testing binomial functions")
# bin_choose(n, k) = choose(n, k); must vectorise and reject k > n.
test_that("Testing bin_choose()", {
    expect_equal(bin_choose(5,2), 10)
    expect_equal(bin_choose(c(5,2), c(2, 1)), c(10, 2))
    expect_equal(length(bin_choose(c(5,2, 20), c(2, 1, 10))), 3)
    expect_error(bin_choose(2,5))
})
# bin_probability(k, n, p): binomial pmf; vectorised over k, rejects
# invalid probabilities and k > n.
test_that("Testing bin_probability()", {
    expect_equal(bin_probability(2,5,0.5), 0.3125)
    expect_equal(bin_probability(0:2,5,0.5), c(0.03125, 0.15625, 0.31250))
    expect_equal(round(bin_probability(55, 100, 0.45), 3), 0.011)
    expect_error(bin_probability(2,5,3))
    expect_error(bin_probability(5,2,0.5))
})
# bin_distribution(n, p): data frame of classes c("bindis","data.frame")
# with one row per success count 0..n and a probability column.
test_that("Testing bin_distribution()", {
    expect_equal(class(bin_distribution(5,0.5)), c("bindis", "data.frame"))
    expect_equal(bin_distribution(2,0.5)$probability, c(0.25, 0.50, 0.25))
    expect_equal(length(bin_distribution(10, 0.5)$success), length(bin_distribution(10, 0.5)$probability))
    expect_equal(length(bin_distribution(10, 0.5)$success), 11)
    expect_equal(ncol(bin_distribution(10, 0.6)), 2)
})
# bin_cumulative(n, p): like bin_distribution plus a cumulative column
# that must end at 1.
test_that("Testing bin_cumulative()", {
    expect_equal(class(bin_cumulative(5,0.5)), c("bincum", "data.frame"))
    expect_equal(bin_cumulative(2,0.5)$probability, c(0.25, 0.50, 0.25))
    expect_equal(bin_cumulative(2,0.5)$cumulative, c(0.25, 0.75, 1.00))
    expect_equal(length(bin_cumulative(10, 0.5)$success), length(bin_cumulative(10, 0.5)$probability))
    expect_equal(length(bin_cumulative(10, 0.5)$cumulative), length(bin_cumulative(10, 0.5)$probability))
    expect_equal(length(bin_cumulative(10, 0.5)$success), 11)
    expect_equal(ncol(bin_cumulative(10, 0.6)), 3)
})
|
ef41d50f92e893b380a0276d4339a9d5594c7879
|
a83e80443e3b31d5f66c8f07fa83fbe7b9396424
|
/R/fsd.perm.R
|
41ec636f39d84d60ca581771c631d71e10d6cb10
|
[] |
no_license
|
kuenzer/fsd
|
70432829172c0c21b1d6b178a5657ef6c07a9c8d
|
b09f30fdf2591094bdb1017c49594b583c1edf6e
|
refs/heads/master
| 2022-11-14T07:23:11.825765
| 2020-07-14T11:19:18
| 2020-07-14T11:19:18
| 201,284,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 927
|
r
|
fsd.perm.R
|
#' Permute the dimensions of a Functional Spatial Data Grid
#'
#' This function is used to permute the order of the dimensions of the spatial
#' grid of some functional spatial data.
#' @param fsdobj the functional spatial data.
#' @param perm the subscript permutation vector, i.e. a permutation of the
#' integers 1:r. If NULL and the grid has exactly two spatial dimensions,
#' the two dimensions are swapped.
#' @seealso \code{\link{fsd.fd}}
#' @keywords fsd
#' @export
#' @examples
#' data("temp")
#' fsd.plot.data(fsd.perm(temp, c(2, 1, 3)))
fsd.perm = function (fsdobj, perm = NULL)
{
  if (!inherits(fsdobj, "fsd.fd"))
    stop("fsdobj needs to be an fsd object")
  ndim = length(dim(fsdobj$coefs))
  # With at most 2 dimensions there is no spatial grid to permute.
  if (ndim <= 2)
    return(fsdobj)
  # Default for a single pair of spatial dimensions: swap them.
  # Bug fix: the original overwrote a user-supplied perm unconditionally
  # whenever ndim == 3; the default is now applied only when perm is NULL.
  if (is.null(perm) && ndim == 3)
    perm = 2:1
  # perm must rearrange 1:(ndim - 1); the first dimension of coefs is
  # always kept fixed (see the shift below).
  if (!is.numeric(perm) || length(perm) != ndim - 1 ||
      !setequal(perm, seq_len(ndim - 1)))
    stop("Invalid permutation")
  # Shift by one so dimension 1 stays in place and apply the permutation
  # to both the coefficient array and the dimension names.
  perm = c(1, 1 + perm)
  fsdobj$coefs = aperm(fsdobj$coefs, perm)
  fsdobj$fdnames = fsdobj$fdnames[perm]
  fsdobj
}
|
effd007c0f58a1033ed492057247cbc917d290e4
|
154b48b9f4063797871af647f1d14b5cff7c6e81
|
/R/bacisCheckDIC.R
|
d1c6e583987bef4763bd1abba1278b0f4ff23257
|
[] |
no_license
|
cran/bacistool
|
0bacc13f50d02b98cfa7e2b08521c343d08b393e
|
a7a336e0ee17189c6962392491b861017d7cf890
|
refs/heads/master
| 2021-06-05T23:02:34.730292
| 2020-06-05T21:00:05
| 2020-06-05T21:00:05
| 145,900,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
bacisCheckDIC.R
|
# Compute the total DIC (deviance information criterion) of the clustering
# model fitted by ModelOne() for the given trial data, for model comparison.
#
# numGroup   number of subgroups
# tau1, tau2 precision hyper-parameters; if tau1 is NA it is derived from
#            phi1/phi2 so that [logit(phi1), logit(phi2)] spans six prior
#            standard deviations
# phi1, phi2 null and target response rates
# MCNum      number of MCMC iterations passed to ModelOne
# nDat, xDat per-group sample sizes and response counts
# seed       RNG seed; NA draws one from the wall clock and process id
#
# Returns the sum over both DIC components reported by ModelOne.
bacisCheckDIC <- function(numGroup = 5,
                          tau1 = NA,
                          tau2 = .001,
                          phi1 = 0.1,
                          phi2 = 0.3,
                          MCNum = 50000,
                          nDat = c(25, 25, 25, 25, 25),
                          xDat = c(2, 3, 7, 6, 10),
                          seed = NA
                          )
{
  if (is.na(seed)) {
    # Quasi-unique seed from the wall clock and the process id
    set.seed(as.integer((as.double(Sys.time()) * 1000 + Sys.getpid()) %% 2^31))
  } else {
    set.seed(seed)
  }
  if (is.na(tau1)) {
    # Prior sd chosen so the logit-scale interval phi1..phi2 covers 6 sd
    sd <- (logit(phi2) - logit(phi1)) / 6
    tau1 <- 1 / sd / sd
    cat("The value of tau1 is set at:", tau1, "\n")
  }
  # (Removed: unused local xLim and leftover debug prints.)
  t <- ModelOne(
    xDat = xDat,
    nDat = nDat,
    numGroup = numGroup,
    pp1 = phi1,
    pp2 = phi2,
    tau1 = tau1,
    tau2 = tau2,
    clusterCutoff = 0.5,
    MCNum = MCNum
  )
  dic <- t$DIC
  # Total DIC: sum of both component vectors
  value <- sum(dic[[1]]) + sum(dic[[2]])
  return(value)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.