blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
b681bac38aae37d3fc344ed360267ad426fb98b6
2b2ccfa703130414be1e04797c0e9c4adfed2cd3
/mtic.R
82722ef6548d69fa971e00beccae3f439631a79e
[]
no_license
slfan2013/MetNorm
957fd7f48b33e6679cb74a7198a2b270be66424f
9822a8fa338094b3225799be31536eb67f1e2f34
refs/heads/master
2023-05-03T18:12:58.711736
2021-05-24T05:27:19
2021-05-24T05:27:19
273,600,656
0
0
null
null
null
null
UTF-8
R
false
false
708
r
mtic.R
# mTIC normalization: scale each sample (column) by the summed intensity of
# its 'known' compounds, relative to the mean of those sums across samples.
#
# Reads globals defined earlier in the pipeline:
#   f     - feature metadata data frame; may carry a 'compoundType' column
#   e_raw - numeric intensity matrix (features x samples)
# Writes:
#   e_norm - normalized intensity matrix (only when normalization runs)
cat("<!--------- mTIC --------->\n")

norm_skip <- FALSE
if (!"compoundType" %in% colnames(f)) {
  # Message fixed: previously said 'compountType' although the code checks
  # for a column named 'compoundType'.
  cat("warning: 'compoundType' is not in the dataset. mTIC skipped.\n")
  norm_skip <- TRUE
} else {
  mTIC_column <- f[["compoundType"]]
  if (!"known" %in% unique(mTIC_column)) {
    cat("'known' (case-sensitive) is not found in the 'compoundType'. mTIC skipped.\n")
    norm_skip <- TRUE
  }
}

if (!norm_skip) {
  start <- Sys.time()  # kept: downstream pipeline steps may time this stage
  index <- mTIC_column %in% "known"
  # Per-sample total over 'known' features. drop = FALSE keeps the subset a
  # matrix even when exactly one feature is 'known' (the original
  # apply(e_raw[index, ], 2, sum) errored on that edge case); colSums is the
  # vectorized equivalent of apply(x, 2, sum, na.rm = TRUE).
  sums <- colSums(e_raw[index, , drop = FALSE], na.rm = TRUE)
  mean_sums <- mean(sums, na.rm = TRUE)
  # Divide each column by its mTIC factor, rescaled so the average factor
  # is 1 (keeps overall intensity scale comparable to the raw data).
  e_norm <- t(t(e_raw) / (sums / mean_sums))
  cat("<!--------- mTIC done.--------->\n")
} else {
  cat("<!--------- mTIC skipped.--------->\n")
}
49d8c32a3872d115ee849fb12e368d0b758cb8b8
5e6caa777731aca4d6bbc88fa92348401e33b0a6
/man/data_justifications.Rd
24090e8b00d307005c423bb035803ae7950334c7
[ "MIT" ]
permissive
metamelb-repliCATS/aggreCAT
e5c57d3645cb15d2bd6d2995992ad62a8878f7fb
773617e4543d7287b0fca4a507ba4c94ee8f5e60
refs/heads/master
2023-05-22T19:51:20.949630
2023-03-31T05:21:39
2023-03-31T05:21:39
531,484,296
6
1
NOASSERTION
2023-03-31T05:03:05
2022-09-01T11:15:36
R
UTF-8
R
false
true
1,205
rd
data_justifications.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{data_justifications} \alias{data_justifications} \title{Free-text justifications for expert judgements} \format{ A table with 5630 rows and 9 columns: \describe{ \item{round}{character string identifying whether the round was 1 (pre-discussion) or 2 (post-discussion)} \item{paper_id}{character string of the paper ids (25 papers total)} \item{user_name}{character string of anonymized IDs for each participant (25 participants included in this dataset)} \item{question}{character string for the question type, with five options: flushing_freetext, involved_binary, belief_binary, direct_replication, and comprehension} \item{justification}{character string with participant's free-text rationale for their responses} \item{justification_id}{character string with a unique ID for each row} \item{vote_count}{numeric of recorded votes (all 0 or 1)} \item{vote_sum}{numeric of summed vote counts (all 0 or 1)} \item{group}{character string of group IDs that contained the participants} } } \usage{ data_justifications } \description{ Free-text justifications for expert judgements } \keyword{datasets}
24863be783aadc06f8d194d32b6682ed58c89f36
caca7203b28507ec914b0be0042f96eb66db71ab
/code/model2_interaction.R
61d2a3166cfb1bbe750c0a7094f550b4fe606d6b
[]
no_license
boulbi777/returns-to-schooling-on-earning
e74e86ef94e8e7439a329f5239648746c8b67c6e
b9a64399e0cdf6ccc743276e30c99a59d81d58d0
refs/heads/master
2022-12-19T21:53:07.746490
2020-09-04T14:55:51
2020-09-04T14:55:51
293,286,960
0
0
null
null
null
null
UTF-8
R
false
false
266
r
model2_interaction.R
# Model 2: wage regression with a gender x foreign-status interaction.
# Regresses log salary on age (linear and quadratic), a female dummy, a
# foreign-status dummy ('for.'), four region dummies, and the
# female * foreign interaction.
#
# Relies on objects defined elsewhere in the project:
#   data2       - the analysis data set
#   lm_analysis - project helper; presumably produces diagnostics for the
#                 fitted model with p = 9 regressors -- TODO confirm against
#                 its definition
#   commarobust - helper (estimatr-style) that re-summarizes a model with
#                 robust standard errors
mod2 = lm(logsal~age + age2 + female + for. + reg1 + reg2 + reg3 + reg4 +female*for., data=data2)
mod2_results = lm_analysis(data2,mod2,p=9)
print("Model with White method robust covariance :")
# HC0: the classic White heteroskedasticity-consistent covariance estimator.
mod2_rob = commarobust(mod2, se_type="HC0")
print(summary(mod2_rob))
1543ab0551215ac6417f41d467f70b93aeb3fc23
8cb5580776ef5384e86e2a6f41d1a9e5cb52dc47
/script/script_raw/Caret/BIC/9_SM_Full_BIC.R
0e402d6f3c7e3aae653a86a4397c7b2bdb76411a
[]
no_license
MikyPiky/Project2
00836c6a00b2a2c57fb030ffe5111352b5dbe5ff
1e4b14dbcff75af095acdd2afd4126f3410d1fb7
refs/heads/master
2021-01-23T12:38:37.942441
2017-06-16T17:11:14
2017-06-16T17:11:14
93,184,104
1
0
null
null
null
null
UTF-8
R
false
false
36,292
r
9_SM_Full_BIC.R
################################# #### SiloMaize in September #### ################################# ' ###################### ## File Discription ## The purpose of this script is to estimate the impact of weather fluctuations in the month mentionend above on yearly crop yield. This is done by the following the steps: - Create data frame with siloMaize as dependent and variables of the month above as independent variables - Create stepwise function which is based on drought categories of german drought monitor - Remove comIds with less than 7 observations to avoid leveage issues - Remove log trend of indepedent variable - Delete outliers which appear to be measurement error - Use BIC to choose the degrees of the polynomial and to compare various model configurations - Loop through polynomials configuration of each model; highest possible polynomial is of degree 3 - Compare models graphically - Explore Models - Model with lowest BIC in general: Tavg, SMI - Model with lowest BIC of standard configuration: Tavg, Prec, SMI - Model with lowest BIC with SMI: Tavg, SMI - Correct Standard Errors with either Driscoll Kray or Cameron et al /Thompson estimator The --vcovHC– function estimates three heteroskedasticity-consistent covariance estimators: • "white1" - for general heteroskedasticity but no serial correlation. Recommended for random effects. • "white2" - is "white1" restricted to a common variance within groups. Recommended for random effects. • "arellano" - both heteroskedasticity and serial correlation. Recommended for fixed effects. The following options apply*: • HC0 - heteroskedasticity consistent. The default. • HC1,HC2, HC3 – Recommended for small samples. HC3 gives less weight to influential observations. • HC4 - small samples with influential observations • HAC - heteroskedasticity and autocorrelation consistent (type ?vcovHAC for more details) Solution for serial correlation: Cluster by groups. 
Solution for cross sectional correlation: Cluster by time Ich arbeitet vorerst mir Driscoll Kraay und weighting von 1 (maxlag=0). Die Ergebnisse sollten solide sein, da Cameron/Thompson ähnlich ist ## Input ## - aus 4km_tmax: Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_sm.csv (komplete data.frame) ## Output ## - Yield_Covariates_SM_Sep.csv (auf September reduzierter Data.Frame) - Export Data frame for use in BIC_Graphic: file="./data/data_raw/BIC/BIC_SM_Sep.csv") - Export Data Frame of Fixed Effects to be used in Script FixedEffects_Graphic: "./figures/figures_exploratory/FixedEffects/Silomaize/..." ' ################### ## Load Packages ## library(plm) library(boot) library(gtools) library(lme4) library(lmtest) library(car) library(sp) library(rgdal) library(raster) library(rasterVis) library(maptools) library(reshape) library(stringr) library(classInt) library(RColorBrewer) library(stargazer) library(ggplot2) #################################################################################################################################################################### ################################################################################################################# #### Create data frame with siloMaize as dependent and variables of the month above as independent variables #### ################################################################################################################# ## Read in large Dataframe for Maize ## Yield_Covariates <- read.csv("~/Documents/projects/correlation/data/data_processed/Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_sm.csv") Yield_Covariates$X <- NULL ## For publication worth regression output need to change data names ## 'Get rid of variables which are not necessary: other months and other not needed variables' names(Yield_Covariates) names <- names(Yield_Covariates) names_Sep <- grep(c("*_Sep"), names) names_Sep Yield_Covariates_Sep <- Yield_Covariates[,names_Sep] 
names(Yield_Covariates_Sep) dim(Yield_Covariates_Sep) ## Delete all but SMI, Prec, Tavg and Pet names(Yield_Covariates_Sep) Yield_Covariates_Sep <- Yield_Covariates_Sep[,c(1:4)] ## Establish first part of data frame_ time and spatial reference plus Silomaize ## names(Yield_Covariates[,c(2,1,3:5,7)]) Yield_Covariates_SM <- Yield_Covariates[,c(2,1,3:5,7)] # Achtung, darauf achten, dass comId und year in der richtigen Reihenfolge sind. names(Yield_Covariates_SM) head(Yield_Covariates_SM) Yield_Covariates_SM_Sep <- cbind(Yield_Covariates_SM, Yield_Covariates_Sep) names(Yield_Covariates_SM_Sep) names(Yield_Covariates_SM_Sep) <- c( "comId" , "year","com","stateId","state","siloMaize","SMI", "Prec","Tavg", "Pet") names(Yield_Covariates_SM_Sep) ######################################### #### Create stepwise function of SMI #### ######################################### ' Drought Monitor Spezification ' Yield_Covariates_SM_Sep$SMI_GDM <- cut(Yield_Covariates_SM_Sep$SMI, breaks = c(0, 0.1, 0.2, 0.3, 0.7, 0.8, 0.9, 1), , labels = c("severe drought","moderate drought","abnormal dry", "normal","abnormal wet" ,"abundant wet", "severe wet")) ############# ## Na-omit ## sum(is.na(Yield_Covariates_SM_Sep) ) dim(Yield_Covariates_SM_Sep) Yield_Covariates_SM_Sep_nna <- na.omit(Yield_Covariates_SM_Sep) dim(Yield_Covariates_SM_Sep_nna) ## Check for NAs any(is.na(Yield_Covariates_SM_Sep_nna)) ## Reset Rownames rownames(Yield_Covariates_SM_Sep_nna) <- NULL ## Further work with DataFrame without Yield_Covariates_SM_Sep index ## Yield_Covariates_SM_Sep <- Yield_Covariates_SM_Sep_nna ######################################################################### ## Remove comIds with less than 7 observations to avoid leveage issues ## ######################################################################### ##################################################### ## Delete all comIds with less than 7 observations ## sum(table(Yield_Covariates_SM_Sep$comId) < 7 ) table(Yield_Covariates_SM_Sep$comId) < 
7 ## comIds mit weniger als 7 Beoachtungen: ## list <- c(3101, 3102, 3158, 3402, 5114, 5117, 5314,5315, 5334,5378, 5512, 5911, 5916, 6413, 7131, 7135, 7233, 7331, 7332, 7337, 7338, 7339, 8111,12052, 14612, 15001, 15082, 15083, 15084, 15085, 15086, 15087, 15088, 15089, 15091, 16051, 16052 ) length(list) list[[1]] temp <- Yield_Covariates_SM_Sep for (i in 1:34) { print(Yield_Covariates_SM_Sep[Yield_Covariates_SM_Sep$comId==list[i],]) temp <- (temp[!temp$comId==list[i],]) } ## Number of deleted rows dim(temp)-dim(Yield_Covariates_SM_Sep) ## Further use old name for data.frame Yield_Covariates_SM_Sep <- temp ################################ ## Befehle nach jedem löschen ## Yield_Covariates_SM_Sep <- na.omit(Yield_Covariates_SM_Sep) rownames(Yield_Covariates_SM_Sep) <- NULL Yield_Covariates_SM_Sep <- plm.data(Yield_Covariates_SM_Sep, index=c("comId", "year")) Yield_Covariates_SM_Sep[,c("comId","stateId")] <- lapply(Yield_Covariates_SM_Sep[,c("comId","stateId")], factor ) ################################################# #### Remove log trend of indepedent variable #### ################################################# 'Fit log of yield on log of time and use the residuals of that for yields' logtrend <- lm(log(siloMaize) ~ log(as.integer(year)), data= Yield_Covariates_SM_Sep) ########################## ## Issue with Outliers ### ########################## par(mfrow = c(2,2)) plot(logtrend) ## Look Outliers Values ## Yield_Covariates_SM_Sep[c(1276, 3262, 3283, 3171,3255),] ## Look at other values of outliers com # Yield_Covariates_SM_Sep[Yield_Covariates_SM_Sep$comId == "6532",] # 2008 hier scheint nur ein Jahr ein Messfehler zu sein: diesen Lösche ich Yield_Covariates_SM_Sep[Yield_Covariates_SM_Sep$comId == "12067",] # 2006 verändere ich nicht Yield_Covariates_SM_Sep[Yield_Covariates_SM_Sep$comId == "12069",] # 2003 verändere ich nicht Yield_Covariates_SM_Sep[Yield_Covariates_SM_Sep$comId == "12060",] # 1999 verändere ich nicht 
Yield_Covariates_SM_Sep[Yield_Covariates_SM_Sep$comId == "12067",] # 2006 verändere ich nicht ## Delete outliers ## ' Da sich die Patterns bei den BIC Vergleichen nicht ändert, kümmere ich micht nicht weiter um die Outlier. Ich nehme nur sehr offensichtliche Messfehler raus.' Yield_Covariates_SM_Sep <- Yield_Covariates_SM_Sep[!(Yield_Covariates_SM_Sep$comId == "6532" & Yield_Covariates_SM_Sep$year == "2008"),] Yield_Covariates_SM_Sep <- na.omit(Yield_Covariates_SM_Sep) rownames(Yield_Covariates_SM_Sep) <- NULL ################################################# #### Remove log trend of indepedent variable #### logtrend <- lm(log(siloMaize) ~ log(as.integer(year)), data= Yield_Covariates_SM_Sep) summary(logtrend) Yield_Covariates_SM_Sep$siloMaize_logtrend <- resid(logtrend) ####################################### ## Prepare dataframe for plm package ## 'Change Indexing so that it can be used in plm package' Yield_Covariates_SM_Sep <- plm.data(Yield_Covariates_SM_Sep, index=c("comId", "year")) str(Yield_Covariates_SM_Sep) ## Transform comId and stateId to factor ## Yield_Covariates_SM_Sep[,c("comId","stateId")] <- lapply(Yield_Covariates_SM_Sep[,c("comId","stateId")], factor ) lapply(Yield_Covariates_SM_Sep, class) ############################################### ##### Save Yield_Covariates_SM_September extern #### write.csv(Yield_Covariates_SM_Sep, file="./data/data_raw/Yield_Covariates_SM_Sep.csv") ####################################################### #### BIC to choose the degrees of the polynomials #### ####################################################### ## create a matrix which contains all possible degree combinations, here for three variables ## degree <- permutations(n=3,r=2,v=c(1:3),repeats.allowed=T) degree ################################################ ## Formulas for Model Variations to be tested ## ## with SMI formula_Sep_sm_detrendlog_SMIPrecTavg <- siloMaize_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) + 
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId) formula_Sep_sm_detrendlog_SMIPrecPet <- siloMaize_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) + dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId) formula_Sep_sm_detrendlog_SMIPrec <- siloMaize_logtrend ~ poly(Prec, degree[r, 1], raw = T) + dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId) formula_Sep_sm_detrendlog_SMIPet <- siloMaize_logtrend ~ poly(Pet, degree[r, 2], raw = T) + dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId) formula_Sep_sm_detrendlog_SMITavg <- siloMaize_logtrend ~ poly(Tavg, degree[r, 2], raw = T) + dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId) formula_Sep_sm_detrendlog_SMI <- siloMaize_logtrend ~ dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId) ## no SMI formula_Sep_sm_detrendlog_PrecTavg <- siloMaize_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) + dummy(comId) formula_Sep_sm_detrendlog_PrecPet <- siloMaize_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) + dummy(comId) formula_Sep_sm_detrendlog_Prec <- siloMaize_logtrend ~ poly(Prec, degree[r, 1], raw = T) + dummy(comId) formula_Sep_sm_detrendlog_Pet <- siloMaize_logtrend ~ poly(Pet, degree[r, 2], raw = T) + dummy(comId) formula_Sep_sm_detrendlog_Tavg <- siloMaize_logtrend ~ poly(Tavg, degree[r, 2], raw = T) + dummy(comId) ## Print formula # formula_Sep_sm_detrendlog_SMIPrecTavg # formula_Sep_sm_detrendlog_SMIPrecPet # formula_Sep_sm_detrendlog_SMIPrec # 
formula_Sep_sm_detrendlog_SMIPet # formula_Sep_sm_detrendlog_SMITavg # formula_Sep_sm_detrendlog_SMI # formula_Sep_sm_detrendlog_PrecTavg # formula_Sep_sm_detrendlog_PrecPet # formula_Sep_sm_detrendlog_Prec # formula_Sep_sm_detrendlog_Pet # formula_Sep_sm_detrendlog_Tavg ################################################################################################# # Loop through the container list to cover all permutations of posssible degree of freedoms of ## # of the polynomials of the variables ## ################################################################################################# ################################################## ## Loop through various variable configurations ## BIC_SMIPrecTavg <- rep(0,9) for(r in 1:9){ glm.fit_SMIPrecTavg <- glm(formula = formula_Sep_sm_detrendlog_SMIPrecTavg, data = Yield_Covariates_SM_Sep) BIC_SMIPrecTavg[r] <- BIC(glm.fit_SMIPrecTavg) } BIC_SMIPrecPet <- rep(0,9) for(r in 1:9){ glm.fit_SMIPrecPet <- glm(formula = formula_Sep_sm_detrendlog_SMIPrecPet, data = Yield_Covariates_SM_Sep) BIC_SMIPrecPet[r] <- BIC(glm.fit_SMIPrecPet) } BIC_SMIPrec <- rep(0,9) for(r in 1:9){ glm.fit_SMIPrec <- glm(formula = formula_Sep_sm_detrendlog_SMIPrec, data = Yield_Covariates_SM_Sep) BIC_SMIPrec[r] <- BIC(glm.fit_SMIPrec) } BIC_SMIPet <- rep(0,9) for(r in 1:9){ glm.fit_SMIPet <- glm(formula = formula_Sep_sm_detrendlog_SMIPet, data = Yield_Covariates_SM_Sep) BIC_SMIPet[r] <- BIC(glm.fit_SMIPet) } BIC_SMITavg <- rep(0,9) for(r in 1:9){ glm.fit_SMITavg <- glm(formula = formula_Sep_sm_detrendlog_SMITavg, data = Yield_Covariates_SM_Sep) BIC_SMITavg[r] <- BIC(glm.fit_SMITavg) } BIC_SMI <- rep(0,9) for(r in 1:9){ glm.fit_SMI <- glm(formula = formula_Sep_sm_detrendlog_SMI, data = Yield_Covariates_SM_Sep) BIC_SMI[r] <- BIC(glm.fit_SMI) } BIC_PrecTavg <- rep(0,9) for(r in 1:9){ glm.fit_PrecTavg <- glm(formula = formula_Sep_sm_detrendlog_PrecTavg, data = Yield_Covariates_SM_Sep) BIC_PrecTavg[r] <- BIC(glm.fit_PrecTavg) } BIC_PrecPet <- 
rep(0,9) for(r in 1:9){ glm.fit_PrecPet <- glm(formula = formula_Sep_sm_detrendlog_PrecPet, data = Yield_Covariates_SM_Sep) BIC_PrecPet[r] <- BIC(glm.fit_PrecPet) } BIC_Prec <- rep(0,9) for(r in 1:9){ glm.fit_Prec <- glm(formula = formula_Sep_sm_detrendlog_Prec, data = Yield_Covariates_SM_Sep) BIC_Prec[r] <- BIC(glm.fit_Prec) } BIC_Pet <- rep(0,9) for(r in 1:9){ glm.fit_Pet <- glm(formula = formula_Sep_sm_detrendlog_Pet , data = Yield_Covariates_SM_Sep) BIC_Pet [r] <- BIC(glm.fit_Pet ) } BIC_Tavg <- rep(0,9) for(r in 1:9){ glm.fit_Tavg <- glm(formula = formula_Sep_sm_detrendlog_Tavg , data = Yield_Covariates_SM_Sep) BIC_Tavg [r] <- BIC(glm.fit_Tavg ) } ## Compare BIC values ## BIC <- c(BIC_SMIPrecTavg, BIC_SMIPrecPet, BIC_SMIPrec, BIC_SMIPet, BIC_SMITavg, BIC_SMI, BIC_Prec, BIC_Tavg, BIC_Pet, BIC_PrecTavg, BIC_PrecPet) BIC par(mfrow=c(1,1)) plot(BIC) ########################### ## Plot BIC with ggplot2 ## ########################### ############################################## ## Create Dataframe for plotting in ggplot2 ## ## repeat name of modelconfiguration ## list <-c("01_SMIPrecTavg", "02_SMIPrecPet", "03_SMIPrec", "04_SMIPet", "05_SMITavg", "06_SMI", "07_Prec", "08_Tavg", "09_Pet", "10_PrecTavg", "11_PrecPet") list2 <- 1:11 model <- NULL model_index <- NULL for (i in 1:11) { x <- rep(list[i],9) y <- rep(list2[i],9) model <- append(model, x) model_index <- as.numeric(append(model_index, y)) } ################################### ## Combine data in on data.frame ## BIC <- as.data.frame(BIC) model <- as.data.frame(model) model_index <- as.data.frame(model_index) index <- 1:99 month <-rep("September",99) BIC_Sep <- cbind(BIC, model ,model_index, index, month) ####################### ## Delete Duplicates ## which(duplicated(BIC_Sep$BIC)) list3 <- c(20,21,23,24,26,27,31,32,33,34,35,36,40,41,42,43,44,45,47,48,49,50,51,52,53,54,56,57,59,60,62,63,67,68,69,70,71,72,76,77,78,79,80,81) length(list3) temp <- BIC_Sep for (i in 1:44) { print(BIC_Sep[BIC_Sep$index 
==list3[i],]) temp <- (temp[!temp$index==list3[i],]) } dim(BIC_Sep) dim(temp) ################################ ## Correct created data.frame ## rownames(temp) <- NULL BIC_Sep <- temp lapply(BIC_Sep, class) ############################ ## Plot data with ggplot2 ## g <- ggplot(BIC_Sep,aes(y=BIC, x=index)) g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark() g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark() + facet_wrap( ~ month) BIC_Sep ## Export Data frame for use in BIC_Grafic BIC_SM_Sep <- BIC_Sep class(BIC_SM_Sep) write.csv(BIC_SM_Sep, file="./data/data_raw/BIC/BIC_SM_Sep.csv") ################################################################ ################################### Explore Models ############# ################################################################ ################### ## Load Data Set ## # Yield_Covariates_SM_Sep <- read.csv( file="./data/data_raw/Yield_Covariates_SM_Sep.csv") # names(Yield_Covariates_SM_Sep) # Yield_Covariates_SM_Sep$X <- NULL ####################################### ## Prepare dataframe for plm package ## 'Change Indexing so that it can be used in plm package' Yield_Covariates_SM_Sep <- plm.data(Yield_Covariates_SM_Sep, index=c("comId", "year")) ## Transform comId and stateId to factor ## Yield_Covariates_SM_Sep[,c("comId","stateId")] <- lapply(Yield_Covariates_SM_Sep[,c("comId","stateId")], factor ) str(Yield_Covariates_SM_Sep) ################################# ############################### ## Results with smallest BIC ## ############################### plot(BIC_SMITavg) which.min(BIC_SMITavg) r = 3 best_formula <- formula_Sep_sm_detrendlog_SMITavg degree ################### ## GLM Ergebniss ## glm.fit_SM_BEST_Sep <- glm(formula = best_formula, data = Yield_Covariates_SM_Sep) summary(glm.fit_SM_BEST_Sep) 
'AIC: -6600.2' #################### ## PLM Ergebnisse ## plm.fit_SM_BEST_Sep <- plm(formula = update(best_formula, .~. - dummy(comId)), data = Yield_Covariates_SM_Sep, effect="individual", model=("within"), index = c("comId","year")) summary(plm.fit_SM_BEST_Sep) 'Adj. R-Squared: 0.071596' fixef <- fixef(plm.fit_SM_BEST_Sep) fixef <- as.data.frame(as.matrix(fixef)) head(fixef) fixef <- cbind(rownames(fixef), fixef) rownames(fixef) <- NULL names(fixef) <- c("comId", "FE") fixef write.csv(fixef, "./figures/figures_exploratory/FixedEffects/Silomaize/plm.fit_SM_BEST_Sep_FE.csv") ################## ## LM Ergebniss ## lm.fit_SM_BEST_Sep <-lm(formula = best_formula, data = Yield_Covariates_SM_Sep) summary(lm.fit_SM_BEST_Sep) 'Adjusted R-squared: 0.6449' ################################################ ## Assessing Influence (Leverage*discrepancy) ## cutoff_SM_Sep <- 4/((nrow(Yield_Covariates_SM_Sep)-length(lm.fit_SM_BEST_Sep$coefficients)-1)) cutoff_SM_Sep plot(lm.fit_SM_BEST_Sep, which=4) cook_Sep <- cooks.distance(lm.fit_SM_BEST_Sep) nrow(Yield_Covariates_SM_Sep[cook_Sep > cutoff_SM_Sep,]) # 189 year_cooks_SM_Sep <- table(Yield_Covariates_SM_Sep$year[cook_Sep > cutoff_SM_Sep ]) year_cooks_SM_Sep '1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 7 4 8 7 52 4 6 25 24 8 17 23 ' com_cooks_SM_Sep <- sort(table(Yield_Covariates_SM_Sep$com[cook_Sep > cutoff_SM_Sep ] ) ) tail(com_cooks_SM_Sep,20) ' Schmalkalden-Meiningen, Kreis S\xf6mmerda, Kreis Stendal, Landkreis Hagen, Kreisfreie Stadt 2 2 2 3 Leverkusen, Kreisfreie Stadt M\xe4rkischer Kreis Neum\xfcnster, Kreisfreie Stadt Oberbergischer Kreis 3 3 3 3 Oder-Spree, Landkreis Teltow-Fl\xe4ming, Landkreis Uckermark, Landkreis Wittenberg, Landkreis 3 3 3 3 Barnim, Landkreis Dahme-Spreewald, Landkreis Elbe-Elster, Landkreis Spree-Nei\xdfe, Landkreis 4 4 4 4 Frankfurt (Oder), Kreisfreie Stadt Oberspreewald-Lausitz, Landkreis Olpe, Kreis Siegen-Wittgenstein, Kreis 5 5 6 7 ' ######################## ## Heteroskedasdicity ## 
bptest(glm.fit_SM_BEST_Sep) # Breusch Pagan Test of Heteroskedastie in den Störgrößen: Null: Homoskedasdicity. bptest(plm.fit_SM_BEST_Sep) ' In beiden Fällen kann die Null widerlegt werden. Es gibt also heteroskedasdicity ' ## Koenkers Version on BP Test: robuste Modification wenn die Störgrößen nicht normalverteilt sind. bptest(plm.fit_SM_BEST_Sep, studentize = TRUE) 'Auch hier kann die Null widerlegt werden. Need to use robust covariance variance matrix to correct standard errors' ###################################### ## Tests for serial autocorrelation ## pwartest(plm.fit_SM_BEST_Sep) pbgtest(plm.fit_SM_BEST_Sep) ' both, H_1 of serial autocorrelation cannot be rejected ' ################################# ## Correct the Standard Errors ## ################################# ## Correct Standard Errors used in table ## coeftest(plm.fit_SM_BEST_Sep) ## Robust covariance matrix estimators a la White ## # coeftest(plm.fit_SM_BEST_Sep,vcov=vcovHC(plm.fit_SM_BEST_Sep,method = "arellano", type = "HC0")) cov0_SM_BEST_Sep <- vcovHC(plm.fit_SM_BEST_Sep,method = "arellano", type = "HC0", cluster="group") Wh.se_serial_SM_BEST_Sep <- sqrt(diag(cov0_SM_BEST_Sep)) cov0.1_SM_BEST_Sep <- vcovHC(plm.fit_SM_BEST_Sep,method = "arellano", type = "HC0", cluster="time") Wh.se_cross_SM_BEST_Sep <- sqrt(diag(cov0.1_SM_BEST_Sep)) # # ## Beck Katz: # # coeftest(plm.fit_SM_BEST_Sep, vcov = function(x) vcovBK(plm.fit_SM_BEST_Sep,method = "arellano", type = "HC0")) # cov1 <- vcovBK(plm.fit_SM_BEST_Sep,method = "arellano", type = "HC0", cluster="time") # BK.se <- sqrt(diag(cov1)) # ## Driscoll Kraay ## # summary(plm.fit_SM_BEST_Sep) coeftest(plm.fit_SM_BEST_Sep, vcov=function(x) vcovSCC(plm.fit_SM_BEST_Sep,method = "arellano",type = "HC0")) cov2_SM_BEST_Sep <- vcovSCC(plm.fit_SM_BEST_Sep,method = "arellano",type = "HC0") DK.se_SM_BEST_Sep <- sqrt(diag(cov2_SM_BEST_Sep)) # # cov2.1_SM_BEST_Sep <- vcovSCC(plm.fit_SM_BEST_Sep,method = "arellano",type = "HC0", maxlag=1) # DK2.1.se_SM_BEST_Sep <- 
sqrt(diag(cov2.1_SM_BEST_Sep)) # cov2.2_SM_BEST_Sep <- vcovSCC(plm.fit_SM_BEST_Sep,method = "arellano",type = "HC0", maxlag=2) # DK2.2.se_SM_BEST_Sep <- sqrt(diag(cov2.2_SM_BEST_Sep)) # # cov2.3_SM_BEST_Sep <- vcovSCC(plm.fit_SM_BEST_Sep,method = "arellano",type = "HC0", maxlag=3) # DK2.3.se_SM_BEST_Sep <- sqrt(diag(cov2.3_SM_BEST_Sep)) # # cov2.4_SM_BEST_Sep <- vcovSCC(plm.fit_SM_BEST_Sep,method = "arellano",type = "HC0", maxlag=4) # DK2.4.se_SM_BEST_Sep <- sqrt(diag(cov2.4_SM_BEST_Sep)) # cov2.5_SM_BEST_Sep <- vcovSCC(plm.fit_SM_BEST_Sep,method = "arellano",type = "HC0", maxlag=5) DK2.5.se_SM_BEST_Sep <- sqrt(diag(cov2.5_SM_BEST_Sep)) ## Cameron et al /Thompson : doouble-clustering estimator ## # coeftest(plm.fit_SM_BEST_Sep, vcovDC(plm.fit_SM_BEST_Sep, method = "arellano", type = "HC0")) cov3_SM_BEST_Sep <- vcovDC(plm.fit_SM_BEST_Sep, method = "arellano", type = "HC0") CT.se_SM_BEST_Sep <- sqrt(diag(cov3_SM_BEST_Sep)) 'Our estimator is qualitatively similar to the ones presented in White and Domowitz (1984), for time series data, and Conley (1999), for spatial data. 
' ## Generate Table with Output ## se <- list(NULL, Wh.se_cross_SM_BEST_Sep, Wh.se_serial_SM_BEST_Sep, DK.se_SM_BEST_Sep, DK2.5.se_SM_BEST_Sep, CT.se_SM_BEST_Sep) labels1 <-c("NULL","WhiteCross","WhiteSerial", "DriscollKraay", "DriscollKray2.5","CameronThompson") stargazer(plm.fit_SM_BEST_Sep, plm.fit_SM_BEST_Sep, plm.fit_SM_BEST_Sep, plm.fit_SM_BEST_Sep, plm.fit_SM_BEST_Sep,plm.fit_SM_BEST_Sep, se = se, dep.var.caption = "Model with smallest BIC - September", dep.var.labels = "log(Silomaize)", style="default", model.numbers = FALSE, column.labels = labels1, type="text", out="./figures/figures_exploratory/BIC/Silomaize/SM_Sep_best.txt" ) ######################################################### ## Results with smallest BIC of Standard Configuration ## ######################################################### plot(BIC_SMIPrecTavg) which.min(BIC_SMIPrecTavg) r = 6 bestStandard_formula <- formula_Sep_sm_detrendlog_SMIPrecTavg 'Hier ist zwar 9 am besten, aufgrund von Singularität nehme ich aber 6. Der Abstand zwischen 9 und 6 ist auch kleiner als sechs Einheiten, daher ist dies nach Wikipedia auch berechtigt.' ################### ## GLM Ergebniss ## glm.fit_SM_bestStandard_Sep <- glm(formula = bestStandard_formula, data = Yield_Covariates_SM_Sep) summary(glm.fit_SM_bestStandard_Sep) 'AIC: -6609.1' #################### ## PLM Ergebnisse ## plm.fit_SM_bestStandard_Sep <- plm(formula = update(bestStandard_formula, .~. - dummy(comId)), data = Yield_Covariates_SM_Sep, effect="individual", model=("within"), index = c("comId","year")) summary(plm.fit_SM_bestStandard_Sep) 'Adj. 
R-Squared: 0.082556' ## Generate Fixed Effects data.frame and export it ## fixef <- fixef(plm.fit_SM_bestStandard_Sep) fixef <- as.data.frame(as.matrix(fixef)) head(fixef) fixef <- cbind(rownames(fixef), fixef) rownames(fixef) <- NULL names(fixef) <- c("comId", "FE") write.csv(fixef, "./figures/figures_exploratory/FixedEffects/Silomaize/plm.fit_SM_bestStandard_Sep_FE.csv") ################## ## LM Ergebniss ## lm.fit_SM_bestStandard_Sep <-lm(formula = bestStandard_formula, data = Yield_Covariates_SM_Sep) summary(lm.fit_SM_bestStandard_Sep) 'Adjusted R-squared: 0.6459 ' ######################## ## Heteroskedasdicity ## bptest(glm.fit_SM_bestStandard_Sep) # Breusch Pagan Test of Heteroskedastie in den Störgrößen: Null: Homoskedasdicity. bptest(plm.fit_SM_bestStandard_Sep) ' In beiden Fällen kann die Null widerlegt werden. Es gibt also heteroskedasdicity ' ## Koenkers Version on BP Test: robuste Modification wenn die Störgrößen nicht normalverteilt sind. bptest(plm.fit_SM_bestStandard_Sep, studentize = TRUE) 'Auch hier kann die Null widerlegt werden. Need to use robust covariance variance matrix to correct standard errors' ######################### #### Autocorrelation #### ###################################### ## Tests for serial autocorrelation ## pwartest(plm.fit_SM_bestStandard_Sep) ' Hier serielle Korrelation festzustellen' pbgtest(plm.fit_SM_bestStandard_Sep) 'Solution for serial correlation: Cluster by groups. 
Solution for cross sectional correlation: Cluster by time' ################################# ## Correct the Standard Errors ## ## Correct Standard Errors used in table ## coeftest(plm.fit_SM_bestStandard_Sep) ## Robust covariance matrix estimators a la White # coeftest(plm.fit_SM_bestStandard_Sep,vcov=vcovHC(plm.fit_SM_bestStandard_Sep,method = "arellano", type = "HC0")) cov0_SM_bestStandard_Sep <- vcovHC(plm.fit_SM_bestStandard_Sep,method = "arellano", type = "HC0", cluster="group") Wh.se_serial_SM_bestStandard_Sep <- sqrt(diag(cov0_SM_bestStandard_Sep)) cov0.1_SM_bestStandard_Sep <- vcovHC(plm.fit_SM_bestStandard_Sep,method = "arellano", type = "HC0", cluster="time") Wh.se_cross_SM_bestStandard_Sep <- sqrt(diag(cov0.1_SM_bestStandard_Sep)) # ## Beck Katz ## # # coeftest(plm.fit_SM_bestStandard_Sep, vcov = function(x) vcovBK(plm.fit_SM_bestStandard_Sep,method = "arellano", type = "HC0")) # cov1_SM_bestStandard_Sep <- vcovBK(plm.fit_SM_bestStandard_Sep,method = "arellano", type = "HC0", cluster="time") # BK.se_SM_bestStandard_Sep <- sqrt(diag(cov1_SM_bestStandard_Sep)) ## Driscoll Kraay: ## summary(plm.fit_SM_bestStandard_Sep) cov2_SM_bestStandard_Sep <- vcovSCC(plm.fit_SM_bestStandard_Sep,method = "arellano",type = "HC0") DK.se_SM_bestStandard_Sep <- sqrt(diag(cov2_SM_bestStandard_Sep)) # cov2.1_SM_bestStandard_Sep <- vcovSCC(plm.fit_SM_bestStandard_Sep,method = "arellano",type = "HC0", maxlag=1) # DK2.1.se_SM_bestStandard_Sep <- sqrt(diag(cov2.1_SM_bestStandard_Sep)) # cov2.2_SM_bestStandard_Sep <- vcovSCC(plm.fit_SM_bestStandard_Sep,method = "arellano",type = "HC0", maxlag=2) # DK2.2.se_SM_bestStandard_Sep <- sqrt(diag(cov2.2_SM_bestStandard_Sep)) # # cov2.3_SM_bestStandard_Sep <- vcovSCC(plm.fit_SM_bestStandard_Sep,method = "arellano",type = "HC0", maxlag=3) # DK2.3.se_SM_bestStandard_Sep <- sqrt(diag(cov2.3_SM_bestStandard_Sep)) # # cov2.4_SM_bestStandard_Sep <- vcovSCC(plm.fit_SM_bestStandard_Sep,method = "arellano",type = "HC0", maxlag=4) # 
DK2.4.se_SM_bestStandard_Sep <- sqrt(diag(cov2.4_SM_bestStandard_Sep)) # cov2.5_SM_bestStandard_Sep <- vcovSCC(plm.fit_SM_bestStandard_Sep,method = "arellano",type = "HC0", maxlag=5) DK2.5.se_SM_bestStandard_Sep <- sqrt(diag(cov2.5_SM_bestStandard_Sep)) ## Cameron et al /Thompson : doouble-clustering estimator # coeftest(plm.fit_SM_bestStandard_Sep, vcovDC(plm.fit_SM_bestStandard_Sep, method = "arellano", type = "HC0")) cov3_SM_bestStandard_Sep <- vcovDC(plm.fit_SM_bestStandard_Sep, method = "arellano", type = "HC0") CT.se_SM_bestStandard_Sep <- sqrt(diag(cov3_SM_bestStandard_Sep)) 'Our estimator is qualitatively similar to the ones presented in White and Domowitz (1984), for time series data, and Conley (1999), for spatial data. ' ## Generate Table with Output ## se <- list(NULL, Wh.se_cross_SM_bestStandard_Sep, Wh.se_serial_SM_bestStandard_Sep, DK.se_SM_bestStandard_Sep, DK2.5.se_SM_bestStandard_Sep, CT.se_SM_bestStandard_Sep) labels1 <-c("NULL","WhiteCross","WhiteSerial", "DriscollKraay", "DriscollKray2.5","CameronThompson") stargazer(plm.fit_SM_bestStandard_Sep, plm.fit_SM_bestStandard_Sep, plm.fit_SM_bestStandard_Sep, plm.fit_SM_bestStandard_Sep, plm.fit_SM_bestStandard_Sep,plm.fit_SM_bestStandard_Sep, se = se, dep.var.caption = "Model with smallest BIC of Standard Configuration - September", dep.var.labels = "log(Silomaize)", style="default", model.numbers = FALSE, column.labels = labels1, type="text", out="./figures/figures_exploratory/BIC/Silomaize/SM_Sep_bestStandard.txt" ) ######################################## ## Results with smallest BIC with SMI ## ######################################## plot(BIC_SMITavg) which.min(BIC_SMITavg) r = 3 bestSMI_formula <- formula_Sep_sm_detrendlog_SMITavg ################### ## GLM Ergebniss ## glm.fit_SM_bestSMI_Sep <- glm(formula = bestSMI_formula, data = Yield_Covariates_SM_Sep) summary(glm.fit_SM_bestSMI_Sep) 'AIC: -6600.2' #################### ## PLM Ergebnisse ## plm.fit_SM_bestSMI_Sep <- plm(formula = 
update(bestSMI_formula, .~. - dummy(comId)), data = Yield_Covariates_SM_Sep, effect="individual", model=("within"), index = c("comId","year")) summary(plm.fit_SM_bestSMI_Sep) 'Adj. R-Squared: 0.079843' fixef <- fixef(plm.fit_SM_bestSMI_Sep) fixef <- as.data.frame(as.matrix(fixef)) head(fixef) fixef <- cbind(rownames(fixef), fixef) rownames(fixef) <- NULL names(fixef) <- c("comId", "FE") write.csv(fixef, "./figures/figures_exploratory/FixedEffects/Silomaize/plm.fit_SM_bestSMI_Sep_FE.csv") ################## ## LM Ergebniss ## lm.fit_SM_bestSMI_Sep <-lm(formula = bestSMI_formula, data = Yield_Covariates_SM_Sep) summary(lm.fit_SM_bestSMI_Sep) 'Adjusted R-squared: 0.6449' ######################## ## Heteroskedasdicity ## bptest(glm.fit_SM_bestSMI_Sep) # Breusch Pagan Test of Heteroskedastie in den Störgrößen: Null: Homoskedasdicity. bptest(plm.fit_SM_bestSMI_Sep) ' In beiden Fällen kann die Null widerlegt werden. Es gibt also heteroskedasdicity ' ## Koenkers Version on BP Test: robuste Modification wenn die Störgrößen nicht normalverteilt sind. bptest(plm.fit_SM_bestSMI_Sep, studentize = TRUE) 'Auch hier kann die Null widerlegt werden. 
Need to use robust covariance variance matrix to correct standard errors' ######################### #### Autocorrelation #### ###################################### ## Tests for serial autocorrelation ## pwartest(plm.fit_SM_bestSMI_Sep) pbgtest(plm.fit_SM_bestSMI_Sep) 'Hier serielle Korrelation festzustellen' ########################################### ## Correct Standard Errors used in table ## coeftest(plm.fit_SM_bestSMI_Sep) ## Robust covariance matrix estimators a la White ## # coeftest(plm.fit_SM_bestSMI_Sep,vcov=vcovHC(plm.fit_SM_bestSMI_Sep,method = "arellano", type = "HC0")) cov0_SM_bestSMI_Sep <- vcovHC(plm.fit_SM_bestSMI_Sep,method = "arellano", type = "HC0", cluster="group") Wh.se_serial_SM_bestSMI_Sep <- sqrt(diag(cov0_SM_bestSMI_Sep)) cov0.1_SM_bestSMI_Sep <- vcovHC(plm.fit_SM_bestSMI_Sep,method = "arellano", type = "HC0", cluster="time") Wh.se_cross_SM_bestSMI_Sep <- sqrt(diag(cov0.1_SM_bestSMI_Sep)) # # ## Beck Katz: # # coeftest(plm.fit_SM_bestSMI_Sep, vcov = function(x) vcovBK(plm.fit_SM_bestSMI_Sep,method = "arellano", type = "HC0")) # cov1 <- vcovBK(plm.fit_SM_bestSMI_Sep,method = "arellano", type = "HC0", cluster="time") # BK.se <- sqrt(diag(cov1)) ## Driscoll Kraay ## # summary(plm.fit_SM_bestSMI_Sep) cov2_SM_bestSMI_Sep <- vcovSCC(plm.fit_SM_bestSMI_Sep,method = "arellano",type = "HC0") DK.se_SM_bestSMI_Sep <- sqrt(diag(cov2_SM_bestSMI_Sep)) # cov2.1_SM_bestSMI_Sep <- vcovSCC(plm.fit_SM_bestSMI_Sep,method = "arellano",type = "HC0", maxlag=1) # DK2.1.se_SM_bestSMI_Sep <- sqrt(diag(cov2.1_SM_bestSMI_Sep)) # cov2.2_SM_bestSMI_Sep <- vcovSCC(plm.fit_SM_bestSMI_Sep,method = "arellano",type = "HC0", maxlag=2) # DK2.2.se_SM_bestSMI_Sep <- sqrt(diag(cov2.2_SM_bestSMI_Sep)) # # cov2.3_SM_bestSMI_Sep <- vcovSCC(plm.fit_SM_bestSMI_Sep,method = "arellano",type = "HC0", maxlag=3) # DK2.3.se_SM_bestSMI_Sep <- sqrt(diag(cov2.3_SM_bestSMI_Sep)) # # cov2.4_SM_bestSMI_Sep <- vcovSCC(plm.fit_SM_bestSMI_Sep,method = "arellano",type = "HC0", maxlag=4) # 
DK2.4.se_SM_bestSMI_Sep <- sqrt(diag(cov2.4_SM_bestSMI_Sep)) # cov2.5_SM_bestSMI_Sep <- vcovSCC(plm.fit_SM_bestSMI_Sep,method = "arellano",type = "HC0", maxlag=5) DK2.5.se_SM_bestSMI_Sep <- sqrt(diag(cov2.5_SM_bestSMI_Sep)) ## Cameron et al /Thompson : doouble-clustering estimator ## cov3_SM_bestSMI_Sep <- vcovDC(plm.fit_SM_bestSMI_Sep, method = "arellano", type = "HC0") CT.se_SM_bestSMI_Sep <- sqrt(diag(cov3_SM_bestSMI_Sep)) ################################ ## Generate Table with Output ## se <- list(NULL, Wh.se_cross_SM_bestSMI_Sep, Wh.se_serial_SM_bestSMI_Sep, DK.se_SM_bestSMI_Sep, DK2.5.se_SM_bestSMI_Sep, CT.se_SM_bestSMI_Sep) labels1 <-c("NULL","WhiteCross","WhiteSerial", "DriscollKraay", "DriscollKray2.5","CameronThompson") stargazer(plm.fit_SM_bestSMI_Sep, plm.fit_SM_bestSMI_Sep, plm.fit_SM_bestSMI_Sep, plm.fit_SM_bestSMI_Sep, plm.fit_SM_bestSMI_Sep,plm.fit_SM_bestSMI_Sep, se = se, dep.var.caption = "Model with smallest BIC with SMI - September", dep.var.labels = "log(Silomaize)", style="default", model.numbers = FALSE, column.labels = labels1, type="text", out="./figures/figures_exploratory/BIC/Silomaize/SM_Sep_bestSM.txt" )
4f2d2084d614b13c006e06a5597377f2630a0a00
f8072ec717f72b2afa71c730ee2a8a7f6532fe22
/KPMG data insights.R
32e2c965941addd7a26e7069a2fb661fbc078689
[]
no_license
eamonnadams/KPMG_Data_Consulting_Data_Analysis
bed4e991fcf04abd6c5734e5792f7adeb8ba62b0
f5b397237b05e69c1913742c54e02597fea5df61
refs/heads/master
2022-12-28T10:37:30.745625
2020-10-13T09:24:47
2020-10-13T09:24:47
281,916,403
0
0
null
null
null
null
UTF-8
R
false
false
10,914
r
KPMG data insights.R
#Install and load all necessary packages and libraries install.packages("DataExplorer") install.packages("magrittr") install.packages("lubridate") install.packages("e1071") install.packages("rfm") install.packages("caret") install.packages("pROC") library(readxl) library(dplyr) library(ggplot2) library(DataExplorer) library(magrittr) library(lubridate) library(e1071) library(rfm) library(caret) library(pROC) getwd() #Import data from excel transactions <- read_excel("KPMG_VI_New_raw_data_update_final_Insights.xlsx",2) CustomerDemographics <- read_excel("KPMG_VI_New_raw_data_update_final_Insights.xlsx",4) CustomerAddress <- read_excel("KPMG_VI_New_raw_data_update_final_Insights.xlsx", 5) New_customers <- read_excel("KPMG_VI_New_raw_data_update_final_Insights.xlsx", 3) df1 <- merge(x=transactions,CustomerDemographics, by = "customer_id") df <- merge(x=df1,CustomerAddress,by="customer_id") distinct_customers <- distinct(df,customer_id, .keep_all = TRUE ) #Review data structure str(df) summary_df <- summary(df) introduce(df) #introduction of data plot_intro(df) #Metrics #Replacing missing data plot_missing(df) final_df <- set_missing(df, list(0L, "unknown")) final_df <- final_df %>% filter(gender %in% c("Male","Female") & brand != "unknown" & job_industry_category != "unknown" & job_title != "unknown") plot_missing(final_df) #EDA analysis #Bar plots to visualise frquency distributions for all discrete features plot_bar(final_df) #Histograms to visualize distributions for all continuous features plot_histogram(final_df) ##Categorical data vs continuous data #total number of purchase in 3 years per gender final_df %>% select(gender,past_3_years_bike_related_purchases) %>% group_by(gender) %>% summarize(total_past_purchases = sum(past_3_years_bike_related_purchases)) %>% ggplot(aes(gender,total_past_purchases,fill=gender)) + geom_bar(stat = "identity") + geom_text(aes(label =total_past_purchases)) + ggtitle("Total Past 3 year purchase by Gender") #percentage number of 
purchase in 3 years per gender final_df %>% select(gender,past_3_years_bike_related_purchases) %>% group_by(gender) %>% summarize(total_past_purchases = sum(past_3_years_bike_related_purchases)) %>% mutate(percent = signif(total_past_purchases/sum(total_past_purchases)*100,4))%>% ggplot(aes(gender,percent,fill=gender)) + geom_bar(stat = "identity") + geom_text(aes(label =percent))+ ggtitle("Percentage Past 3 year purchase by Gender") #percentage sales per gender final_df %>% select(gender,list_price) %>% group_by(gender) %>% summarize(total_sales = sum(list_price)) %>% mutate(percent_sales = signif(total_sales/sum(total_sales)*100,4))%>% ggplot(aes(gender,percent_sales,fill=gender)) + geom_bar(stat = "identity") + geom_text(aes(label =percent_sales))+ ggtitle("Percentage sales by Gender") #Tenure by age final_df %>% select(Age,tenure) %>% ggplot(aes(Age,tenure)) + geom_point() + geom_smooth() + ggtitle("Tenure by Age") #Cars owned per State final_df %>% select(owns_car,state) %>% ggplot(aes(state,stat_count = owns_car,fill = owns_car)) + geom_bar(position = "dodge") + ggtitle("Cars owned by state") #Brands amounts sold per gender final_df %>% select(gender,brand) %>% ggplot(aes(brand,stat_count = gender,fill = gender)) + geom_bar(position = "dodge") + ggtitle("Brand amounts by gender") #wealth segment per state final_df %>% select(wealth_segment,state) %>% ggplot(aes(state,stat_count = wealth_segment,fill = wealth_segment)) + geom_bar(position = "dodge") + ggtitle("Wealth segment per state") ##feature engineering #checking the skewness of Age distribution skewness(final_df$Age) hist(final_df$Age) #distribution of Age #checking the skewness of the list_price skewness(final_df$list_price) hist(final_df$list_price) #distribution of list_price #checking the skewness of standard_cost skewness(final_df$standard_cost) hist(final_df$standard_cost) #checking the skewness of past_3_years_bike_related_purchases skewness(final_df$past_3_years_bike_related_purchases) 
hist(final_df$past_3_years_bike_related_purchases) ##RFM analysis analysis_date <- lubridate::as_date('2018-01-01') rfm_recencydate <- final_df %>% mutate(analysis_date) %>% mutate(recency_days = (analysis_date)-as.Date(transaction_date)) %>% select(customer_id,recency_days)%>% group_by(customer_id)%>% summarize(recency_days=min(as.numeric(recency_days))) #Recent date calculation rfm_orders <- final_df %>% group_by(customer_id) %>% summarise(number_of_orders = as.numeric(n())) #number of orders calculation rfm_recentvisit <- final_df %>% select(customer_id,transaction_date) %>% group_by(customer_id) %>% summarize(most_recent_visit = max((transaction_date))) %>% mutate(most_recent_visit = as.Date(most_recent_visit)) #recent visit calculation class(rfm_recentvisit$most_recent_visit) rfm_revenue <- final_df %>% group_by(customer_id) %>% summarize(revenue=sum(list_price)) #revenue calculation #rfm customer data table using merging of tables rfm_data_consumer1 <- merge(x=rfm_revenue,rfm_recentvisit,by = "customer_id") rfm_data_consumer2 <- merge(x=rfm_data_consumer1,rfm_orders,by = "customer_id") rfm_data_consumer_final <- merge(x=rfm_data_consumer2,rfm_recencydate,by = "customer_id") #rfm data for distinct customer ids class(rfm_data_consumer_final$customer_id) class(rfm_data_consumer_final$revenue) class(rfm_data_consumer_final$most_recent_visit) class(rfm_data_consumer_final$number_of_orders) class(rfm_data_consumer_final$recency_days) analysis_date <- lubridate::as_date("2018-01-01") #Define analysis date options(max.print = 1000000) rfm_table <- rfm_table_customer(rfm_data_consumer_final, customer_id, number_of_orders, recency_days,revenue, analysis_date) #rfm table formation #RFM visualization rfm_heatmap(rfm_table) rfm_bar_chart(rfm_table) #distributions of RFM score combinations rfm_histograms(rfm_table) #rfm distribution rfm_order_dist(rfm_table) #distribution of customers across orders rfm_rm_plot(rfm_table) #Recency vs Monetary comparison 
rfm_fm_plot(rfm_table) #Frequency vs Monetary comparison rfm_rf_plot(rfm_table) #Recency vs Frequency comparison #segmentation categories segment_names <- c("Champions","Loyal Customers", "Potential Loyalists", "New Customers", "Promising","Need Attention", "About to Sleep", "At Risk", "Can't Lose Them", "Hibernating", "Lost") recency_lower <- c(4,2,3,4,3,3,2,1,1,2,1) recency_upper <- c(5,4,5,5,4,4,3,2,1,2,2) frequency_lower <- c(4,3,1,1,1,2,1,2,4,1,1) frequency_upper <- c(5,5,3,1,1,3,2,5,5,2,2) monetary_lower <- c(4,3,1,1,1,2,1,2,4,1,1) monetary_upper <- c(5,5,3,1,1,3,2,5,5,2,2) #segments table with the RFM scores and segments segments <- rfm_segment(rfm_table,segment_names,recency_lower, recency_upper,frequency_lower, frequency_upper,monetary_lower, monetary_upper) head(segments) #distribution of customers across the segments segments %>% count(segment) %>% arrange(desc(n)) %>% rename(Segment = segment, Count = n) rfm_plot_median_recency(segments) #median recency rfm_plot_median_frequency(segments) #median frequency rfm_plot_median_monetary(segments) #median monetary #Hypothesis test using a t-test #Ho: mu > 3 #one-sided 95% confidence interval for mu X_r <- sample(rfm_table$recency_bins, 1000, replace = TRUE)#sample size 1000 of recency score mean(X_r) sd(X_r) #t test of X_r against a null hypothesis that population mean mu_r is 3 t.test(X_r, mu = 3, alternative = "two.sided") X_f <- sample(rfm_table$frequency_bins, 1000, replace = TRUE)#sample size 1000 of recency score mean(X_f) sd(X_f) #t test of X_r against a null hypothesis that population mean mu_r is 3 t.test(X_f, mu = 3, alternative = "two.sided") X_f <- sample(rfm_table$monetary_bins, 1000, replace = TRUE)#sample size 1000 of recency score mean(X_f) sd(X_f) #t test of X_r against a null hypothesis that population mean mu_r is 3 t.test(X_f, mu = 3, alternative = "two.sided") #Converting segments to binomial variables 1 and 0, 1 for target and 0 for not target segment_new <- segments %>% mutate(recency_s 
= ifelse(recency_score > 3, "HIGH", "LOW"), frequency_s = ifelse(frequency_score > 3, "FREQUENT", "INFREQUENT"), monetary_s = ifelse(monetary_score > 3,"HIGH", "MEDIUM"), segment_s = ifelse(segment %in% c("Champions","Loyal Customers","Potential Loyalists", "New Customers", "Promising", "Need Attention", "Can't Lose Them"),1,0)) #Split data into training and test set set.seed(123) final_table <- merge(x=segment_new, final_df,by = "customer_id") final_table <- final_table %>% filter(gender %in% c("Male","Female") & brand %in% c("Norco Bicycles", "Trek Bicycles", "OHM Cycles", "WeareA2B","Giant Bicycles", "Solex")) data2 = sort(sample(nrow(final_table), nrow(final_table)*.7)) #creating training data set by selecting the output row values train <- final_table[data2,] #creating test data set by not selecting the output row values test <- final_table[-data2,] test_f <- test %>% filter(gender %in% c("Male","Female")) train_f <- train %>% filter(gender %in% c("Male","Female")) dim(train) dim(test) ##multiple logistic regression model logistics_model <- glm(segment_s ~ recency_s + frequency_s+monetary_s + gender + cubic_Age + wealth_segment + past_3_years_bike_related_purchases, data=train_f, family = "binomial") # to predict using the logistics regression model, probabilities obtained test_f[1:10,] logistics_model_prob <- predict(logistics_model, test_f, type = "response") head(logistics_model_prob,20) #convert probabilities to binomial answers prediction <- ifelse(logistics_model_prob > 0.5, 1,0) head(prediction,10) head(test_f$segment_s,10) #test of model summary(logistics_model) ROC_2 <- roc(test_f$segment_s, logistics_model_prob) plot(ROC_2, col = "blue") auc(ROC_2) mean(prediction == test_f$segment_s) #export final_table to excel write.table(final_table, file="FinalCustomerTable.csv",row.names = FALSE,sep=",") #Most valuable new customers Most_valuable_new_customers <- New_customers %>% filter(wealth_segment %in% c("Mass Customer","High Net Worth") & 
job_industry_category %in% c("Financial Services","Manufacturing", "Health","Retail", "Property")) write.table(Most_valuable_new_customers, file="TargetCustomerTable.csv",row.names = FALSE,sep=",")
8051c6e258f01f5a73131f4080afffeb53251b39
47a8dff9177da5f79cc602c6d7842c0ec0854484
/man/CaseMatch.Rd
ecec3a405d0b7a9c5a68b7b7fefcb67ed4d8c6fb
[ "MIT" ]
permissive
satijalab/seurat
8949973cc7026d3115ebece016fca16b4f67b06c
763259d05991d40721dee99c9919ec6d4491d15e
refs/heads/master
2023-09-01T07:58:33.052836
2022-12-05T22:49:37
2022-12-05T22:49:37
35,927,665
2,057
1,049
NOASSERTION
2023-09-01T19:26:02
2015-05-20T05:23:02
R
UTF-8
R
false
true
602
rd
CaseMatch.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{CaseMatch} \alias{CaseMatch} \title{Match the case of character vectors} \usage{ CaseMatch(search, match) } \arguments{ \item{search}{A vector of search terms} \item{match}{A vector of characters whose case should be matched} } \value{ Values from search present in match with the case of match } \description{ Match the case of character vectors } \examples{ data("pbmc_small") cd_genes <- c('Cd79b', 'Cd19', 'Cd200') CaseMatch(search = cd_genes, match = rownames(x = pbmc_small)) } \concept{utilities}
79d7a4972615183e22a104e0e3a345238720d3db
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/NISTunits/examples/NISTnanometerTOangstrom.Rd.R
097005a1cd108a2daaced3e0fd47b73ebee087bc
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
208
r
NISTnanometerTOangstrom.Rd.R
library(NISTunits) ### Name: NISTnanometerTOangstrom ### Title: Convert nanometer to angstrom ### Aliases: NISTnanometerTOangstrom ### Keywords: programming ### ** Examples NISTnanometerTOangstrom(10)
85e75668aa93bc1a32262e492dd5412b04089e09
cf606e7a3f06c0666e0ca38e32247fef9f090778
/test/integration/example-models/ARM/Ch.6/6.7_MoreComplexGLM.R
dcfc0d6a3f69317b8ad1e77607a827608ac683b2
[ "BSD-3-Clause", "LicenseRef-scancode-free-unknown" ]
permissive
nhuurre/stanc3
32599a71d5f82c759fd6768b8b699fb5f2b2d072
5612b357c1cd5a08cf2a57db97ce0e789bb87018
refs/heads/master
2023-07-05T02:27:08.083259
2020-11-12T15:37:42
2020-11-12T15:37:42
222,684,189
0
0
BSD-3-Clause
2019-11-19T11:50:39
2019-11-19T11:50:38
null
UTF-8
R
false
false
730
r
6.7_MoreComplexGLM.R
library(rstan) library(ggplot2) source("earnings1.data.R", echo = TRUE) ## Mixed discrete/continuous data # Logistic regression with interactions (earnings1.stan) # glm (earn.pos ~ height + male, family=binomial(link="logit")) dataList.1 <- c("N","earn_pos","height","male") earnings1.sf1 <- stan(file='earnings1.stan', data=dataList.1, iter=1000, chains=4) print(earnings1.sf1) source("earnings2.data.R", echo = TRUE) # Logistic regression with interactions (earnings2.stan) # lm (log.earn ~ height + male, subset=earn>0) dataList.2 <- c("N","earnings","height","sex") earnings2.sf1 <- stan(file='earnings2.stan', data=dataList.2, iter=1000, chains=4) print(earnings2.sf1)
5b98dfe56b10485a85c162852a29bde38fdf9672
f67f13d5025accaa03855b00bada13e558636e71
/Code/R/plotting.R
f3de1efa1be851e2f52d7dab54d276576f6e92cc
[]
no_license
ajanigyasi/master
97f818bc5140ad109f739310cae8746df08ef11c
ec255bc4c3436742f89a71557d00e685355aafe7
refs/heads/master
2020-04-11T08:43:22.047156
2015-05-24T11:54:20
2015-05-24T11:54:20
61,918,256
1
0
null
2016-06-24T23:43:37
2016-06-24T23:43:37
null
UTF-8
R
false
false
322
r
plotting.R
source('dataSetGetter.R') firstDay = getDataSet('20150129', '20150129', '../../Data/Autopassdata/Singledatefiles/Dataset/raw/', 'dataset') firstDay$dateAndTime = strptime(firstDay$dateAndTime, format='%Y-%m-%d %H:%M:%S') plot(firstDay$dateAndTime, firstDay$trafficVolume, type='l', ylab='', xlab='Time of day', main=NULL)
ba4a648934d435717a114ca6d9829a2973f6d3b1
210683b5347b6f584b258f26c7d48ab51a518fe3
/man/Reduce0exact.Rd
cfca82f4e3ac2e25fb3c0d509fde12b1d0576b63
[ "MIT" ]
permissive
statisticsnorway/SSBtools
6b95eab7f46c1096cd7d6ee3f61d3898150d49d0
aa2728571e0840e1965f3e7ed0f1984c818ca7a1
refs/heads/master
2023-06-24T02:48:17.178606
2023-06-23T08:05:58
2023-06-23T08:05:58
137,074,899
5
0
Apache-2.0
2023-06-23T08:06:00
2018-06-12T13:21:36
R
UTF-8
R
false
true
4,389
rd
Reduce0exact.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Reduce0exact.R \name{Reduce0exact} \alias{Reduce0exact} \title{Reducing a non-negative regression problem} \usage{ Reduce0exact( x, z = NULL, reduceByColSums = FALSE, reduceByLeverage = FALSE, leverageLimit = 0.999999, digitsRoundWhole = 9, y = NULL, yStart = NULL, printInc = FALSE ) } \arguments{ \item{x}{A matrix} \item{z}{A single column matrix} \item{reduceByColSums}{See Details} \item{reduceByLeverage}{See Details} \item{leverageLimit}{Limit to determine perfect fit} \item{digitsRoundWhole}{\code{\link{RoundWhole}} parameter for fitted values (when \code{leverageLimit} and \code{y} not in input)} \item{y}{A single column matrix. With \code{y} in input, \code{z} in input can be omitted and estimating \code{y} (when \code{leverageLimit}) is avoided.} \item{yStart}{A starting estimate when this function is combined with iterative proportional fitting. Zeros in yStart will be used to reduce the problem.} \item{printInc}{Printing iteration information to console when TRUE} } \value{ A list of five elements: \itemize{ \item \code{x}: A reduced version of input \code{x} \item \code{z}: Corresponding reduced \code{z} \item \code{yKnown}: Logical, specifying known values of \code{y} \item \code{y}: A version of \code{y} with known values correct and others zero \item \code{zSkipped}: Logical, specifying omitted columns of \code{x} } } \description{ The linear equation problem, \code{z = t(x) \%*\% y} with y non-negative and x as a design (dummy) matrix, is reduced to a smaller problem by identifying elements of \code{y} that can be found exactly from \code{x} and \code{z}. } \details{ Exact elements can be identified in three ways in an iterative manner: \enumerate{ \item By zeros in \code{z}. This is always done. \item By columns in x with a singe nonzero value. Done when \code{reduceByColSums} or \code{reduceByLeverage} is \code{TRUE}. 
\item By exact linear regression fit (when leverage is one). Done when \code{reduceByLeverage} is \code{TRUE}. The leverages are computed by \code{hat(as.matrix(x), intercept = FALSE)}, which can be very time and memory consuming. Furthermore, without \code{y} in input, known values will be computed by \code{\link{ginv}}. } } \examples{ # Make a special data set d <- SSBtoolsData("sprt_emp") d$ths_per <- round(d$ths_per) d <- rbind(d, d) d$year <- as.character(rep(2014:2019, each = 6)) to0 <- rep(TRUE, 36) to0[c(6, 14, 17, 18, 25, 27, 30, 34, 36)] <- FALSE d$ths_per[to0] <- 0 # Values as a single column matrix y <- Matrix(d$ths_per, ncol = 1) # A model matrix using a special year hierarchy x <- Hierarchies2ModelMatrix(d, hierarchies = list(geo = "", age = "", year = c("y1418 = 2014+2015+2016+2017+2018", "y1519 = 2015+2016+2017+2018+2019", "y151719 = 2015+2017+2019", "yTotal = 2014+2015+2016+2017+2018+2019")), inputInOutput = FALSE) # Aggregates z <- t(x) \%*\% y sum(z == 0) # 5 zeros # From zeros in z a <- Reduce0exact(x, z) sum(a$yKnown) # 17 zeros in y is known dim(a$x) # Reduced x, without known y and z with zeros dim(a$z) # Corresponding reduced z sum(a$zSkipped) # 5 elements skipped t(a$y) # Just zeros (known are 0 and unknown set to 0) # It seems that three additional y-values can be found directly from z sum(colSums(a$x) == 1) # But it is the same element of y (row 18) a$x[18, colSums(a$x) == 1] # Make use of ones in colSums a2 <- Reduce0exact(x, z, reduceByColSums = TRUE) sum(a2$yKnown) # 18 values in y is known dim(a2$x) # Reduced x dim(a2$z) # Corresponding reduced z a2$y[which(a2$yKnown)] # The known values of y (unknown set to 0) # Six ones in leverage values # Thus six extra elements in y can be found by linear estimation hat(as.matrix(a2$x), intercept = FALSE) # Make use of ones in leverages (hat-values) a3 <- Reduce0exact(x, z, reduceByLeverage = TRUE) sum(a3$yKnown) # 26 values in y is known (more than 6 extra) dim(a3$x) # Reduced x dim(a3$z) # 
Corresponding reduced z a3$y[which(a3$yKnown)] # The known values of y (unknown set to 0) # More than 6 extra is caused by iteration # Extra checking of zeros in z after reduction by leverages # Similar checking performed also after reduction by colSums } \author{ Øyvind Langsrud }
fd6337f4cf3f30a59ceccab54d7370f2dc745403
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
/gbd_2017/mortality_code/mortality_estimation/shared_functions/ltcore/R/lx_to_qx_wide.R
38ac0da7dffe16381829a9bff3f4247314f74c63
[]
no_license
Nermin-Ghith/ihme-modeling
9c8ec56b249cb0c417361102724fef1e6e0bcebd
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
refs/heads/main
2023-04-13T00:26:55.363986
2020-10-28T19:51:51
2020-10-28T19:51:51
null
0
0
null
null
null
null
UTF-8
R
false
false
2,044
r
lx_to_qx_wide.R
#' Convert a dataset of person-years lived (lx) to probabilities of death (qx) #' #' Given a dataset with columns named lx# where # is a numeric age (e.g. lx0, lx1, etc.), convert to qx. #' Modifies the existing dataset, adding variables qx#. #' qx_current_age = 1 - (lx_next_age / lx_current_age), e.g. qx5 = lx10 / lx5 #' #' @param dt data.table including: variables lx## covering all ages specified in lx_ages. Assumes 5-year jumps in age groups except ages 0 and 1. #' @param keep_lx logical for whether to preserve lx variables. Default: F. #' @param lx_ages numeric vector of ages to generate qx for. Default: 0, 1, 5, 10, 15, 20...110 #' @param assert_na logical, whether to check for NA values in the qx variables. Default: T. #' #' @return None. Performs conversions in-place -- will modify original dataset fed into it #' @export #' #' @examples #' data <- data.table(test=1, test2=2, lx0=1, lx1=.1, lx5=.05, lx10=.025, lx15=.02) #' lx_to_qx_wide(data, keep_lx=T, lx_ages = c(0,1,5,10,15)) #' #' @import data.table #' @import assertable lx_to_qx_wide <- function(dt, keep_lx = F, lx_ages = c(0, 1, seq(5, 110, 5)), assert_na = T) { for(age in lx_ages) { if(!paste0("lx",age) %in% colnames(dt)) stop(paste0("Need column lx", age, " in dataset -- set lx_ages if you are non-standard")) # If it's the final age and lx is non-NA for that group, then assume qx=1; otherwise, take lx of current age group - lx of next age group if(age == max(lx_ages)) { dt[!is.na(get(paste0("lx",age))), (paste0("qx",age)) := 1] if(!paste0("qx", age) %in% colnames(dt)) dt[, (paste0("qx",age)) := NA] } else { # print(age) next_age <- lx_ages[match(age, lx_ages) + 1] dt[, (paste0("qx",age)) := 1 - (get(paste0("lx", next_age)) / get(paste0("lx", age)))] } } lx_vars <- paste0("lx", lx_ages) if(keep_lx == F) dt[, (lx_vars) := NULL] # qx = 1 at terminal age group, as long as lx for the age group is not NA if(assert_na == T) assertable::assert_values(dt, paste0("qx", lx_ages), "not_na", quiet=T) return(dt) }
8acb217ffa7658c6a20a28ce46e5736482f14099
c2a575de83e16caad042dfd38c705014cafe2abb
/R/app_ui.R
851d2b56709d736505d42026fe5feb713a912d27
[ "MIT" ]
permissive
laurabiggins/ShinyProteomics
b5275d51cfbe11ea2ca7b65b1f7b986d19a78b9c
c96978758be4011c78facaad0f15d8042e2ff080
refs/heads/master
2023-01-05T18:06:36.427931
2020-11-06T11:32:03
2020-11-06T11:32:03
206,538,346
0
0
null
null
null
null
UTF-8
R
false
false
3,485
r
app_ui.R
#' @import shiny app_ui <- function() { tagList( golem_add_external_resources(), fluidPage( br(), withTags( div(class="title_block", h1("Cell surface proteome of human pluripotent states"), br(), h4("Plasma membrane profiling identifies differences in cell surface protein expression between naïve and primed human pluripotent stem cells"), h4("Wojdyla et al."), br() ) ), actionButton("browser", "browser"), # for debugging br(), withTags( div(class="table_area", h4("Enter gene name, protein name, gene ontology term or other keyword"), p("Search across all fields in the box below or use search boxes in individual columns within the table"), br(), div(id="search_and_table"), DT::dataTableOutput("mytable"), br(), fluidRow( column(3, downloadButton(outputId = "download_table", label = "Download Table") ), column(2, actionButton(inputId = "clear_filters", label = "Clear Filters") ), column(2, actionButton(inputId = "clear_plots", label = "Clear selected rows") ) ) ) ), br(), withTags( div(class="plots", h3("Select up to 6 rows in the table to display plots") ) ), br(), fluidRow( column(2, plotOutput(outputId = "protein_abundance_plot1", height = "240px") ), column(2, plotOutput(outputId = "protein_abundance_plot2", height = "240px") ), column(2, plotOutput(outputId = "protein_abundance_plot3", height = "240px") ), column(2, plotOutput(outputId = "protein_abundance_plot4", height = "240px") ), column(2, plotOutput(outputId = "protein_abundance_plot5", height = "240px") ), column(2, plotOutput(outputId = "protein_abundance_plot6", height = "240px") ) ), br(), fluidRow( column(2, uiOutput("download_button_plot1") ), column(2, uiOutput("download_button_plot2") ), column(2, uiOutput("download_button_plot3") ), column(2, uiOutput("download_button_plot4") ), column(2, uiOutput("download_button_plot5") ), column(2, uiOutput("download_button_plot6") ) ), br(), br(), p("Paper citation details"), br()#, # sliderTextUI("one"), # sliderTextUI("two"), # fluidRow( # column(2, # 
custom_barplotUI("protein_abundance_plot1.1") # ), # column(2, # custom_barplotUI("protein_abundance_plot2.1") # ) # ) ) ) } #' @import shiny golem_add_external_resources <- function(){ addResourcePath( 'www', system.file('app/www', package = 'ShinyProteomics') ) tags$head( golem::activate_js(), golem::favicon(ico = "www/favicon.png"), tags$script(src = "www/script.js"), tags$link(rel="stylesheet", type="text/css", href="www/custom.css") # Or for example, you can add shinyalert::useShinyalert() here ) }
06ef27e8636b6d273ca8044dc4fb4fd1f5037e59
8b5df82132ab2643d855efe2767e829b490ad7f8
/caret-methods.r
0f1762c7d39042c0049667d9722579ec6dee1d94
[]
no_license
anton-rusanov/kaggle-titanic
6e400cb4939b34dfae3772eda5030549862af705
f1cdf28f9733944c7c83482c18a39b8363be2997
refs/heads/master
2020-05-07T17:09:39.775468
2015-08-20T08:09:21
2015-08-20T08:09:21
37,762,606
0
0
null
null
null
null
UTF-8
R
false
false
518
r
caret-methods.r
library(gbm) source('commons.r') ## Trains the Support Vector Machine model and predicts 'Survived' for the test set. predict_with_caret_svm <- function(training, test, formula, label) { train_and_predict_with_caret('svmRadial', training, test, formula, label, prob.model = TRUE) } ## Trains a Stochastic Gradient Boosting model and predicts 'Survived' for the test set. predict_with_caret_gbm <- function(training, test, formula, label) { train_and_predict_with_caret('gbm', training, test, formula, label) }
c28b1afd697459cc4c8921c1dce108c91e3a23c0
57474b3df08d704fa651998d3a381609d884f3b8
/pollutantmean.R
1771ad8421222f6c53985ca6ab9a95725de77add
[]
no_license
AudiencePropensities/ProgrammingAssignment2
d0c4fc5960df193c3b8bf879409a218e4480f883
2d73655c4f242e2fd2072565a479f880b12a2000
refs/heads/master
2021-01-17T19:12:51.239513
2014-07-27T22:04:23
2014-07-27T22:04:23
null
0
0
null
null
null
null
UTF-8
R
false
false
798
r
pollutantmean.R
pollutantmean <- function(directory, pollutant, id= 1:332) { name<-list.files(directory)[id] data.name<-lapply(paste0("specdata/",name),read.csv,header=T) total<-do.call(rbind, data.name) a<-mean(na.omit(total[[pollutant]])) print(round(a,digits=3)) } pollutantmean("specdata","nitrate", 23) ## 'directory' is a character vector of length 1 indicating ## the location of the CSV files ## 'pollutant' is a character vector of length 1 indicating ## the name of the pollutant for which we will calculate the ## mean; either "sulfate" or "nitrate". ## 'id' is an integer vector indicating the monitor ID numbers ## to be used ## Return the mean of the pollutant across all monitors list ## in the 'id' vector (ignoring NA values)
8b90ecea3eed6218aef599c6c3aa4cfb777e916f
09c95562e72ddbc816cfcede64a5892bc339f954
/man/calc_combo.Rd
7686109b97d669d7a6641a09713939ea22cbfa20
[]
no_license
jhchou/medianeffect
00445fb273ef84de5546785169672421ef1b54c1
08cb14dda118f79cc5aeaa0094c750cfffef8049
refs/heads/master
2021-02-05T10:39:27.438120
2020-03-03T18:32:13
2020-03-03T18:32:13
243,771,351
0
0
null
null
null
null
UTF-8
R
false
true
859
rd
calc_combo.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/medianeffect.R \name{calc_combo} \alias{calc_combo} \title{Drug combination calculations} \usage{ calc_combo(drug_combo, ..., fa = double()) } \arguments{ \item{drug_combo}{Drug effect fixed-ratio combination object} \item{...}{Drug effect objects} \item{fa}{Vector of fraction affected (fa) at which calculations will be made (optional)} } \description{ Given drug combination object (either fixed or non-fixed ratio) and arbitrary number of single-drug effect objects, calculate all parameters needed for later calculation of combination or dose reduction index, at either a vector of specified fa values or using actual doses / fa from drug_combo. Returns unique id, total dose in combination, fa, single drug doses needed for fa, and dose in the combination for each drug. }
b587615ee5a7865cb0e5a588483b58ab795ae564
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
/chilling/04_make_figures/color_code_all_locations.R
e19bae714d64721058d3bc8fbff27bfc1d0c14a8
[ "MIT" ]
permissive
HNoorazar/Ag
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
24fea71e9740de7eb01782fa102ad79491257b58
refs/heads/main
2023-09-03T18:14:12.241300
2023-08-23T00:03:40
2023-08-23T00:03:40
146,382,473
3
6
null
2019-09-23T16:45:37
2018-08-28T02:44:37
R
UTF-8
R
false
false
16,296
r
color_code_all_locations.R
rm(list=ls()) .libPaths("/data/hydro/R_libs35") .libPaths() library(data.table) library(dplyr) library(tidyr) library(tidyverse) options(digit=9) options(digits=9) source_path_1 = "/Users/hn/Documents/GitHub/Ag/chilling/4th_draft/chill_core.R" source(source_path_1) ########################################################################################## ### ### ### Define Functions here ### ### ### ########################################################################################## define_path <- function(model_name){ if (model_name == "dynamic"){ in_dir <- paste0(main_in_dir, model_specific_dir_name[1]) } else if (model == "utah"){ in_dir <- paste0(main_in_dir, model_specific_dir_name[2]) } } clean_process <- function(dt){ dt <- subset(dt, select=c(chill_season, sum_J1, sum_F1, sum_M1, sum_A1, lat, long, warm_cold, scenario, model, year)) dt <- dt %>% filter(year <= 2005 | year >= 2025) time_periods = c("Historical", "2025_2050", "2051_2075", "2076_2099") dt$time_period = 0L dt$time_period[dt$year <= 2005] <- time_periods[1] dt$time_period[dt$year >= 2025 & dt$year <= 2050] <- time_periods[2] dt$time_period[dt$year > 2050 & dt$year<=2075] <- time_periods[3] dt$time_period[dt$year > 2075] <- time_periods[4] dt$time_period = factor(dt$time_period, levels=time_periods, order=T) dt$scenario[dt$scenario == "rcp45"] <- "RCP 4.5" dt$scenario[dt$scenario == "rcp85"] <- "RCP 8.5" dt$scenario[dt$time_period == "Historical"] <- "Historical" dt$location <- paste0(dt$lat, "_", dt$long) jan_data <- subset(dt, select=c(sum_J1, warm_cold, scenario, model, time_period, chill_season, location)) %>% data.table() feb_data <- subset(dt, select=c(sum_F1, warm_cold, scenario, model, time_period, chill_season, location)) %>% data.table() mar_data <- subset(dt, select=c(sum_M1, warm_cold, scenario, model, time_period, chill_season, location)) %>% data.table() apr_data <- subset(dt, select=c(sum_A1, warm_cold, scenario, model, time_period, chill_season, location)) %>% data.table() 
return (list(jan_data, feb_data, mar_data, apr_data)) } ############################################# ### ### ### Driver ### ### ### ############################################# # main_in_dir = "/Users/hn/Desktop/Desktop/Kirti/check_point/chilling/non_overlapping/" # model_names = c("dynamic") # , "utah" # model_specific_dir_name = paste0(model_names, "_model_stats/") # file_name = "summary_comp.rds" # mdata <- data.table(readRDS(paste0(main_in_dir, model_specific_dir_name, file_name))) # setnames(mdata, old=c("Chill_season"), new=c("chill_season")) main_in_dir = "/Users/hn/Desktop/Desktop/Ag/check_point/chilling/" out_dir = main_in_dir begins <- c("sept", "mid_sept", "oct", "mid_oct", "nov", "mid_nov") begin <- "sept" for (begin in begins){ out_dir <- file.path(main_in_dir, begin, "/color_code_table/") if (dir.exists(file.path(out_dir)) == F) { dir.create(path = file.path(out_dir), recursive = T) } mdata <- data.table(readRDS(paste0(main_in_dir, begin, "_summary_comp.rds"))) mdata <- mdata %>% filter(model != "observed") param_dir <- "/Users/hn/Documents/GitHub/Ag/chilling/parameters/" LocationGroups_NoMontana <- read.csv(paste0(param_dir, "LocationGroups_NoMontana.csv"), header=T, sep=",", as.is=T) LocationGroups_NoMontana <- within(LocationGroups_NoMontana, remove(lat, long)) mdata <- remove_montana(mdata, LocationGroups_NoMontana) information <- clean_process(mdata) jan_data = information[[1]] feb_data = information[[2]] mar_data = information[[3]] apr_data = information[[4]] rm(information, mdata) jan_result = count_years_threshs_met_all_locations(dataT = jan_data, due="Jan") feb_result = count_years_threshs_met_all_locations(dataT = feb_data, due="Feb") mar_result = count_years_threshs_met_all_locations(dataT = mar_data, due="Mar") apr_result = count_years_threshs_met_all_locations(dataT = apr_data, due="Apr") ##################### ##################### Add climate type back to data ##################### locatin_add <- subset(LocationGroups_NoMontana, 
select=c(warm_cold, location)) jan_result <- dplyr::left_join(jan_result, LocationGroups_NoMontana, by="location") feb_result <- dplyr::left_join(feb_result, LocationGroups_NoMontana, by="location") mar_result <- dplyr::left_join(mar_result, LocationGroups_NoMontana, by="location") apr_result <- dplyr::left_join(apr_result, LocationGroups_NoMontana, by="location") ##################### ##################### RCP 8.5 ##################### ######**************************** ################################## JAN ######**************************** quan_per <- jan_result %>% group_by(time_period, scenario, thresh_range) %>% summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######### COOLER 8.5 data <- quan_per %>% filter(scenario == "RCP 8.5", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "jan_cool_85.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per %>% filter(time_period == "Historical", warm_cold=="Cooler Areas", scenario=="RCP 4.5") %>% data.table() %>% select(c(warm_cold, time_period, thresh_range, quan_25)) data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "jan_cool_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARMER data <- quan_per %>% filter(scenario == "RCP 8.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "jan_warm_85.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per %>% filter(time_period == "Historical", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) 
write.table(dattest, file = paste0(out_dir, "jan_warm_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest, quan_per) ######**************************** ################################## FEB ######**************************** quan_per_feb <- feb_result %>% group_by(warm_cold, time_period, scenario, thresh_range) %>% summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######## COOLER data <- quan_per_feb %>% filter(scenario == "RCP 8.5", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "feb_cool_85.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per_feb %>% filter(time_period == "Historical", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "feb_cool_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARMER data <- quan_per_feb %>% filter(scenario == "RCP 8.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "feb_warm_85.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per_feb %>% filter(time_period == "Historical", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "feb_warm_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest, quan_per_feb) ######**************************** ################################## MARCH ######**************************** quan_per <- mar_result %>% group_by(warm_cold, time_period, scenario, thresh_range) %>% 
summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######### COOLER data <- quan_per %>% filter(scenario == "RCP 8.5", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period,quan_25) write.table(dattest, file = paste0(out_dir, "march_cool_85.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per %>% filter(time_period == "Historical", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "march_cool_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARM data <- quan_per %>% filter(scenario == "RCP 8.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "march_warm_85.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per %>% filter(time_period == "Historical", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "march_warm_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest, quan_per) ######**************************** ################################## April ######**************************** quan_per <- apr_result %>% group_by(warm_cold, time_period, scenario, thresh_range) %>% summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######### COOLER data <- quan_per %>% filter(scenario == "RCP 8.5", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period,quan_25) write.table(dattest, file = paste0(out_dir, "april_cool_85.csv"), row.names = FALSE, 
col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per %>% filter(time_period == "Historical", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "april_cool_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARM data <- quan_per %>% filter(scenario == "RCP 8.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "april_warm_85.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) data <- quan_per %>% filter(time_period == "Historical", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "april_warm_hist.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest, quan_per) ##################### ##################### RCP 4.5 ##################### ######**************************** ################################## JAN ######**************************** quan_per <- jan_result %>% group_by(warm_cold, time_period, scenario, thresh_range) %>% summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######### COOLER data <- quan_per %>% filter(scenario == "RCP 4.5", warm_cold == "Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "jan_cool_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARM data <- quan_per %>% filter(scenario == "RCP 4.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, 
"jan_warm_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######**************************** ################################## FEB ######**************************** quan_per_feb <- feb_result %>% group_by(warm_cold, time_period, scenario, thresh_range) %>% summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######## COOLER data <- quan_per_feb %>% filter(scenario == "RCP 4.5", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "feb_cool_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARM data <- quan_per_feb %>% filter(scenario == "RCP 4.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "feb_warm_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######**************************** ################################## MARCH ######**************************** quan_per <- mar_result %>% group_by(warm_cold, time_period, scenario, thresh_range) %>% summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######### COOLER data <- quan_per %>% filter(scenario == "RCP 4.5", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "march_cool_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARM data <- quan_per %>% filter(scenario == "RCP 4.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "march_warm_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) 
######**************************** ################################## April ######**************************** quan_per <- apr_result %>% group_by(warm_cold, time_period, scenario, thresh_range) %>% summarise(quan_25 = quantile(frac_passed, probs = 0.25)) %>% data.table() ######### COOLER data <- quan_per %>% filter(scenario == "RCP 4.5", warm_cold=="Cooler Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "april_cool_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) ######## WARM data <- quan_per %>% filter(scenario == "RCP 4.5", warm_cold=="Warmer Areas") %>% data.table() data <- data[order(time_period, thresh_range), ] dattest <- data %>% spread(time_period, quan_25) write.table(dattest, file = paste0(out_dir, "april_warm_45.csv"), row.names = FALSE, col.names = TRUE, sep = ",") rm(data, dattest) }
ac514150d45f1d48ecab6cd358dad493b748bfb0
f8c9804e50a61d544250ecf5a1a03b357819a23a
/man/mrds-opt.Rd
8410d301230b3e45952b03a720e971cbd1bb134a
[]
no_license
cran/mrds
c086ead932cd9e39c9aa7ee734bc55f0d2e8d425
dfa8dff4d44565c0123ef6f3f1e9f0b152b6155c
refs/heads/master
2023-07-27T08:16:08.397331
2023-07-06T10:30:15
2023-07-06T10:30:15
17,697,669
0
0
null
null
null
null
UTF-8
R
false
true
3,898
rd
mrds-opt.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mrds-package.R \docType{methods} \name{mrds-opt} \alias{mrds-opt} \title{Tips on optimisation issues in \code{mrds} models} \description{ Occasionally when fitting an `mrds` model one can run into optimisation issues. In general such problems can be quite complex so these "quick fixes" may not work. If you come up against problems that are not fixed by these tips, or you feel the results are dubious please go ahead and contact the package authors. } \section{Debug mode}{ One can obtain debug output at each stage of the optimisation using the \code{showit} option. This is set via \code{control}, so adding \code{control=list(showit=3)} gives the highest level of debug output (setting \code{showit} to 1 or 2 gives less output). } \section{Re-scaling covariates}{ Sometimes convergence issues in covariate (MCDS) models are caused by values of the covariate being very large, so a rescaling of that covariate is then necessary. Simply scaling by the standard deviation of the covariate can help (e.g. \code{dat$size.scaled <- dat$scale/sd(dat$scale)} for a covariate \code{size}, then including \code{size.scaled} in the model instead of \code{size}). It is important to note that one needs to use the original covariate (size) when computing Horvitz-Thompson estimates of population size if the group size is used in that estimate. i.e. use the unscaled size in the numerator of the H-T estimator. } \section{Factor levels}{ By default R will set the base factor level to be the label which comes first alphabetically. Sometimes this can be an issue when that factor level corresponds to a subset of the data with very few observations. This can lead to very large uncertainty estimates (CVs) for model parameters. One way around this is to use \code{\link{relevel}} to set the base level to a level with more observations. 
} \section{Initial values}{ Initial (or starting) values can be set via the \code{initial} element of the \code{control} list. \code{initial} is a list itself with elements \code{scale}, \code{shape} and \code{adjustment}, corresponding to the associated parameters. If a model has covariates then the \code{scale} or \code{shape} elements will be vectors with parameter initial values in the same order as they are specific in the model formula (using \code{showit} is a good check they are in the correct order). Adjustment starting values are in order of the order of that term (cosine order 2 is before cosine order 3 terms). One way of obtaining starting values is to fit a simpler model first (say with fewer covariates or adjustments) and then use the starting values from this simpler model for the corresponding parameters. Another alternative to obtain starting values is to fit the model (or some submodel) using Distance for Windows. Note that Distance reports the scale parameter (or intercept in a covariate model) on the exponential scale, so one must \code{log} this before supplying it to \code{ddf}. } \section{Bounds}{ One can change the upper and lower bounds for the parameters. These specify the largest and smallest values individual parameters can be. By placing these constraints on the parameters, it is possible to "temper" the optimisation problem, making fitting possible. Again, one uses the \code{control} list, the elements \code{upperbounds} and \code{lowerbounds}. In this case, each of \code{upperbounds} and \code{lowerbounds} are vectors, which one can think of as each of the vectors \code{scale}, \code{shape} and \code{adjustment} from the "Initial values" section above, concatenated in that order. If one does not occur (e.g. no shape parameter) then it is simple omitted from the vector. } \author{ David L. Miller <dave@ninepointeightone.net> }
d7c05d8043e8a716e637c89fcc9da0f28cd48905
c0936db82e1500f9e5721414305b110587029449
/plot4.R
cae26c96343c0ca8e404e4286735b4cda24e1368
[]
no_license
mgiusto/ExData_Plotting1
8702d58d1a0bca69128cc7096b3169c9a739d069
4e11dd5808591c505e35eae923065884ec3afa65
refs/heads/master
2021-01-16T22:03:36.588240
2014-05-08T20:26:20
2014-05-08T20:26:20
null
0
0
null
null
null
null
UTF-8
R
false
false
1,243
r
plot4.R
#general configurations Sys.setlocale("LC_TIME", "C") #load data data=read.csv(file="household_power_consumption.txt",colClasses=c(rep("character",7)),header=TRUE,sep=";",nrow=70000) data = data[data$Date %in% c("1/2/2007", "2/2/2007"),] for (i in 3:dim(data)[2]){ data[,i] = as.numeric(data[,i],dec=".") } data$datetime=strptime(paste(data$Date,data$Time),"%d/%m/%Y %H:%M:%S") data$datetime=as.POSIXct(data$datetime) ##generate plot4.png png("plot4.png",width=480,height=480,units="px",bg = "transparent") #set image with 4 plots par(mfrow = c(2,2)) #generate top-left plot with(data,plot(Global_active_power~datetime,type="l",xlab="",ylab="Global Active Power")) #generate top-right plot with(data,plot(Voltage~datetime,type="l",xlab = "datetime", ylab="Voltage")) #generate bottom-left plot plot(data$Sub_metering_1~data$datetime,type="l",xlab="",ylab="Energy sub metering") lines(data$Sub_metering_2~data$datetime,col="red") lines(data$Sub_metering_3~data$datetime,col="blue") legend("topright",lty=c(1,1,1), col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex=0.9,bty="n") #generate bottom-left plot with(data,plot(Global_reactive_power~datetime,type="l",xlab = "datetime")) dev.off()
85d42319c60ffc2977afc8b5649a4beb36cb1529
6a28ba69be875841ddc9e71ca6af5956110efcb2
/Introductory_Statistics_by_Sheldon_M._Ross/CH7/EX7.5/Ex7_5.R
607fbb90ac01c574361f621ce4faef881676b47c
[]
permissive
FOSSEE/R_TBC_Uploads
1ea929010b46babb1842b3efe0ed34be0deea3c0
8ab94daf80307aee399c246682cb79ccf6e9c282
refs/heads/master
2023-04-15T04:36:13.331525
2023-03-15T18:39:42
2023-03-15T18:39:42
212,745,783
0
3
MIT
2019-10-04T06:57:33
2019-10-04T05:57:19
null
UTF-8
R
false
false
153
r
Ex7_5.R
#Page No.313 n=900 left_handed=60 p=left_handed/n print(p) px2=400/1000 print(px2) px2x1=399/1000 print(px2x1) px2px1_0=400/999 print(px2px1_0)
adf21904c3e7c7a02710f7f10f96008fc721fc6b
8f549e33631a13e2b3c05fd02605f31a6f5c079c
/R/OnlyBluntTraumaPatients.R
11649824a8340d5aa7da7d07a7002fffa9950248
[ "MIT" ]
permissive
martingerdin/bengaltiger
07e60275560af5ed3c6df090f94a8d427796e29e
2662bb36540699a51e6558b542008d07035a98e1
refs/heads/master
2021-07-03T12:29:20.911428
2020-02-25T11:45:47
2020-02-25T11:45:47
144,838,020
3
4
MIT
2020-09-02T10:24:17
2018-08-15T10:12:31
R
UTF-8
R
false
false
2,520
r
OnlyBluntTraumaPatients.R
#' Only patients with blunt trauma #' #' Keeps only patients with blunt trauma in the sample. #' @param study.sample Data frame. The study sample. No default. #' @param injury.type.variable.name Character vector of length 1. The name of #' the age variable. Defaults to "ti". #' @param blunt.value Character or numeric vector of length 1. The value of the #' injury type variable that indicates that a patients had blunt trauma. #' Defaults to "Blunt". #' @param remove.missing Logical vector of length 1. If TRUE all observations #' with missing injury type, as detected by is.na, are removed from the #' sample. Defaults to TRUE. #' @export OnlyBluntTraumaPatients <- function(study.sample, injury.type.variable.name = "ti", blunt.value = "Blunt", remove.missing = TRUE) { ## Error handling if (!is.data.frame(study.sample)) stop("study.sample has to be a data frame") if (!is.character(injury.type.variable.name) | !IsLength1(injury.type.variable.name)) stop("injury.type.variable.name has to be a character vector of length 1") if ((!is.numeric(blunt.value) & !is.character(blunt.value)) | !IsLength1(blunt.value)) stop("blunt.value has to be a character or numeric vector of length 1") if (!is.logical(remove.missing) | !IsLength1(remove.missing)) stop("remove.missing has to be a logical vector of length 1") ## Create subsample subsample <- study.sample ## Remove missing n.missing <- 0 if (remove.missing) { subsample <- subsample[!is.na(subsample[, injury.type.variable.name]), ] n.missing <- nrow(study.sample) - nrow(subsample) } ## Remove patients with penetrating trauma subsample <- subsample[subsample[, injury.type.variable.name] == blunt.value, ] n.excluded <- nrow(study.sample) - nrow(subsample) - n.missing ## Collate return list total.n.excluded <- n.excluded if (remove.missing) total.n.excluded <- total.n.excluded + n.missing exclusion.text <- paste0(total.n.excluded, " patients had penetrating trauma") if (remove.missing) { exclusion.text <- paste0(total.n.excluded, " 
excluded: \n\n", "- ", n.missing, " had missing injury type \n\n", "- ", n.excluded, " patients had penetrating trauma \n\n") } return.list <- list(exclusion.text = exclusion.text, subsample = subsample) return(return.list) }
4a080e4bcda716b4b24659a786d3224b5596d86f
870e79c2458d684f512a6613a61a71222341eab4
/R/zzz.R
cd03109b0f60dcd933e5cf4c9a4e0ae8c1db45f0
[]
no_license
bbuchsbaum/pronouncingR
ec38b5c1f1a1979855c588864e3e41b4a34b2ee2
e0a62c15593a29df28612703be46c3beaab989fc
refs/heads/master
2021-01-16T08:51:35.016279
2020-02-26T21:38:42
2020-02-26T21:38:42
243,048,648
0
0
null
null
null
null
UTF-8
R
false
false
321
r
zzz.R
pronouncing <- NULL pincelate <- NULL pin <- NULL .onLoad <- function(libname, pkgname) { # use superassignment to update global reference to scipy pronouncing <<- reticulate::import("pronouncing", delay_load = TRUE) pincelate <<- reticulate::import("pincelate", delay_load = TRUE) pin <<- pincelate$Pincelate() }
bb7c17caabf8815746c68c72921f516d195f948c
154f590295a74e1ca8cdde49ecbb9cbb0992147e
/R/dh5.R
6071103daa277b5e67d364e65a6722da59537c87
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-public-domain-disclaimer", "CC0-1.0" ]
permissive
klingerf2/EflowStats
2e57df72e154581de2df3d5de3ebd94c3da0dedf
73891ea7da73a274227212a2ca829084149a2906
refs/heads/master
2017-12-07T10:47:25.943426
2016-12-28T20:52:42
2016-12-28T20:52:42
null
0
0
null
null
null
null
UTF-8
R
false
false
1,455
r
dh5.R
#' Function to return the DH5 hydrologic indicator statistic for a given data frame #' #' This function accepts a data frame that contains a column named "discharge" and calculates #' DH5; Annual maximum of 90-day moving average flows. Compute the maximum of a 90-day moving average flow for #' each year. DH5 is the mean (or median-Use Preference option) of these values (cubic feet per second-temporal). #' #' @param qfiletempf data frame containing a "discharge" column containing daily flow values #' @param pref string containing a "mean" or "median" preference #' @return dh5 numeric containing DH5 for the given data frame #' @export #' @examples #' qfiletempf<-sampleData #' dh5(qfiletempf) dh5 <- function(qfiletempf, pref = "mean") { qfiletempf <- qfiletempf[order(qfiletempf$date),] noyears <- aggregate(qfiletempf$discharge, list(qfiletempf$wy_val), FUN = median, na.rm=TRUE) colnames(noyears) <- c("Year", "momax") noyrs <- length(noyears$Year) max90daybyyear <- rep(0,noyrs) for (i in 1:noyrs) { subsetyr <- subset(qfiletempf, as.numeric(qfiletempf$wy_val) == noyears$Year[i]) day90mean <- rollmean(subsetyr$discharge, 90, align = "right", na.pad = TRUE) max90daybyyear[i] <- max(day90mean, na.rm=TRUE) } if (pref == "median") { dh5 <- round(median(max90daybyyear),digits=2) } else { dh5 <- round(mean(max90daybyyear),digits=2) } return(dh5) }
b674930e222fd44678bd0026e2a628df18e176b6
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
/B_analysts_sources_github/Ironholds/dalit-dash/utils.R
9ce22e91d50a55533ff89476e1447d7d849c4cd3
[]
no_license
Irbis3/crantasticScrapper
6b6d7596344115343cfd934d3902b85fbfdd7295
7ec91721565ae7c9e2d0e098598ed86e29375567
refs/heads/master
2020-03-09T04:03:51.955742
2018-04-16T09:41:39
2018-04-16T09:41:39
128,578,890
5
0
null
null
null
null
UTF-8
R
false
false
371
r
utils.R
library(httr) library(ores) # Handles the actual querying logic query_wp <- function(params, error_message){ result <- httr::GET("https://en.wikipedia.org/w/api.php", query = params, httr::user_agent("Dalit Dashboard service")) if(result$status_code != 200){ stop(error_message) } return(httr::content(result)) }
22a1747a7b35dc0ad5afe617c7aae2cf06de3a12
ef424746a3ea4ed6e167f03d359b39da48a0fc21
/R/DEPRECATED/MICRO-Tile-Parameters.R
77de0e448a8282c2fc4f4d678d0e58ce9fff65c0
[]
no_license
smitdave/MASH
397a1f501c664089ea297b8841f2cea1611797e4
b5787a1fe963b7c2005de23a3e52ef981485f84c
refs/heads/master
2021-01-18T18:08:25.424086
2017-08-17T00:18:52
2017-08-17T00:18:52
86,845,212
0
3
null
2017-08-17T00:18:52
2017-03-31T17:42:46
R
UTF-8
R
false
false
4,097
r
MICRO-Tile-Parameters.R
# #################################################################################### # # # # MASH # # R6-ified # # MICRO Tile Class Parameters # # Hector Sanchez & David Smith, Hector Sanchez, Sean Wu # # May 31, 2017 # # # #################################################################################### # # # #################################################################################### # # Parameter Generation Functions # #################################################################################### # # #' MICRO: Generate Parameters for \code{\link{MicroTile}} Object # #' # #' This function is a specific instantiation of a generic system to generate parameters for a # #' chosen microsimulation Tile. Any user-specified function can be written to generate parameters, as long as the # #' return list is in the same format. # #' # #' @param nFeed number of feeding sites # #' @param nAqua number of aquatic habitats # #' @param pointGen character to select spatial point pattern generation function # #' * "poisson": \code{\link{pointsPoisson}} # #' * "clustered": \code{\link{pointsClustered}} # #' * "overdispersed": \code{\link{pointsOverdispersed}} # #' * "lattice": \code{\link{pointsLattice}} # #' @param module character # #' * "emerge": initialize parameters for Emerge module of Aquatic Ecology # #' * "EL4P": initialize parameters for EL4P module of Aquatic Ecology # #' @param modulePars additional list of named parameters to be passed to Aquatic Ecology module specific parameter generating functions # #' * Emerge: for details see \code{\link{makeLambda_MicroEmerge}} # #' * EL4P: # #' @param hazV mean value for feeding site vegetation landing hazard (if 0 it is set to 0 for all sites) # #' @param hazW mean value for feeding site outside wall landing hazard (if 0 it is set to 0 for all sites) # #' @param hazI mean value for feeding site indoor wall landing hazard (if 0 it is set to 0 for all sites) # #' @param haz mean value for aquatic habitat landing 
hazard (if 0 it is set to 0 for all sites) # #' @param searchFeed vector of searchWt for feeding sites (if \code{NULL} initialize to Gamma(1,1) distribution) # #' @param searchAqua vector of searchWt for aquatic habitats (if \code{NULL} initialize to Gamma(1,1) distribution) # #' @param enterP vector of house entry probabilities or single numeric value for all sites (if \code{NULL} initialize to Beta(9,1) distribution) # #' @param xLim x-axis bounds for simulated points # #' @param yLim y-axis bounds for simulated points # #' @param aquaSD standard deviation of aquatic habitat scatter around feeding sites # #' @param hhSize average number of humans at feeding sites # #' @param hhMin minimum number of humans at feeding sites # #' @param bWeight numeric value of biting weights on \code{\link{Human}} (if \code{NULL} biting weights are Gamma(1,1) distributed) # #' @param ... additional named arguments for pointGen() # #' @return a named list of parameters # #' * Landscape_PAR: see \code{\link{Landscape.Parameters}} for details # #' * HumanPop_PAR: see \code{\link{HumanPop.Parameters}} for details # #' @md # #' # #' @export # MICRO.Tile.Parameters <- function( # nFeed, # nAqua, # pointGen = "poisson", # module, # modulePars, # hazV = 0, # hazW = 0, # hazI = 0, # haz = 0, # searchFeed = NULL, # searchAqua = NULL, # enterP = NULL, # xLim = c(0,1), # yLim = c(0,1), # aquaSD = 0.025, # hhSize = 7, # hhMin = 2, # bWeight = NULL, # ... # ){ # # Landscape_PAR = Landscape.Parameters(nFeed=nFeed,nAqua=nAqua,pointGen=pointGen,module=module,modulePars=modulePars, # hazV=hazV,hazW=hazW,hazI=hazI,haz=haz,searchFeed=searchFeed,searchAqua=searchAqua, # enterP=enterP,xLim=xLim,yLim=yLim,aquaSD=aquaSD,...) # HumanPop_PAR = HumanPop.Parameters(nSite = nFeed, bWeight = bWeight, siteSize = hhSize, siteMin = hhMin) # # MicroTile_PAR = list(Landscape_PAR=Landscape_PAR,HumanPop_PAR=HumanPop_PAR) # }
2ee279906a14c7e419e2586dd9b3e9caf91edd8f
315e6d13054a86421cfee1c7c9d9c90c180795f7
/plot1.r
17216ca93578235d4b9f1e575e2d65c8897d83b0
[]
no_license
pengguo-01/EPA-National-Emission-Inventory
63acc865c763516a14dc1336d5b95b9cc878e580
beee028128353d6af1b3502f6a64372c149e0b5c
refs/heads/master
2021-01-02T05:33:47.579903
2020-02-10T13:10:31
2020-02-10T13:10:31
239,511,878
0
0
null
null
null
null
UTF-8
R
false
false
580
r
plot1.r
setwd("C://Desktop//Download") NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") ##head(NEI) ##head(SCC) #summary(NEI) ##names(SCC) str(NEI) str(SCC) ## total emission from PM2.5 ## creat the plot of emisson ans save it TotalNei <- aggregate(Emissions ~year, NEI, sum) png("plot1.png", width = 480, height = 480) plot(TotalNei$year, TotalNei$Emissions, type = "o", col ="blue", main =expression("Total US" ~ PM[2.5]~"Emission"), ylab = expression("Total US"~ PM[2.5]~"emission"), xlab ="Year") dev.off()
bdf3a05fb728b5cc1d8bf9ac8ca5221eadbddbbc
e9b52725874e941de0f94be1cf1ddb3b3f7b04c7
/docs/stimuli/csvtojson.r
53b87edc27379582c092af0e0375b86d61f430c9
[]
no_license
bwaldon/probmust
528019e835795920e0f77eea34acfad9cfe75fea
0f9bab75cf2f12f2de33b3d302f04aca9087456d
refs/heads/master
2020-09-06T02:11:09.156280
2020-06-17T15:39:26
2020-06-17T15:39:26
220,283,294
0
0
null
null
null
null
UTF-8
R
false
false
320
r
csvtojson.r
library(tidyverse) library(jsonlite) setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) stims <- read.csv("inposition.csv") stims_json <- toJSON(stims) write_file(stims_json, "inposition.json") stims <- read.csv("completestory.csv") stims_json <- toJSON(stims) write_file(stims_json, "completestory.json")
9df42a139197f07ac2593cfebe4c91e3933c09ae
e508c618f5f5b63540af455e1e38a2adfae03811
/plot2.R
dead769645f320434d4222b126c48d91b006b72f
[]
no_license
mtiberi/ExData_Plotting1
872092367f879cebecb008edae0dc1b7dc0d2225
85118109d7eeebbc3f45222fddda9ae98672f528
refs/heads/master
2020-12-14T09:02:15.308200
2015-12-12T08:17:41
2015-12-12T08:17:41
47,864,111
0
0
null
2015-12-12T04:26:11
2015-12-12T04:26:10
null
UTF-8
R
false
false
1,001
r
plot2.R
load.data<- function() { section<- "section.csv" if (!file.exists(section)) { if (!file.exists("household_power_consumption.txt")) { zipfile<- "household_power_consumption.zip" if (!file.exists(zipfile)) { download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zipfile) } unzip(zipfile) } #the file is too big for my computer, i will only load #the data of February 1 and 2, 2007 system(paste0("head -1 household_power_consumption.txt >", section)) system(paste0("cat household_power_consumption.txt | grep -E '^1/2/2007|^2/2/2007' >>", section)) } d<- read.csv(section, sep=";") d$Time=strptime(paste(d$Date, d$Time, sep=" "), format="%d/%m/%Y %H:%M:%S") d$Time=as.POSIXct(d$Time) d } data<- load.data() png("./plot2.png", width=480, height=480) plot( data$Time, data$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)" ) dev.off()
d49e3d99bc4507bac30d010eab35a666665d9219
03e8e288378284cd3c8f467fc09b55e93c4ee605
/chart/pie_chart.R
989e3ea60eb23044a3031f2e040da4dfe802fea6
[]
no_license
JijoongHong/Business-and-Economics-Data-Analysis
eb2dbee6e9bbcdfe92d26f81f57903b1d0484a82
97f9dbddf0593179400756dfb58d88cd34cd38a6
refs/heads/main
2023-04-18T19:34:25.735952
2021-04-26T18:57:54
2021-04-26T18:57:54
361,833,520
0
0
null
null
null
null
UTF-8
R
false
false
401
r
pie_chart.R
install.packages("RColorBrewer") library(RColorBrewer) greens = brewer.pal(7, 'Greens') #색의 수, 팔레트 유형 city = c("seoul", "busan", "daegu", "incheon", "gwangju", "daejeon", "ulsan") pm25 = c(18,21,21,17,8,11,25) pct = round(pm25/sum(pm25)*100.0) city_label = paste(city, ",", pct, "%", sep="") pie(pm25, labels=city_label, col=greens, main="pm25", init.angle = 90, clockwise = T)
51054c9759c4a0953beac38b8e6fa79118ed7b96
31d01cb6fd40ae946822160aa67153026421cc8f
/ipmu-talk/pdc-betadens-setup.R
98be373ce108148e0c995009fa361887dc10da40
[]
no_license
geeeero/boatpaper
3dd66b634c2e28c078eee8ab1a94af1be6596f75
20eaf20706b66bdca088ceef2284d413c263eaee
refs/heads/master
2021-01-21T04:54:48.995452
2016-06-20T23:39:53
2016-06-20T23:39:53
19,751,779
0
0
null
null
null
null
UTF-8
R
false
false
1,856
r
pdc-betadens-setup.R
library(ggplot2) library(reshape2) library(grid) library(gridExtra) library(luck) source("../../lund_1512/lund-1512/course/04-01_BinomialData.R") source("../../lund_1512/lund-1512/course/04-02_Binomial.R") tuered <- rgb(0.839,0.000,0.290) tueblue <- rgb(0.000,0.400,0.800) tueyellow <- rgb(1.000,0.867,0.000) tuegreen <- rgb(0.000,0.675,0.510) tuewarmred <- rgb(0.969,0.192,0.192) tueorange <- rgb(1.000,0.604,0.000) tuedarkblue <- rgb(0.063,0.063,0.451) bottomlegend <- theme(legend.position = 'bottom', legend.direction = 'horizontal', legend.title = element_blank()) rightlegend <- theme(legend.title = element_blank()) nolegend <- guides(fill="none", color="none") pdcscale <- scale_color_manual(values=c(tuegreen, tuedarkblue), name=element_blank()) pdcscale2 <- scale_fill_manual(values=c(tuegreen, tuedarkblue), name=element_blank()) fmt_dcimals <- function(decimals=0){ function(x) format(x,nsmall = decimals,scientific = FALSE) } updateLuckY <- function (n0, y0, tau, n){ (n0*y0+tau)/(n0+n) } updateLuckN <- function (n0, n){ n0+n } nyupdate <- function (pr, data){ nn <- updateLuckN(pr[1], data[2]) yn <- updateLuckY(pr[1], pr[2], data[1], data[2]) c(nn,yn) } luck4cny <- function(luck, posterior=FALSE){ c1 <- c(n0(luck)[1], y0(luck)[1]) c2 <- c(n0(luck)[1], y0(luck)[2]) c3 <- c(n0(luck)[2], y0(luck)[1]) c4 <- c(n0(luck)[2], y0(luck)[2]) if(posterior){ c1 <- nyupdate(c1, c(tau(data(luck)), n(data(luck)))) c2 <- nyupdate(c2, c(tau(data(luck)), n(data(luck)))) c3 <- nyupdate(c3, c(tau(data(luck)), n(data(luck)))) c4 <- nyupdate(c4, c(tau(data(luck)), n(data(luck)))) } list(c1=c1, c2=c2, c3=c3, c4=c4) } dbetany <- function(x, ny, ...){ dbeta(x, shape1=ny[1]*ny[2], shape2=ny[1]*(1-ny[2]), ...) } pbetany <- function(x, ny, ...){ pbeta(x, shape1=ny[1]*ny[2], shape2=ny[1]*(1-ny[2]), ...) }
4f3ccd4d9cd6c1b8b4afab22f038ae7c49e2e252
2d88e86736d81b32e957b62bd8b0041e2a9778ad
/man/plotEnsembleMean.Rd
33819b2cc8b169c0ce9b180941066734e2526054
[]
no_license
cran/amber
c1659595049f230f54db3893704fc67ddb2429ed
e6ef59a25270413a1875c84feac786551bf69315
refs/heads/master
2021-07-23T06:25:02.408885
2020-08-28T10:20:02
2020-08-28T10:20:02
212,134,119
0
0
null
null
null
null
UTF-8
R
false
true
2,837
rd
plotEnsembleMean.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotEnsembleMean.R \name{plotEnsembleMean} \alias{plotEnsembleMean} \title{Ensemble mean plots of AMBER results (bias, bias scores, etc)} \usage{ plotEnsembleMean(long.name, metric, mod.path.list, modelIDs, myVariables, shp.filename = system.file("extdata/ne_110m_land/ne_110m_land.shp", package = "amber"), my.xlim = c(-180, 180), my.ylim = c(-60, 85), plot.width = 5, plot.height = 7, outputDir = FALSE, subcaption = "") } \arguments{ \item{long.name}{A string that gives the full name of the variable, e.g. 'Gross primary productivity'} \item{metric}{A string that specifies what statistical metric should be plotted. This includes for instance 'bias', 'crmse', 'phase', 'iav', 'bias-score', 'rmse-score', 'phase-score', and 'iav-score'.} \item{mod.path.list}{A List of directories where AMBER output is stored for different model runs, e.g. list(mod01.path, mod02.path, mod03.path)} \item{modelIDs}{An R object with the different model run IDs, e.g. c('CLASSIC.CRUJRAv2', 'CLASSIC.GSWP3W5E5', 'CLASSIC.CRUNCEP')} \item{myVariables}{An R object with the variable names of interest, e.g. c('GPP.FluxCom', 'RECO.FluxCom').} \item{shp.filename}{A string that gives the coastline shapefile} \item{my.xlim}{An R object that gives the longitude range that you wish to plot, e.g. c(-180, 180)} \item{my.ylim}{An R object that gives the longitude range that you wish to plot, e.g. c(-90, 90)} \item{plot.width}{Number that gives the plot width, e.g. 8} \item{plot.height}{Number that gives the plot height, e.g. 4} \item{outputDir}{A string that gives the output directory, e.g. '/home/project/study'. The output will only be written if the user specifies an output directory.} \item{subcaption}{A string that defines the subcaption of the figure, e.g. '(a)'.} } \value{ Figures in PDF format. 
} \description{ This function plots ensemble mean, minimum, and maximum values of a statistical metric computed by \link{scores.grid.time} and \link{scores.grid.notime}. } \examples{ library(amber) library(classInt) library(doParallel) library(foreach) library(Hmisc) library(latex2exp) library(ncdf4) library(parallel) library(raster) library(rgdal) library(rgeos) library(scico) library(sp) library(stats) library(utils) library(viridis) library(xtable) long.name <- 'Gross Primary Productivity' metric <- 'mod-mean' mod01.path <- paste(system.file('extdata', package = 'amber'), 'model01', sep = '/') mod02.path <- paste(system.file('extdata', package = 'amber'), 'model02', sep = '/') mod.path.list <- list(mod01.path, mod02.path) modelIDs <- c('CLASSIC.CRUJRAv2', 'CLASSIC.GSWP3W5E5') myVariables <- c('GPP-GOSIF', 'GPP-MODIS') plotEnsembleMean(long.name, metric, mod.path.list, modelIDs, myVariables, plot.width = 5, plot.height = 5.5) }
5047b2e3eb7beca15b0417e7fb540e5e17d5eca7
0de0b6edf603c9a99da5bd21afae8bd3e5d2f4c0
/man/ARCoeffMap.Rd
49533e71e352de788c832f449f0092f65f4c6407
[]
no_license
cran/cope
17b0f7581865c589331602fd3097d02d6b65341f
c8fc20648160175ebd66885be31d98292608213c
refs/heads/master
2021-06-26T12:34:32.065985
2017-02-13T11:57:47
2017-02-13T11:57:47
29,903,202
0
0
null
null
null
null
UTF-8
R
false
true
514
rd
ARCoeffMap.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ToyExamples.R \name{ARCoeffMap} \alias{ARCoeffMap} \title{Generate the AR coefficient map.} \usage{ ARCoeffMap(Ns = 64) } \arguments{ \item{Ns}{Number of pixels of the result in one direction. The resulting picture will have Ns x Ns pixels.} } \value{ A list containing x and y, the coordinates of the grid and z, a matrix of dimensions Ns x Ns giving the AR coefficients map. } \description{ Generate the AR coefficient map. }
7b2a584172d5c4bf3f46b17b85d181c7cf364f33
ffb2418b096271c5b29821344e47269d6fe4d192
/man/inspect.Rd
2e8664b3d133e64799a84591cb5b666924910aa3
[]
no_license
hadley/pryr
ed001475a186a0125136d40fd2ecaace230ae194
860500b7ff9951441822bf046b2b8665113f2276
refs/heads/master
2023-04-05T06:00:42.153084
2023-01-18T13:54:12
2023-01-18T13:54:12
7,491,765
188
35
null
2023-03-18T16:58:03
2013-01-07T23:19:25
R
UTF-8
R
false
true
1,389
rd
inspect.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R, R/inspect.r \name{sexp_type} \alias{sexp_type} \alias{inspect} \alias{refs} \alias{address} \alias{typename} \title{Inspect internal attributes of R objects.} \usage{ sexp_type(x) inspect(x, env = parent.frame()) refs(x) address(x) typename(x) } \arguments{ \item{x}{name of object to inspect. This can not be a value.} \item{env}{When inspecting environments, don't go past this one.} } \description{ \code{typename} determines the internal C typename, \code{address} returns the memory location of the object, and \code{refs} returns the number of references pointing to the underlying object. } \section{Non-standard evaluation}{ All functions uses non-standard evaluation to capture the symbol you are referring to and the environment in which it lives. This means that you can not call any of these functions on objects created in the function call. All the underlying C level functions use \code{Rf_findVar} to get to the underlying SEXP. } \examples{ x <- 1:10 \dontrun{.Internal(inspect(x))} typename(x) refs(x) address(x) y <- 1L typename(y) z <- list(1:10) typename(z) delayedAssign("a", 1 + 2) typename(a) a typename(a) x <- 1:5 address(x) x[1] <- 3L address(x) } \seealso{ Other object inspection: \code{\link{ftype}()}, \code{\link{otype}()} } \concept{object inspection}
5248d93d27368c40000e5b9285cb5a68825faf5c
1c9dc6b031f967801c894344893285542a7becae
/man/post_clean_chance.Rd
8194875316bd21d5063d770894f2b6c5af679f44
[ "MIT" ]
permissive
Mattlk13/aceR
0a67f1fbc197781bd3417b4da63b02429b16a797
c9c11f9bfd60df6c24ce5fff6a8e2b04aebade5a
refs/heads/master
2022-06-30T03:13:27.428067
2022-06-20T20:38:31
2022-06-20T20:38:31
147,976,156
0
0
MIT
2020-06-30T16:20:36
2018-09-08T23:04:05
R
UTF-8
R
false
true
1,957
rd
post_clean_chance.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/module-post.R \name{post_clean_chance} \alias{post_clean_chance} \title{Scrub processed data with below-chance accuracy} \usage{ post_clean_chance( df, app_type = c("classroom", "explorer"), overall = TRUE, cutoff_dprime = 0, cutoff_2choice = 0.5, cutoff_4choice = 0.25, cutoff_5choice = 0.2, cutoff_k = 1, extra_demos = NULL ) } \arguments{ \item{df}{a df, output by \code{\link{proc_by_module}}, containing processed ACE data.} \item{app_type}{character. What app type produced this data? One of \code{c("classroom", "explorer")}. Must be specified.} \item{overall}{Also scrub ".overall" data? Defaults to \code{TRUE}.} \item{cutoff_dprime}{Maximum value of d' to replace with \code{NA}, for relevant tasks (ACE Tap and Trace, SAAT). Defaults to 0.} \item{cutoff_2choice}{Maximum value of accuracy to replace with \code{NA}, for 2-response tasks (ACE Flanker, Boxed). Defaults to 0.5.} \item{cutoff_4choice}{Maximum value of accuracy to replace with \code{NA}, for 4-response tasks (ACE Stroop, Task Switch). Defaults to 0.25.} \item{cutoff_5choice}{Maximum value of accuracy to replace with \code{NA}, for 5-response tasks (ACE Color Selection). Defaults to 0.2.} \item{cutoff_k}{Maximum \emph{relative} value of Filter k to replace with \code{NA}. Defaults to 1, which corresponds to 1 target item in both 2-target conditions and 4-target conditions.} \item{extra_demos}{Character vector specifying any custom-added demographics columns (beyond app defaults) to pass through the function. Defaults to \{code{NULL}.} } \value{ a df, similar in structure to \code{proc}, but with below-cutoff values in certain columns converted to \code{NA}. } \description{ User-friendly wrapper to replace below-chance records with \code{NA} in ACE data processed with \code{\link{proc_by_module}}. Currently only compatible with ACE (SEA not yet implemented), }
baaa82317dc45f808b27689fde9f87db5e154c07
b2f61fde194bfcb362b2266da124138efd27d867
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nxquery_query51_1344n/nxquery_query51_1344n.R
6f5c64e76dfeb0ee26d3ad2f3409667b4fae8fb2
[]
no_license
arey0pushpa/dcnf-autarky
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
a6c9a52236af11d7f7e165a4b25b32c538da1c98
refs/heads/master
2021-06-09T00:56:32.937250
2021-02-19T15:15:23
2021-02-19T15:15:23
136,440,042
0
0
null
null
null
null
UTF-8
R
false
false
70
r
nxquery_query51_1344n.R
3e0c03874c515f36571a2460e1729f16 nxquery_query51_1344n.qdimacs 335 925
b18e77e9b6e6620b26dd1a46231f225f48371003
5a208336d315d316a493c85b39da5b0df010cfc6
/covid_project.R
cbb2d66894ab7b7cecc3855d840cd76f9c7cba5b
[ "Apache-2.0" ]
permissive
stevenwortmann/CCAC_R
e440e341d59b71e3cbaf6d5f7cf7cd66a7c7d166
7313409add110b64e8d50f3b562c3d776ab4e7df
refs/heads/main
2023-05-20T13:04:09.777936
2021-06-11T15:28:06
2021-06-11T15:28:06
354,147,367
0
0
null
null
null
null
UTF-8
R
false
false
8,827
r
covid_project.R
library(tidyverse) library(plotly) url <- 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv' data <- as_tibble(read.csv(url)) %>% select(-iso_code,-continent) data$date <- as.Date(data$date, '%Y-%m-%d') colnames(data) unique(data$location) continents <- c('World','North America','Europe','European Union','South America','Asia','Africa') countries <- data %>% filter(!(location %in% continents)) %>% filter(grepl((Sys.Date()-1),date)) countries # Top 10 countries by deaths: top_10_deaths <- (countries%>%arrange(desc(total_deaths)))$location[1:10] top_10_deaths #Top 10 countries by deaths-per-million: top_10_deathsPerMillion <- (countries%>%arrange(desc(total_deaths_per_million)))$location[1:10] # Plot 1: Top-10 fatality countries, new deaths over time ggplot(subset(data, location %in% top_10_deaths), aes(x=date, y=new_deaths_smoothed, color=location, na.rm=T)) + geom_line(na.rm=T) + ylab('New Deaths') + ylim(0,3500) + ggtitle('Top 10 Countries: New Deaths') + theme(axis.title.x=element_blank(), legend.position = "bottom") + scale_x_date(date_breaks = '3 month',date_labels = "%b%y",limits = as.Date(c('2020-03-01','2021-05-01'))) ggplotly(ggplot(subset(data, location %in% top_10_deaths), aes(x=date, y=new_deaths_smoothed, color=location, na.rm=T)) + geom_line(na.rm=T) + ylab('New Deaths') + ylim(0,3500) + ggtitle('Top 10 Countries: New Deaths') + theme(axis.title.x=element_blank(), legend.position = "none") + scale_x_date(date_breaks = '3 month',date_labels = "%b%y",limits = as.Date(c('2020-03-01','2021-05-01')))) worldWide <- data %>% filter(data$location %in% c('World')) ggplot( # Plot 2: Most dense fatalities vs. 
World trend subset(data, location == 'World' | location %in% top_10_deathsPerMillion), aes(x=date, y=new_deaths_smoothed_per_million, color=(location), na.rm=T)) + geom_line(na.rm=T) + ylab('New Deaths per Million') + ggtitle('Worldwide: New Deaths per Million') + theme(axis.title.x=element_blank(), legend.position = "bottom") + ylim(0,30) + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-10-01',(Sys.Date()-1)))) ggplotly(ggplot( # Interactive version of plot 2 subset(data, location == 'World' | location %in% top_10_deathsPerMillion), aes(x=date, y=new_deaths_smoothed_per_million, color=(location), na.rm=T)) + geom_line(na.rm=T) + ylab('New Deaths per Million') + ggtitle('Worldwide: New Deaths per Million') + theme(axis.title.x=element_blank(), legend.position = "none") + ylim(0,30) + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-10-01',(Sys.Date()-1))))) ggplot( # Plot 3: Most total fatalities (density) vs. World trend subset(data, location == 'World' | location %in% top_10_deaths), aes(x=date, y=new_deaths_smoothed_per_million, color=(location), na.rm=T)) + geom_line(na.rm=T) + ylab('New Deaths per Million') + ggtitle('Worldwide: New Deaths per Million') + theme(axis.title.x=element_blank(), legend.position = "bottom") + ylim(0,30) + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-10-01',(Sys.Date()-1)))) ggplotly(ggplot( # Interactive version of plot 3 subset(data, location == 'World' | location %in% top_10_deaths), aes(x=date, y=new_deaths_smoothed_per_million, color=(location), na.rm=T)) + geom_line(na.rm=T) + ylab('New Deaths per Million') + ggtitle('Worldwide: New Deaths per Million') + theme(axis.title.x=element_blank(), legend.position = "none") + ylim(0,NA) + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-10-01',(Sys.Date()-1))))) # Median age of the world population is 29.5 yrs median((countries %>% 
arrange(desc(total_deaths_per_million)))$median_age,na.rm=T) # Average age of 10 most densely-fatal countries: 42.17778 mean((countries %>% arrange(desc(total_deaths_per_million)))$median_age[1:10],na.rm=T) # Average age of countries with 10 highest median age: 45.81 mean((countries %>% arrange(desc(median_age)))$median_age[1:10],na.rm=T) (countries %>% arrange(desc(median_age)))$location[1:50] # Oldest 50 countries on Earth mean((countries %>% arrange(desc(median_age)))$median_age[1:50]) # Their age: 42.292 sd((countries %>% arrange(desc(median_age)))$median_age[1:50],na.rm=T) # Stan Dev: 2.405507 (countries %>% arrange((median_age)))$location[1:50] # Youngest 50 countries on Earth mean((countries %>% arrange((median_age)))$median_age[1:50]) # Their age: 19.068 sd((countries %>% arrange((median_age)))$median_age[1:50],na.rm=T) # Stan Dev: 1.671788 # T-test: Comparing fatalities of lowest and highest median age countries... t.test((countries%>%arrange((median_age)))$total_deaths_per_million[1:50], (countries%>%arrange(desc(median_age)))$total_deaths_per_million[1:50]) #t = -9.1265, df = 48.279, p-value = 4.383e-12 #alternative hypothesis: true difference in means is not equal to 0 #95 percent confidence interval: # -1397.7304 -893.1145 #sample estimates: # mean of x mean of y #77.8436 1223.2660 (countries %>% arrange(desc(population_density)))$location[1:50] # 50 most population-dense countries mean((countries %>% arrange(desc(population_density)))$population_density[1:50]) # 1511.26 people/sq km (countries %>% arrange((population_density)))$location[1:50] # 50 most population-sparse countries mean((countries %>% arrange((population_density)))$population_density[1:50]) # 17.71 people/sq km # T-test: Comparing overall case density of lowest and highest population density countries... 
t.test((countries%>%arrange(population_density))$total_cases_per_million[1:50], (countries%>%arrange(desc(population_density)))$total_cases_per_million[1:50]) #t = -1.6751, df = 83.521, p-value = 0.09765 #alternative hypothesis: true difference in means is not equal to 0 #95 percent confidence interval: # -25753.001 2204.774 #sample estimates: # mean of x mean of y #23656.80 35430.91 # T-test: Comparing fatalities of lowest and highest population density countries... t.test((countries%>%arrange(population_density))$total_deaths_per_million[1:50], (countries%>%arrange(desc(population_density)))$total_deaths_per_million[1:50]) #t = -0.28893, df = 89.082, p-value = 0.7733 #alternative hypothesis: true difference in means is not equal to 0 #95 percent confidence interval: # -284.1111 211.9740 #sample estimates: # mean of x mean of y #444.5189 480.5875 ggplot( # Plot 4: USA New Cases vs. Total Vaccinations subset(data, location == 'United States'), aes(x=date)) + geom_line(aes(y=new_cases_smoothed_per_million), color = "darkred", na.rm=T) + geom_line(aes(y=total_vaccinations_per_hundred), color="steelblue", na.rm=T) + ylab('New Cases/Vaccinations') + ggtitle('United States: New Cases per Million vs. Total Vaccinations per Hundred') + theme(axis.title.x=element_blank(), legend.position = "bottom") + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-10-01',(Sys.Date()-1)))) ggplotly(ggplot( # Interactive version of plot 4 subset(data, location == 'United States'), aes(x=date)) + geom_line(aes(y=new_cases_smoothed_per_million), color = "darkred", na.rm=T) + geom_line(aes(y=total_vaccinations_per_hundred), color="steelblue", na.rm=T) + ylab('New Cases/Vaccinations') + ggtitle('United States: New Cases per Million vs. 
Total Vaccinations per Hundred') + theme(axis.title.x=element_blank(), legend.position = "bottom") + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-02-01',(Sys.Date()-1))))) ggplot( # Plot 5: USA New Cases vs. New Vaccinations subset(data, location == 'United States'), aes(x=date)) + geom_line(aes(y=new_cases_smoothed_per_million), color = "darkred", na.rm=T) + geom_line(aes(y=new_vaccinations_smoothed_per_million), color="steelblue", na.rm=T) + ylab('New Cases/Vaccinations') + ggtitle('United States: New Cases per Million vs. New Vaccinations per Million') + theme(axis.title.x=element_blank(), legend.position = "bottom") + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-10-01',(Sys.Date()-1)))) ggplotly(ggplot( # Interactive version of plot 5 subset(data, location == 'United States'), aes(x=date)) + geom_line(aes(y=new_cases_smoothed_per_million), color = "darkred", na.rm=T) + geom_line(aes(y=new_vaccinations_smoothed_per_million), color="steelblue", na.rm=T) + ylab('New Cases/Vaccinations') + ggtitle('United States: New Cases per Million vs. New Vaccinations per Million') + theme(axis.title.x=element_blank(), legend.position = "bottom") + scale_x_date(date_breaks = '1 month',date_labels = "%b%y",limits = as.Date(c('2020-02-01',(Sys.Date()-1)))))
21e5d74fc025256edc7670d09b2f066e30d7273d
361954cc1036c8e77f6410e5c63955260375f071
/man/mxnt.c.Rd
ec2c23353c1e8a1e155b4fc71bcf5e7e1d28bb7d
[]
no_license
HemingNM/ENMwizard
a4d8f883560e0a5d34c12507489d51e057640f30
b8f30a1e7c255ce43c2f45541e418f06879dbc74
refs/heads/master
2023-06-21T20:25:35.622227
2023-06-12T12:36:24
2023-06-12T12:36:24
104,896,526
17
3
null
null
null
null
UTF-8
R
false
true
423
rd
mxnt.c.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xx.Deprecated.functions.R \name{mxnt.c} \alias{mxnt.c} \title{Calibrate MaxEnt models based on model selection criteria} \usage{ mxnt.c(...) } \arguments{ \item{...}{additional arguments} } \description{ This function will read an object of class ENMevaluation (See ?ENMeval::ENMevaluate for details) and calibrate the selected maxent models. }
d2fa72c5eaf762ec6fa6a0a6e6f4419c91e0817a
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615841609-test.R
250899f6316f736fb405fb1316c8a0d102162cf8
[]
no_license
akhikolla/updatedatatype-list3
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
d1505cabc5bea8badb599bf1ed44efad5306636c
refs/heads/master
2023-03-25T09:44:15.112369
2021-03-20T15:57:10
2021-03-20T15:57:10
349,770,001
0
0
null
null
null
null
UTF-8
R
false
false
698
r
1615841609-test.R
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = numeric(0), rs = numeric(0), temp = c(3.97819720707908e-140, 2.93002458225603e-103, 5.4110925816528e-312, 6.11701179667664e-231, -4.51958902583875e-52, NaN, -1.68828440347651e+89, -1.07039890209705e+91, 4.660633463353e-232, -7.00882470000077e-295, 1.63325997752026e+86, -9.41858582207924e+144, NaN, 1.38541297412217e-310, -5.04986460561795e-195, -5.04975683349975e-195, -2.70570789531379e-246, 1.13592397524474e-161, 1.36442255699939e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(meteor:::ET0_PenmanMonteith,testlist) str(result)
c478fbab169700420000020d53137f385610c886
0a906cf8b1b7da2aea87de958e3662870df49727
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610555794-test.R
87bde6ca1c15b4d7144fd7b2d674298676fc14ed
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
182
r
1610555794-test.R
testlist <- list(data = structure(c(2.12199591744608e-314, 4.46108960268127e-140, 0), .Dim = c(3L, 1L)), q = 0) result <- do.call(biwavelet:::rcpp_row_quantile,testlist) str(result)
50234040df1f27bf324efb6c91968db683238cad
3890b1c631f1b774821dedbb7afdcd3256dd5565
/Script.R
1d5e0489dd3017aac19704beb20b112bc95ba1f7
[]
no_license
britbrin/Vireo-olivaceus
46bbcf4ac97d418aafa73b31bc5b7fde5e784998
ec98cf2dccbad0c86d0f33af21aaa5cd926b432f
refs/heads/master
2021-09-06T00:03:17.078623
2018-01-31T21:09:01
2018-01-31T21:09:01
119,094,101
0
0
null
null
null
null
UTF-8
R
false
false
1,654
r
Script.R
library(readr) library(raster) library(rgdal) library(ggplot2) # download packages hurliang <- read_csv("~/Desktop/SeniorSpring/BIOL395/GitHub/Vireo-olivaceus/uniq_ads_CI_grid_ids_tmin.csv") View(hurliang) # import HurlbertLiang2012 dataset myspecies = hurliang[hurliang$species == 'Vireo olivaceus', ] View(myspecies) # extract only data on Vireo olivaceus yr_lat_long_temp = data.frame(myspecies$year, myspecies$long1, myspecies$lat1) # create new dataframe for year, latitude, longitude, and temperature (7 day span around arrival day) names(yr_lat_long_temp) = c("year", "lat", "long") # rename columns View(yr_lat_long_temp) yr_lat_long_temp[,"D1"]=NA yr_lat_long_temp[,"D2"]=NA yr_lat_long_temp[,"D3"]=NA yr_lat_long_temp[,"D4_arrival"]=NA yr_lat_long_temp[,"D5"]=NA yr_lat_long_temp[,"D6"]=NA yr_lat_long_temp[,"D7"]=NA # create new columns for Julian day (within a week range of arrival day - D4) yr_lat_long_temp$D4_arrival = myspecies$xmid # add arrival day (D4) from myspecies dataframe yr_lat_long_temp$D3 = yr_lat_long_temp$D4_arrival - 1 yr_lat_long_temp$D2 = yr_lat_long_temp$D4_arrival - 2 yr_lat_long_temp$D1 = yr_lat_long_temp$D4_arrival - 3 yr_lat_long_temp$D5 = yr_lat_long_temp$D4_arrival + 1 yr_lat_long_temp$D6 = yr_lat_long_temp$D4_arrival + 2 yr_lat_long_temp$D7 = yr_lat_long_temp$D4_arrival + 3 # calculate julian day for 7 day range yr_lat_long_temp[,"D1_temp"]=NA yr_lat_long_temp[,"D2_temp"]=NA yr_lat_long_temp[,"D3_temp"]=NA yr_lat_long_temp[,"D4_arrival_temp"]=NA yr_lat_long_temp[,"D5_temp"]=NA yr_lat_long_temp[,"D6_temp"]=NA yr_lat_long_temp[,"D7_temp"]=NA # create columns for temperatures at each day
86d8e0ba1bb1c783dfcaff6d35db9aff5f0b29c6
75333bb9412ac97c7afd7fea3361ab885ea7d844
/RPackage/R/onLoad.R
551c9e5c3339f73fe4d2416c23e7a68db5a1f16f
[]
no_license
rohan-shah/networkReliability
d463ffa4bc0857fb663ba4323aeae2bd743fbd61
0ba88a99e0d8af9d310b00e78171f09e9d356059
refs/heads/master
2020-12-21T20:59:36.962538
2020-06-04T03:21:11
2020-06-04T03:21:11
57,937,725
0
0
null
null
null
null
UTF-8
R
false
false
139
r
onLoad.R
.onLoad <- function(libname, pkgname) { library.dynam(package="networkReliability", chname="networkReliability", lib.loc = .libPaths()) }
9d1d159ad4686f2ae7e218f430fb661dab3ba35f
eca4448cb4f62c18e13daa14bbad5a1ccfcf11b2
/R code for gwas/multippleqqplotfun1.r
2eacafe0504cc1be06ca063506f69925006de787
[]
no_license
wzxsoy/R-and-Python-code-for-GWAS-and-Genomic-selection
911273101adbaad9fa2859849fc736ab70dd5962
0f6b38db7f4d48df1ac6cc449e5438b9588e6f16
refs/heads/master
2020-07-02T00:56:39.578287
2020-03-05T15:21:56
2020-03-05T15:21:56
201,365,912
0
0
null
null
null
null
UTF-8
R
false
false
761
r
multippleqqplotfun1.r
# umesh rosyara 8/23/2012 qqplotfun <- function (x1, x2) { if (!is.numeric(x1)) stop("D'oh! X1 P value vector is not numeric.") x1 <- x1[!is.na(x1) & x1<1 & x1>0] if (!is.numeric(x2)) stop("D'oh! X2 P value vector is not numeric.") x2 <- x2[!is.na(x2) & x2<1 & x2>0] x <- c(-log10(x1),-log10(x2)) qx <- qqnorm(x) qx$gr <- c(rep(1, length(x1)), rep(2, length (x2))) df1 <- data.frame ( x = qx$x, y= qx$y, gr = qx$gr) qx1 <- df1[df1$gr==1,] qx2 <- df1[df1$gr==2,] plot(qx1$x,qx1$y, xlab = "Theoritical Quantiles", ylab = "log10 (p-value)", ylim = c(0, max(x)), xlim = c(-3, 3)) points (qx2$x,qx2$y, col = "red", pch = 18) qqline(x, col = 3) } # example x1 <- rnorm (100, 0.5, 1) x2 <- rnorm (100, 0.3, 1) qqplotfun (x1, x2)
3b8c731553ddf0f2b9bb9dcea3baa5fdfbabda2e
a6f13f78977d956f7f1b9c69e46dbef641fd7bd5
/Project_2.R
6ed62f77d8a1a3e4c1a7fcbf5d425aa6eb43b3ad
[]
no_license
sarang125/ML_Project2_UB
eee354bf36134cf10e971887030860f987614bc6
2ca6b8896eb9bc6096e98394d9787821d56db60d
refs/heads/master
2020-05-21T02:58:04.993948
2019-05-10T00:11:23
2019-05-10T00:11:23
185,888,929
0
0
null
null
null
null
UTF-8
R
false
false
7,433
r
Project_2.R
"This is the code for extracting the UB related tweets from tweeter and performing a class prediction task based on its emergency(1) or no-emergency(0) nature " # Step 1 : Installing the necessary packages for extracting.. #...and cleaning the tweet data using twitteR and tm package in R #install.packages('twitteR') library(twitteR) #install.packages('rtweet') library(rtweet) #install.packages('tm') library(tm) #install.packages("devtools") #devtools::install_github("mkearney/rtweet") # Step 2 : Establishing the one-time connection with Twitter # The keys have been hashed from security point of view api_key <- '***' api_secret <- '***' access_token <- '***' access_token_secret <- '***' # Authentication for extracting the tweets setup_twitter_oauth(api_key, api_secret, access_token = access_token, access_secret = access_token_secret) # Step 3 : Extracting the relavant tweets # First set of train data from UBuffalo handle mostly the label '0' data ub_tweets_1 <- userTimeline('UBuffalo', n = 500) length(ub_tweets) # We could fetch 42 only ub_tweets_2 <- userTimeline('UBStudentExp', n = 500) length(ub_tweets_2) # We could fetch 89 only ub_tweets_3 <- userTimeline('UBAthletics', n = 500) length(ub_tweets_3)# We could fetch 30 only ub_tweets_4 <- userTimeline('ubalumni', n = 500) length(ub_tweets_4) # We could fetch 251 only ub_tweets_5 <- userTimeline('UBCommunity', n = 500) length(ub_tweets_5) # We could fetch 92 only # This mostly has the emergency data with Police force as first respondents ub_tweets_6 <- userTimeline('UBuffaloPolice', n = 3000) length(ub_tweets_6) # We could fetch 452 only # By defalut the class is list, so converting into Dataframe df1 <- twListToDF(ub_tweets_1) df2 <- twListToDF(ub_tweets_2) df3 <- twListToDF(ub_tweets_3) df4 <- twListToDF(ub_tweets_4) df5 <- twListToDF(ub_tweets_5) df6 <- twListToDF(ub_tweets_6) write.csv(df6,file = 'pos_tweets.csv') df6 <- read.csv('filtered_positive_incidents.csv') head(df6$text) # Combining the train and test 
datasets # Since we have lesser no. of positive labelled instances we restrict #..the negative ones too df <- rbind(df1,df2,df3,df6) dim(df) # Appending the labels df$label <- c(rep(0,(nrow(df)-nrow(df6))),rep(1,nrow(df6))) dim(df) head(df) colnames(df) write.csv(df, 'DatasetUsed.csv') final_df <- df[c('text','label')] dim(final_df) table(final_df$label) # 161 Negative cases (Non-emergency) and 15 Positive cases(Emergency cases) # Step 4 : Perfrming the Text Mining on the "text" column of the tweets with tm library # Building the corpus df_corpus <- Corpus(VectorSource(final_df$text)) # Cleaning up the tweets # Removing url using function removeURL <- function(x) gsub('http[^[:space:]]*','',x) df_corpus <- tm_map(df_corpus, content_transformer(removeURL)) # Retaining only the alphabets and space removeExtra <- function(x) gsub('[^[:alpha:][:space:]]*','',x) df_corpus <- tm_map(df_corpus, content_transformer(removeExtra)) df_corpus <- tm_map(df_corpus, tolower) df_corpus <- tm_map(df_corpus, removePunctuation) df_corpus <- tm_map(df_corpus, removeNumbers) df_corpus <- tm_map(df_corpus, removeWords, stopwords(kind = 'en')) # Visualize the content and creating the Document Term matrix for easy data handling content(df_corpus) df_dtm <- DocumentTermMatrix(df_corpus) df_dtm_m <- as.matrix(df_dtm) head(df_dtm_m,1) class(df_dtm_m) dim(df_dtm_m) final_dataset <- cbind(df_dtm_m,c(rep(0,(nrow(df)-nrow(df6))),rep(1,nrow(df6)))) dim(final_dataset) # Step 5 : Performing Gradient Descent optimization using the built-in package library(gradDescent) # Let's track time to run devtools::install_github("collectivemedia/tictoc") library(tictoc) Splited_set <- splitData(final_dataset, dataTrainRate = 0.8, seed = 123) dim(Splited_set$dataTrain) # (140 Documents(instances) by 816 (Terms)) dim(Splited_set$dataTest) # (36 by 816) dim(t(Splited_set$dataTest)) # Performing row-column transformation tic('Start run') grad_descent <- GD(Splited_set$dataTrain, alpha = 0.01, maxIter = 1000, seed 
= 123) toc() # 2.5 sec dim(grad_descent) # Intercept term intercept <- grad_descent[1] term_weights_matrix_excl_intercept_term <- grad_descent[2:816] term_weights_matrix_excl_intercept_term term_weights_matrix_excl_intercept_term <- as.matrix(term_weights_matrix_excl_intercept_term) dim(term_weights_matrix_excl_intercept_term) # 815 by 1 dim(Splited_set$dataTest[,-1]) # 36 by 815 # We need to transform both matrices to ensure conformable arguments pred <- intercept + t(term_weights_matrix_excl_intercept_term) %*% t(Splited_set$dataTest[,-1]) dim(pred) # 1 by 36 # Step 6 : Validation using the Mean Absolute Error on test dataset # Actual label we have assigned at the start of analysis actual_label <- Splited_set$dataTest[,816] actual_label # Predicted label after we perform Gradient Descent predicted_label <- pred predicted_label # Finding the residual error error <- (actual_label - predicted_label) MAE <- mean(abs(error)) MAE # 1.6639 # Step 7 : Using another algorithm called Stochastic Gradient Descent (SGD) tic() SGD <- SGD(Splited_set$dataTrain, alpha = 0.01, maxIter = 1000, seed = 123) toc() # 1.46 sec dim(SGD) # Intercept term intercept <- SGD[1] term_weights_matrix_excl_intercept_term <- SGD[2:816] term_weights_matrix_excl_intercept_term term_weights_matrix_excl_intercept_term <- as.matrix(term_weights_matrix_excl_intercept_term) dim(term_weights_matrix_excl_intercept_term) # 815 by 1 dim(Splited_set$dataTest[,-1]) # 36 by 815 # We need to transform both matrices to ensure conformable arguments pred <- intercept + t(term_weights_matrix_excl_intercept_term) %*% t(Splited_set$dataTest[,-1]) dim(pred) # 1 by 36 # Actual label we have assigned at the start of analysis actual_label <- Splited_set$dataTest[,816] actual_label # Predicted label after we perform Gradient Descent predicted_label <- pred predicted_label # Finding the residual error error <- (actual_label - predicted_label) MAE <- mean(abs(error)) MAE # 1.6485 # Step 6 : Using another algorithm called 
Momentum Gradient Descent (MGD) tic() MGD <- MGD(Splited_set$dataTrain, alpha = 0.01, maxIter = 1000, momentum = 0.9, seed = 123) toc() # 2.75 sec dim(MGD) # Intercept term intercept <- MGD[1] term_weights_matrix_excl_intercept_term <- MGD[2:816] term_weights_matrix_excl_intercept_term term_weights_matrix_excl_intercept_term <- as.matrix(term_weights_matrix_excl_intercept_term) dim(term_weights_matrix_excl_intercept_term) # 815 by 1 dim(Splited_set$dataTest[,-1]) # 36 by 815 # We need to transform both matrices to ensure conformable arguments pred <- intercept + t(term_weights_matrix_excl_intercept_term) %*% t(Splited_set$dataTest[,-1]) dim(pred) # 1 by 36 # Actual label we have assigned at the start of analysis actual_label <- Splited_set$dataTest[,816] actual_label # Predicted label after we perform Gradient Descent predicted_label <- pred predicted_label # Finding the residual error error <- (actual_label - predicted_label) MAE <- mean(abs(error)) MAE # 1.6381 # MAE for GD is 1.6639 ( 2.5 sec), for SGD it's 1.6485 (1.46 sec) #...and for MGD it's 1.6381 (2.56 sec) for alpha = 0.01 and max_iterations of 1000
e0fbf3df1aa6493de740b6d6559fb4e732103cd2
732a300fdab998d0aa2e55f7284a94b901f8164c
/repro_research/peer_assessment_1.R
273c16890569740cf24ad920b2d4d0f36731fb0b
[]
no_license
brobinso/datasciencecoursera
aa5000f0654cef5d312ce76a4b7fe989ecad83a2
6ec93e817497a35fdf030294853b20c50e8a56ea
refs/heads/master
2021-01-01T20:00:50.209253
2015-08-17T02:11:25
2015-08-17T02:11:25
26,337,450
0
0
null
null
null
null
UTF-8
R
false
false
3,203
r
peer_assessment_1.R
#setwd("./ML/repro_research/RepData_PeerAssessment1") sessionInfo() packages<-c("lubridate","dplyr","lattice") lapply(packages,require,character.only=TRUE) # read and preprocess x<-read.csv("./RepData_PeerAssessment1/activity.csv",header=T,stringsAsFactors=F) x$date<-ymd(x$date) x<-mutate(x,dow=ordered(wday(x$date))) # total daily steps by date x.perday <- x %>% group_by(date) %>% mutate(daily_total=sum(steps,na.rm=T)) x.perday <- x.perday %>% group_by(interval) %>% mutate(interval_mean=mean(steps,na.rm=T)) x.perday.summary <- x.perday %>% group_by(date) %>% summarize(daily_total=sum(steps)) # Calculate and report the mean and median of the total number of steps taken per day x.dayofweek <- x.perday %>% group_by(dow) %>% summarize(mean_steps=mean(daily_total)) mean(x.perday.summary$daily_total,na.rm=T) median(x.perday.summary$daily_total,na.rm=T) # x.median<-x.perday %>% group_by(weekday) %>% summarize(mean_steps=median(daily_total)) # plot hist(x.perday.summary$daily_total,breaks=30) abline(v=median(x.perday$daily_total),col="magenta") # Which 5-minute interval, on average across all the days in the dataset, contains the maximum number of steps? x.int <- x %>% group_by(interval) %>% summarize(ave_steps=mean(steps,na.rm=T)) x.int[which.max(x.int$ave_steps),] # Make a time series plot (i.e. type = "l") of the 5-minute interval (x-axis) and the average number of steps taken, averaged across all days (y-axis) with(x.int,plot(interval,ave_steps,type="l")) # Calculate and report the total number of missing values in the dataset (i.e. the total number of rows with NAs) mean(is.na(x)) # proportion of NA sum(is.na(x)) # number of rows with NA ind<-which(is.na(x)) #get indicides with NA # Create a new dataset that is equal to the original dataset but with the missing data filled in. 
# replace NAs with the proper interval mean y.perday <- x.perday y.perday$steps[ind]<-y.perday$interval_mean[ind] y.perday <- y.perday %>% group_by(date) %>% mutate(daily_total=sum(steps)) y.perday.summary <- y.perday %>% group_by(date) %>% summarize(daily_total=sum(steps)) # Make a histogram of the total number of steps taken each day and Calculate and report the mean and median total number of steps taken per day. Do these values differ from the estimates from the first part of the assignment? What is the impact of imputing missing data on the estimates of the total daily number of steps? hist(y.perday$daily_total,breaks=30) median(y.perday$daily_total) mean(y.perday$daily_total) # Create a new factor variable in the dataset with two levels - "weekday" and "weekend" indicating whether a given date is a weekday or weekend day. y.perday$wkdy <- factor((ifelse(weekdays(y.perday$date) %in% c("Saturday","Sunday"), "weekend", "weekday"))) # Make a panel plot containing a time series plot (i.e. type = "l") of the 5-minute interval (x-axis) and the average number of steps taken, averaged across all weekday days or weekend days (y-axis). See the README file in the GitHub repository to see an example of what this plot should look like using simulated data. xyplot(steps ~ interval | wkdy, data=y.perday, layout= c(1,2), main="", ylab = "Steps", xlab="Interval",type="l")
2f06f0b935788445111192ef3169156ed07d824b
75b6573d973c8a037e20647947176fb3a79c7acd
/R/aggregate_OCN.R
1ffe5e5d9d68f2f9e76940484f3d855aa68a1440
[]
no_license
lucarraro/OCNet
e6cb08b6fbb154719b8e18ad6ccdac7be3eca8f5
2a2789678c3d5ddcbe49bf8e90b3f73920feb273
refs/heads/master
2023-08-03T04:34:23.223279
2023-07-20T12:54:16
2023-07-20T12:54:16
219,014,909
5
4
null
2023-05-16T09:10:36
2019-11-01T15:43:08
HTML
UTF-8
R
false
false
18,487
r
aggregate_OCN.R
aggregate_OCN <- function(OCN, thrA=0.002*OCN$FD$nNodes*OCN$cellsize^2, streamOrderType="Strahler", maxReachLength=Inf, breakpoints=NULL, displayUpdates=FALSE){ if (!("slope" %in% names(OCN$FD))){ stop('Missing fields in OCN. You should run landscape_OCN prior to aggregate_OCN.') } if (maxReachLength < OCN$cellsize*sqrt(2)){ stop("maxReachLength cannot be smaller than OCN$cellsize*sqrt(2).") } #t1 <- Sys.time() if (thrA==0) maxReachLength <- OCN$cellsize*sqrt(2) #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# # BUILD NETWORK AT RN LEVEL #### #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# if(displayUpdates){message("Calculating network at RN level... \r", appendLF = FALSE)} #print('Crop data at FD level to RN level...',quote=FALSE); RN_mask <- as.vector(OCN$FD$A >= thrA)# RN_mask allows to sample RN-level values from matrices/vectors at FD level RN_to_FD <- which(RN_mask) # RN_to_FD[i] is the pixel ID at the FD level of the pixel whose ID at the RN level is i FD_to_RN <- RN_mask*cumsum(as.numeric(RN_mask)) # FD_to_RN[i] is the pixel ID at the RN level of the pixel whose ID at the FD level is i # if pixel i at FD level doesn't belong to RN, then FD_to_RN[i]=0 Nnodes_RN <- length(RN_to_FD) W_RN <- OCN$FD$W[RN_mask,,drop=FALSE] W_RN <- W_RN[,RN_mask,drop=FALSE] Outlet_RN <- FD_to_RN[OCN$FD$outlet] Outlet_RN <- Outlet_RN[Outlet_RN!=0] # remove outlets if the corresponding catchment size is lower than threshold DownNode_RN <- numeric(Nnodes_RN) # for (i in 1:Nnodes_RN){ # if (!(i %in% Outlet_RN)){ # DownNode_RN[i] <- which(W_RN[i,]==1) # }} tmp <- W_RN@rowpointers NotOutlet <- which((tmp[-1] - tmp[-length(tmp)])==1) DownNode_RN[NotOutlet] <- W_RN@colindices # reverse downNode_RN DownNode_RN_rev <- vector("list",Nnodes_RN) for (i in 1:Nnodes_RN){ d <- DownNode_RN[i] if (d!=0){DownNode_RN_rev[[d]] <- c(DownNode_RN_rev[[d]],i) }} A_RN <- OCN$FD$A[RN_mask] X_RN <- OCN$FD$X[RN_mask] Y_RN <- OCN$FD$Y[RN_mask] Z_RN <- OCN$FD$Z[RN_mask] Length_RN <- OCN$FD$leng[RN_mask] # Drainage density DrainageDensity_RN 
<- sum(Length_RN)/(OCN$dimX*OCN$dimY*OCN$cellsize^2) # Connectivity indices at pixel level DegreeIn <- colSums(W_RN) DegreeOut <- rowSums(W_RN) Confluence <- DegreeIn>1 Source <- DegreeIn==0 SourceOrConfluence <- Source|Confluence ConfluenceNotOutlet <- Confluence&(DownNode_RN!=0) ChannelHeads <- SourceOrConfluence #Source|ConfluenceNotOutlet OutletNotChannelHead <- (DownNode_RN==0)&(!ChannelHeads) IsNodeAG <- SourceOrConfluence|OutletNotChannelHead IsNodeAG[breakpoints] <- TRUE whichNodeAG <- which(IsNodeAG) # Calculate slope for each pixel of the river network Slope_RN <- OCN$FD$slope[RN_mask] # sort nodes in downstream direction ind_sort <- sort(A_RN, index.return=TRUE) ind_sort <- ind_sort$ix Upstream_RN <- vector("list",Nnodes_RN) Nupstream_RN <- numeric(Nnodes_RN) for (i in 1:Nnodes_RN){ ups <- as.numeric(DownNode_RN_rev[[ind_sort[i]]]) nodes <- numeric(0) for (u in ups){ nodes <- c(nodes, Upstream_RN[[u]])} Upstream_RN[[ind_sort[i]]] <- c(nodes, ind_sort[i]) Nupstream_RN[ind_sort[i]] <- length(Upstream_RN[[ind_sort[i]]]) } # RN_to_CM[i] indicates outlet to which reach i drains RN_to_CM <- numeric(Nnodes_RN) for (i in 1:OCN$nOutlet){ RN_to_CM[Upstream_RN[[Outlet_RN[i]]]] <- i } if (displayUpdates){message("Calculating network at RN level... 100.0%\n", appendLF = FALSE)} #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# # BUILD NETWORK AT AG LEVEL #### #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# if(displayUpdates){message("Calculating network at AG level... 
\r", appendLF = FALSE)} # Vector that attributes reach ID to all river network pixels #print('Define nodes of aggregated network...',quote=FALSE); Nnodes_AG <- sum(IsNodeAG) Length_AG <- numeric(Nnodes_AG) RN_to_AG <- numeric(Nnodes_RN) AG_to_RN <- vector("list", Nnodes_AG) reachID <- 1 X_AG <- NaN*numeric(Nnodes_AG) Y_AG <- NaN*numeric(Nnodes_AG) Z_AG <- NaN*numeric(Nnodes_AG) A_AG <- NaN*numeric(Nnodes_AG) while (length(whichNodeAG) != 0){ # explore all AG Nodes i <- whichNodeAG[1] # select the first RN_to_AG[i] <- reachID AG_to_RN[[reachID]] <- i j <- DownNode_RN[i] X_AG[reachID] <- X_RN[i] Y_AG[reachID] <- Y_RN[i] Z_AG[reachID] <- Z_RN[i] A_AG[reachID] <- A_RN[i] #Length_AG[reachID] <- Length_RN[i] tmp_length <- Length_RN[i] tmp <- NULL j0 <- j while (!IsNodeAG[j] && j!=0 && tmp_length <= maxReachLength) { tmp <- c(tmp, j) tmp_length <- tmp_length + Length_RN[j] j_old <- j j <- DownNode_RN[j]} if (tmp_length > maxReachLength){ j <- j_old whichNodeAG <- c(whichNodeAG, j) ChannelHeads[j] <- 1 tmp_length <- tmp_length - Length_RN[j] tmp <- tmp[-length(tmp)] } Length_AG[reachID] <- tmp_length RN_to_AG[tmp] <- reachID AG_to_RN[[reachID]] <- c(AG_to_RN[[reachID]], tmp) reachID <- reachID + 1 whichNodeAG <- whichNodeAG[-1] } Nnodes_AG <- length(X_AG) # FD_to_SC: vector of length OCN$FD$nNodes containing subcatchmentID for every pixel of the catchment # AG_to_FD: list containing FD indices of pixels belonging to a given reach # SC_to_FD: list containing FD indices of pixels belonging to a given subcatchment FD_to_SC <- numeric(OCN$FD$nNodes) # initialize FD_to_SC by attributing SC values to pixels belonging to AG level FD_to_SC[RN_mask] <- RN_to_AG # attribute new SC values to pixels corresponding to outlets of catchments without reaches (because the drained area of the catchment is < thrA) Nnodes_SC <- Nnodes_AG + sum(OCN$FD$A[OCN$FD$outlet]<thrA) FD_to_SC[OCN$FD$outlet[OCN$FD$A[OCN$FD$outlet] < thrA]] <- (Nnodes_AG+1):Nnodes_SC IndexHeadpixel <- 
which(OCN$FD$A==OCN$cellsize^2) # find FD pixels corresponding to headwaters AG_to_FD <- vector("list", Nnodes_AG) for(i in 1:Nnodes_AG) { # attribute river network pixels to fields of the AG_to_FD list AG_to_FD[[i]] <- RN_to_FD[AG_to_RN[[i]]] } SC_to_FD <- AG_to_FD[1:Nnodes_AG] # initialize SC_to_FD by attributing the pixels that belong to reaches # add pixels corresponding to outlets of catchments without reaches if (Nnodes_SC > Nnodes_AG){ for (i in (Nnodes_AG+1):Nnodes_SC){ SC_to_FD[[i]] <- OCN$FD$outlet[OCN$FD$A[OCN$FD$outlet]<thrA][i-Nnodes_AG] }} # for (i in 1:length(IndexHeadpixel)){ # i: index that spans all headwater pixels # p <- IndexHeadpixel[i] # p: ID of headwater pixel # pNew <- p; # pNew: pixel downstream of p # k <- 0; # k: SC value of pixel pNew # sub_p <- integer(0) # sub_p is the subset of pixels downstream of pixel p # while (k==0){ # continue downstream movement until a pixel to which the SC has already been attributed is found # k <- FD_to_SC[pNew] # if (k==0){ # sub_p <- c(sub_p,pNew) # pNew <- OCN$FD$downNode[pNew] # }} # FD_to_SC[sub_p] <- k # SC_to_FD[[k]] <- c(SC_to_FD[[k]],sub_p) # } ll <- continue_FD_SC(IndexHeadpixel, FD_to_SC, SC_to_FD, OCN$FD$downNode) FD_to_SC <- ll$FD_to_SC SC_to_FD <- ll$SC_to_FD #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# # CALCULATE PROPERTIES AT AG LEVEL #### #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# #print('W matrix at AG level...',quote=FALSE); # Adjacency matrix at reach level DownNode_AG <- numeric(Nnodes_AG) W_AG <- spam(0,Nnodes_AG,Nnodes_AG) ind <- matrix(0,Nnodes_AG,2) reachID <- sum(ChannelHeads) + 1 for (i in 1:Nnodes_RN){ if (DownNode_RN[i] != 0 && RN_to_AG[DownNode_RN[i]] != RN_to_AG[i]) { DownNode_AG[RN_to_AG[i]] <- RN_to_AG[DownNode_RN[i]] ind[RN_to_AG[i],] <- c(RN_to_AG[i],DownNode_AG[RN_to_AG[i]]) } } ind <- ind[-which(ind[,1]==0),] W_AG[ind] <- 1 Outlet_AG <- RN_to_AG[Outlet_RN] # reverse downNode_AG DownNode_AG_rev <- vector("list",Nnodes_AG) for (i in 1:Nnodes_AG){ d <- DownNode_AG[i] if 
(d!=0){DownNode_AG_rev[[d]] <- c(DownNode_AG_rev[[d]],i) }} # Upstream_AG : list containing IDs of all reaches upstream of each reach (plus reach itself) # sort nodes in downstream direction ind_sort <- sort(A_AG, index.return=TRUE) ind_sort <- ind_sort$ix Upstream_AG <- vector("list",Nnodes_AG) Nupstream_AG <- numeric(Nnodes_AG) for (i in 1:Nnodes_AG){ ups <- as.numeric(DownNode_AG_rev[[ind_sort[i]]]) nodes <- numeric(0) for (u in ups){ nodes <- c(nodes, Upstream_AG[[u]])} Upstream_AG[[ind_sort[i]]] <- c(nodes, ind_sort[i]) Nupstream_AG[ind_sort[i]] <- length(Upstream_AG[[ind_sort[i]]]) } # AG_to_CM[i] indicates outlet to which reach i drains AG_to_CM <- numeric(Nnodes_AG) for (i in 1:OCN$nOutlet){ AG_to_CM[Upstream_AG[[Outlet_AG[i]]]] <- i } ind_sort <- sort(A_AG, index.return=T) ind_sort <- ind_sort$ix if (streamOrderType=="Strahler"){ # calculate Strahler stream order StreamOrder_AG <- numeric(Nnodes_AG) for (j in ind_sort){ tmp <- DownNode_AG_rev[[j]] # set of reaches draining into j if (length(tmp)>0){ IncreaseOrder <- sum(StreamOrder_AG[tmp]==max(StreamOrder_AG[tmp])) # check whether tmp reaches have the same stream order if (IncreaseOrder > 1) { StreamOrder_AG[j] <- 1 + max(StreamOrder_AG[tmp]) # if so, increase stream order } else {StreamOrder_AG[j] <- max(StreamOrder_AG[tmp])} # otherwise, keep previous stream order } else {StreamOrder_AG[j] <- 1} # if j is an headwater, impose StreamOrder = 1 } } else if (streamOrderType=="Shreve"){ # calculate Shreve stream order StreamOrder_AG <- numeric(Nnodes_AG) for (j in ind_sort){ tmp <- DownNode_AG_rev[[j]] # set of reaches draining into j if (length(tmp)>0){ StreamOrder_AG[j] <- sum(StreamOrder_AG[tmp]) } else {StreamOrder_AG[j] <- 1} # if j is an headwater, impose StreamOrder = 1 } } # Calculate slopes of reaches Slope_AG <- numeric(Nnodes_AG) for (i in 1:Nnodes_AG){ if (!(i %in% Outlet_AG)) Slope_AG[i] <- (Z_AG[i] - Z_AG[DownNode_AG[i]])/Length_AG[i] } if(displayUpdates){message("Calculating network at AG 
level... 100.0%\n", appendLF = FALSE)} #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# # CALCULATE PROPERTIES AT SC LEVEL #### #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# if(displayUpdates){message("Calculating network at SC level... \r", appendLF = FALSE)} #print(sprintf('Elapsed time %.2f s',difftime(Sys.time(),t1,units='secs')),quote=FALSE) #t1 <- Sys.time() #print('Subcatchment properties...',quote=FALSE) # calculate subcatchment properties: Local Elevation, Local Drained Area, Upstream Area Z_SC <- numeric(Nnodes_SC) Alocal_SC <- numeric(Nnodes_SC) for (i in 1:Nnodes_SC) { Z_SC[i] <- mean(OCN$FD$Z[SC_to_FD[[i]]]) Alocal_SC[i] <- length(SC_to_FD[[i]])*OCN$cellsize^2 } # drained area at AG level: note that the first Nnodes_AG elements of Alocal_SC correspond to subcatchments with reaches # Areach_AG: includes the areas drained by the reaches Areach_AG <- numeric(Nnodes_AG) for (i in 1:Nnodes_AG) { Areach_AG[i] <- sum(Alocal_SC[Upstream_AG[[i]]]) } # coordinates of AG nodes considered at the downstream end of the respective edge XReach <- numeric(Nnodes_AG) YReach <- numeric(Nnodes_AG) ZReach <- numeric(Nnodes_AG) for (i in 1:Nnodes_AG){ tmp <- AG_to_RN[[i]] ind <- which(A_RN[tmp]==max(A_RN[tmp])) node <- tmp[ind] XReach[i] <- X_RN[node] YReach[i] <- Y_RN[node] ZReach[i] <- Z_RN[node] } XReach[Outlet_AG] <- NaN YReach[Outlet_AG] <- NaN ZReach[Outlet_AG] <- NaN # build neighbouring nodes at FD level # find list of possible neighbouring pixels movement <- matrix(c(0,-1,-1,-1,0,1,1,1,1,1,0,-1,-1,-1,0,1),nrow=2,byrow=TRUE) if (length(OCN$typeInitialState)!=0 | OCN$FD$nNodes==OCN$dimX*OCN$dimY){ # all OCNs NeighbouringNodes <- NN_OCN(OCN$dimX, OCN$dimY, OCN$periodicBoundaries, movement) # NeighbouringNodes <- vector("list", OCN$dimX*OCN$dimY) # cont_node <- 0 # for (cc in 1:OCN$dimX) { # for (rr in 1:OCN$dimY) { # cont_node <- cont_node + 1 # neigh_r <- rep(rr,8)+movement[1,] # neigh_c <- rep(cc,8)+movement[2,] # if (OCN$periodicBoundaries == TRUE){ # neigh_r[neigh_r==0] <- 
OCN$dimY # neigh_c[neigh_c==0] <- OCN$dimX # neigh_r[neigh_r>OCN$dimY] <- 1 # neigh_c[neigh_c>OCN$dimX] <- 1 # } # NotAboundary <- neigh_r>0 & neigh_r<=OCN$dimY & neigh_c>0 & neigh_c<=OCN$dimX # only effective when periodicBoundaries=FALSE # NeighbouringNodes[[cont_node]] <- neigh_r[NotAboundary] + (neigh_c[NotAboundary]-1)*OCN$dimY # }} } else { NeighbouringNodes <- NN_river(OCN$dimX, OCN$dimY, OCN$periodicBoundaries, movement, OCN$FD$toDEM, OCN$FD$nNodes) # NeighbouringNodes <- vector("list", OCN$dimX*OCN$dimY) # for (i in 1:OCN$FD$nNodes){ # nodeDEM <- OCN$FD$toDEM[i] # cc <- (nodeDEM %% OCN$dimX); if (cc==0) cc <- OCN$dimX # rr <- (nodeDEM - cc)/OCN$dimX + 1 # neigh_r <- rep(rr,8)+movement[1,] # neigh_c <- rep(cc,8)+movement[2,] # if (OCN$periodicBoundaries == TRUE){ # neigh_r[neigh_r==0] <- OCN$dimY # neigh_c[neigh_c==0] <- OCN$dimX # neigh_r[neigh_r>OCN$dimY] <- 1 # neigh_c[neigh_c>OCN$dimX] <- 1 # } # NotAboundary <- neigh_r>0 & neigh_r<=OCN$dimY & neigh_c>0 & neigh_c<=OCN$dimX # only effective when periodicBoundaries=FALSE # NeighbouringNodes[[nodeDEM]] <- (neigh_r[NotAboundary]-1)*OCN$dimX + neigh_c[NotAboundary] # } } if (OCN$FD$nNodes < OCN$dimX*OCN$dimY){ # general contour OCNs and real rivers NeighbouringNodes <- NN_FD(OCN$FD$nNodes, OCN$dimX, OCN$dimY, NeighbouringNodes, OCN$FD$toDEM) # NeighbouringNodes_FD <- vector("list", OCN$FD$nNodes) # DEM_to_FD <- numeric(OCN$dimX*OCN$dimY) # DEM_to_FD[OCN$FD$toDEM] <- 1:OCN$FD$nNodes # for (i in 1:OCN$FD$nNodes){ # indDEM <- OCN$FD$toDEM[i] # tmp <- DEM_to_FD[NeighbouringNodes[[indDEM]]] # NeighbouringNodes_FD[[i]] <- tmp[tmp != 0] # } # NeighbouringNodes <- NeighbouringNodes_FD } # Subcatchment adjacency matrix: find which subcatchments have borders in common # W_SC <- spam(0,Nnodes_SC,Nnodes_SC) # indices <- matrix(0,Nnodes_SC*20,2) # k <- 1 # for (i in 1:Nnodes_SC){ # set <- SC_to_FD[[i]] # nodes <- numeric(0) # for (s in set){ nodes <- union(nodes, FD_to_SC[NeighbouringNodes[[s]]])} # NeighSubcatch <- 
setdiff(nodes, i) # indices[k:(k+length(NeighSubcatch)-1),1] <- i # indices[k:(k+length(NeighSubcatch)-1),2] <- NeighSubcatch # k <- k + length(NeighSubcatch) # if (displayUpdates){ # if ((i %% max(1,round(Nnodes_SC*0.01)))==0){ # message(sprintf("Calculating network at SC level... %.1f%%\r",i/Nnodes_AG*100), appendLF = FALSE)}} # } # indices <- indices[1:(k-1),] # W_SC[indices] <- 1 ll <- WSC(Nnodes_SC,SC_to_FD,FD_to_SC,NeighbouringNodes) W_SC <- spam(0,Nnodes_SC,Nnodes_SC) W_SC[cbind(ll[[1]],ll[[2]])] <- 1 # X,Y of subcatchment centroids X_SC <- numeric(Nnodes_SC) Y_SC <- numeric(Nnodes_SC) for (i in 1:Nnodes_SC){ X_SC[i] <- mean(OCN$FD$X[SC_to_FD[[i]]]) Y_SC[i] <- mean(OCN$FD$Y[SC_to_FD[[i]]]) } if(displayUpdates){message("Calculating network at SC level... 100.0%\n", appendLF = FALSE)} #%%%%%%%%%%%%%%%%%%%%%# # EXPORT VARIABLES #### #%%%%%%%%%%%%%%%%%%%%%# #FD level OCN$FD[["toRN"]] <- FD_to_RN OCN$FD[["toSC"]] <- FD_to_SC # RN level OCN$RN[["A"]] <- A_RN OCN$RN[["W"]] <- W_RN OCN$RN[["downNode"]] <- DownNode_RN OCN$RN[["drainageDensity"]] <- DrainageDensity_RN OCN$RN[["leng"]] <- Length_RN OCN$RN[["nNodes"]] <- Nnodes_RN OCN$RN[["nUpstream"]] <- Nupstream_RN OCN$RN[["outlet"]] <- Outlet_RN OCN$RN[["slope"]] <- Slope_RN OCN$RN[["toFD"]] <- RN_to_FD OCN$RN[["toAGReach"]] <- RN_to_AG OCN$RN[["toCM"]] <- RN_to_CM OCN$RN[["upstream"]] <- Upstream_RN OCN$RN[["X"]] <- X_RN OCN$RN[["Y"]] <- Y_RN OCN$RN[["Z"]] <- Z_RN # AG level OCN$AG[["A"]] <- A_AG OCN$AG[["AReach"]] <- Areach_AG OCN$AG[["W"]] <- W_AG OCN$AG[["downNode"]] <- DownNode_AG OCN$AG[["leng"]] <- Length_AG OCN$AG[["nNodes"]] <- Nnodes_AG OCN$AG[["nUpstream"]] <- Nupstream_AG OCN$AG[["outlet"]] <- Outlet_AG OCN$AG[["slope"]] <- Slope_AG OCN$AG[["streamOrder"]] <- StreamOrder_AG OCN$AG[["ReachToFD"]] <- AG_to_FD OCN$AG[["ReachToRN"]] <- AG_to_RN OCN$AG[["toCM"]] <- AG_to_CM OCN$AG[["upstream"]] <- Upstream_AG OCN$AG[["X"]] <- X_AG OCN$AG[["XReach"]] <- XReach OCN$AG[["Y"]] <- Y_AG OCN$AG[["YReach"]] <- YReach 
OCN$AG[["Z"]] <- Z_AG OCN$AG[["ZReach"]] <- ZReach # SC level OCN$SC[["ALocal"]] <- Alocal_SC OCN$SC[["W"]] <- W_SC OCN$SC[["nNodes"]] <- Nnodes_SC OCN$SC[["toFD"]] <- SC_to_FD OCN$SC[["X"]] <- X_SC OCN$SC[["Y"]] <- Y_SC OCN$SC[["Z"]] <- Z_SC # other OCN$thrA <- thrA OCN$streamOrderType <- streamOrderType OCN$maxReachLength <- maxReachLength # re-define AG_to_RN, AG_to_FD, RN_to_AG considering AG nodes as pixels and not reaches AG_to_FDnode <- numeric(Nnodes_AG) AG_to_RNnode <- numeric(Nnodes_AG) for (i in 1:Nnodes_AG){ tmpFD <- AG_to_FD[[i]] AG_to_FDnode[i] <- tmpFD[OCN$FD$A[tmpFD]==min(OCN$FD$A[tmpFD])] tmpRN <- AG_to_RN[[i]] AG_to_RNnode[i] <- tmpRN[OCN$RN$A[tmpRN]==min(OCN$RN$A[tmpRN])] } RN_to_AGnode <- numeric(Nnodes_RN) for (i in 1:Nnodes_AG){ RN_to_AGnode[AG_to_RNnode[i]] <- i } OCN$RN[["toAG"]] <- RN_to_AGnode OCN$AG[["toFD"]] <- AG_to_FDnode OCN$AG[["toRN"]] <- AG_to_RNnode invisible(OCN) }
81ff7a8901902b2b238479969d2d58271430b9ca
f095c50e1d1d8a7bb2229c5e4d101912c44aae10
/man/average.Rd
ced69253f7d8b1e0c77add5c00005811d8de1d4e
[]
no_license
stla/ocpuHello
d5a7cbee6428df1c428dbbc0d254ff90da4a727c
7ab6966cdbea06ae13f314ab753dc8b0c7623281
refs/heads/master
2020-09-13T17:40:22.609150
2016-09-14T11:25:50
2016-09-14T11:25:50
66,934,325
0
0
null
null
null
null
UTF-8
R
false
true
210
rd
average.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hello.R \name{average} \alias{average} \title{Average} \usage{ average(x) } \arguments{ \item{x}{vector} } \description{ Average }
7ee11874302cca238cda60752174a0761533f0db
909157178ed55cf23adbd5b835012f78f510669e
/Basic data/dataframes.R
51301d1ea5dc4bbb87e5b872673ea40550647f39
[]
no_license
tomscott1/R-notes
d7e4c0e45497c02e88b7bfe4fbbecd8cea811d58
d890a5432acddd3618fa7ce1b11aa45b7c0ef3d4
refs/heads/master
2021-01-12T10:52:27.142033
2016-12-04T21:35:53
2016-12-04T21:35:53
72,739,973
0
0
null
null
null
null
UTF-8
R
false
false
2,395
r
dataframes.R
empty <- data.frame() c1 <- 1:10 c2 <- letters[c1] df <- data.frame(col.name.1 = c1, col.name.2 = c2) # import / export .csv write_csv(df, file = 'saved.csv') df2 <- read_csv('saved.csv') # get info on DF nrow(df) # number of rows in dataframe ncol(df) # number of cols in dataframe colnames(df) rownames(df) str(df) summary(df) # referencing cells df[[5,2]] # absolute df[[5,'col.name.2']] # using col name df[[2,'col.name.1']] <- 999 # assign value to cell # referencing rows df[1,] # return df of row 1 # referencing columns df[, 1] # return vector of column 1 df$col.name.1 # return vector of column name df[, 'col.name.1'] # return vector of column name df[['col.name.1']] # return vector of column name df['col.name.1'] # return df of column name df[1] # return df of column 1 df[c('col.name.1','col.name.2')] #return df of column names in order of vector # adding rows and columns df2 <- data.frame(col.name.1 = 2000, col.name.2 = 'new') # new row values dfnew <- rbind(df,df2) # added to old df df$newcol <- 2 * df$col.name.1 # add new column with assignment df$newcol.copy <- df$newcol # copy an existing column df[, 'newcol.copy.2'] <- df$newcol # setting column names colnames(df) # return column names colnames(df) <- c(1,2,3,4,5) # rename all columns using a vector colnames(df)[1] <- 'new col name' # assign a name to a single column # selecting multiple rows df[1:10, ] # select first 10 rows head(df) # returns first 6 rows by default, head(df, 7) = first 7 rows df[-2, ] # select everything but row 2 mtcars[mtcars$mpg > 20, ] # select rows conditionally mtcars[mtcars$mpg > 20 & mtcars$cyl == 6, ] # select rows on multiple conditions mtcars[mtcars$mpg > 20 & mtcars$cyl == 6, c('mpg','cyl', 'hp')] # select multiple rows on conditions and return only certain columns subset(mtcars, mpg > 20 & cyl == 6) # subset syntax of above multiple conditions # selecting multiple columns mtcars[, c(1,2,3)] # returns first 3 columns mtcars[, c('mpg','cyl')] # returns columns based on vector 
names # missing data is.na(mtcars) # returns dframe of boolean values, T if na any(is.na(df)) # returns T if any na any(is.na(mtcars$mpg)) # check a single column(vector) df[is.na(df)] <- 0 # replace all na wih '0' (fillna in python) mtcars$mpg[is.na(mtcars$mpg)] <- mean(mtcars$mpg) # fill na with mean of column
afff1a8a4ea7a4666891dc52edc9934c732369df
169a6494a475f42d0452d3ade4622bde1eb939cc
/R/tax_agg.R
91a4c8dfb4d75fd8daa639368949cc7f699a1ae7
[ "MIT" ]
permissive
ropensci/taxize
d205379bc0369d9dcdb48a8e42f3f34e7c546b9b
269095008f4d07bfdb76c51b0601be55d4941597
refs/heads/master
2023-05-25T04:00:46.760165
2023-05-02T20:02:50
2023-05-02T20:02:50
1,771,790
224
75
NOASSERTION
2023-05-02T20:02:51
2011-05-19T15:05:33
R
UTF-8
R
false
false
4,594
r
tax_agg.R
#' Aggregate species data to given taxonomic rank
#'
#' @export
#' @param x Community data matrix. Taxa in columns, samples in rows.
#' @param rank character; Taxonomic rank to aggregate by.
#' @param db character; taxonomic API to use, 'ncbi, 'itis' or both, see
#' [tax_name()]. Note that each taxonomic data source has
#' their own identifiers, so that if you provide the wrong `db` value
#' for the identifier you could get a result, but it will likely be wrong (not
#' what you were expecting). If using ncbi we recommend getting an API key;
#' see [taxize-authentication]
#' @param messages (logical) If FALSE (Default) suppress messages
#' @param ... Other arguments passed to [get_tsn()] or [get_uid()]
#'
#' @details `tax_agg` aggregates (sum) taxa to a specific taxonomic level.
#' If a taxon is not found in the database (ITIS or NCBI) or the supplied taxon
#' is on higher taxonomic level this taxon is not aggregated.
#'
#'
#' @return A list of class `tax_agg` with the following items:
#' * `x` Community data matrix with aggregated data.
#' * `by` A lookup-table showing which taxa were aggregated.
#' * `n_pre` Number of taxa before aggregation.
#' * `rank` Rank at which taxa have been aggregated.
#'
#' @seealso [tax_name]
#' @examples \dontrun{
#' if (requireNamespace("vegan", quietly = TRUE)) {
#' # use dune dataset
#' data(dune, package='vegan')
#' species <- c("Achillea millefolium", "Agrostis stolonifera",
#' "Aira praecox", "Alopecurus geniculatus", "Anthoxanthum odoratum",
#' "Bellis perennis", "Bromus hordeaceus", "Chenopodium album",
#' "Cirsium arvense", "Comarum palustre", "Eleocharis palustris",
#' "Elymus repens", "Empetrum nigrum", "Hypochaeris radicata",
#' "Juncus articulatus", "Juncus bufonius", "Lolium perenne",
#' "Plantago lanceolata", "Poa pratensis", "Poa trivialis",
#' "Ranunculus flammula", "Rumex acetosa", "Sagina procumbens",
#' "Salix repens", "Scorzoneroides autumnalis", "Trifolium pratense",
#' "Trifolium repens", "Vicia lathyroides", "Brachythecium rutabulum",
#' "Calliergonella cuspidata")
#' colnames(dune) <- species
#'
#' # aggregate sample to families
#' (agg <- tax_agg(dune, rank = 'family', db = 'ncbi'))
#'
#' # extract aggregated community data matrix for further usage
#' agg$x
#' # check which taxa have been aggregated
#' agg$by
#' }
#'
#' # A use case where there are different taxonomic levels in the same dataset
#' spnames <- c('Puma','Ursus americanus','Ursidae')
#' df <- data.frame(c(1,2,3), c(11,12,13), c(1,4,50))
#' names(df) <- spnames
#' out <- tax_agg(x=df, rank = 'family', db='itis')
#' out$x
#'
#' # You can input a matrix too
#' mat <- matrix(c(1,2,3, 11,12,13), nrow = 2, ncol = 3,
#'  dimnames=list(NULL, c('Puma concolor','Ursus americanus','Ailuropoda melanoleuca')))
#' tax_agg(mat, rank = 'family', db='itis')
#' }
tax_agg <- function(x, rank, db = 'ncbi', messages=FALSE, ...)
{
  # A matrix input must carry taxon names as column names; convert to a
  # data.frame without mangling those names (check.names = FALSE).
  if (is.matrix(x)) {
    if (is.null(colnames(x)))
      stop("The community data matrix must have named columns")
    x <- data.frame(x, check.names = FALSE)
  }

  # bring to long format (one row per sample/taxon pair); row names are kept
  # in an explicit column so sample identity survives the melt/dcast round trip
  # df_m <- data.table::melt(x)
  x$rownames <- rownames(x)
  df_m <- setDF(suppressWarnings(data.table::melt(as.data.table(x))))

  # query the taxonomic API (`db`) once per unique taxon for its name at `rank`
  uniq_tax <- as.character(unique(df_m$variable))
  agg <- tax_name(uniq_tax, get = rank, db = db, messages = messages, ...)
  # lookup table: original taxon name -> aggregated name at `rank`
  # (third column of the tax_name() result holds the requested rank's name)
  lookup <- data.frame(variable = uniq_tax, agg = agg[ , 3],
    stringsAsFactors = FALSE)

  # merge lookup with the long-format data
  df_merged <- merge(lookup, df_m, by = 'variable')

  # if not found, or the supplied taxon is already on a higher level,
  # keep the original name -> no aggregation for that taxon
  df_merged$agg <- ifelse(is.na(df_merged$agg), df_merged$variable,
    df_merged$agg)

  # back to wide format, summing abundances of taxa that share a `rank` name
  df_l <- setDF(data.table::dcast(as.data.table(df_merged), rownames ~ agg, value.var = 'value',
    fun.aggregate = sum))
  rownames(df_l) <- df_l$rownames
  df_l$rownames <- NULL

  # restore the original sample (row) order
  df_l <- df_l[x$rownames, ]

  # n_pre subtracts 1 because of the temporary 'rownames' column added to x
  out <- list(x = df_l, by = lookup, n_pre = ncol(x) - 1, rank = rank)
  class(out) <- 'tax_agg'
  return(out)
}

#' @method print tax_agg
#' @export
#' @rdname tax_agg
print.tax_agg <- function(x, ...)
{
  # Compact console summary: aggregation rank plus taxon counts
  # before/after and how many taxa could not be resolved.
  cat("\n")
  writeLines(strwrap("Aggregated community data\n", prefix = "\t"))
  cat(paste("\nLevel of Aggregation:", toupper(x$rank)))
  cat(paste("\nNo. taxa before aggregation:", x$n_pre))
  cat(paste("\nNo. taxa after aggregation:", ncol(x$x)))
  cat(paste("\nNo. taxa not found:", sum(is.na(x$by$agg))))
}
cddf7d68a965d5840c4599f5af088156d867cd16
19519ca0e33425ee70663568855cad34b22b36d0
/plot1.R
d9cb672c25f57363bcc2a054ea33effe4282e11a
[]
no_license
dmdata101/ExData_Plotting1
36c508b51427d5ab7735f347c9211dadcfcc50d4
3756a88144c843e35d3475f4d81c3a6f4859fce1
refs/heads/master
2021-01-18T10:48:12.050466
2015-03-07T16:58:12
2015-03-07T16:58:12
31,813,760
0
0
null
2015-03-07T14:03:45
2015-03-07T14:03:44
null
UTF-8
R
false
false
704
r
plot1.R
# Plot 1: histogram of global active power.
#
# 'household_power_consumption_filtered.txt' holds the pre-filtered subset of
# the full dataset (observations for 1-2 Feb 2007 only); see
# '0-subset full data.R' for how it was produced. Because the file was written
# from an already-clean data frame, no header/sep/na.strings arguments are
# needed when reading it back.
power <- read.table('household_power_consumption_filtered.txt')

# Combine the Date and Time columns into a proper POSIXlt date-time,
# stored in a new 'DateTime' column.
power$DateTime <- strptime(paste(power$Date, power$Time), '%d/%m/%Y %R')

# Draw the histogram straight into a PNG device, then close the device.
png('plot1.png')
hist(power$Global_active_power,
     main = 'Global Active Power',
     col = 'red',
     xlab = 'Global Active Power (kilowatts)')
dev.off()
1a794b97562a097ec82535e3cfaedc2b4f80fb2c
436f71883853fe2a9e48086718bcdefa32ced7b7
/kmeans.R
e2a69030379dcb4e1fb49fb22927b02de8bc7fe9
[]
no_license
yayitsnaomi/Algorithms-in-R
2cc1a53083f30dc0eb34b25a2f9326f733961e87
1aaf41ab4feed94080e3eb21c88cf95d452481b4
refs/heads/master
2020-04-19T06:49:14.799401
2019-03-01T04:56:32
2019-03-01T04:56:32
168,029,191
0
0
null
null
null
null
UTF-8
R
false
false
8,125
r
kmeans.R
# ==========================================================================
# K-means clustering cheat sheet + worked example (Zanesville site traffic).
# Sections: fitting, choosing K (elbow plot), custom S3 plot/summary methods
# for kmeans fits, feature engineering, and per-topic sub-clusterings.
# ==========================================================================
library(stringr) # regular expressions leveraged in feature engineering
library(dplyr)   # data transformation
library(tidyr)   # data transformation

## Kmeans
# Cluster analysis steps
# variable construction/feature engineering
# consider initial splits
# variable/feature selection - good clusters are actionable
# pick clustering method
#   kmeans - numerical variables only
#   hierarchical - does not work on giant data sets
#   mixture models and latent class: handles numerical and/or categorical variables
# transform variables if necessary
#   symmetrize and standardize or use quantile method
#   transform always if units are incommensurate or if distribution is skewed
# Find various cluster solutions, profile them, and pick one

# Fit a k-means model.
# NOTE(review): `cluster_data` is a placeholder for a numeric feature matrix;
# it is not defined in this file -- supply your own before running.
fit <- kmeans(cluster_data, centers = 5, iter.max = 1000, nstart = 100)
fit$cluster  # which cluster group each item is in
fit$size     # number of items in each cluster
fit$centers  # means of each cluster
fit$withinss # sum of squares within each cluster
(fit$withinss) / (fit$size * ncol(cluster_data))       # variance within each cluster
sqrt((fit$withinss) / (fit$size * ncol(cluster_data))) # standard deviation within each cluster

# Elbow plot - figure out optimal K to maximize SSB and minimize SSE.
# BUGFIX: the original loop clustered `ent2`, which is undefined at this
# point in the script; it now uses `cluster_data`, matching the fit above.
diff_ncluster <- 3:12
withinss <- vector(mode = 'numeric', length = length(diff_ncluster))
inbess   <- vector(mode = 'numeric', length = length(diff_ncluster))
for (j in seq_along(diff_ncluster)) {
  fit <- kmeans(cluster_data, centers = diff_ncluster[j], nstart = 100, iter.max = 100)
  withinss[j] <- fit$tot.withinss # total within-cluster SS at this K
  inbess[j]   <- fit$betweenss    # between-cluster SS at this K
}
plot(diff_ncluster, withinss, type = 'b', xlab = '#clusters', ylab = '',
     ylim = c(0, max(inbess)), col = 'red')
lines(diff_ncluster, inbess, type = 'b', col = 'blue')
legend("center", legend = c("withinss", "betweenss"), col = c("red", "blue"),
       lty = 1, cex = 1)

# Dot plot of cluster centers: one panel per cluster, one dot per variable.
# Meaningful clusters show the dots in different positions across panels.
# `boxplot` is kept for interface compatibility but is currently unused.
plot.kmeans <- function(fit, boxplot = FALSE) {
  require(lattice) # for dotplot(); NOTE(review): library() preferred
  p <- ncol(fit$centers) # number of features
  k <- nrow(fit$centers) # number of clusters
  plotdat <- data.frame(
    mu   = as.vector(fit$centers),
    clus = factor(rep(1:k, p)),
    var  = factor(0:(p * k - 1) %/% k, labels = colnames(fit$centers))
  )
  print(dotplot(var ~ mu | clus, data = plotdat,
                panel = function(...) {
                  panel.dotplot(...)
                  panel.abline(v = 0, lwd = .1)
                },
                layout = c(k, 1),
                xlab = "Cluster Mean"))
  invisible(plotdat)
}

# S3 summary for kmeans fits: per-cluster sizes, shares, centers and RMSE,
# plus overall SSE, SSB, R-squared and pseudo F.
summary.kmeans <- function(fit) {
  p <- ncol(fit$centers)   # number of x features
  k <- nrow(fit$centers)   # number of clusters
  n <- sum(fit$size)       # number of observations classified into clusters
  sse <- sum(fit$withinss) # sum of squares within clusters
  # weighted mean of the centers --> grand mean of the dataset
  xbar <- t(fit$centers) %*% fit$size / n
  # sum of squares between clusters
  ssb <- sum(fit$size * (fit$centers - rep(1, k) %*% t(xbar))^2)
  print(data.frame(
    n = c(fit$size, n),
    Pct = (round(c(fit$size, n) / n, 2)), # percentage of obs in each cluster
    round(rbind(fit$centers, t(xbar)), 2),
    RMSE = round(sqrt(c(fit$withinss / (p * (fit$size - 1)), sse / (p * (n - k)))), 4)
  ))
  cat("SSE = ", sse, "; SSB = ", ssb, "\n")
  cat("R-Squared = ", ssb / (ssb + sse), "\n")
  cat("Pseudo F = ", (ssb / (k - 1)) / (sse / (n - k)), "\n\n")
  invisible(list(sse = sse, ssb = ssb, Rsqr = ssb / (ssb + sse),
                 F = (ssb / (k - 1)) / (sse / (n - k))))
}
summary.kmeans(fit)

# Pseudo F - if we assume that "natural groupings" means homogeneous within
# and heterogeneous across, we can evaluate a cluster solution with pseudo F:
#   F = (SSB/[p(k-1)])/(SSE/[p(n-k)])
# If a cluster solution "fits the data," the between-cluster variance will be
# large, the within-cluster variance will be small, and the pseudo F will
# spike. Even if there are no spikes, the solution might still be
# interesting; then use judgment to pick one.
# Caution: when there is no spike, the solution may be very sensitive to
# sampling variation, starting values, etc. You may also look for where SSE
# or R2 flattens out.
plot.kmeans(fit)

## Another cluster visualization - good for plotting and visualizing two
## variables at a time from a cluster.
# NOTE(review): fviz_cluster() comes from the factoextra package, which is
# not loaded in this file.
fviz_cluster(fit, geom = "point", data = cluster_data, xlab = "Desktop", ylab = "Mobile")

# standardize columns (often necessary before clustering using kmeans)
scale(cluster_data)

# Example: cluster Zanesville readers by the site sections/topics they visit.
# Read in the raw Zanesville page-view extracts.
z1 <- read.csv("zanesville1.csv",stringsAsFactors = FALSE)
z2 <- read.csv("zanesville2.csv",stringsAsFactors = FALSE)
z3 <- read.csv("zanesville3.csv",stringsAsFactors = FALSE)
z4 <- read.csv("zanesville4.csv",stringsAsFactors = FALSE)
#View(z1)

# Consolidate into a single df with the required columns.
z <- rbind(z1[,c("fire_fly_id", "section","content_type", "sub_section", "topic", "event_date")],
           z2[,c("fire_fly_id", "section","content_type", "sub_section", "topic", "event_date")],
           z3[,c("fire_fly_id", "section","content_type", "sub_section", "topic", "event_date")],
           z4[,c("fire_fly_id", "section","content_type", "sub_section", "topic", "event_date")])

# Regular expression: keep everything after the last ':' in the hierarchy.
pattern <- "([^:]*)$"
z$sub_section_parsed <- str_extract(z$sub_section, regex(pattern))
z$topic_parsed <- str_extract(z$topic, regex(pattern))

# Per-user visit counts, one column per sub-section / topic.
z_features_sub_section <- z %>%
  group_by(fire_fly_id,sub_section_parsed) %>%
  summarize(count_sub_section =n()) %>%
  spread(sub_section_parsed, count_sub_section)
z_features_topic <- z %>%
  group_by(fire_fly_id,topic_parsed) %>%
  summarize(count_topic =n()) %>%
  spread(topic_parsed, count_topic)

# Users who never visited a section get 0 rather than NA.
z_features_sub_section[is.na(z_features_sub_section)] <- 0

# log1p-transform all columns to tame the skew in visit counts.
temp <- data.frame(apply(z_features_sub_section,2,function(x) log(x+1)))

# K-means on the engineered features (drop the fire_fly_id column --
# we do not want the identifier to drive the clustering).
temp1 <- temp[,-1]
View(temp1)
fit1 <- kmeans(temp1[,211:214], centers = 5, nstart=100, iter.max = 100)
summary(fit1) # dispatches to summary.kmeans() defined above
fit1$size
plot(fit1)    # dispatches to plot.kmeans() defined above

# Available section columns:
#allthemoms, V1, blogs, baseball, arts, announcements, celebrations, business, bugpages,
#crime, columnists, columnist, college, dining, deals, cycling, error, entertainment,
#education, editorials, food, flights, fantasy, extras, extended, experience, golf, get.access, ftw,
#home, high.school, insider, humankind, life, lancaster.festival, local, mlb, nation, music, movies
#money, news, ncaaf, nation.now, people, outdoors, opinion, olympics, ohio.state, nhl, nfl,
#politics.extra, politics, personalfinance, reviewedcom, rentals, real.estate, readers,
#sports, special.reports, search, tech, static, state, staff, usi, ufc, ue, tv, travel, traffic,
#tennis, wellness, weather, ustoa, world, wnba, winter.olympics.2018

# business
bus1 <- temp1[,c( "business", "crime", "college", "education", "editorials",
                  "golf", "insider", "opinion", "politics", "personalfinance",
                  "real.estate", "traffic", "tech")]
# BUGFIX: the original clustered `ent2` (undefined); the business feature
# subset `bus1` built just above is what this section clusters.
fit1 <- kmeans(bus1, centers = 9, nstart=100, iter.max = 100)
summary(fit1)
fit1$size
plot(fit1)

# explorer
# NOTE(review): `exp` shadows base::exp() for the rest of the session.
exp <- temp1[,c( "celebrations", "dining", "life","people", "nation.now",
                 "outdoors" , "get.access")]
fit1 <- kmeans(exp, centers = 8, nstart=100, iter.max = 100)
summary(fit1)
fit1$size
plot(fit1)

# entertainment
ent <- temp1[,c("allthemoms", "V1", "blogs", "arts", "announcements",
                "celebrations", "bugpages", "columnists", "columnist",
                "dining", "deals", "entertainment")]
fit1 <- kmeans(ent, centers = 7, nstart=100, iter.max = 100)
summary(fit1)
fit1$size
plot(fit1)

# sports
sports <- temp1[, c("baseball", "college", "entertainment", "fantasy", "golf",
                    "get.access", "ftw", "local", "mlb", "nation", "olympics",
                    "ohio.state", "nhl", "nfl", "state")]
fit1 <- kmeans(sports, centers = 6, nstart=100, iter.max = 100)
summary(fit1)
fit1$size
plot(fit1)
df6c31c292b11024a4c63b0a5812017b56f5190f
6d9b097cef9ce745ed5232b4f99bbbd3a65df770
/man/selectContrast.Rd
68c0e587471c4e6feaa0df1aeb5b52c2bc82c730
[]
no_license
arturochian/MRIaggr
e4bcb4c933a7d728259b7cfba967f27948c5516e
9091de45d349ccad0eabc7e2f99edd56f9874cff
refs/heads/master
2021-01-16T19:40:08.102222
2015-02-27T00:00:00
2015-02-27T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
8,609
rd
selectContrast.Rd
\name{selectContrast}
\title{Extract contrast parameters}
\alias{selectContrast}
\alias{selectContrast,Carto3D-method}
\alias{selectContrast,MRIaggr-method}
\description{
Extract the contrast parameters from a \code{\linkS4class{Carto3D}} or a \code{\linkS4class{MRIaggr}} object.
}
\usage{
\S4method{selectContrast}{Carto3D}(object,num=NULL,na.rm=FALSE,coords=TRUE,
format="any")

\S4method{selectContrast}{MRIaggr}(object,param=NULL,num=NULL,format="any",
slice_var="k",coords=FALSE,hemisphere="both",norm_mu=FALSE,norm_sigma=FALSE,
na.rm=FALSE,subset=NULL)
}
\arguments{
\item{object}{an \code{object} of class \code{\linkS4class{Carto3D}} or \code{\linkS4class{MRIaggr}}. REQUIRED.}
\item{param}{the contrast parameters to extract. \emph{character vector} or \code{NULL}.}
\item{num}{the slices to extract. \emph{numeric vector} or \code{NULL}.}
\item{format}{the format of the output. Can be \code{"matrix"}, \code{"data.frame"} or \code{"any"}.}
\item{slice_var}{the type of slice to extract. \code{"i"} for sagittal, \code{"j"} for coronal and \code{"k"} for transverse. \emph{character}.}
\item{coords}{the coordinates that should be extracted. \emph{logical} or any of \code{"i"} \code{"j"} \code{"k"}.}
\item{hemisphere}{the hemisphere to extract. \emph{character}. See the details section.}
\item{norm_mu}{the type of centering to apply on the parameter values. \emph{character}. See the details section.}
\item{norm_sigma}{the type of scaling to apply on the parameter values. \emph{character}. See the details section.}
\item{na.rm}{should observations with missing values be removed? \emph{logical}.}
\item{subset}{the subset of observations to extract. \emph{positive integer vector} or \code{NULL}, the latter leading to use of all observations.}
}
\details{
ARGUMENTS: \cr
Information about the \code{param} argument can be found in the details section of \code{\link{initParameter}}.

Information about the \code{num} argument can be found in the details section of \code{\link{initNum}}.
Possible values for the \code{hemisphere} argument are: \itemize{ \item \code{"both"} : select all the observations. \item \code{"left"} : select the observations from the left hemisphere. \item \code{"right"} : select the observations from the right hemisphere. \item \code{"lesion"} : select the observations belonging to the hemisphere(s) that contain(s) the lesion (if any). \item \code{"controlateral"} : select the observations belonging to the hemisphere(s) that do not contain(s) the lesion (if any). } To select observations from a given hemisphere (all values except \code{"both"}), the parameter \code{hemisphere} must have been affected to the object using, for instance, \code{\link{calcHemisphere}}. In addition for \code{"lesion"} and \code{"controlateral"} values, the slot \code{@hemispheres} has to be filled using, for instance, \code{\link{calcHemisphere}}. Possible values for the centering argument (\code{norm_mu}) and the scaling argument (\code{norm_sigma}) are: \itemize{ \item \code{"FALSE"} : no normalization \item \code{"global"} : the centering or scaling value is computed using all the observations. \item \code{"global_1slice"} : the centering or scaling value is computed using all the observations that belong to the slice of the observation to normalize. \item \code{"global_3slices"} : the centering or scaling value is computed using all the observations that belong to the slice of the observation to normalize, the slice above (if any) and the slice below (if any). \item \code{"controlateral"} : the centering or scaling value is computed using the observations from the controlateral hemisphere. \item \code{"controlateral_1slice"} : the centering or scaling value is computed using the observations from the controlateral hemisphere that belong to the slice of the observation to normalize. 
\item \code{"controlateral_3slices"} : the centering or scaling value is computed using the observations from the controlateral hemisphere that belong to the slice of the observation to normalize, the slice above (if any) and the slice below (if any).
\item \code{"default_value"} : the default value of the parameter stored in the slot \code{@default_value} is used for the centering (for \code{norm_mu} only).
}

If \code{coords} is set to \code{TRUE} the dataset containing the contrast parameter values will also contain all the coordinates. If \code{coords} is set to \code{FALSE}, it will not contain any coordinates.

Argument \code{subset} can be a \emph{character} value that refers to a logical parameter in the \code{object} defining the subset of observations to extract.

FUNCTION: \cr
Each of the \code{num}, \code{hemisphere} and \code{subset} arguments defines a subset of the total set of observations. It is the intersection of all these three subsets that is extracted.

When a normalisation is requested to center (resp. scale) the data, the normalisation value is extracted for each parameter in the element of the slot normalization that matches the argument \code{norm_mu} (resp. \code{norm_sigma}).
The parameter values are first centered by subtraction of the value returned by \code{norm_mu}. Then they are scaled by division with the value returned by \code{norm_sigma}.
}
\value{
A \emph{data.frame} or a \emph{matrix} containing the parameters in columns and the observations in rows.
If only one parameter is requested and the format is set to \code{"any"} then a \emph{vector} containing the parameter values is returned.
}
\seealso{
\code{\link{calcControlateral}}, \code{\link{calcRegionalContrast}}, \code{\link{calcFilter}} and \code{\link{calcTissueType}} to compute and affect the modified contrast parameters.
\cr \code{\link{affectContrast<-}} to affect new contrast parameters.
\cr \code{\link{calcNormalization}} to compute and affect the normalisation values.
\cr \code{\link{affectNormalization<-}} to affect the normalization values when obtained from an external source. \cr \code{\link{calcHemisphere}} and \code{\link{calcControlateral}} to compute and affect the hemispheres. \cr \code{\link{affectHemisphere<-}} and \code{\link{affectContrast<-}} to affect hemispheres obtained from an external source. } \examples{ #### 1- Carto3D method #### ## load nifti files and convert them to Carto3D path.nifti_files <- system.file("nifti",package = "MRIaggr") nifti.Pat1_TTP_t0 <- readMRI(file=file.path(path.nifti_files,"TTP_t0"),format="nifti") Carto3D.Pat1_TTP_t0 <- constCarto3D(nifti.Pat1_TTP_t0,identifier="Pat1",param="TTP_t0") ## select all observations carto1 <- selectContrast(Carto3D.Pat1_TTP_t0) dim(carto1) ## select observations from slices 1 to 3 and return the result into a data.frame carto2 <- selectContrast(Carto3D.Pat1_TTP_t0,num=1:3,coords=FALSE,format="data.frame") dim(carto2) ## select observations from slices 1 to 3 and return the result into a vector carto3 <- selectContrast(Carto3D.Pat1_TTP_t0,num=1:3,coords=FALSE) length(carto3) #### 2- MRIaggr method #### ## load a MRIaggr object data("MRIaggr.Pat1_red", package="MRIaggr") ## select all parameters and all observations carto <- selectContrast(MRIaggr.Pat1_red) dim(carto) head(carto) ## select a subset of parameters carto <- selectContrast(MRIaggr.Pat1_red,param=c("DWI_t0","T2_FLAIR_t2")) dim(carto) head(carto) ## select a subset of parameters on slices 1 to 3 carto <- selectContrast(MRIaggr.Pat1_red,num=1:3,param=c("DWI_t0","T2_FLAIR_t2")) dim(carto) head(carto) ## select a subset of parameters on slices 1 to 3 and normalized the center ## the values using the controlateral carto <- selectContrast(MRIaggr.Pat1_red,num=1:3,param=c("DWI_t0","T2_FLAIR_t2"), norm_mu="controlateral") dim(carto) head(carto) ## select only observations which are lesioned at admission (i.e. 
MASK_DWI_t0=TRUE) carto <- selectContrast(MRIaggr.Pat1_red,subset="MASK_DWI_t0", param=c("DWI_t0","T2_FLAIR_t2","MASK_DWI_t0")) dim(carto) head(carto) ## select only observations which are lesioned at admission (i.e. MASK_DWI_t0=TRUE) with coordinates carto <- selectContrast(MRIaggr.Pat1_red,subset="MASK_DWI_t0", param=c("DWI_t0","T2_FLAIR_t2","MASK_DWI_t0"),coords=TRUE) dim(carto) head(carto) ## select only observations for which i=55 carto <- selectContrast(MRIaggr.Pat1_red,slice_var="i",num=55,coords=TRUE) dim(carto) head(carto) } \concept{select.} \keyword{methods}
8bd4134a97f9aa5590d82696ac574f4006301417
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.application.integration/man/sqs_get_queue_attributes.Rd
e9a32c711505455ca3df2a7a777761bebdc34024
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
9,743
rd
sqs_get_queue_attributes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sqs_operations.R \name{sqs_get_queue_attributes} \alias{sqs_get_queue_attributes} \title{Gets attributes for the specified queue} \usage{ sqs_get_queue_attributes(QueueUrl, AttributeNames = NULL) } \arguments{ \item{QueueUrl}{[required] The URL of the Amazon SQS queue whose attribute information is retrieved. Queue URLs and names are case-sensitive.} \item{AttributeNames}{A list of attributes for which to retrieve information. The \code{AttributeNames} parameter is optional, but if you don't specify values for this parameter, the request returns empty results. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. The following attributes are supported: The \code{ApproximateNumberOfMessagesDelayed}, \code{ApproximateNumberOfMessagesNotVisible}, and \code{ApproximateNumberOfMessages} metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency. \itemize{ \item \code{All} – Returns all values. \item \code{ApproximateNumberOfMessages} – Returns the approximate number of messages available for retrieval from the queue. \item \code{ApproximateNumberOfMessagesDelayed} – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter. \item \code{ApproximateNumberOfMessagesNotVisible} – Returns the approximate number of messages that are in flight. Messages are considered to be \emph{in flight} if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window. 
\item \code{CreatedTimestamp} – Returns the time when the queue was created in seconds (\href{https://en.wikipedia.org/wiki/Unix_time}{epoch time}). \item \code{DelaySeconds} – Returns the default delay on the queue in seconds. \item \code{LastModifiedTimestamp} – Returns the time when the queue was last changed in seconds (\href{https://en.wikipedia.org/wiki/Unix_time}{epoch time}). \item \code{MaximumMessageSize} – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it. \item \code{MessageRetentionPeriod} – Returns the length of time, in seconds, for which Amazon SQS retains a message. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the \code{MessageRetentionPeriod} attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the \code{MessageRetentionPeriod} is reduced below the age of existing messages. \item \code{Policy} – Returns the policy of the queue. \item \code{QueueArn} – Returns the Amazon resource name (ARN) of the queue. \item \code{ReceiveMessageWaitTimeSeconds} – Returns the length of time, in seconds, for which the \code{\link[=sqs_receive_message]{receive_message}} action waits for a message to arrive. \item \code{VisibilityTimeout} – Returns the visibility timeout for the queue. For more information about the visibility timeout, see \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html}{Visibility Timeout} in the \emph{Amazon SQS Developer Guide}. } The following attributes apply only to \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html}{dead-letter queues:} \itemize{ \item \code{RedrivePolicy} – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. 
The parameters are as follows: \itemize{ \item \code{deadLetterTargetArn} – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of \code{maxReceiveCount} is exceeded. \item \code{maxReceiveCount} – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the \code{ReceiveCount} for a message exceeds the \code{maxReceiveCount} for a queue, Amazon SQS moves the message to the dead-letter-queue. } \item \code{RedriveAllowPolicy} – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows: \itemize{ \item \code{redrivePermission} – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are: \itemize{ \item \code{allowAll} – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue. \item \code{denyAll} – No source queues can specify this queue as the dead-letter queue. \item \code{byQueue} – Only queues specified by the \code{sourceQueueArns} parameter can specify this queue as the dead-letter queue. } \item \code{sourceQueueArns} – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the \code{redrivePermission} parameter is set to \code{byQueue}. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the \code{redrivePermission} parameter to \code{allowAll}. } } The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. 
The following attributes apply only to \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html}{server-side-encryption}: \itemize{ \item \code{KmsMasterKeyId} – Returns the ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms}{Key Terms}. \item \code{KmsDataKeyReusePeriodSeconds} – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. For more information, see \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work}{How Does the Data Key Reuse Period Work?}. \item \code{SqsManagedSseEnabled} – Returns information about whether the queue is using SSE-SQS encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html}{SSE-KMS} or \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html}{SSE-SQS}). } The following attributes apply only to \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html}{FIFO (first-in-first-out) queues}: \itemize{ \item \code{FifoQueue} – Returns information about whether the queue is FIFO. For more information, see \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html}{FIFO queue logic} in the \emph{Amazon SQS Developer Guide}. 
To determine whether a queue is \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html}{FIFO}, you can check whether \code{QueueName} ends with the \code{.fifo} suffix. \item \code{ContentBasedDeduplication} – Returns whether content-based deduplication is enabled for the queue. For more information, see \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html}{Exactly-once processing} in the \emph{Amazon SQS Developer Guide}. } The following attributes apply only to \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html}{high throughput for FIFO queues}: \itemize{ \item \code{DeduplicationScope} – Specifies whether message deduplication occurs at the message group or queue level. Valid values are \code{messageGroup} and \code{queue}. \item \code{FifoThroughputLimit} – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are \code{perQueue} and \code{perMessageGroupId}. The \code{perMessageGroupId} value is allowed only when the value for \code{DeduplicationScope} is \code{messageGroup}. } To enable high throughput for FIFO queues, do the following: \itemize{ \item Set \code{DeduplicationScope} to \code{messageGroup}. \item Set \code{FifoThroughputLimit} to \code{perMessageGroupId}. } If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified. For information on throughput quotas, see \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html}{Quotas related to messages} in the \emph{Amazon SQS Developer Guide}.} } \description{ Gets attributes for the specified queue. See \url{https://www.paws-r-sdk.com/docs/sqs_get_queue_attributes/} for full documentation. } \keyword{internal}
6f7dbdd7bf33247877cd1a6e033bb7af9e750d77
706f4099360932a2c1d8e672e9369cf6bcfb8274
/trade.r
e98d310c3e6896bdfdc271048cd1b442bb80db2e
[]
no_license
belmount/tmpy
579794d047f5eaa97a48e16b8fa3dd89c57a3d7e
321726b725decd55e3337a38936a0d6718066b50
refs/heads/master
2021-05-04T11:09:19.355153
2019-03-26T10:27:42
2019-03-26T10:27:42
45,916,807
0
0
null
null
null
null
UTF-8
R
false
false
4,426
r
trade.r
# trade.r -- simple SMA-crossover backtest with stop-loss and trailing stop,
# run over Chinese A-share daily data downloaded from Yahoo Finance.
# Requires the quantmod package (which also attaches xts/zoo/TTR).
library(quantmod)

rm(list=ls())  # NOTE(review): clearing the workspace inside a script is an anti-pattern; kept for compatibility

# Daily OHLC data (network access required); auto.assign = FALSE returns the xts object
data <- getSymbols('600660.SS', auto.assign = FALSE)  # Fuyao Glass
data2 <- getSymbols('002022.SZ', auto.assign = FALSE) # Kehua Bio-Engineering
data3 <- getSymbols('000001.SS', auto.assign = FALSE) # SSE Composite Index
data4 <- getSymbols('600754.SS', auto.assign = FALSE) # Jinjiang Hotels
data5 <- getSymbols('000568.SZ', auto.assign = FALSE) # Luzhou Laojiao

# Generate the indicators needed to trade: short/long SMA of the adjusted
# close, ATR over the short window, and a long-entry signal column `sig`.
gen.indicators <- function(data, short.p, long.p){
  ma.s <- SMA(Ad(data), short.p)
  ma.l <- SMA(Ad(data), long.p)
  atr <- ATR(data, n = short.p)
  indicators <- cbind(ma.s = ma.s, ma.l = ma.l, atr = atr)
  # signal: adjusted price above the short MA and short MA above the long MA.
  # BUG FIX: the original condition also and-ed in `rollmax(data, )`, an
  # incomplete call (zoo::rollmax has no default window `k`) that errors at
  # runtime; the term has been removed.
  indicators$sig <- 0
  indicators$sig[Ad(data) > ma.s & ma.s > ma.l] <- 1
  indicators <- na.omit(indicators)
  return(indicators)
}

# Derive the alternating entry/exit rows from transitions of the signal
# column; rbind on xts objects merges the two sets back into time order.
gen.trade_range <- function(indicators) {
  sig.change <- indicators$sig - Lag(indicators$sig)
  entry_pts <- indicators[sig.change > 0]
  exit_pts <- indicators[sig.change < 0]
  trade.range <- rbind(entry_pts, exit_pts)
  return(trade.range)
}

# First date on which the adjusted price falls below stop.loss.pct of the
# buy price, or NA if the stop is never hit (or disabled with pct == 1).
stop.loss <- function(data, buy.price, stop.loss.pct) {
  sig <- data[Ad(data) / buy.price < stop.loss.pct]
  if (nrow(sig) == 0 || stop.loss.pct == 1) {
    return(NA)
  }
  index(first(sig))
}

# First date on which the adjusted price drops more than trails.threshold
# below its running maximum, or NA if the trailing stop is never hit.
trail.stop <- function(data, trails.threshold){
  # renamed from `cummax` to avoid shadowing base::cummax
  running.max <- cummax(Ad(data))
  sig <- data[Ad(data) < running.max - trails.threshold]
  if (nrow(sig) == 0) {
    return(NA)
  }
  index(first(sig))
}

# Per-trade performance: max gain, max drawdown and final return of the
# cumulative-return curve over the holding period.
performance.analytics <- function(data){
  invest.return <- cumprod(na.fill(Ad(data) / Ad(lag(data)), 1))
  max.gain <- max(invest.return)
  final.return <- last(invest.return)
  # BUG FIX: drawdown is measured against the running maximum of the equity
  # curve (cummax); the original divided by cumsum(), which is not a drawdown.
  max.drawdown <- 1 - min(invest.return / cummax(invest.return))
  return(data.frame(max.gain = max.gain, max.dd = max.drawdown,
                    final.return = final.return))
}

# Default parameters (the grid sweep below supplies its own values)
short.p <- 50
long.p <- 150
stop.loss.pct <- 0.96
trail.factor <- 3

# Walk the entry/exit rows, simulate each trade (buy at next day's open,
# exit at the earliest of signal exit / stop loss / trailing stop) and
# collect per-trade performance analytics.
back_test <- function(data, trade.range, stop.loss.pct, trail.factor, indicators) {
  pa <- data.frame()
  # daily return curve while in a trade
  # NOTE(review): retcurve is filled in below but never returned, so the
  # equity-curve plotting code at the bottom of this script cannot see it.
  retcurve <- xts(order.by = index(data))
  retcurve$t <- 0
  for (i in seq_len(nrow(trade.range))) {
    idx.day <- index(trade.range[i])
    # only act on entry rows; exit rows are consumed as end dates below
    if (trade.range$sig[idx.day] == 0) {
      next
    }
    # holding period ends at the next signal change, or at the last data point
    if (i + 1 > nrow(trade.range)) {
      end.day <- last(index(data))
    } else {
      end.day <- index(trade.range[i + 1])
    }
    date.range <- paste(idx.day, end.day, sep='/')
    trade <- data[date.range]
    # buy at next day's open, rescaled to the adjusted-price scale
    buy.price <- as.numeric(Op(data)[[data[idx.day, which.i = TRUE] + 1]])
    factor <- as.numeric(first(Ad(trade)) / first(Cl(trade)))
    buy.price <- buy.price * factor
    trail.threshold <- as.numeric(trail.factor * factor * indicators$atr[idx.day])
    # the earliest of stop loss, trailing stop and signal exit wins
    stoploss.day <- stop.loss(trade, buy.price, stop.loss.pct)
    trail.stop.day <- trail.stop(trade, trail.threshold)
    exit.day <- as.Date(min(stoploss.day, end.day, trail.stop.day, na.rm = TRUE))
    date.range <- paste(idx.day, exit.day, sep='/')
    retcurve$t[date.range] <- Delt(Ad(trade))
    pa <- rbind(pa, performance.analytics(data[date.range]))
  }
  return(pa)
}

# Print a short performance report for one backtest result.
disp.performance <- function(pa){
  print(paste('trade count', nrow(pa)))
  print(paste('trade return avg.', mean(pa[, 3]),
              'trade final return', last(cumprod(pa[, 3]))))
  print(summary(pa))
  print("\n\n")
}

# Run one full backtest for a given parameter combination.
do.param.test <- function(data, short.p, long.p, stop.loss.pct, trail.factor) {
  indicators <- gen.indicators(data, short.p, long.p)
  trade.range <- gen.trade_range(indicators)
  back_test(data, trade.range, stop.loss.pct, trail.factor, indicators)
}

# Build the grid of parameter combinations to sweep.
gen.paramgrid <- function(){
  s <- c(15)             # short MA window; was seq(15, 30, 5)
  l <- seq(3, 5)         # long window as a multiple of the short window
  stopl <- seq(0.96, 1.0, 0.01)
  trail.f <- seq(0, 3)
  params <- expand.grid(s = s, l = l, stop.l = stopl, trail.f = trail.f)
  params$l <- params$l * params$s
  return(params)
}

# Parameter sweep over data4
params <- gen.paramgrid()
for (id in seq_len(nrow(params))) {
  i <- params[id, ]
  print(paste(i$s, i$l, i$stop.l, i$trail.f))
  pa <- do.param.test(data4, i$s, i$l, i$stop.l, i$trail.f)
  disp.performance(pa)
}

# Single run on `data`
# NOTE(review): back_test() returns the per-trade performance table, which has
# no `t` column, so the equity-curve plot below operated on NULL in the
# original as well.  Left unchanged pending a decision on whether back_test()
# should also expose its return curve.
pa <- do.param.test(data, 15, 60, 0.96, 3)
pa$t <- na.fill(pa$t, 0)
pa$s <- cumprod(pa$t + 1)
plot(pa$s)
e041154540ddbc7878e770ca56d81e3433a64d16
c3026ac4a49b0ff3361cf925a49965bf26ef6bf5
/R/ggirl.R
eef47771f0f366f9ad8f7bea75dd478222fb6631
[ "MIT" ]
permissive
keyegon/ggirl
d85e806c326a3688483949465cafd628ba60a5e6
7e042953dcacf6c569a3c31afe672a5b97456dbf
refs/heads/master
2023-07-03T01:17:04.654516
2021-08-10T12:37:23
2021-08-10T12:37:23
null
0
0
null
null
null
null
UTF-8
R
false
false
1,624
r
ggirl.R
#' Create an address object #' #' This function takes string inputs and converts them into an address object that can be used to send irl art (or as a return address). #' #' @param name The name for the address #' @param address_line_1 The first line of the address #' @param address_line_2 (Optional) A second address line, such as an apartment number. #' @param city the city #' @param state (Optional) The state to send to #' @param postal_code The postal code (ZIP code in the US) #' @param country The 2-character [ISO-1366 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) for the country. Non-US shipping is experimental! #' #' @examples #' send_address <- address(name = "RStudio", address_line_1 = "250 Northern Ave", #' city = "Boston", state = "MA", postal_code = "02210", country = "US") #' #' @export address <- function(name, address_line_1, address_line_2 = NULL, city, state = NULL, postal_code, country){ address_set <- list(name = name, address_line_1 = address_line_1, address_line_2 = address_line_2, city = city, state = state, postal_code = postal_code, country = country) # Check country is valid if (!is.character(country) || nchar(country) != 2){ stop("Country must be a 2-character ISO-1366 code (https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes)") } structure(address_set, class="ggirl_address") }
e1d036ee3dd4df7d402d45b4c41cde42df1f8ea6
e86a5cc90c25ae4ecb1238ff31df11eba7120f4e
/MechaCarChallenge.RScript.R
73692ef9007fe0c5bbc7b1ee4feb47c147fbe278
[]
no_license
markeiabc/R_Analysis
6ece6900e551510e3f3da4f1dd7ce1dc360395a6
c2c7e156b1f1f7dcf3f2b0c99feba4ec744cc569
refs/heads/main
2022-12-31T21:19:06.027867
2020-10-25T01:37:35
2020-10-25T01:37:35
306,980,020
0
0
null
null
null
null
UTF-8
R
false
false
2,207
r
MechaCarChallenge.RScript.R
# MechaCarChallenge.RScript.R
# Analysis of MechaCar prototype mpg (multiple linear regression) and of
# suspension-coil PSI (summary statistics and a one-sample t-test).
# Requires 'MechaCar_mpg.csv' and 'Suspension_Coil.csv' in the working directory.
setwd("~/OneDrive/Data Analytics Bootcamp/R_Analysis")
library(tidyverse)

# Read CSV files for the challenge
MechaCar_table <- read.csv(file = 'MechaCar_mpg.csv', check.names = TRUE,
                           stringsAsFactors = FALSE)
Suspension_Coil_table <- read.csv(file = 'Suspension_Coil.csv', check.names = TRUE,
                                  stringsAsFactors = FALSE)
View(MechaCar_table)

# Predict mpg of MechaCar prototypes - qualitative test for normality:
# visualize the distribution using a density plot
ggplot(MechaCar_table, aes(x = mpg)) + geom_density()

# Quantitative test for normality; for a normal (bell-curve) distribution the
# mean and median should be close in value
shapiro.test(MechaCar_table$mpg)

# Generate multiple linear regression models
lm(mpg ~ vehicle.length + vehicle.weight + spoiler.angle + ground.clearance + AWD,
   data = MechaCar_table)
lm(mpg ~ vehicle.length + vehicle.weight + spoiler.angle + ground.clearance,
   data = MechaCar_table)
summary(lm(mpg ~ vehicle.length + vehicle.weight + spoiler.angle + ground.clearance,
           data = MechaCar_table))
summary(lm(mpg ~ vehicle.length + vehicle.weight + spoiler.angle + ground.clearance + AWD,
           data = MechaCar_table))
# Confirmed that intercept, vehicle length, and ground clearance have a
# significant impact on mpg

# Summary statistics for the suspension coil's pounds-per-inch (PSI) variable
View(Suspension_Coil_table)
summary(Suspension_Coil_table)

psi <- Suspension_Coil_table$PSI
mean(psi)
median(psi)
var(psi)
sd(psi)

# BUG FIX: build the summary table from the computed statistics instead of the
# hard-coded, manually transcribed values of the original (which silently
# drift whenever the input data change).  The descriptive column is named
# "stat_name" directly, replacing the original create-as-"table_columns"
# followed by a rename.
summary_statistics_table <- data.frame(
  stat_name = c("Mean", "Median", "Variance", "Standard_Deviation"),
  values = c(mean(psi), median(psi), var(psi), sd(psi)))
View(summary_statistics_table)

# One-sample t-test: is the mean PSI significantly different from 1500?
t.test(Suspension_Coil_table$PSI, mu = 1500)
b1fa86c30244462756d7228657c0496d8551a42c
f09c5a76157b7f608dcd00cd7a33e57cff8e5d41
/brook90Run.r
ce8bb49bdb9dd0aa4b6baf673d4b56c503563994
[]
no_license
GeorgKindermann/Brook90_R
09347146f366388e3bb7c848ef8563d140ed3f75
fd6f8760516f891879faa281aff157e88d2ae488
refs/heads/master
2020-08-03T22:12:27.825987
2019-10-01T14:24:57
2019-10-01T14:24:57
211,901,507
0
0
null
2019-09-30T16:12:28
2019-09-30T16:12:28
null
UTF-8
R
false
false
8,621
r
brook90Run.r
# brook90Run.r -- main driver loop of the BROOK90 hydrological model port.
# This script is NOT self-contained: it reads and updates a large set of
# global variables (MData weather table, DAYMO, runflag, NLAYER, soil and
# canopy parameters, ...) and calls model routines (SOILPAR, SOILVAR,
# MSBSETVARS, MSBDAYNIGHT, MSBPREINT, MSBITERATE, DOYF, fnleap, the
# z*/p*/d*/m* accumulator helpers, ...) defined in the other project files.

# ---- initialisation, only performed on a fresh run ----
if((runflag == 0) || (runflag == 1)){
  # days per month for a non-leap year
  DAYMO[1] = 31
  DAYMO[2] = 28
  DAYMO[3] = 31
  DAYMO[4] = 30
  DAYMO[5] = 31
  DAYMO[6] = 30
  DAYMO[7] = 31
  DAYMO[8] = 31
  DAYMO[9] = 30
  DAYMO[10] = 31
  DAYMO[11] = 30
  DAYMO[12] = 31

  IDAY =1                    # index of the current day in the weather data
  IInterValDay=1             # index of the current precipitation-interval row
  NDAYS=length(MData[[1]])   # number of days of input data

  # iteration counters: whole run / year / month
  NITSR = 0
  NITSY = 0
  NITSM = 0

  YEARN = as.numeric(MData[[1]][IDAY])
  daymax=NDAYS-IDAY+1
  maxF=0

  # daily output series, preallocated to the run length
  timeseries_prec=rep(0,daymax)
  timeseries_evp=rep(0,daymax)
  timeseries_flow=rep(0,daymax)
  timeseries_rnet=rep(0,daymax)
  timeseries_ptran=rep(0,daymax)
  timeseries_irvp=rep(0,daymax)
  timeseries_isvp=rep(0,daymax)
  timeseries_snow=rep(0,daymax)
  timeseries_swat=rep(0,daymax)
  timeseries_pint=rep(0,daymax)
  timeseries_snvp=rep(0,daymax)
  timeseries_slvp=rep(0,daymax)
  timeseries_trand=rep(0,daymax)
  timeseries_mesfld=rep(0,daymax)
  timeseries_smltd=rep(0,daymax)
  timeseries_slfld=rep(0,daymax)
  timeseries_rfald=rep(0,daymax)
  timeseries_sfald=rep(0,daymax)
  timeseries_awat=rep(0,daymax)
  timeseries_adef=rep(0,daymax)
  timeseries_sintd=rep(0,daymax)
  timeseries_rintd=rep(0,daymax)
  timeseries_rthrd=rep(0,daymax)
  timeseries_sthrd=rep(0,daymax)
  timeseries_rsnod=rep(0,daymax)

  # expand two-digit years: values > 20 are taken as 19xx, otherwise 20xx
  if( YEARN < 100){
    if(YEARN > 20){
      YEARN = YEARN + 1900
    }else{
      YEARN = YEARN + 2000
    }
  }

  MONTHN = as.numeric(MData[[2]][IDAY])
  DOM = as.numeric(MData[[3]][IDAY])
  # NOTE(review): double assignment kept from the original; it is
  # equivalent to DOY = DOYF(DOM, MONTHN, DAYMO)
  DOY = DOY=DOYF(DOM,MONTHN,DAYMO)
  if (fnleap()) {
    DAYMO[2] = 29
  }else{
    DAYMO[2] = 28
  }

  # precipitation-interval time step: split the day when sub-daily data exist
  if (SUBDAYDATA) {
    DTP = DT / NPINT
  }else{
    DTP = DT
  }

  # zero accumulators
  zyear()
  zmonth()

  # initial values
  SNOW = SNOWIN
  GWAT = GWATIN
  INTR = INTRIN
  INTS = INTSIN
  for( i in 1:NLAYER){
    PSIM[i] = PSIMIN[i]
  }

  # soil water parameters and initial variables
  soilp<-SOILPAR()
  PSIG<-unlist(soilp[2])
  SWATMX<-unlist(soilp[3])
  WETF<-unlist(soilp[4])
  WETC<-unlist(soilp[5])
  CHM<-unlist(soilp[6])
  CHN<-unlist(soilp[7])
  WETNES<-unlist(soilp[8])
  SWATI<-unlist(soilp[9])
  KSAT<-unlist(soilp[10])
  # ^^

  # initial soil water variables (SOILVAR returns one flat vector that is
  # unpacked into ML-sized slices)
  soil<-SOILVAR()
  PSITI<-soil[1:ML]
  THETA<-soil[(ML+1):(2*ML)]
  KK<-soil[(2*ML+1):(3*ML)]
  SWAT<-soil[(3*ML+1)]
  # ^^

  # initial total water in system
  STORD = INTR + INTS + SNOW + SWAT + GWAT
  STORM = STORD
  STORY = STORD

  # any initial snow has zero liquid water and cold content
  CC = 0
  SNOWLQ = 0
}

## ----chunkpara-----------------------------------------------------------
# parameter conversions
GLMAX = GLMAXC / 100
GLMIN = GLMINC / 100
# degrees to radians (57.296 = 180/pi)
LAT = LATD / 57.296
ESLOPE = ESLOPED / 57.296
DSLOPE = DSLOPED / 57.296
ASPECT = ASPECTD / 57.296

# equivalent slope for radiation calculations
equi<-EQUIVSLP(LAT, ESLOPE, ASPECT)
L1<-unlist(equi[1])
L2<-unlist(equi[2])
# ^^

# infiltration parameters
infpa<-INFPAR(INFEXP, IDEPTH, NLAYER, THICK)
ILAYER<-unlist(infpa[1])
INFRAC<-unlist(infpa[2])
# ^^

# source area parameters
srfp<-SRFPAR(QDEPTH, NLAYER, THETAF, THICK, STONEF, SWATMX)
QLAYER<-unlist(srfp[1])
SWATQX<-unlist(srfp[2])
SWATQF<-unlist(srfp[3])
# ^^

# root density parameters
RELDEN<-RTDEN(ROOTDEN, NLAYER, THICK)

## ----chunkmodel----------------------------------------------------------
# ---- main day loop ----
while( IDAY <= NDAYS){
  NITSD = 0             # iterations used for this day
  subdatafileline(IDAY) # read today's weather record into the globals

  if( IDAY == INIDAYS + 1){
    # end of initialization, reinitialize year and month accumulators
    STORD = INTR + INTS + SNOW + SWAT + GWAT
    STORM = STORD
    STORY = STORD
    NITSY = 0
    NITSM = 0
    zyear()
    zmonth()
  }

  # calculate derived variables
  MSBSETVARS()

  #
  #* * * * * B E G I N  D A Y - N I G H T  E T  L O O P * * * * * * * * *
  # potential and actual interception, evaporation, and transpiration
  MSBDAYNIGHT()
  #
  #* * * * * * * *  E N D  D A Y - N I G H T  L O O P * * * * * * * * * *

  # average rates over day; index [1] is daytime, [2] is nighttime
  PTRAN = (PTR[1] * DAYLEN + PTR[2] * (1 - DAYLEN)) / DT
  GEVP = (GER[1] * DAYLEN + GER[2] * (1 - DAYLEN)) / DT
  PINT = (PIR[1] * DAYLEN + PIR[2] * (1 - DAYLEN)) / DT
  GIVP = (GIR[1] * DAYLEN + GIR[2] * (1 - DAYLEN)) / DT
  for(i in 1:NLAYER){
    TRANI[i] = (ATRI[1, i] * DAYLEN + ATRI[2, i] * (1 - DAYLEN)) / DT
  }

  # zero daily integrators
  zday()

  #
  #* * * * * * * * B E G I N  P R E C I P  I N T E R V A L * * * * * * * *
  for( N in 1:NPINT){
    if (SUBDAYDATA){
      subprfileline(IInterValDay)
      # negative sentinel means "no measured flow for this interval"
      if (MESFLP <= -0.01) {MESFLP = MESFL / DT}
    }else{
      # precip data from data file
      PREINT = PRECIN / DT
      MESFLP = MESFL / DT
    }

    # interception and snow accumulation/melt
    MSBPREINT()

    # initialize for iterations
    # initial time remaining in iteration time step = precip time step
    DTRI = DTP
    # initialize iteration counter
    NITS = 0
    # zero precip interval integrators
    zpint()

    #
    # * * * * * * B E G I N  I T E R A T I O N * * * * * * * *
    while(!(DTRI <= 0)){
      NITS = NITS + 1
      # check for events (intentionally empty placeholder)
      if (NITS %% 100 == 0) {}

      # water movement through soil
      MSBITERATE()

      # iteration calculations
      # calculate SLFLI vertical macropore infiltration out of layer
      SLFLI[1] = SLFL - INFLI[1] - BYFLI[1]
      if (ILAYER >= 2){
        if (NLAYER >= ILAYER +1){
          for (i in 2:ILAYER){
            # does not execute if ILAYER% = 1 or 0
            SLFLI[i] = SLFLI[i - 1] - INFLI[i] - BYFLI[i]
          }
          for( i in (ILAYER + 1):NLAYER){
            # does not execute if NLAYER% < ILAYER% + 1
            SLFLI[i] = 0
          }
        }
      }

      # integrate below ground storages over iteration interval
      for( i in 1:NLAYER){
        SWATI[i] = SWATI[i] + NTFLI[i] * DTI
      }
      GWAT = GWAT + (VRFLI[NLAYER] - GWFL - SEEP) * DTI

      # new soil water variables and test for errors
      for (i in 1:NLAYER){
        swchek(i)
        WETNES[i] = SWATI[i] / SWATMX[i]
        PSIM[i] = FPSIMF(WETNES[i], PSIF[i], BEXP[i], WETINF[i], WETF[i], CHM[i], CHN[i])
      }
      soil<-SOILVAR()
      PSITI<-soil[1:ML]
      THETA<-soil[(ML+1):(2*ML)]
      KK<-soil[(2*ML+1):(3*ML)]
      SWAT<-soil[(3*ML+1)]
      # ^^

      # iteration output
      # flows accumulated over precip interval
      paccum()

      # time remaining in precipitation time-step
      DTRI = DTRI - DTI
      NITSR = NITSR + 1  # for visible display of iterations
    }
    #
    # * * * * E N D  i T E R A T I O N  L O O P * * * * * * * *
    # display iterations

    # integrate interception storages over precip interval
    INTS = INTS + (SINT - ISVP) * DTP
    INTR = INTR + (RINT - IRVP) * DTP

    # flows for precip interval summed from components
    psum()
    # precipitation interval output
    # flows accumulated over day
    daccum()

    # accumulate iterations
    NITSD = NITSD + NITS
    NITSM = NITSM + NITS
    NITSY = NITSY + NITS
    IInterValDay<-IInterValDay+1
  }
  #
  #* * * * * E N D  P R E C I P  I N T E R V A L  L O O P * * * * * * * *

  # flows for day summed from components
  dsum()

  # check for water balance error
  BALERD = STORD - (INTR + INTS + SNOW + SWAT + GWAT) + PRECD - EVAPD - FLOWD - SEEPD
  STORD = INTR + INTS + SNOW + SWAT + GWAT

  # flows accumulated over month
  maccum()

  # date checking on
  if(DOM == DAYMO[MONTHN]){
    # set up for next month
    zmonth()
    MONTHN = MONTHN + 1
    DOM = 0
    NITSM = 0
  }
  # for end of month

  if (MONTHN == 13) {
    # end of year
    # set up for next year
    MONTHN = 1
    DOM = 1
    DOY = 1
    YEARN = YEARN + 1
    zyear()
    if (fnleap() ){
      DAYMO[2] = 29
    }else{
      DAYMO[2] = 28
    }
    NITSY = 0
    NITSM = 0
  }

  #set up for next day
  IDAY = IDAY + 1
  MONTHN = as.numeric(MData[[2]][IDAY])
  DOM = as.numeric(MData[[3]][IDAY])
  YEARN = as.numeric(MData[[1]][IDAY])
  if(IDAY <= NDAYS) DOY=DOYF(DOM,MONTHN,DAYMO)

  #* * * I N P U T  W E A T H E R  L I N E  F R O M  D F I L E * * *
  #subdatafileline()
  #
  # *************** E N D  D A Y  L O O P **************************

  # store today's daily totals; IDAY has already been advanced above, so
  # daymax-NDAYS+IDAY-1 indexes the day that was just simulated
  timeseries_prec[daymax-NDAYS+IDAY-1]<-PRECD
  timeseries_evp[daymax-NDAYS+IDAY-1]<-EVAPD
  timeseries_flow[daymax-NDAYS+IDAY-1]<-FLOWD
  timeseries_rnet[daymax-NDAYS+IDAY-1]<-RNET
  timeseries_irvp[daymax-NDAYS+IDAY-1]<-IRVPD
  timeseries_isvp[daymax-NDAYS+IDAY-1]<-ISVPD
  timeseries_ptran[daymax-NDAYS+IDAY-1]<-PTRAND
  timeseries_snow[daymax-NDAYS+IDAY-1]<-SNOW
  timeseries_swat[daymax-NDAYS+IDAY-1]<-SWAT
  timeseries_pint[daymax-NDAYS+IDAY-1]<-PINTD
  timeseries_snvp[daymax-NDAYS+IDAY-1]<-SNVPD
  timeseries_slvp[daymax-NDAYS+IDAY-1]<-SLVPD
  timeseries_trand[daymax-NDAYS+IDAY-1]<-TRAND
  timeseries_mesfld[daymax-NDAYS+IDAY-1]<-MESFLD
  timeseries_smltd[daymax-NDAYS+IDAY-1]<-SMLTD
  timeseries_slfld[daymax-NDAYS+IDAY-1]<-SLFLD
  timeseries_rfald[daymax-NDAYS+IDAY-1]<-RFALD
  timeseries_awat[daymax-NDAYS+IDAY-1]<-AWAT
  timeseries_adef[daymax-NDAYS+IDAY-1]<-ADEF
  timeseries_sintd[daymax-NDAYS+IDAY-1]<-SINTD
  timeseries_rintd[daymax-NDAYS+IDAY-1]<-RINTD
  timeseries_sfald[daymax-NDAYS+IDAY-1]<-SFALD
  timeseries_rthrd[daymax-NDAYS+IDAY-1]<-RTHRD
  timeseries_sthrd[daymax-NDAYS+IDAY-1]<-STHRD
  timeseries_rsnod[daymax-NDAYS+IDAY-1]<-RSNOD
}
06a5d8cf9e6022740c6f539b1dd5042b3e897030
36ed93e0ab7767d73262bd38374d97e549f0b5f1
/inst/doc/DMRP_Paper.R
771ccbc21ed48b18d2244fa1c1c02483c9e386c3
[]
no_license
cran/HMP
16678dbeb20fdda6fcf5422a7047c3c89f4af0ce
30dadecea268319438aeb41a23441535624d247c
refs/heads/master
2021-01-21T01:53:01.212756
2019-08-31T10:00:06
2019-08-31T10:00:06
17,679,740
1
0
null
null
null
null
UTF-8
R
false
false
1,904
r
DMRP_Paper.R
### R code from vignette source 'DMRP_Paper.Rnw' ################################################### ### code chunk number 1: initializing ################################################### library(HMP) data(dmrp_data) data(dmrp_covars) ################################################### ### code chunk number 2: figure1 (eval = FALSE) ################################################### ## ## # Set splitting parameters for DM-Rpart (see ??DM-Rpart for details) ## minBucket <- 6 ## minSplit <- 18 ## ## # Set the number of cross validations ## # 20 means the model will run 20 times, each time holding 5% of the data out ## numCV <- 20 ## ## # Run the DM-RPart function with a seed set ## set.seed(2019) ## DMRPResults <- DM.Rpart.CV(dmrp_data, dmrp_covars, plot=FALSE, minsplit=minSplit, ## minbucket=minBucket, numCV=numCV) ## ## # Pull out and plot the best tree ## bestTree <- DMRPResults$bestTree ## rpart.plot::rpart.plot(bestTree, type=2, extra=101, box.palette=NA, branch.lty=3, ## shadow.col="gray", nn=FALSE) ## ################################################### ### code chunk number 3: figure2 (eval = FALSE) ################################################### ## ## # Split the data by terminal nodes ## nodeNums <- bestTree$frame$yval[bestTree$frame$var == "<leaf>"] ## nodeList <- split(dmrp_data, f=bestTree$where) ## names(nodeList) <- paste("Node", nodeNums) ## ## # Get the PI for each terminal node ## myEst <- Est.PI(nodeList) ## myPI <- myEst$MLE$params ## ## # Plot the PI for each terminal node ## myColr <- rainbow(ncol(dmrp_data)) ## lattice::barchart(PI ~ Group, data=myPI, groups=Taxa, stack=TRUE, col=myColr, ## ylab="Fractional Abundance", xlab="Terminal Node", ## auto.key=list(space="top", columns=3, cex=.65, rectangles=FALSE, ## col=myColr, title="", cex.title=1)) ##
adc56c453ab023619fce3fd91430a4e53fd6f142
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/agridat/examples/stirret.borers.Rd.R
31b377d6a596b3e7a09691aa8f480eb561f4c9e6
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
633
r
stirret.borers.Rd.R
library(agridat) ### Name: stirret.borers ### Title: Corn borer control by application of fungal spores. ### Aliases: stirret.borers ### Keywords: datasets ### ** Examples data(stirret.borers) dat <- stirret.borers require(lattice) xyplot(count2~count1|trt,dat, main="stirret.borers - by treatment", xlab="Early count of borers", ylab="Late count") # Even though the data are counts, Normal distribution seems okay # qqmath(~count1|trt, dat, main="stirret.borers") m1 <- lm(count1 ~ trt + block, dat) anova(m1) if(require(effects)){ e1 <- effect('trt',m1) as.data.frame(e1) plot(e1, main="stirret.borer") }
983c27674c953239015043424ec16565e62daf17
0423e43b4580047768bfc72a1f32f39ed5f25d9a
/R/checkexperiment.R
865d678894fc23667ea70cebeced235266baa8a1
[]
no_license
cran/drfit
ed43c44d5b42fa298710741443e2abed1cc367da
c00ca373d91c6a62a3d1235424e1714ea8babff5
refs/heads/master
2021-01-20T21:00:18.313000
2018-10-11T15:00:07
2018-10-11T15:00:07
17,695,636
0
0
null
null
null
null
UTF-8
R
false
false
6,757
r
checkexperiment.R
# Silence R CMD check notes for the column names used in subset() calls below
utils::globalVariables(c("type", "conc", "substance"))

# Interactively inspect the raw data of one experiment stored in the "cytotox"
# ODBC database: one microtiter plate for db = "cytotox"/"enzymes", one
# ecotoxicity experiment for db = "ecotox".  Prints experiment metadata and a
# small QA summary matrix, then draws a boxplot of the control data and a
# dose-response scatter plot (one colour per substance).
#
# id        numeric id of the plate/experiment in the database
# db        database table to use; one of "ecotox", "cytotox", "enzymes"
# endpoint  SQL LIKE pattern selecting the response type (only used for "ecotox")
#
# NOTE(review): the ODBC connection `con` is opened but never closed with
# dbDisconnect() -- TODO confirm whether cleanup is expected elsewhere.
checkexperiment <- function(id, db = c("ecotox", "cytotox", "enzymes"),
  endpoint = "%")
{
  db = match.arg(db)

  # per-database names of the response column, the test-system column and
  # the experiment-type column/table prefix
  databases <- data.frame(
    responsename = c("viability", "activity", "raw_response"),
    testtype = c("celltype", "enzyme", "organism"),
    exptype = c("plate", "plate", "experiment"),
    row.names = c("cytotox", "enzymes", "ecotox"),
    stringsAsFactors = FALSE)

  con <- dbConnect(odbc(), "cytotox", database = db)

  responsename <- databases[db, 1]
  testtype <- databases[db, 2]
  exptype <- databases[db, 3]

  # fetch the free-text comment of this plate/experiment
  exptable <- paste(exptype, "s", sep = "")
  commentquery <- paste0("SELECT comment FROM ", exptable, " ",
    "WHERE ", exptype, " = ", id)
  commentdata <- dbGetQuery(con, commentquery)
  comment <- as.character(commentdata[[1]])

  # fetch the response data; ecotox carries three extra columns
  expquery <- paste0("SELECT ",
    "experimentator, substance, ", testtype, ", conc, unit, ", responsename, ", ",
    if (db == "ecotox") "type, raw_0, duration, ",
    "performed, ok ",
    "FROM ", db, " ",
    "WHERE ", exptype, "=", id)

  if (db == "ecotox") {
    expquery <- paste0(expquery, " AND type LIKE '", endpoint, "'")
  }

  expdata <- dbGetQuery(con, expquery)

  # blind/control readings are kept in a separate table for plate assays
  if (db %in% c("cytotox", "enzymes")) {
    controlquery <- paste0("SELECT type, response FROM controls ",
      " WHERE plate=", id)
    controldata <- dbGetQuery(con, controlquery)
  }

  # prompt before each plot; restore the old graphics setting on exit
  op <- par(ask=TRUE)
  on.exit(par(op))

  # build the QA matrix: blind + control rows for plate assays,
  # control row only (from conc == 0 data) for ecotox
  if (db %in% c("cytotox","enzymes")) {
    blinds <- subset(controldata, type == "blind")
    controls <- subset(controldata, type == "control")
    QA <- matrix(nrow = 2, ncol = 4,
      dimnames = list(c("Blind", "Control (conc = 0)"),
        c("Number", "Mean", "Std. Dev.", "% Std. Dev")))
    QA[1, 1] <- length(blinds$response)
    QA[1, 2] <- signif(mean(blinds$response), 2)
    QA[1, 3] <- signif(sd(blinds$response), 2)
    QA[1, 4] <- signif(QA[1, 3] * 100 / QA[1, 2], 2)
  } else {
    # Use raw response for ecotox
    expdata$response <- expdata$raw_response
    if (nlevels(expdata$type) > 1) {
      message("There are data for more than one type of raw response in your data.\n",
        "The types are ", paste(levels(expdata$type), collapse = " and "), ".\n",
        "You should choose one of these types using 'endpoint = \"$type\"'",
        "in your call to checkexperiment\n",
        "For now, we are continuing with the data for ",
        levels(expdata$type)[1])
    }
    endpoint <- expdata$type[1]
    expdata <- subset(expdata, type == endpoint)
    controls <- subset(expdata, conc == 0)
    expdata <- subset(expdata, conc != 0)
    QA <- matrix(nrow = 1, ncol = 4,
      dimnames = list(c("Control (conc = 0)"),
        c("Number", "Mean", "Std. Dev.", "% Std. Dev")))
  }

  numberOfControls <- length(controls$response)
  QA["Control (conc = 0)", 1] <- numberOfControls
  if (numberOfControls > 0) {
    QA["Control (conc = 0)", 2] <- signif(mean(controls$response),2)
    QA["Control (conc = 0)", 3] <- signif(sd(controls$response),2)
    QA["Control (conc = 0)", 4] <- signif(QA["Control (conc = 0)", 3] * 100 /
      QA["Control (conc = 0)", 2],2)
  }

  # for Vibrio fischeri experiments, Na Cl rows are the positive control:
  # add them to the QA table, then drop them from the dose-response data
  if (db == "ecotox") {
    if (identical(as.character(levels(expdata$organism)), "Vibrio fischeri")) {
      positive <- subset(expdata, substance == "Na Cl")
      if (nrow(positive) > 0) {
        QA <- rbind(QA,
          c(nrow(positive),
            signif(mean(positive$raw_response), 2),
            signif(sd(positive$raw_response), 2),
            signif(100 * sd(positive$raw_response) / mean(positive$raw_response), 2)))
        rownames(QA) <- c("Control (conc = 0)", "Positive control (Na Cl)")
      }
      expdata <- subset(expdata, substance != "Na Cl", drop = TRUE)
    }
  }

  if (length(expdata$experimentator) < 1) {
    stop("There is no response data for ", exptype, " ", id,
      " in database ", db, "\n")
  }
  exptypestring <- paste0(toupper(substring(exptype, 1, 1)), substring(exptype, 2))
  expdata$experimentator <- factor(expdata$experimentator)
  expdata$type <- factor(expdata[[testtype]])
  expdata$performed <- factor(as.character(expdata$performed))
  expdata$substance <- factor(expdata$substance)
  expdata$unit <- factor(expdata$unit)
  expdata$ok <- factor(expdata$ok)

  # Info on the experiment
  cat("\n", exptypestring, id, "from database", db, ":\n\n",
    "\tExperimentator(s):\t",levels(expdata$experimentator),"\n",
    "\tType(s):\t\t",levels(expdata$type),"\n",
    "\tPerformed on:\t\t",levels(expdata$performed),"\n",
    "\tSubstance(s):\t\t",levels(expdata$substance),"\n",
    "\tConcentration unit(s):\t",levels(expdata$unit),"\n",
    "\tComment:\t\t",comment,"\n",
    "\tOK Levels:\t\t",levels(expdata$ok),"\n\n")

  print(QA)

  # Control growth rate for Lemna and algae
  if (endpoint %in% c("cell count", "frond area", "frond number")) {
    duration <- as.numeric(unique(expdata$duration)) # in hours
    if (length(duration) > 1) stop("More than one duration in the data")
    response_0 <- unique(expdata$raw_0)
    if (length(response_0) > 1) stop("More than one mean response at time 0 in the data")
    t_days <- duration / 24
    # specific growth rate per day, relative to the time-0 response
    control_growth_rates <- (log(controls$response) - log(response_0)) / t_days
    cat("\nMean growth rate in controls:\t",
      round(mean(control_growth_rates), 3), "per day\n")
  }

  # Box plot of control data
  if (db == "ecotox") {
    boxplot(controls$response,
      names="controls",
      ylab=endpoint,
      ylim=range(controls$response, na.rm = TRUE),
      boxwex=0.4,
      main=paste("Plate ",id))
  } else {
    boxplot(blinds$response,controls$response,
      names=c("blinds","controls"),
      ylab="Response",
      boxwex=0.4,
      main=paste("Plate ",id))
  }

  # Plot of dose response data
  # NOTE(review): columns are selected by position (2 = substance, 4 = conc,
  # 6 = response) and therefore depend on the exact SELECT list built above
  drdata <- expdata[c(2,4,6)]
  drdata$substance <- factor(drdata$substance)
  substances <- levels(drdata$substance)

  # x range: decadic log of the lowest non-zero and highest concentration
  lld <- log10(min(subset(drdata,conc!=0)$conc))
  lhd <- log10(max(drdata$conc))

  ylab <- if (db == "ecotox") endpoint else responsename

  plot(1,type="n",
    xlim = c(lld - 0.5, lhd + 2),
    ylim = range(expdata[responsename], na.rm = TRUE),
    xlab = paste("decadic logarithm of the concentration in ",levels(expdata$unit)),
    ylab = ylab)

  # one colour per substance
  drdatalist <- split(drdata,drdata$substance)

  for (i in 1:length(drdatalist)) {
    points(log10(drdatalist[[i]]$conc),drdatalist[[i]][[responsename]],col=i);
  }

  legend("topright",substances, pch=1, col=1:length(substances), inset=0.05)
  title(main=paste(levels(expdata$experimentator), " - ",levels(expdata$type)))
}
6536bf639a9721b156eb9162587c4f1c724f5d65
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/ISOpureR/man/ISOpure.util.matlab_less_than.Rd
02a07f2b32182c2e65d27aad9d9aadf0180e28a2
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
false
833
rd
ISOpure.util.matlab_less_than.Rd
\name{ISOpure.util.matlab_less_than} \alias{ISOpure.util.matlab_less_than} \title{Less than operator} \description{Less than function that matches Matlab behaviour when one of the arguments is NA (i.e. returns FALSE instead of NA)} \usage{ ISOpure.util.matlab_less_than(a, b) } \arguments{ \item{a}{A numeric value (including Inf) or NA} \item{b}{A numeric value (including Inf) or NA} } \value{Logical: TRUE if a < b, FALSE if a >= b OR if one of a, b is NA or NaN} \author{Catalina Anghel} \examples{ ISOpure.util.matlab_less_than(5,3) #[1] FALSE ISOpure.util.matlab_less_than(3,5) #[1] TRUE ISOpure.util.matlab_less_than(5,NA) #[1] FALSE ISOpure.util.matlab_less_than(NA,5) #[1] FALSE ISOpure.util.matlab_less_than(5,Inf) #[1] TRUE ISOpure.util.matlab_less_than(Inf,5) #[1] FALSE } \keyword{arith} \keyword{NA} \keyword{logic}
d17aabd45638d2417cddbd2696f30f3970af61c3
0b61fdadaaafb28829e1d7eccc07972f53f3aa3d
/man/plot_elevation_gradient.Rd
93db9f0c1ff9d6603ace2a6e3d89d05818cc6d4a
[]
no_license
mhhsanim/LandClimTools
1e165ae435d4e4e043866bfc054b9fa8d651464d
6c1a3c782990e2b4ebda51b0d37396f8868b2fe6
refs/heads/master
2020-12-11T09:04:07.487287
2016-04-26T19:05:51
2016-04-26T19:05:51
null
0
0
null
null
null
null
UTF-8
R
false
false
1,944
rd
plot_elevation_gradient.Rd
\name{plot_elevation_gradient} \alias{plot_elevation_gradient} %- Also NEED an '\alias' for EACH other topic documented here. \title{ %% ~~function to do ... ~~ Plot elevation gradient } \description{ %% ~~ A concise (1-5 lines) description of what the function does. ~~ Create a standard figure of the elevation gradient for a selected decade, based on an elevation-aggregated LandClim output file. } \usage{ plot_elevation_gradient(elevationBiomassOut, species, selection = 10, lty = 1, cols = rainbow(length(species)), plotlegend = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{elevationBiomassOut}{ %% ~~Describe \code{elevationBiomassOut} here~~ } \item{species}{ %% ~~Describe \code{species} here~~ } \item{selection}{ %% ~~Describe \code{selection} here~~ } \item{lty}{ %% ~~Describe \code{lty} here~~ } \item{cols}{ %% ~~Describe \code{cols} here~~ } \item{plotlegend}{ %% ~~Describe \code{plotlegend} here~~ } } \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{plot_forest}} } \examples{ dat <- read.table(system.file("elevation_biomass_out.csv", package = "landclimtools"), sep=",", dec=".", header=T) species <- c("abiealba" , "piceabie", "fagusylv", "pinusilv", "querpetr") plot_elevation_gradient(elevationBiomassOut=dat, species=species, selection=30, lty=1, cols= rainbow(length(species))) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
346d47812db02daea60d5738bc342f146430690d
158af6c29d41ba5f5b38cd19133de5d73d9065c2
/plot4.R
469c7021d2e26c093d9f9c928328bfab6855c4d9
[]
no_license
Nicolas-Perrin/ExData_Plotting1
5c229f5b22932265ea71fa9374ed99fc98ef0e4b
40c8660bbdd42fda7224fd44ff5e962552b16028
refs/heads/master
2020-03-13T17:50:13.436798
2018-04-27T00:35:57
2018-04-27T00:35:57
131,224,822
0
0
null
2018-04-27T00:33:04
2018-04-27T00:33:03
null
UTF-8
R
false
false
2,088
r
plot4.R
#LOADING OF DATA AND FORMATING #------------------------------- # define column names of the data set header <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3") #load the data set power_consumption <- read.table(file = "household_power_consumption.txt", header = TRUE, sep = ";", na.strings="?", col.names = header) #merge column Date and Times into an array called full_date full_dates <- paste(power_consumption$Date,power_consumption$Time) #converte the merged dates and time to a time object in R (easier to manipulate) dates_rformart <- strptime(full_dates, format = "%d/%m/%Y %H:%M:%S") #add the column of converted time object into the dataset power_consumption$fulldate <- dates_rformart #exctracting the subset of interest for the plot (i.e. data from 01/02/2007 to 02/02/2007) datemin = strptime("01/02/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S") datemax = strptime("03/02/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S") power_extract = subset(power_consumption, fulldate >= datemin & fulldate < datemax) # CREATION OF PLOT 4 #--------------------- png(filename = "plot4.png") #open a png file par(mfcol = c(2,2)) #divides scrren in 2 x 2 for 4 plots #subplot 1 with(power_extract, plot(fulldate,Global_active_power, type = "l", ylab = "Global Active Power", xlab = "")) #subplot 2 with(power_extract, plot(fulldate,Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = "")) with(power_extract, { lines(fulldate, Sub_metering_1, col = "black") lines(fulldate, Sub_metering_2, col = "red") lines(fulldate, Sub_metering_3, col = "blue") }) legend("topright", col = c("black","red","blue"), bty = "n", lty = "solid", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) #subplot 3 with(power_extract, plot(fulldate,Voltage, type = "l", ylab = "Voltage", xlab = "datetime")) #subplot 4 with(power_extract, plot(fulldate,Global_reactive_power, type = "l", xlab = "datetime")) 
dev.off() #close file
44b078ceb9152e85de0b7a6ecdecf4b2aa97c3aa
ef1d6fa0df37fa552c4c4625e6e9cb974e8482f0
/R/ovcYoshihara.R
354ebcb538cd0e8131a19b1b427afed5d780c40f
[]
no_license
bhklab/genefu
301dd37ef91867de8a759982eb9046d3057723af
08aec9994d5ccb46383bedff0cbfde04267d9c9a
refs/heads/master
2022-11-28T09:22:02.713737
2022-05-30T15:35:53
2022-05-30T15:35:53
1,321,876
17
15
null
2022-11-07T11:52:05
2011-02-02T21:06:25
R
UTF-8
R
false
false
4,887
r
ovcYoshihara.R
#' @title Function to compute the subtype scores and risk classifications for #' the prognostic signature published by Yoshihara et al. #' #' @description #' This function computes subtype scores and risk classifications from gene #' expression values following the algorithm developed by Yoshihara et al, #' for prognosis in ovarian cancer. #' #' @usage #' ovcYoshihara(data, annot, hgs, #' gmap = c("entrezgene", "ensembl_gene_id", "hgnc_symbol", "unigene", "refseq_mrna"), #' do.mapping = FALSE, verbose = FALSE) #' #' @param data Matrix of gene expressions with samples in rows and probes in #' columns, dimnames being properly defined. #' @param annot Matrix of annotations with one column named as gmap, dimnames #' being properly defined. #' @param hgs vector of booleans with TRUE represents the ovarian cancer #' patients who have a high grade, late stage, serous tumor, FALSE otherwise. #' This is particularly important for properly rescaling the data. If hgs is #' missing, all the patients will be used to rescale the subtype score. #' @param gmap character string containing the biomaRt attribute to use for #' mapping if do.mapping=TRUE #' @param do.mapping TRUE if the mapping through Entrez Gene ids must be #' performed (in case of ambiguities, the most variant probe is kept for #' each gene), FALSE otherwise. #' @param verbose TRUE to print informative messages, FALSE otherwise. #' #' @return #' A list with items: #' - score: Continuous signature scores. #' - risk: Binary risk classification, 1 being high risk and 0 being low risk. #' - mapping: Mapping used if necessary. #' - probe: If mapping is performed, this matrix contains the correspondence #' between the gene list (aka signature) and gene expression data. 
#' #' @references #' Yoshihara K, Tajima A, Yahata T, Kodama S, Fujiwara H, Suzuki M, Onishi Y, #' Hatae M, Sueyoshi K, Fujiwara H, Kudo, Yoshiki, Kotera K, Masuzaki H, #' Tashiro H, Katabuchi H, Inoue I, Tanaka K (2010) "Gene expression profile #' for predicting survival in advanced-stage serous ovarian cancer across two #' independent datasets", PloS one, 5(3):e9615. #' #' @seealso #' [genefu::sigOvcYoshihara] #' #' @examples #' # load the ovcYoshihara signature #' data(sigOvcYoshihara) #' # load NKI dataset #' data(nkis) #' colnames(annot.nkis)[is.element(colnames(annot.nkis), "EntrezGene.ID")] <- "entrezgene" #' # compute relapse score #' ovcYoshihara.nkis <- ovcYoshihara(data=data.nkis, #' annot=annot.nkis, gmap="entrezgene", do.mapping=TRUE) #' table(ovcYoshihara.nkis$risk) #' #' @md #' @export ovcYoshihara <- function(data, annot, hgs, gmap=c("entrezgene", "ensembl_gene_id", "hgnc_symbol", "unigene", "refseq_mrna"), do.mapping=FALSE, verbose=FALSE) { if (!exists('sigOvcYoshihara')) data(sigOvcYoshihara, envir=environment()) gmap <- match.arg(gmap) if(missing(hgs)) { hgs <- rep(TRUE, nrow(data)) } if(do.mapping) { if(!is.element(gmap, colnames(annot))) { stop("gmap is not a column of annot!") } if(verbose) { message("the most variant probe is selected for each gene") } sigt <- sigOvcYoshihara[order(abs(sigOvcYoshihara[ ,"weight"]), decreasing=FALSE), ,drop=FALSE] sigt <- sigt[!duplicated(sigt[ ,gmap]), ,drop=FALSE] gid2 <- sigt[ ,gmap] names(gid2) <- rownames(sigt) gid1 <- annot[ ,gmap] names(gid1) <- colnames(data) rr <- geneid.map(geneid1=gid1, data1=data, geneid2=gid2) data <- rr$data1 annot <- annot[colnames(data), ,drop=FALSE] sigt <- sigt[names(rr$geneid2), ,drop=FALSE] pold <- colnames(data) pold2 <- rownames(sigt) colnames(data) <- rownames(annot) <- rownames(sigt) <- paste("geneid", annot[ ,gmap], sep=".") mymapping <- c("mapped"=nrow(sigt), "total"=nrow(sigOvcYoshihara)) myprobe <- data.frame("probe"=pold, "gene.map"=annot[ ,gmap], 
"new.probe"=pold2) } else { gix <- intersect(rownames(sigOvcYoshihara), colnames(data)) if(length(gix) < 2) { stop("data do not contain enough gene from the ovcTCGA signature!") } data <- data[ ,gix,drop=FALSE] annot <- annot[gix, ,drop=FALSE] mymapping <- c("mapped"=length(gix), "total"=nrow(sigOvcYoshihara)) myprobe <- data.frame("probe"=gix, "gene.map"=annot[ ,gmap], "new.probe"=gix) sigt <- sigOvcYoshihara[gix, ,drop=FALSE] } ## transform the gene expression in Z-scores data <- scale(data) pscore <- genefu::sig.score(x=data.frame("probe"=colnames(data), "EntrezGene.ID"=annot[ ,gmap], "coefficient"=sigt[ ,"weight"]), data=data, annot=annot, do.mapping=FALSE, signed=FALSE)$score prisk <- as.numeric(pscore > median(pscore, na.rm=TRUE)) names(prisk) <- names(pscore) <- rownames(data) return (list("score"=pscore, "risk"=prisk, "mapping"=mymapping, "probe"=myprobe)) }
fbc8b88c192357f66cf7bd800cf106de53de6326
00b9933806fdd54e30ff8700a49503c07da7e977
/Scripts/analysis.R
7fe5156351d7985b75175afd1722682a0376cb5d
[]
no_license
tommymtang/Predicting-the-Hugo-Awards
a428a241abcaad941210d94d019e7c77515e3917
787a7c23cf5e616a58d0790116260afaa8e224f0
refs/heads/master
2021-08-28T11:15:03.038089
2017-12-12T03:12:43
2017-12-12T03:12:43
113,901,085
3
1
null
null
null
null
UTF-8
R
false
false
2,368
r
analysis.R
# Library, and setting the seed library(randomForest) set.seed(100) #attach probabilities[,2] vector to test before using predictWinners predictWinners <- function(data) { # data assumed to be from roughImplementation year <- data$Year[1] winners <- logical() yearDat <- numeric() for (i in 1:dim(data)[1]) { if (i == (dim(data)[1])) { yearDat <- c(yearDat, data$probabilities[i]) maxProb <- max(yearDat) yearWinners <- (yearDat == maxProb) winners <- c(winners, yearWinners) } else if (data$Year[i] == year) { yearDat <- c(yearDat, data$probabilities[i]) } else { maxProb <- max(yearDat) yearWinners <- (yearDat == maxProb) winners <- c(winners, yearWinners) # Now clear the data year = year + 1 yearDat <- numeric() yearDat <- c(yearDat, data$probabilities[i]) } } return(winners) } # NOTES: These functions used for feature engineering. # FEATURE ENGINEER: # Data on "number of nominations". Also better way to group data. See oscars info. # this function takes in an author, the year, and the dataset (defaulting to Hugo) and gives # the number of award nominations the author has had since their most recent award win BEFORE the # current year. Therefore, it counts the current year's nomination (if any) as a nomination, # but will not output 0 if the author also won that same year. 
hugosURL <- "https://raw.githubusercontent.com/tommymtang/Predicting-the-Hugo-Awards/master/Dataset/HugosPolished.csv" nomsWithoutWin <- function(author, year, data = read.csv(url(hugosURL))) { id <- which(data$Year == year)[length(which(data$Year == year))] found <- FALSE count <- 0 authors <- removeWinnerAsterisk(data$Author) # assumes a winner column while (!found && (id > 0)) { # halt once id finished scrolling or author has won if (authors[id] == author) { if (data$Winner[id]) { found = TRUE if (data$Year[id] == year) { count = count + 1 } } else { count = count + 1 } } id <- id - 1 } return (count) } getNoms <- function(data) { return (mapply(nomsWithoutWin, data$Author, data$Year, MoreArgs = list(data = data))) } removeWinnerAsterisk <- function(author) { return (unlist(strsplit(as.character(author), "[*]"))) }
c107e8b5438559b8eb4a9b70b5fb76a1d9015f17
361045b8660071fc6bc9bf1d5d5727632dc8235d
/LTRE_data_prep.r
f8cd7232b86a14f58bff51d69d8b829d5120338b
[ "CC0-1.0" ]
permissive
steffenoppel/TRAL_IPM
61a46a30fd64611cc3af86dda2e4c1b3316c1b9a
cbe939f7234b4ced1f50fa2492cc6c0b42222c71
refs/heads/main
2023-04-08T03:45:08.927110
2022-05-12T06:59:28
2022-05-12T06:59:28
198,238,566
1
0
null
null
null
null
UTF-8
R
false
false
3,410
r
LTRE_data_prep.r
# DATA PREPARATION FOR LTRE ANALYSIS ### ## prepare output data from IPM saved as individual csv files ## convert into a single table with years in rows and cohorts in columns library(tidyverse) library(data.table) filter<-dplyr::filter select<-dplyr::select ### for most parameters the years are in separate rows setwd("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\PopulationModel\\TRAL_IPM") parameters <- c("Ntot","Ntot.breed","ann.fec","phi.ad","phi.juv","p.ad","breed.prop","agebeta","mean.p.juv") ## added IM and JUV to facilitate LTRE analysis selrows<-list(seq(1,18,1),seq(1,18,1),seq(1,18,1),seq(27,44,1),seq(27,44,1),seq(27,44,1),seq(1,18,1),rep(1,18),rep(2,18)) LTRE_input<-data.frame(Year=seq(2004,2021,1)) for(p in 1:length(parameters)){ input<-fread(sprintf("IPM_output_%s.csv",parameters[p])) LTRE_input[,p+1]<-input$Median[selrows[[p]]] names(LTRE_input)[p+1]<-parameters[p] fwrite(LTRE_input, "LTRE_input_extended.csv") } ### for immature birds we need to split by age group LTRE_input<-fread("IPM_output_IM.csv") %>% select(parameter,median) %>% mutate(Age= as.numeric(str_match(parameter, "\\,\\s*(.*?)\\s*\\,")[,2])) %>% mutate(Year= as.numeric(str_match(parameter, "\\[\\s*(.*?)\\s*\\,")[,2])) %>% arrange(Age,Year) %>% mutate(Cohort=paste("IM",Age,sep="")) %>% select(Cohort,Year, median) %>% spread(key=Cohort,value=median) %>% mutate(Ntot.IM = rowSums(across(where(is.numeric)))-Year) %>% filter(Year<19) %>% mutate(Year=Year+2003) %>% left_join(LTRE_input, by="Year") %>% mutate(Ntot.nonbreed=Ntot-Ntot.breed-Ntot.IM) fwrite(LTRE_input, "LTRE_input_extended.csv") ######################################################################### # DO THE ABOVE FOR ALL MCMC SAMPLES ######################################################################### load("TRAL_IPM_output_REV2022_FINAL.RData") str(TRALipm$mcmc) retain<-parameters[c(8,15,4,11,12,14,9,13,18)] ### need the following parameters from model #which(dimnames(TRALipm$mcmc[[1]])[[2]]=="lambda[17]") # lambda: 8-24 
which(dimnames(TRALipm$mcmc[[1]])[[2]]=="phi.ad[43]") # phi.ad: 177-194 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="phi.juv[43]") # phi.juv: 220-237 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="ann.fec[1]") # ann.fec: 256 - 273 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="breed.prop[18]") # breed.prop: 4-21 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="Ntot.breed[18]") # Ntot.breed: 238-255 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="Ntot[18]") # Ntot: 44-61 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="IM[1,1,1]") # IM: 321-860 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="IM[18,30,1]") # IM: 321-860 which(dimnames(TRALipm$mcmc[[1]])[[2]]=="agebeta") which(dimnames(TRALipm$mcmc[[1]])[[2]]=="mean.p.juv[2]") retain retaincols<-c(43, #agebeta 275, #mean.p.juv[2] 4:21, # breed.prop 177:194, #phi.ad 220:238, # phi.juv 256:273, # ann.fec 44:61, #Ntot 238:255, #Ntot.breed 321:860) # IM year 1:18 for ages 1:30 ### EXTRACT ALL FROM THE MODEL OUTPUT AND SAVE IN DIFFERENT LIST LTRE_input_mcmc<-as.matrix(TRALipm$mcmc[[1]])[,retaincols] str(LTRE_input_mcmc) for(ch in 2:nc){ LTRE_input_mcmc<-rbind(LTRE_input_mcmc,as.matrix(TRALipm$mcmc[[ch]])[,retaincols]) } rm(list= ls()[!(ls() %in% c('LTRE_input_mcmc','parameters'))]) save.image("TRAL_LTRE_input.RData") str(LTRE_input_mcmc)
acf1281141b40fdf307a0a2261a02125de6ebbb3
377b1ebf3f53e04cecc6e93fe88703f52421d06c
/code/worker.R
2a55f10adcbbafeebc3db59bd021bdef7686490d
[]
no_license
Bin-Chen-Lab/biomarker_curation
c953adfb73d999d8907c6a735c528e72374e595e
2ce42e221f1eb38def2a60abcb2409bf992f684a
refs/heads/master
2021-02-19T07:03:18.641244
2020-06-30T17:57:29
2020-06-30T17:57:29
245,288,124
0
1
null
2023-09-07T14:13:12
2020-03-05T23:23:51
HTML
UTF-8
R
false
false
1,807
r
worker.R
setwd("~/Documents/stanford/grant/2019/ot2/work/") ## #parse curation. have to specify file every time #name convention: output file name starts with curator name. input_file = "data/curation/FDA-drug_biomarkers-v9_BC_cleaned.xlsx" output_file = "data/parsed/Ben_FDA-drug_biomarkers-v9.csv" input_file = "data/curation/Breast_cancer_CLINICALTRIAL_v7_BC_cleaned.xlsx" #too many failed terms output_file = "data/parsed/Ben_Breast_cancer_CLINICALTRIAL_v7.csv" ##input_file = "data/curation/Breast_Cancer_austin.xlsx" ##output_file = "data/parsed/Austin_Breast_Cancer.csv" ##input_file = "data/curation/Breast_Cancer_tyler.xlsx" ##output_file = "data/parsed/Tyler_Breast_Cancer.csv" input_file = "data/curation/Breast_Cancer_15_16_17_18_19_Final_Excel.xlsx" output_file = "data/parsed/Breast_Cancer_Clinical_Trials_Annotated.csv" input_file = "data/curation/Liver_Clinical_Trials.xlsx" output_file = "data/parsed/Liver_Clinical_Trials.csv" #input_file = "data/curation/Breast-Cancer-Drug_biomarkers-pubmed-v4-030920-30-revised-2.xlsx" #output_file = "data/parsed/Ben_Breast_Cancer_pubmed.csv" input_file = "data/curation/Breast-Cancer-Drug_biomarkers-pubmed-031420-v5.xlsx" output_file = "data/parsed/Breast-Cancer-Drug_biomarkers-pubmed-031420-v5.csv" input_file = "data/curation/Liver_PUBMED_200.xlsx" output_file = "data/parsed/Liver_PUBMED_200.csv" cmd = paste("Rscript code/parser.R", input_file, output_file) system(cmd) ######## #merge all files from parser cmd = paste("Rscript code/merger.R") system(cmd) ####### #match patients to biomarker records system(paste("Rscript code/mapper_patient2biomarker.R")) #convert biomarker records to json system(paste("Rscript code/converter_json.R")) #convert biomarker records to relational database system(paste("Rscript code/converter_db.R"))
0498c2b182d8bf0c6b25cee76f402dd29e8df271
e7b352cdbccc680d50c589cc469af18dd8db53be
/inst/scriptsR2.15.0/Ch08.R
0e0da7e3cf4137c7eb8f380cc1477dd8eeb6ddda
[]
no_license
cran/nlmeU
6c14c211a7b6526d586979a96f56fc0852340531
6a642dabc3b2a3a6dbf7b7078bfa794377a876b0
refs/heads/main
2023-06-22T21:25:12.144953
2022-05-02T14:40:02
2022-05-02T14:40:02
17,697,875
0
7
null
null
null
null
UTF-8
R
false
false
2,342
r
Ch08.R
################################################### ### code chunk: Chap8init ################################################### options(width = 65, digits = 5, show.signif.stars = FALSE) date() packageVersion("nlmeU") packageVersion("nlme") sessionInfo() data(armd, package = "nlmeU") ## lm1.form was defined in Chapter 4 lm1.form <- formula(visual ~ -1 + visual0 + time.f + treat.f:time.f ) library(nlme) ################################################### ### code chunk: R8.1 ################################################### (val <- c("12wks" = 0.5, "24wks" = 2)) # delta1 = 1, delta2 = 0.5, delta3 = 2 (fix <- c("52wks" = 3)) # delta4 = 3 (fixed) frm <- formula(~1|time.f) # time.f is a stratifying factor (vf0 <- varIdent(value = val, # Var. function object defined... fixed = fix, form = frm)) (vf0i <- Initialize(vf0, armd)) # ... and initialized ################################################### ### code chunk: R8.2a ################################################### coef(vf0i, unconstrained = FALSE, allCoef = TRUE) # All delta coefs coef(vf0i, unconstrained = FALSE, allCoef = FALSE)# Varying only ################################################### ### code chunk: R8.2b ################################################### coef(vf0i, unconstrained = TRUE, allCoef = TRUE) # All delta* coefs coef(vf0i, unconstrained = TRUE, allCoef = FALSE) # Varying (default) coef(vf0i) <- c(-0.6, 0.7) # New coefs assigned coef(vf0i, allCoef = TRUE) # All coefs printed ################################################### ### code chunk: R8.3 ################################################### summary(vf0i) # Summary formula(vf0i) # Variance function formula getCovariate(vf0i) # Variance covariate getGroupsFormula(vf0i) # Formula for variance strata length(stratum <- # Length of stratum indicator getGroups(vf0i)) unique(stratum) # Unique strata stratum[1:6] # First six observations varWeights(vf0i)[3:6] # Variance weights 1/lambdai:(7.8) logLik(vf0i) # Contribution to the 
log-likelihood ###### sessionInfo() with packages attached sessionInfo() detach(package:nlme)
a0c3e5a6daf98ddd574af8e7e1dcc83158fd4dc2
88f75b3d6e11c51a0ca6ead5b34e0ebbfcc12a3f
/SOLUSv2/misc-list.R
0f2167b044f578761423347a3b643591a249bc4b
[]
no_license
ncss-tech/gridded-comparisons
35d6472fc4e13b650ccf9620d6cf0c4b50423c03
678f328d74c18268e85d2b9b37b1e8e05cdfface
refs/heads/master
2023-07-09T11:51:27.860439
2023-06-30T06:14:39
2023-06-30T06:14:39
189,273,682
1
0
null
null
null
null
UTF-8
R
false
false
5,697
r
misc-list.R
x <- list( list( bb = '-87.7152 37.8206,-87.7152 37.9503,-87.4826 37.9503,-87.4826 37.8206,-87.7152 37.8206', caption = 'Evansville, IN', url = 'https://casoilresource.lawr.ucdavis.edu/gmap/?loc=37.88549,-87.59889,z13' ), list( bb = '-98.5020 37.9187,-98.5020 38.2026,-98.0846 38.2026,-98.0846 37.9187,-98.5020 37.9187', caption = 'KS155', url = '' ), list( bb = '-97.0893 39.0262,-97.0893 39.3938,-96.0347 39.3938,-96.0347 39.0262,-97.0893 39.0262', caption = 'Manhattan, KS', url = 'https://casoilresource.lawr.ucdavis.edu/gmap/?loc=39.17325,-96.51421,z11' ), list( bb = '-97.4697 30.9909,-97.4697 31.1322,-97.2391 31.1322,-97.2391 30.9909,-97.4697 30.9909', caption = 'Blacklands, TX', url = '' ), list( bb = '-96.8927 40.8209,-96.8927 40.8833,-96.7774 40.8833,-96.7774 40.8209,-96.8927 40.8209', caption = 'Pawnee Lake, NE', url = '' ), list( bb = '-76.7537 36.6033,-76.7537 36.7356,-76.5232 36.7356,-76.5232 36.6033,-76.7537 36.6033', caption = 'VA800', url = '' ), list( bb = '-77.7619 37.0396,-77.7619 37.1711,-77.5314 37.1711,-77.5314 37.0396,-77.7619 37.0396', caption = 'VA653', url = '' ), list( bb = '-77.2112 36.8763,-77.2112 37.4023,-76.2891 37.4023,-76.2891 36.8763,-77.2112 36.8763', caption = 'Newport News, VA', url = 'https://casoilresource.lawr.ucdavis.edu/gmap/?loc=37.14144,-76.75289,z11' ), list( bb = '-95.1268 44.6068,-95.1268 44.8413,-94.6658 44.8413,-94.6658 44.6068,-95.1268 44.6068', caption = 'Renville County, MN', url = '' ), list( bb = '-78.6197 37.0657,-78.6197 37.1972,-78.3892 37.1972,-78.3892 37.0657,-78.6197 37.0657', caption = 'VA037-VA147', url = '' ), list( bb = '-78.6990 39.5652,-78.6990 39.6923,-78.4482 39.6923,-78.4482 39.5652,-78.6990 39.5652', caption = 'MD001', url = 'https://casoilresource.lawr.ucdavis.edu/gmap/?loc=39.62949,-78.56873,z12' ), list( bb = '-78.5514 39.5182,-78.5514 39.6454,-78.3006 39.6454,-78.3006 39.5182,-78.5514 39.5182', caption = 'MA-WV, Potomac River', url = 
'https://casoilresource.lawr.ucdavis.edu/gmap/?loc=39.5818,-78.426,z11' ), list( bb = '-82.8417 42.9094,-82.8417 43.1291,-82.3401 43.1291,-82.3401 42.9094,-82.8417 42.9094', caption = 'MI147', url = '' ), list( bb = '-89.9464 44.9456,-89.9464 45.1578,-89.4448 45.1578,-89.4448 44.9456,-89.9464 44.9456', caption = 'WI073', url = '' ), list( bb = '-122.1755 39.3733,-122.1755 39.5007,-121.8804 39.5007,-121.8804 39.3733,-122.1755 39.3733', caption = 'Sacramento River, Glenn County', url = 'https://casoilresource.lawr.ucdavis.edu/soil-properties/?prop=texture_025&lat=39.4129&lon=-122.0746&z=9' ), list( bb = '-79.9019 38.6567,-79.9019 38.7876,-79.5919 38.7876,-79.5919 38.6567,-79.9019 38.6567', caption = 'WV', url = '' ), list( bb = '-81.0511 27.3155,-81.0511 27.4620,-80.7560 27.4620,-80.7560 27.3155,-81.0511 27.3155', caption = 'FL', url = '' ), list( bb = '-89.2633 37.9833,-89.2633 38.0151,-89.1809 38.0151,-89.1809 37.9833,-89.2633 37.9833', caption = 'southern IL', url = '' ), list( bb = '-119.8402 38.9222,-119.8402 38.9863,-119.6911 38.9863,-119.6911 38.9222,-119.8402 38.9222', caption = 'Minden, NV', url = '' ), list( bb = '-76.9845 38.9764,-76.9845 39.0084,-76.9100 39.0084,-76.9100 38.9764,-76.9845 38.9764', caption = 'College Park, MD', url = '' ), list( bb = '-77.2527 38.9417,-77.2527 39.0058,-77.1036 39.0058,-77.1036 38.9417,-77.2527 38.9417', caption = 'College Park 2, MD', url = '' ), list( bb = '-97.4535 30.8660,-97.4535 31.1488,-96.9279 31.1488,-96.9279 30.8660,-97.4535 30.8660', caption = 'TX027', url = '' ), list( bb = '-96.7651 30.8599,-96.7651 30.9307,-96.6337 30.9307,-96.6337 30.8599,-96.7651 30.8599', caption = 'TX027 Zoom', url = '' ), list( bb = '-121.0269 38.0948,-121.0269 38.2246,-120.7641 38.2246,-120.7641 38.0948,-121.0269 38.0948', caption = 'Valley Springs, CA', url = '' ), list( bb = '-71.8018 41.3583,-71.8018 41.6054,-71.2762 41.6054,-71.2762 41.3583,-71.8018 41.3583', caption = 'RI600', url = '' ), list( bb = '-122.1354 39.5018,-122.1354 
39.6290,-121.8725 39.6290,-121.8725 39.5018,-122.1354 39.5018', caption = 'Sacramento River, Glenn - Butte co. boundary', url = '' ), list( bb = '-121.9556 39.6609,-121.9556 39.7878,-121.6928 39.7878,-121.6928 39.6609,-121.9556 39.6609', caption = 'Chico, CA', url = '' ), list( bb = '-119.5323 36.6515,-119.5323 36.7837,-119.2695 36.7837,-119.2695 36.6515,-119.5323 36.6515', caption = 'Gabbro, vertisols near Sanger, CA', url = '' ), list( bb = '-119.7997 36.6051,-119.7997 36.8695,-119.2741 36.8695,-119.2741 36.6051,-119.7997 36.6051', caption = 'Kings River alluvial fan, outwash sequences', url = '' ), list( bb = '-121.6715 36.4873,-121.6715 36.6198,-121.4087 36.6198,-121.4087 36.4873,-121.6715 36.4873', caption = 'Salinas Valley, CA', url = '' ), list( bb = '-120.7047 37.5502,-120.7047 37.6808,-120.4419 37.6808,-120.4419 37.5502,-120.7047 37.5502', caption = 'Turlock Lake, CA', url = '' ), list( bb = '-121.3049 36.4195,-121.3049 36.5521,-121.0420 36.5521,-121.0420 36.4195,-121.3049 36.4195', caption = 'Pinnacles National Park, CA', url = '' ), list( bb = '-97.0010 30.7474,-97.0010 31.0306,-96.4754 31.0306,-96.4754 30.7474,-97.0010 30.7474', caption = 'TX331-TX395', url = '' ) ) names(x) <- sprintf("%02d", 1:length(x))
ec425e04a5b10e7a5824f10ea35d9a4f1c737ffb
f3fb7f95a12a14a5cc8931bb60759e94b449c57b
/man/getZoteroBib.Rd
f3814011067d0da398a07a231fc26581576f5776
[]
no_license
patzaw/bibeatR
eb8a9c4201118a7950f604c02d9fbefb111a6b36
f45f3d75083bae71d3723b0759e9c941775da6a8
refs/heads/master
2020-09-16T23:39:53.470319
2019-12-04T05:24:15
2019-12-04T05:24:15
223,922,389
0
0
null
null
null
null
UTF-8
R
false
true
1,481
rd
getZoteroBib.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zoteroBib.R \name{getZoteroBib} \alias{getZoteroBib} \title{Get Zotero bibiliography from cloud} \usage{ getZoteroBib( baseURL = "https://api.zotero.org", userID, key, latestVersion = NA, excludedTypes = "attachment", by = 100, verbose = FALSE ) } \arguments{ \item{userID}{Zotero user identifier} \item{key}{Zotero key} \item{latestVersion}{A numeric indicating the version after which modified records should be taken (Default: NA ==> take all records)} \item{excludedTypes}{the types of record to exclude (default: "attachment")} \item{by}{Number of records to be taken at once (default: 100 (maximum value for Zotero API))} \item{verbose}{If TRUE messages regarding the download of records are displayed.} } \value{ A \link[tibble:tibble]{tibble::tibble} with a \code{latestVersion} single numeric attribute and the following fields: \itemize{ \item \strong{key}: the Zotero internal key of the record \item \strong{bib}: bibtex representation of the record \item \strong{type}: the type of the record \item \strong{id}: the record identifier \item \strong{title}: the record title \item \strong{journal}: the record journal \item \strong{year}: publication year \item \strong{authors}: the record authors \item \strong{pmid}: PubMed identifier \item \strong{doi}: Digital Object Identifier \item \strong{url}: the record URL } } \description{ Internal function (not exported) }
79a8bdedc0150562464644642191ce63c22173ef
b06a44444664a09816ae2570900366bcccd3e96d
/plot1.R
dfe676cdefcab1e0b10f9789a8130159af47122e
[]
no_license
kataletho/ExData_Plotting1
322b09a98e316251d73c675c0af5bcd12f7fec99
6f1ce36b1d8adae7e384cf02761b30a334e08c3a
refs/heads/master
2021-01-18T12:31:01.553675
2014-09-07T19:44:54
2014-09-07T19:44:54
null
0
0
null
null
null
null
UTF-8
R
false
false
1,143
r
plot1.R
# Plots a histogram of the household global minute-averaged active power # (in kilowatt) for the 1st and 2nd of February 2007 based on data from # the UC Irvine Machine Learning Repository at # https://d396qusza40orc.cloudfront.net/ # exdata%2Fdata%2Fhousehold_power_consumption.zip library(dplyr) library(data.table) # Load the household global active power consumption data for the # 1st and 2nd of February 2007 from household_power_consumption.txt data <- tbl_dt(fread("./data/household_power_consumption.txt", na.strings=c("NA","?",""), stringsAsFactors=F, colClasses = "character")) %>% filter(Date=="1/2/2007" | Date=="2/2/2007") %>% select(Date, Time, Global_active_power) %>% mutate(Global_active_power = as.numeric(Global_active_power)) # Open the png file png(filename = "plot1.png", width = 480, height = 480, units = "px") # Plot the data with(data, hist(Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")) # Save and close the png file dev.off()
1be21fb66085830136291d075e2edac6d8578d49
94ff26a01bd3aed034b2ecef75c793782d42e8f3
/plots_from_data.R
63d579c2fd0abf7bb3b09541ffc12ee291bbe8a2
[]
no_license
CarolineNM/stochastic_vs_deterministic
2b42d2bb093dc8abea5fa33de099287ae1fd9516
b0a09a9b6b7f954559e066487767792df0782460
refs/heads/master
2021-09-08T18:04:53.728484
2018-03-11T17:33:39
2018-03-11T17:33:39
null
0
0
null
null
null
null
UTF-8
R
false
false
13,621
r
plots_from_data.R
# Making plots from saved data setwd("C:/Users/Janetta Skarp/OneDrive - Imperial College London/MRes_BMR/Project_1/Work_folder/Data") re_bootstrap <- read.csv("re_betagamma_bootstrap_27.01.18.csv") re_point <- read.csv("re_betagamma_point_27.01.18.csv") stoch_mcmc <- read.csv("stoch_mcmc_betagamma_25.01.18.csv") det_mcmc <- read.csv("det_mcmc_betagamma_25.01.18.csv") burnIn = 250 ######################## ## With constant xlim ## ######################## # Histogram par(mfrow = c(2,3)) # RE beta hist(as.numeric(re_bootstrap[1,2:ncol(re_bootstrap)]),nclass=30, main="RE Beta", xlab="Beta", xlim= c(0.002, 0.007)) abline(v = re_point[1,2], col = "red") # Det MCMC beta hist(as.numeric(det_mcmc[1,burnIn:ncol(det_mcmc)]),nclass=30, main="Det MCMC Beta", xlab="Beta", xlim= c(0.002, 0.007)) abline(v = mean(as.numeric(det_mcmc[1,burnIn:ncol(det_mcmc)])), col = "red") # Stoch MCMC beta hist(as.numeric(stoch_mcmc[1,burnIn:ncol(stoch_mcmc)]),nclass=30, main="Stoch MCMC Beta", xlab="Beta", xlim= c(0.002, 0.007)) abline(v = mean(as.numeric(stoch_mcmc[1,burnIn:ncol(stoch_mcmc)])), col = "red") # RE gamma hist(as.numeric(re_bootstrap[2,2:ncol(re_bootstrap)]),nclass=30, main="RE Gamma", xlab="Gamma", xlim=c(0.05,0.11)) abline(v = re_point[2,2], col = "red") # Det MCMC gamma hist(as.numeric(det_mcmc[2, burnIn:ncol(det_mcmc)]),nclass=30, main="Det MCMC Gamma", xlab="Gamma", xlim=c(0.05,0.11)) abline(v = mean(as.numeric(det_mcmc[2,burnIn:ncol(det_mcmc)])), col = "red") # Stoch MCMC gamma hist(as.numeric(stoch_mcmc[2, burnIn:ncol(stoch_mcmc)]),nclass=30, main="Stoch MCMC Gamma", xlab="Gamma", xlim=c(0.05,0.11)) abline(v = mean(as.numeric(stoch_mcmc[2,burnIn:ncol(stoch_mcmc)])), col = "red") ################## ## Without xlim ## ################## # Histogram par(mfrow = c(2,3)) # RE beta hist(as.numeric(re_bootstrap[1,2:ncol(re_bootstrap)]),nclass=30, main="RE Beta", xlab="Beta") abline(v = re_point[1,2], col = "red") # Det MCMC beta 
hist(as.numeric(det_mcmc[1,250:ncol(det_mcmc)]),nclass=30, main="Det MCMC Beta", xlab="Beta") abline(v = mean(as.numeric(det_mcmc[1,250:ncol(det_mcmc)])), col = "red") # Stoch MCMC beta hist(as.numeric(stoch_mcmc[1,250:ncol(stoch_mcmc)]),nclass=30, main="Stoch MCMC Beta", xlab="Beta") abline(v = mean(as.numeric(stoch_mcmc[1,250:ncol(stoch_mcmc)])), col = "red") # RE gamma hist(as.numeric(re_bootstrap[2,2:ncol(re_bootstrap)]),nclass=30, main="RE Gamma", xlab="Gamma") abline(v = re_point[2,2], col = "red") # Det MCMC gamma hist(as.numeric(det_mcmc[2, 250:ncol(det_mcmc)]),nclass=30, main="Det MCMC Gamma", xlab="Gamma") abline(v = mean(as.numeric(det_mcmc[2,250:ncol(det_mcmc)])), col = "red") # Stoch MCMC gamma hist(as.numeric(stoch_mcmc[2, 250:ncol(stoch_mcmc)]),nclass=30, main="Stoch MCMC Gamma", xlab="Gamma") abline(v = mean(as.numeric(stoch_mcmc[2,250:ncol(stoch_mcmc)])), col = "red") ################################### ## Plot code from residual error ## ################################### re_data1 <- read.csv("re_betagamma_badstart_01.02.18.csv") # Starting parameter guess not based on point estimate re_data2 <- read.csv("re_betagamma_goodstart_01.02.18.csv") # Starting parameter guess based on point estimate # print(c(re_data$beta[1], re_data$gamma[1])) # Histogram par(mfrow = c(1,3)) # Beta hist(re_data1$beta[2:nrow(re_data1)],nclass=30, col = rgb(0.1,0.1,0.1,0.5), main="Beta", xlab="Beta value") abline(v = re_data1$beta[1], col = "red") hist(re_data2$beta[2:nrow(re_data2)],nclass=30, col=rgb(0.8,0.8,0.8,0.5), add = T) abline(v = re_data2$beta[1], col = "red") box() # Gamma hist(re_data1$gamma[2:nrow(re_data1)],nclass=30, col = rgb(0.1,0.1,0.1,0.5), main="Gamma", xlab="Gamma value") abline(v = re_data1$gamma[1], col = "red") hist(re_data2$gamma[2:nrow(re_data2)],nclass=30, col=rgb(0.8,0.8,0.8,0.5), add = T) abline(v = re_data2$gamma[1], col = "red") box() # Residual error hist(re_data1$RE[2:nrow(re_data1)],nclass=30, col=rgb(0.1,0.1,0.1,0.5), main="RE", 
xlab="Residual Error") abline(v = re_data1$RE[1], col = "red") hist(re_data2$RE[2:nrow(re_data2)],nclass=30, col=rgb(0.8,0.8,0.8,0.5), add = T) abline(v = re_data2$RE[1], col = "red") box() # Beta vs. Gamma par(mfrow = c(1,1)) plot(x = re_data1$gamma, y = re_data1$beta, col = rgb(1,0,0,0.5), xlab = "Gamma", ylab = "Beta", pch = 4, cex = 1) points(x = re_data2$gamma, y = re_data2$beta, col = rgb(0,0,0,0.5), xlab = "Gamma", ylab = "Beta", pch = 1, cex = 1) # # Lines # run_det <- as.data.frame(ode(y = init.values, times = times, func = sir, parms = sse_fit$par)) # # par(mfrow = c(1,1)) # plot(run_stoch$R, ylim = c(0, N), type = "l", col = "orange", xlab = "Timestep", ylab = "Number of individuals") # lines(run_det$I, type = "l", col = "red", xlab = " ", ylab = " ") # lines(run_stoch$I, type = "l", col = "grey", xlab = " ", ylab = " ") # lines(run_det$R, type = "l", col = "black", xlab = "", ylab = "") # legend(100, 0.5*N, c("Deterministic recovered", "True recovered", "Deterministic infected", "True infected"), pch = 1, col = c("black", "orange", "red", "grey"), bty = "n") # Making a heatmap for beta vs. 
gamma residual errors if (!require("plotly")) install.packages("plotly") library("plotly") #package for solving differential equations # heatmap <- read.csv("re_heatmap_test.csv") heatmap <- read.csv("re_heatmap_small_range.csv") matrix_heatmap <- xtabs(RE~beta+gamma, data=heatmap) beta <- seq(min(heatmap$beta), max(heatmap$beta), by = ((max(heatmap$beta)-min(heatmap$beta))/nrow(matrix_heatmap))) gamma <- seq(min(heatmap$gamma), max(heatmap$gamma), by = ((max(heatmap$gamma)-min(heatmap$gamma))/nrow(matrix_heatmap))) tick_beta <- list( autotick = FALSE, ticks = "outside", tick0 = min(heatmap$beta), dtick = ((max(heatmap$beta)-min(heatmap$beta))/nrow(matrix_heatmap)), ticklen = 5, tickwidth = 2, tickcolor = toRGB("blue") ) tick_gamma <- list( autotick = FALSE, ticks = "outside", tick0 = min(heatmap$gamma), dtick = ((max(heatmap$gamma)-min(heatmap$gamma))/nrow(matrix_heatmap)), tickangle = 45, ticklen = 5, tickwidth = 2, tickcolor = toRGB("blue") ) m <- list( l = 100, r = 5, b = 80, t = 5, pad = 4 ) # vals <- unique(scales::rescale(c(matrix_heatmap))) # o <- order(10*vals, decreasing = FALSE) # cols <- scales::col_numeric("Blues", domain = NULL)(-10*vals) # colz <- setNames(data.frame(vals[o], cols[o]), NULL) # p <- plot_ly(z = volcano, colorscale = colz, type = "heatmap") # col_num = 80 # grays <- array(dim = c(col_num)) # for (i in 1:col_num){ # grays[i] = paste("gray",i+(100-col_num), sep = "") # } plot_heatmap <- plot_ly(z = matrix_heatmap, x = ~gamma, y = ~beta, colors = colorRamp(c("yellow","darkorange2","orangered","red","maroon","magenta4","blue", "navy", "midnightblue")), type = "heatmap") %>% layout(xaxis = tick_gamma, yaxis = tick_beta, margin = m) plot_heatmap # rev(c("white", "yellow", "gold", "goldenrod1","orange", "darkorange","darkorange2", "orangered","red","firebrick3","firebrick","firebrick4", "deeppink4", "darkmagenta", "darkorchid4", "darkslateblue", "dodgerblue4", "dodgerblue3", "deepskyblue3", "turquoise3", "turquoise", "palegreen2", 
"palegreen3", "palegreen4"))), # colorRamp(c("yellow", "darkorange2","orangered","red", "maroon","magenta4","blue", "navy", "midnightblue")), #################################### ## Plot code from stochastic MCMC ## #################################### # plot(run_stoch$time, run_stoch$R, ylim = c(0,N), type = "l", col = "orange", xlab = "time (days)", ylab = "Number infectious/recovered") # par(new=T) # plot(run_stoch$time, run_stoch$guess_I, ylim = c(0,N), type = "l", col = "red", xlab = " ", ylab = " ") # par(new=T) # plot(x = run_stoch$time, y = run_stoch$I, type = "l", col = "black", ylim = c(0,N), xlab = " ", ylab = " ") # legend(60, 0.8*N, c("Recovered", "Guessed infected", "True infected"), pch = 1, col = c("orange", "red", "black"), bty = "n") # # plot(run_stoch$guess_I, ylim = c(0, N), type = "l", col = "red", xlab = "Timestep", ylab = "Number of individuals infected") # lines(run_stoch$I, type = "l", col = "grey", xlab = " ", ylab = " ") # lines(temp_chain[,1,2], type = "l", lty = 2, col = "black", xlab = " ", ylab = " ") # legend(130, 1.0*N, c("True infected", "Guessed infected", "MCMC"), pch = 1, col = c("grey", "red", "black"), bty = "n") # # The beginning of the chain is biased towards the starting point, so take them out # normally burnin is 10%-50% of the runs # burnIn = 0.1*(iterations/divisor) # acceptance <- 1-mean(duplicated(chain[,-(1:burnIn),])) # inf_acceptance <- 1-mean(duplicated(chain[,-(1:burnIn),2])) # # #Histogram # par(mfrow = c(2,2)) # # hist(chain[1,-(1:burnIn),1],nclass=30, main="Posterior of beta") # abline(v = mean(chain[1,-(1:burnIn),1]), col = "red") # # hist(chain[2, -(1:burnIn),1],nclass=30, main="Posterior of gamma") # abline(v = mean(chain[2,-(1:burnIn),1]), col = "red") # # plot(chain[1, -(1:burnIn),1], type = "l", main = "Chain values of beta") # # plot(chain[2, -(1:burnIn),1], type = "l", main = "Chain values of gamma") # # # Plot beta vs. 
gamma # par(mfrow = c(1,1)) # library(RColorBrewer) # library(MASS) # # plot(x = chain[2,,1], y = chain[1,,1], xlab = "Gamma", ylab = "Beta", pch = 20, cex = 0.8) # # k <- 11 # my.cols <- rev(brewer.pal(k, "RdYlBu")) # z <- kde2d(chain[2,,1], chain[1,,1], n=50) # filled.contour(z, nlevels=k, col=my.cols, xlab = "Gamma", ylab = "Beta") # # par(mfrow = c(1,1)) # # plot(run_stoch$guess_I, ylim = c(0, N), type = "l", col = "red", xlab = "Timestep", ylab = "Number of individuals infected") # lines(run_stoch$I, type = "l", col = "grey", xlab = " ", ylab = " ") # lines(chain[,ncol(chain),2], type = "l", lty = 2, col = "black", xlab = " ", ylab = " ") # legend(130, 1.0*N, c("True infected", "Guessed infected", "MCMC"), pch = 1, col = c("grey", "red", "black"), bty = "n") # # plot(run_stoch$guess_I, ylim = c(0, N), type = "l", col = "red", xlab = "Timestep", ylab = "Number of individuals infected") # lines(run_stoch$I, type = "l", col = "grey", xlab = " ", ylab = " ") # for (i in 1:ncol(chain)){ # lines(chain[,i,2], type = "l", lty = 2, col = "black", xlab = " ", ylab = " ") # } # legend(130, 1.0*N, c("True infected", "Guessed infected", "MCMC"), pch = 1, col = c("grey", "red", "black"), bty = "n") ####################################### ## Plot code from deterministic MCMC ## ####################################### # det_sir <- ode(y = init.values, times = times, func = sir, parms = temp_chain[1:2,]) # det_sir <- as.data.frame(det_sir) # # S = array(0, dim = (c(nrow(run_stoch)))) # new_I = array(0, dim = (c(nrow(run_stoch)))) # # for (i in 1:nrow(run_stoch -1)){ # S[i] = (N - (round(det_sir$I[i]) + run_stoch$R[i])) # Susceptibles for timestep i # new_I[i] = if (i == 1){ # round(det_sir$I[i]) # } else { # (round(det_sir$I[i+1]) - round(det_sir$I[i]) + run_stoch$R[i+1] - run_stoch$R[i]) # new I for timestep i+1 # } # } # # par(mfrow = c(2,1)) # # plot(run_stoch$R, ylim = c(0, N), type = "l", col = "orange", xlab = "Timestep", ylab = "Number of individuals") # 
lines(round(det_sir$I), type = "l", col = "red", xlab = " ", ylab = " ") # lines(run_stoch$I, type = "l", col = "grey", xlab = " ", ylab = " ") # lines(round(det_sir$R), type = "l", col = "black", xlab = "", ylab = "") # lines(S, type = "l", col = "darkolivegreen3", xlab = "", ylab = "") # legend(100, 0.5*N, c("Deterministic recovered", "True recovered", "Deterministic infected", "True infected", "Susceptible"), pch = 1, col = c("black", "orange", "red", "grey", "darkolivegreen3"), bty = "n") # # plot(new_I, ylim = c(-10, N*0.25), type = "l", col = "red", xlab = "Timestep", ylab = "Number of individuals") # # lines(run_stoch$new_R, type = "l", col = "orange", xlab = "", ylab = "") # lines(run_stoch$new_I, type = "l", col = "grey", xlab = "", ylab = "") # legend(100, 0.5*(N*0.25), c("Newly infected", "True newly infected"), pch = 1, col = c("red", "grey"), bty = "n") # The beginning of the chain is biased towards the starting point, so take them out # normally burnin is 10%-50% of the runs # burnIn = 0.1*(iterations/divisor) # acceptance <- 1-mean(duplicated(chain[,-(1:burnIn)])) # ## MCMC Plots # par(mfrow = c(2,2)) # # hist(chain[1,-(1:burnIn)],nclass=30, main="Posterior of beta") # abline(v = mean(chain[1,-(1:burnIn)]), col = "red") # # hist(chain[2, -(1:burnIn)],nclass=30, main="Posterior of gamma") # abline(v = mean(chain[2,-(1:burnIn)]), col = "red") # # plot(chain[1,], type = "l", main = "Chain values of beta") # # plot(chain[2,], type = "l", main = "Chain values of gamma") # # # Plot beta vs. 
gamma # par(mfrow = c(1,1)) # library(RColorBrewer) # library(MASS) # # plot(x = chain[2,], y = chain[1,], xlab = "Gamma", ylab = "Beta", pch = 20, cex = 0.8) # # k <- 11 # my.cols <- rev(brewer.pal(k, "RdYlBu")) # z <- kde2d(chain[2,], chain[1,], n=50) # filled.contour(z, nlevels=k, col=my.cols, xlab = "Gamma", ylab = "Beta") # ## Likelihood plot # par(mfrow = c(1,2), mar=c(5,6,2,0.5)) # plot(chain[3,], type = "l", main = "Chain values of log likelihood", xlab = "", ylab = "Log(likelihood)") # mtext("Iteration x100",side=1,line=2) # plot(chain[3,-(1:burnIn)], type = "l", main = "Zoomed in" , xlab = "", ylab = "Log(likelihood)") # mtext("(Iteration - burn-in) x100",side=1,line=2)
1fba521ff57cd8db69128fb423c3bbb86941cbae
689a1102732f036813cc946e84f823c4d6e05f4a
/KOD Bubbelplot + stapelplot.R
b3187c57a72f8caa568759a0037a18e11903308d
[]
no_license
westbergss/R-Code-Bachelor-Thesis-Statistics
4b2c8a9c15918cf704a743f526834016cda2a0b0
b368ebe8aaf67577d322f63cb592b27ea4d19116
refs/heads/master
2022-07-26T05:40:27.378813
2020-05-15T10:13:34
2020-05-15T10:13:34
null
0
0
null
null
null
null
ISO-8859-1
R
false
false
5,654
r
KOD Bubbelplot + stapelplot.R
####################################### # Boxplot av medelvärden med errors ####################################### Boxplot1 <- data4 %>% group_by(lannr) %>% summarise(mean_PL = mean(Vardkostnad), sd_PL = sd(Vardkostnad), n_PL = n(), SE_PL = sd(Vardkostnad)/sqrt(n())) Plotsummary <- ggplot(Boxplot1, aes(lannr, mean_PL)) + geom_col() + geom_errorbar(aes(ymin = mean_PL - sd_PL, ymax = mean_PL + sd_PL), width=0.2) Plotsummary + labs(y="Genomsnittlig vårdkostnad (kr) ± s.d.", x = "Län") + theme_classic() ### Loggade versionen test <- data4[,c("Vardkostnad","lannr")] test <- mutate(test, logkostnad = log(Vardkostnad)) test <- test[,c("lannr","logkostnad")] test$logkostnad[test$logkostnad == -Inf] <- 0 Boxplot2 <- test %>% group_by(lannr) %>% summarise(mean_PL = mean(logkostnad), sd_PL = sd(logkostnad), n_PL = n(), SE_PL = sd(logkostnad)/sqrt(n())) Plotsummary2 <- ggplot(Boxplot2, aes(lannr, mean_PL)) + geom_col() + geom_errorbar(aes(ymin = mean_PL - sd_PL, ymax = mean_PL + sd_PL), width=0.2) Plotsummary2 + labs(y="Ln av genomsnittlig vårdkostnad (kr) ± s.d.", x = "Län") + theme_classic() #### Bubbpleplot av snittkostnader df.medel$Urvalsstorlek <- c("3904", "2639", "3210", "3986", "1905", "2665", "1436", "12314", "2554", "16338", "1749", "3290", "1874", "2529", "2276", "2580", "2860", "2806") df.medel$Urvalsstorlek <- as.numeric(df.medel$Urvalsstorlek) df.medel$Antalinvanare <- c("376354", "294695", "461583", "360825", "199886", "244670", "159684", "1362000", "329352", "1710000", "281482", "302252", "273929", "287191", "286547", "245453", "270154", "250497") df.medel$Antalinvanare <- as.numeric(df.medel$Antalinvanare) ggplot(df.medel, aes(x=Urvalsstorlek, y=Medelkostnad, size = Antalinvanare, color=Medelkostnad, label = rownames(lannr))) + geom_point(alpha=0.3) + scale_size(range = c(2, 24), name="Antal invånare per län") + geom_hline(yintercept=87488, linetype="dashed", color="red", size=1) + geom_text(data=df.medel, aes(Urvalsstorlek, Medelkostnad, label = lannr), 
colour = I(alpha("Black", 1)), size = 4 ); #### Bubbpleplot av snittskillnader df.skillnad$Urvalsstorlek <- c("3904", "2639", "3210", "3986", "1905", "2665", "1436", "12314", "2554", "16338", "1749", "3290", "1874", "2529", "2276", "2580", "2860", "2806") df.skillnad$Urvalsstorlek <- as.numeric(df.skillnad$Urvalsstorlek) df.skillnad$Antalinvanare <- c("376354", "294695", "461583", "360825", "199886", "244670", "159684", "1362000", "329352", "1710000", "281482", "302252", "273929", "287191", "286547", "245453", "270154", "250497") df.skillnad$Antalinvanare <- as.numeric(df.medel$Antalinvanare) ggplot(df.skillnad, aes(x=Urvalsstorlek, y=Medelskillnad, size = Antalinvanare, color=Medelskillnad, label = rownames(lannr))) + geom_point(alpha=0.3) + scale_size(range = c(2, 24), name="Antal invånare per län (2019)") + geom_hline(yintercept=12188, linetype="dashed", color="red", size=1) + geom_text(data=df.skillnad, aes(Urvalsstorlek, Medelskillnad, label = lannr), colour = I(alpha("Black", 1)), size = 4 ); summary(data4$kostnadsskillnad) #### Bubbleplot av snittdrgikr df$Urvalsstorlek <- c("3904", "2639", "3210", "3986", "1905", "2665", "1436", "12314", "2554", "16338", "1749", "3290", "1874", "2529", "2276", "2580", "2860", "2806") df$Urvalsstorlek <- as.numeric(df.medel$Urvalsstorlek) df$Antalinvanare <- c("376354", "294695", "461583", "360825", "199886", "244670", "159684", "1362000", "329352", "1710000", "281482", "302252", "273929", "287191", "286547", "245453", "270154", "250497") df$Antalinvanare <- as.numeric(df.medel$Antalinvanare) ggplot(df, aes(x=Urvalsstorlek, y=Medeldrgikr, size = Antalinvanare, color="skyblue3", label = rownames(lannr))) + geom_point(alpha=0.3) + scale_size(range = c(2, 24), name="Antal invånare per län") + geom_text(data=df, aes(Urvalsstorlek, Medeldrgikr, label = lannr), colour = I(alpha("Black", 1)), size = 4 ); #### Barplot av Medelvärden Kostnad<-ggplot(data=df.medel, aes(x=as.factor(lannr), y=Medelkostnad)) + geom_bar(stat="identity", 
fill="steelblue")+ geom_hline(yintercept=87488, linetype="dashed", color="red", size=1) + theme_minimal() Kostnad Skillnad<-ggplot(data=df, aes(x=as.factor(lannr), y=Medelskillnad)) + geom_bar(stat="identity", fill="steelblue")+ geom_hline(yintercept=12188, linetype="dashed", color="red", size=1) + theme_minimal() Skillnad Dagar<-ggplot(data=df, aes(x=as.factor(lannr), y=Medeldagar)) + geom_bar(stat="identity", fill="steelblue")+ geom_hline(yintercept=12.42, linetype="dashed", color="red", size=1) + theme_minimal() Dagar drgikr<-ggplot(data=df, aes(x=as.factor(lannr), y=Medeldrgikr)) + geom_bar(stat="identity", fill="steelblue")+ geom_hline(yintercept=75301, linetype="dashed", color="red", size=1) + theme_minimal() drgikr ###################################################### # Snitt estimerad kostnad vs snitt faktisk kostnad ###################################################### ggplot(data = dfmedel %>% gather(Variable, Medeldrgikr, -lannr), aes(x = as.factor(lannr), y = Medeldrgikr, fill = Variable)) + geom_bar(stat = 'identity', position = 'dodge') + labs(y= "Kronor", x = "Län") + geom_hline(yintercept=73604, linetype="dashed", color="red", size=1) + geom_hline(yintercept=87488, linetype="dashed", color="skyblue3", size=1) facet_grid(~lannr, scales = 'free_x', space = 'free') summary(data4$Vardkostnad)
abfd04c23e3eec4e702f588e5a8f0f9d3bf8c4cd
5148465eb3d690d04d38f8888fe8b930a1cbdeae
/Make3DScatterplot.R
73b219dccb0af80144defa11c06128cbf25723cc
[]
no_license
onlineclass/DevDataProducts-Shiny
ba3ad696b72c6c7101e2423c33224065d7555815
331d2fb7fa5d96ab0602b2694e9b5fae852828c7
refs/heads/master
2020-05-16T15:11:58.619169
2014-08-19T15:14:56
2014-08-19T15:14:56
null
0
0
null
null
null
null
UTF-8
R
false
false
3,669
r
Make3DScatterplot.R
## This script contains only one function - plot.3D.data - which creates a 3D ## scatter plot of the 3D data points in the provided data set. ## The data set must contain a total of 4 columns: one column for each of ## the cartesian coordinates x, y and z and one column for the class value ## (outcome or label) of the 3D point. The outcome is always assumed to be the ## last column of the data set. One color will be chosen for each value of the ## outcome, except the one value to be ignored which must be specified at the ## function call time. The function takes as input the following optional ## parameters: ## - data = the data set to be plotted ## - density.plot = visibility of the density contour lines projected ## on xy, xz and yz planes ## plot.3D.data <- function(data, density.plot = T) { par(mar = c(1, 1, 1, 1)) ## Get the outcome column index outcome.ndx = ncol(data) ## Create the subset of the original data which will be plotted plot.data <- data[data[,outcome.ndx] == "inside",] ## Define the background color bg.col <- c("#FF000088") ## Extract a sample of the raw data (4% of the data points) for the plot set.seed(1443) sample.plot.data <- plot.data[sample(1:nrow(plot.data), as.integer(0.6 * nrow(plot.data)), prob = rep(1 / nrow(plot.data), nrow(plot.data))),] x.intv <- c(min(data[,1]) - 5, max(data[,1]) + 5) y.intv <- c(min(data[,2]) - 5, max(data[,2]) + 5) z.intv <- c(min(data[,3]) - 5, max(data[,3]) + 5) s3d <- with(sample.plot.data, scatterplot3d(z ~ x + y, angle = 55, scale.y = 0.75, type = "n", pch = 21, bg = bg.col, x.ticklabs = as.character( c(-20, -15, -10, -5, 0, 5, 10, 15, 20)), grid = F, xlim = x.intv, ylim = y.intv, zlim = z.intv)) # Check if the density contour lines should be visible if (density.plot == T) { xyDensity <- kde2d(sample.plot.data$x, sample.plot.data$y, lims = c(x.intv, y.intv), n = 80) clines.xy <- contourLines(xyDensity, nlevels = 8) xzDensity <- kde2d(sample.plot.data$x, sample.plot.data$z, lims = c(x.intv, z.intv), n = 80) 
clines.xz <- contourLines(xzDensity, nlevels = 8) yzDensity <- kde2d(sample.plot.data$y, sample.plot.data$z, lims = c(y.intv, z.intv), n = 80) clines.yz <- contourLines(yzDensity, nlevels = 8) lapply(clines.xy, function(cl) { polygon(s3d$xyz.convert(cl$x, cl$y, rep(-10, length(cl$x))), lwd = 1, border = "#50505088") }) lapply(clines.xz, function(cl) { polygon(s3d$xyz.convert(cl$x, rep(20, length(cl$x)), cl$y), lwd = 1, border = "#50505088") }) lapply(clines.yz, function(cl) { polygon(s3d$xyz.convert(rep(-20, length(cl$x)), cl$x, cl$y), lwd = 1, border = "#50505088") }) } # Now draw the actual points with(sample.plot.data, s3d$points3d(z ~ x + y, pch = 21, col = "black", bg = bg.col)) }
47ec691a42971ae5941b8fddbece7a631a1b2cd9
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Compounding/examples/pgfIpolyaaeppli.Rd.R
aae779403f26153952e937ba5697a129b00752ea
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
722
r
pgfIpolyaaeppli.Rd.R
library(Compounding) ### Name: pgfIpolyaaeppli ### Title: Function pgfIpolyaaeppli ### Aliases: pgfIpolyaaeppli ### ** Examples params<-c(5,.4) pgfIpolyaaeppli(.5,params) ## The function is currently defined as pgfIpolyaaeppli <- function(s,params) { k<-s[abs(s)>1] if (length(k)>0) warning("At least one element of the vector s are out of interval [-1,1]") if (length(params)<2) stop("At least one value in params is missing") if (length(params)>2) stop("The length of params is 2") theta<-params[1] p<-params[2] if (theta<=0) stop ("Parameter theta must be positive") if ((p>=1)|(p<=0)) stop ("Parameter p belongs to the interval (0,1)") (theta+log(s))/(theta+p*log(s)) }
e34cf1ea26593de1093277f12eb3f7f36ce07eaf
ecfc1abfa8563404def7598044847d2a824f84b2
/SpyPlanes.R
d4ec3f7917e4e35ee928cbc53f0c462d65345eca
[]
no_license
qrm2228231/Spy-Plane-Finder
3e1e445c3b07db6d64c55416d9b6c8d54c316357
937436b1036a561a20a9eeb3d728d5db2ccef202
refs/heads/master
2022-04-11T17:38:15.287548
2018-05-20T20:17:26
2018-05-20T20:17:26
null
0
0
null
null
null
null
UTF-8
R
false
false
11,346
r
SpyPlanes.R
#Spy Plane finder #Loading libraries for reading data from CSV files library(readr) library(dplyr) library(Amelia) #loading full registration data of the planes registered= read.csv('faa_registration.csv',header=T,na.strings=c("")) #plotting missing values plot missmap(registered, main = "Missing values vs observed") #loading features and training data features = read.csv('planes_features.csv',header=T,na.strings=c("")) training = read.csv('train.csv',header=T,na.strings=c("")) #loading known federation planes data feds <- read.csv("feds.csv") features = features %>% mutate(type2=as.integer(as.factor(type))) head(features) training <- read.csv("train.csv") %>% inner_join(features, by="adshex") head(training) formula =as.factor(class) ~ duration1 + duration2 + duration3 + duration4 + duration5 + boxes1 + boxes2 + boxes3 + boxes4 + boxes5 + speed1 + speed2 + speed3 + speed4 + speed5 + altitude1 + altitude2 + altitude3 + altitude4 + altitude5 + steer1 + steer2 + steer3 + steer4 + steer5 + steer6 + steer7 + steer8 + flights + squawk_1 + observations + type2 #random forest model library(randomForest) set.seed(35) rfmodel <- randomForest(formula,data=training,metric="ROC",importance=TRUE,ntree=1000) rfmodel #plotting variable importance plot library(ggplot2) varImpPlot(rfmodel, pch = 20, main = "Variable Importance", color = "blue", cex = 1) #removing training planes and known federation planes data from features data labeling <- anti_join(features, training) %>% anti_join(feds) head(labeling) #predicting classes for the labeling data based on training results labelrf <- predict(rfmodel, labeling) #storing the predicted labels in dataframe based on each plane using thier adshex code labelrf_df <- data.frame(adshex = labeling$adshex, class = labelrf) #printing the planes summary based on each type typesrf <- labelrf_df %>% group_by(class) %>% summarize(count=n()) print(typesrf) #getting prediction probabilities for the 2 classes #based on probabilities, each type is 
classified into #surveil or other classes rfprobs <- as.data.frame(predict(rfmodel, labeling, type = "prob")) head(rfprobs) #with the probabilities, labeling the class and storing in a dataframe #in descending order of Surveil class data rfprobs_df<- bind_cols(as.data.frame(labeling$adshex), rfprobs) %>% mutate(adshex = labeling$adshex) %>% select(2:4) %>% arrange(desc(surveil)) %>% inner_join(features) %>% select(1:3,squawk_1) #displaying resulting probabilities and storing rfresults <- head(rfprobs_df, 1000) head(rfresults) #getting n_number, name and adshex from planes registration details based on adshex registered <- registered %>% select(1,7,34) names(registered) <- c("n_number","name","adshex") registered <- registered %>% mutate(reg = paste0("N",n_number)) %>% select(2:4) #joining the results with planes registraion details through adshex rfresults <- left_join(rfresults,registered, by="adshex") #exporting the resulting data to a csv file write.csv(rfresults, "rfresults.csv", na="") #------------- #CART library(caret) set.seed(71) #applying CART model on the training data cartmodel = train(formula,data=training, method="rpart") cartmodel plot(cartmodel) #results of the model summary(cartmodel) #predicting the labels for remaining data using trained input labelcart=predict(cartmodel, labeling) head(labelcart) #storing the predicted labels in dataframe based on each plane using thier adshex code labelcart_df <- data.frame(adshex = labeling$adshex, class = labelcart) #printing the planes summary based on each type typescart <- labelcart_df %>% group_by(class) %>% summarize(count=n()) print(typescart) #getting prediction probabilities for the 2 classes #based on probabilities, each type is classified into #surveil or other classes cartprobs <- as.data.frame(predict(cartmodel, labeling, type = "prob")) head(cartprobs) #with the probabilities, labeling the class and storing in a dataframe #in descending order of Surveil class data cartprobs_df<- 
bind_cols(as.data.frame(labeling$adshex), cartprobs) %>% mutate(adshex = labeling$adshex) %>% select(2:4) %>% arrange(desc(surveil)) %>% inner_join(features) %>% select(1:3,squawk_1) #displaying resulting probabilities and storing cartresults <- head(cartprobs_df, 1000) head(cartresults) #joining the results with planes registraion details through adshex cartresults <- left_join(cartresults,registered, by="adshex") #exporting the resulting data to a csv file write.csv(cartresults, "cartresults.csv", na="") #------------- #evtree library(evtree) set.seed(19) #applying evtree model on the training data evtreemodel = evtree(formula,data=training, method="class") evtreemodel #plotting the tree plot(evtreemodel) #predicting the labels for remaining data using trained input labelevtree=predict(evtreemodel, labeling) head(labelevtree) #storing the predicted labels in dataframe based on each plane using thier adshex code labelevtree_df <- data.frame(adshex = labeling$adshex, class = labelevtree) #printing the planes summary based on each type typesevtree <- labelevtree_df %>% group_by(class) %>% summarize(count=n()) print(typesevtree) #getting prediction probabilities for the 2 classes #based on probabilities, each type is classified into #surveil or other classes evtreeprobs <- as.data.frame(predict(evtreemodel, labeling, type = "prob")) head(evtreeprobs) #with the probabilities, labeling the class and storing in a dataframe #in descending order of Surveil class data evtreeprobs_df<- bind_cols(as.data.frame(labeling$adshex), evtreeprobs) %>% mutate(adshex = labeling$adshex) %>% select(2:4) %>% arrange(desc(surveil)) %>% inner_join(features) %>% select(1:3,squawk_1) #displaying resulting probabilities and storing evtreeresults <- head(evtreeprobs_df, 1000) head(evtreeresults) #joining the results with planes registraion details through adshex evtreeresults <- left_join(evtreeresults,registered, by="adshex") #exporting the resulting data to a csv file 
write.csv(evtreeresults, "evtreeresults.csv", na="") #------------- #ctree library(partykit) set.seed(87) #applying ctree model on the training data ctreemodel = ctree(formula,data=training) ctreemodel #plotting the tree plot(ctreemodel) #predicting the labels for remaining data using trained input labelctree=predict(ctreemodel, labeling) head(labelctree) #storing the predicted labels in dataframe based on each plane using thier adshex code labelctree_df <- data.frame(adshex = labeling$adshex, class = labelctree) #printing the planes summary based on each type typesctree <- labelctree_df %>% group_by(class) %>% summarize(count=n()) print(typesctree) #getting prediction probabilities for the 2 classes #based on probabilities, each type is classified into #surveil or other classes ctreeprobs <- as.data.frame(predict(ctreemodel, labeling, type = "prob")) head(ctreeprobs) #with the probabilities, labeling the class and storing in a dataframe #in descending order of Surveil class data ctreeprobs_df<- bind_cols(as.data.frame(labeling$adshex), ctreeprobs) %>% mutate(adshex = labeling$adshex) %>% select(2:4) %>% arrange(desc(surveil)) %>% inner_join(features) %>% select(1:3,squawk_1) #displaying resulting probabilities and storing ctreeresults <- head(ctreeprobs_df, 1000) head(ctreeresults) #joining the results with planes registraion details through adshex ctreeresults <- left_join(ctreeresults,registered, by="adshex") #exporting the resulting data to a csv file write.csv(ctreeresults, "ctreeresults.csv", na="") #------------- #C4.5 library(RWeka) set.seed(57) #applying c45 model on the training data c45model = J48(formula,data=training) c45model plot(c45model) #results of the model summary(c45model) #predicting the labels for remaining data using trained input labelc45=predict(c45model, labeling) head(labelc45) #storing the predicted labels in dataframe based on each plane using thier adshex code labelc45_df <- data.frame(adshex = labeling$adshex, class = labelc45) 
#printing the planes summary based on each type typesc45 <- labelc45_df %>% group_by(class) %>% summarize(count=n()) print(typesc45) #getting prediction probabilities for the 2 classes #based on probabilities, each type is classified into #surveil or other classes c45probs <- as.data.frame(predict(c45model, labeling, type = "prob")) head(c45probs) #with the probabilities, labeling the class and storing in a dataframe #in descending order of Surveil class data c45probs_df<- bind_cols(as.data.frame(labeling$adshex), c45probs) %>% mutate(adshex = labeling$adshex) %>% select(2:4) %>% arrange(desc(surveil)) %>% inner_join(features) %>% select(1:3,squawk_1) #displaying resulting probabilities and storing c45results <- head(c45probs_df, 1000) head(c45results) #joining the results with planes registraion details through adshex c45results <- left_join(c45results,registered, by="adshex") #exporting the resulting data to a csv file write.csv(c45results, "c45results.csv", na="") #------------- #bagging library(ipred) set.seed(29) #applying bagging model on the training data baggingmodel = bagging(formula,data=training) #results of the model summary(baggingmodel) #predicting the labels for remaining data using trained input labelbagging=predict(baggingmodel, labeling) head(labelbagging) #storing the predicted labels in dataframe based on each plane using thier adshex code labelbagging_df <- data.frame(adshex = labeling$adshex, class = labelbagging) #printing the planes summary based on each type typesbagging <- labelbagging_df %>% group_by(class) %>% summarize(count=n()) print(typesbagging) #getting prediction probabilities for the 2 classes #based on probabilities, each type is classified into #surveil or other classes baggingprobs <- as.data.frame(predict(baggingmodel, labeling, type = "prob")) head(baggingprobs) #with the probabilities, labeling the class and storing in a dataframe #in descending order of Surveil class data baggingprobs_df<- 
bind_cols(as.data.frame(labeling$adshex), baggingprobs) %>% mutate(adshex = labeling$adshex) %>% select(2:4) %>% arrange(desc(surveil)) %>% inner_join(features) %>% select(1:3,squawk_1) #displaying resulting probabilities and storing baggingresults <- head(baggingprobs_df, 1000) head(baggingresults) #joining the results with planes registraion details through adshex baggingresults = left_join(baggingresults,registered, by="adshex") #exporting the resulting data to a csv file write.csv(baggingresults, "baggingresults.csv", na="") #comparison of models: #misclassification rate: mc <- function(obj) 1 - mean(predict(obj) == training$class) trees <- list("RF"=rfmodel,"CART"=cartmodel,"evtree" = evtreemodel, "ctree" = ctreemodel, "C4.5"=c45model,"Bagging"=baggingmodel) round(sapply(trees, function(obj) c("misclassification" = mc(obj))),digits = 3)
aedfd4854d86bf47d0981e946fa7edaf212fa7ad
a0d169245ddf5c247463fe926acb142adc7b485f
/R/widget.R
9a275deb21856ff8a397016951951d90e3e12856
[]
no_license
ggobi/qtbase
87b5f838c3e4171fbefe7a64194d229aea3967a9
16afbafff319b5a6f7bf8d82e6751aee217d181e
refs/heads/master
2021-01-01T20:10:54.612110
2019-03-01T17:13:56
2019-03-01T17:14:36
954,675
16
3
null
2014-10-21T14:15:11
2010-10-01T16:41:44
C++
UTF-8
R
false
false
109
r
widget.R
### Conveniences for widgets print.QWidget <- function(x, ...) { x$show() NextMethod() invisible(x) }
b31882ca97b62eef0f44f948ec9e150a35c9953c
6f10771dc8f681b4731e0816318e6abce938c04a
/Data_Visualisation/Global.R
58875d7beb5857b9df2e8242d5c536b5f1c5cbdf
[]
no_license
sagidavid/Affordability
9205bc8f8c86fde56fea7b7528a4319f90a4f2f1
70b9a18c584a899e6fc440071bf6c3b7e8014819
refs/heads/master
2020-04-01T21:02:22.044222
2018-10-23T12:49:44
2018-10-23T12:49:44
153,634,399
0
1
null
null
null
null
UTF-8
R
false
false
1,149
r
Global.R
library(rgdal) Cities <- readOGR(dsn = "Deploy", layer = "Cities", GDAL1_integer64_policy = TRUE) Filter_Coastal <- readOGR(dsn = "Deploy", layer = "Filter_Coastal", GDAL1_integer64_policy = TRUE) Filter_Inland <- readOGR(dsn = "Deploy", layer = "Filter_Inland", GDAL1_integer64_policy = TRUE) Filter_Rural <- readOGR(dsn = "Deploy", layer = "Filter_Rural", GDAL1_integer64_policy = TRUE) Filter_Urban <- readOGR(dsn = "Deploy", layer = "Filter_Urban", GDAL1_integer64_policy = TRUE) Filter_England <- readOGR(dsn = "Deploy", layer = "Filter_England", GDAL1_integer64_policy = TRUE) Filter_Scotland <- readOGR(dsn = "Deploy", layer = "Filter_Scotland", GDAL1_integer64_policy = TRUE) Filter_Wales <- readOGR(dsn = "Deploy", layer = "Filter_Wales", GDAL1_integer64_policy = TRUE) RentDataPolygons <- readOGR(dsn = "Deploy", layer = "RentDataPolygons", GDAL1_integer64_policy = TRUE) BRMA_BoundingTable <- read.csv(file ="Deploy/BRMA_BoundingTable.csv", header = TRUE, sep = ",") RentDataTable <- read.csv(file ="Deploy/RentDataTable.csv", header = TRUE, sep = ",") summaryTable <- read.csv(file ="Deploy/summaryTable.csv", header = TRUE, sep = ",")
ac78957f54219da3c2c92d82a6df27184a45feea
adac781cc6578798e356c2ec594fad7d45bca9ab
/man/predict.enetLTS.Rd
1f714d4ff0c718131449688f7d0fb8e205d80fda
[]
no_license
VincentWtrs/enetLTS
db66a581dc2cdc4379ba8f64bfcb23c3ab886cdc
bef548043a51c17e3bd593049e0e9fce6d03328e
refs/heads/master
2021-12-26T11:33:34.270040
2019-03-26T13:47:59
2019-03-26T13:47:59
177,792,321
0
0
null
2019-03-26T13:18:26
2019-03-26T13:18:26
null
UTF-8
R
false
false
5,209
rd
predict.enetLTS.Rd
\name{predict.enetLTS} \alias{predict.enetLTS} %------------------------------------------------- \title{ make predictions from the \code{"enetLTS"} object. } %------------------------------------------------- \description{ Similar to other predict methods, this function predicts fitted values, logits, coefficients and nonzero coefficients from a fitted \code{"enetLTS"} object. } %------------------------------------------------- \usage{ \method{predict}{enetLTS}(object,newX,vers=c("reweighted","raw","both"), type=c("response","coefficients","nonzero","class"),...) } %------------------------------------ \arguments{ \item{object}{the model fit from which to make predictions.} \item{newX}{new values for the predictor matrix \code{X}. Must be a matrix; can be sparse as in \code{Matrix} package. This argument is not used for \code{type=c("coefficients","nonzero")}.} \item{vers}{a character string denoting which fit to use for the predictions. Possible values are \code{"reweighted"} (the default) for predicting values from the reweighted fit, \code{"raw"} for predicting values from the raw fit, or \code{"both"} for predicting values from both fits.} \item{type}{type of prediction required. \code{type="response"} gives the fitted probabilities for \code{"binomial"} and gives the fitted values for \code{"gaussian"}. \code{type="coefficients"} computes the coefficients from the fitted model. \code{type="nonzero"} returns a list of the indices of the nonzero coefficients. \code{type="class"} is available only for \code{"binomial"} model, and produces the class label corresponding to the maximum probability.} \item{\dots}{additional arguments from the \code{enetLTS} object if needed.} } %------------------------------------------------- \details{ The \code{newdata} argument defaults to the matrix of predictors used to fit the model such that the fitted values are computed. 
\code{coef.enetLTS(...)} is equivalent to \code{predict.enetLTS(object,newX,type="coefficients",...)}, where newX argument is the matrix as in \code{enetLTS}. } %------------------------------------------------- \value{ The requested predicted values are returned. } %------------------------------------------------- \seealso{ \code{\link{enetLTS}}, \code{\link{coef.enetLTS}}, \code{\link{nonzeroCoef.enetLTS}} } %------------------------------------------------- \examples{ ## for gaussian set.seed(86) n <- 100; p <- 25 # number of observations and variables beta <- rep(0,p); beta[1:6] <- 1 # 10\% nonzero coefficients sigma <- 0.5 # controls signal-to-noise ratio x <- matrix(rnorm(n*p, sigma),nrow=n) e <- rnorm(n,0,1) # error terms eps <- 0.1 # contamination level m <- ceiling(eps*n) # observations to be contaminated eout <- e; eout[1:m] <- eout[1:m] + 10 # vertical outliers yout <- c(x \%*\% beta + sigma * eout) # response xout <- x; xout[1:m,] <- xout[1:m,] + 10 # bad leverage points \donttest{ fit1 <- enetLTS(xout,yout,alphas=0.5,lambdas=0.05,plot=FALSE) predict(fit1,newX=xout) predict(fit1,newX=xout,type="coefficients",vers="both") predict(fit1,newX=xout,type="nonzero",vers="raw") # provide new X matrix newX <- matrix(rnorm(n*p, sigma),nrow=n) predict(fit1,newX=newX,type="response",vers="both") predict(fit1,newX=newX,type="coefficients") predict(fit1,newX=newX,type="nonzero",vers="both")} ## for binomial eps <-0.05 # \%10 contamination to only class 0 m <- ceiling(eps*n) y <- sample(0:1,n,replace=TRUE) xout <- x xout[y==0,][1:m,] <- xout[1:m,] + 10; # class 0 yout <- y # wrong classification for vertical outliers \dontshow{ set.seed(86) n <- 5; p <- 15 beta <- rep(0,p); beta[1:6] <- 1 sigma <- 0.5 x <- matrix(rnorm(n*p, sigma),nrow=n) e <- rnorm(n,0,1) # error terms eps <- 0.1 # contamination level m <- ceiling(eps*n) # observations to be contaminated eout <- e; eout[1:m] <- eout[1:m] + 10 # vertical outliers yout <- c(x \%*\% beta + sigma * eout) # response xout 
<- x; xout[1:m,] <- xout[1:m,] + 10 # bad leverage points fit2 <- enetLTS(xout,yout,alphas=0.5,lambdas=0.05,plot=FALSE) predict(fit2,newX=xout) } \donttest{ fit2 <- enetLTS(xout,yout,family="binomial",alphas=0.5,lambdas=0.05,plot=FALSE) predict(fit2,newX=xout) predict(fit2,newX=xout,type="coefficients",vers="both") predict(fit2,newX=xout,type="nonzero",vers="raw") predict(fit2,newX=newX,type="class",vers="both") predict(fit2,newX=newX,type="coefficients",vers="raw") predict(fit2,newX=newX,type="nonzero",vers="both")} } %------------------------------------------------- \author{ Fatma Sevinc KURNAZ, Irene HOFFMANN, Peter FILZMOSER \cr Maintainer: Fatma Sevinc KURNAZ <fatmasevinckurnaz@gmail.com>;<fskurnaz@yildiz.edu.tr>} %------------------------------------------------- \keyword{regression} \keyword{classification}
04fcb8af2b8b48ccc944fac7e3ecb6b3b7fae3c8
c0fd5a7cebb44b61640f8550b82a3e5fe4e37d7e
/cachematrix.R
dcc01dc3a4cac527d4f931e9945368655525ad55
[]
no_license
Diceman01/ProgrammingAssignment2
1cb91c4fb8b2d282c5912fbdc7ba4310e36a5c44
447e57cd8089cd251530e67eb8c0a501c90dcc53
refs/heads/master
2021-01-09T05:34:56.382290
2015-02-15T05:28:06
2015-02-15T05:28:06
30,817,368
0
0
null
2015-02-15T04:04:38
2015-02-15T04:04:38
null
UTF-8
R
false
false
2,865
r
cachematrix.R
## since the only difference between a vector and a matrix are the number of dimensions, ## I reason that the same code should work. So, I have basically just copied the example ## given verbatim, except I changed m to i, mean to inverse and, in cacheSolve, I changed the ## call to the mean function to be a call to the solve function. ## like the example, there are four nested/child function: get, set, getinverse, setinverse. ## i define each child function in turn, and then return a list of all four functions to the ## calling function . this function is usefully thought of as a constructor function. makeCacheMatrix <- function(x = matrix()) { ## set i to NULL, since it hasn't been used before. i <- NULL ## define the set function. get the new matrix and clear any cached inverse. set <- function(y) { x <<- y i <<- NULL } ## define the get function. just return the matrix (non-inverted) to the caller. get <- function() x ## define setinverse. the caller has already calculated the inverse of x and is ## giving it to us, so all that needs doing is to save the value setinverse <- function(inverse) i <<- inverse ## define getinverse. the caller wants to see what is cached for the inverse. ## just return whatever is stored. if it's a NULL, it's up to the caller to ## calculate the inverse and then store it later using setinverse. getinverse <- function () i ## return the list of functions to the caller list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## so cacheSolve is the way that regular folks are going to ask to solve the cacheMatrix. ## cacheSolve works by first seeing if an inverse has been cached by the cacheMatrix. if ## not, then it calculates the inverse (using regular solve) and saves its result to the ## cacheMatrix. it then returns the same value (the calculated inverse) to the calling ## function. cacheSolve <- function(x, ...) 
{ ## check to see if x has an inverse i <- x$getinverse() ## if it has an inverse, tell the user you're using cached data and return the ## cache contents if(!is.null(i)) { message("getting cached data") return(i) ## end of function execution if a cache was found } ## if we're here, then there was no cache found. time to build one. start by ## getting the non-inverted matrix. data <- x$get() ## now, invert it. i <- solve(data) ## don't forget to cache that inverse you worked so hard for. x$setinverse(i) ## tell the user what she got i }
7cf0dbaac5b2c4469ef5711285c0d7cce57e4a54
7b47cf68919d2f6592a185298bc951397cd2279d
/man/Plot.Rd
f790491d7f039380cb83d4a212a754d53769a472
[]
no_license
d3v3l0/xda
16e808b4cf30426f6e8abd75f2af61c3e5dc1b92
86cf14dbfaa96b805a702261e2b078052ccbab70
refs/heads/master
2021-09-04T15:43:02.930410
2018-01-20T00:48:44
2018-01-20T00:48:44
null
0
0
null
null
null
null
UTF-8
R
false
false
623
rd
Plot.Rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/Plot.R \name{Plot} \alias{Plot} \title{Plots all variables of a data frame against the specified dependant variable} \usage{ Plot(df, dep.var, range = "all") } \arguments{ \item{df}{name of the data frame} \item{dep.var}{name the dependant variable} \item{range}{specify which variables to plot using numeric range (default is 'all' which plots all variables)} } \value{ returns multiple plots } \description{ Plots all variables of a data frame against the specified dependant variable } \examples{ data(iris) Plot(iris,'Species') }
2aa1d798840632e6ab20bedf43715e2545cbd0d6
042de20ce89f4ee0781d2cc6fb4e5aed44260049
/Books_Apriori1.R
bbb63b9367f718e6cab0a83af8184c9a2ba6335c
[]
no_license
neeraj2296/Asscociation-Rules-ExcelR
ea6c25081cc42e59adcd285a564306742fc7a6dc
23a3b63f47060873dedc0f1d13fe0bd9c7a4ebe1
refs/heads/master
2022-07-05T23:28:45.645248
2020-05-15T17:15:37
2020-05-15T17:15:37
262,978,331
1
0
null
null
null
null
UTF-8
R
false
false
2,185
r
Books_Apriori1.R
#Including the necassary Libraries library(arules) library(arulesViz) #Loading the data set book <- read.csv(file.choose()) #Factorising the data for better classification book$ChildBks<-as.factor(book$ChildBks) book$YouthBks<-as.factor(book$YouthBks) book$CookBks<-as.factor(book$CookBks) book$DoItYBks<-as.factor(book$DoItYBks) book$RefBks<-as.factor(book$RefBks) book$ArtBks<-as.factor(book$ArtBks) book$GeogBks<-as.factor(book$GeogBks) book$ItalCook<-as.factor(book$ItalCook) book$ItalAtlas<-as.factor(book$ItalAtlas) book$ItalArt<-as.factor(book$ItalArt) book$Florence<-as.factor(book$Florence) #rules$conf str(book) #Applying the Apriori Alogithm and figuring out rules rules = apriori(book) arules::inspect(rule) rules.sorted<-sort(rules, by = 'lift') arules::inspect(rules) #Visualising the rules plot(rules) summary(book) # rules with rhs containing CookBks only rules = apriori(book,parameter = list(minlen = 1,supp = 0.11,conf = 0.5),appearance = list(rhs = "CookBks=1")) plot(rules, jitter = 0) #Summarised view f rules with rhs having CookBks only summary(rules) arules::inspect(rules) ?apriori inspect(rules) # rules with rhs containing CookBks only & with all others taken as bought( i.e. 1) rules = apriori(book,parameter = list(minlen = 1,supp = 0.1,conf = 0.3),appearance = list(rhs = c("CookBks=1"),lhs = c("RefBks=1","ArtBks=1","YouthBks=1","GeogBks=1","DoItYBks=1","ChildBks=1","ItalArt=1","ItalAtlas=1","ItalCook=1"), default ="none")) plot(rules, jitter = 0) #plot(rules,method='grouped') #plot(rules,method = 'graph',control = list(type='items')) summary(rules) arules::inspect(rules) ?apriori summary(rules) inspect(rules) #Finding Redundant Rules. 
subset.matrix<-is.subset(rules,rules, sparse = FALSE) #subset.matrix <- is.subset(rules.sorted, rules.sorted, sparse = FALSE) subset.matrix[lower.tri(subset.matrix,diag = T)]<-NA redundant<-colSums(subset.matrix,na.rm = T)>=1.65 which(redundant) #Removing Redundant Rules rules.pruned<-rules[!redundant] rules.pruned<-sort(rules.pruned, by='lift') inspect(rules.pruned) plot(rules.pruned) plot(rules.pruned, method = 'grouped')
9107e541cc2c15faa4623322f5160adcb71d2c8f
52694abcc9168ef0ffcd6a428382102c521278f8
/SKRYPTY/MODELING/scripts/fcu/scripts/dist_alert_check.R
79eec7ef0fe56397d6a4bef520fa8d557234f0b7
[]
no_license
MMandziej/magisterka
7d18fa0e1a9a437c4235a912aa0733530fb45e3f
33453fbbd7ede2e5ccb2771e8a3029a927a844c5
refs/heads/master
2023-02-13T04:44:41.581638
2021-01-21T23:37:26
2021-01-21T23:37:26
322,721,319
0
0
null
null
null
null
UTF-8
R
false
false
3,569
r
dist_alert_check.R
dist_alert_check <- function(results, dataset, time_feat='BackupTime', time_frame='days') { unique_intervals <- sort(unique(results$pqc_timestamp)) mean_lastn <- mean(results[results$pqc_timestamp == unique_intervals[length(unique_intervals)], ][['pqc']]) quants <- quantile(results[results$pqc_timestamp < unique_intervals[length(unique_intervals)], ][['pqc']], probs=c(0.25, 0.75)) if(mean_lastn > quants[1] & mean_lastn < quants[2] ) { alert = HTML('<font color=\"#2ab860\"><b>Average score from current scoring within IQR on full production backlog.</b></font>') } else { alert = HTML('<font color=\"#db0000\"><b>Average score from current scoring outside IQR on full production backlog.</b></font>') } features_intervals <- sort(unique(dataset$BackupTime)) features <- c( "TLAssignedName", "ProcessingUnit", "CDDRiskLevel", "FATCA", "CRS", "ScreenedParties", "OwnershipLayers", "ESR", "PartyType", "GroupCases", "FirstGroupCase", "PopulationMatch", "HourNumeric", "Weekday", "Cases_last_5_days_of_DR", "Cases_last_5_days_of_PC", "Cases_last_30_days_of_DR", "Cases_last_30_days_of_PC", "Minor_last_5_checklistsDR", "Major_last_5_checklistsDR", "Critical_last_5_checklistsDR", "Minor_last_10_checklistsDR", "Major_last_10_checklistsDR", "Critical_last_10_checklistsDR", "Minor_last_5_checklistsPC", "Major_last_5_checklistsPC", "Critical_last_5_checklistsPC", "Minor_last_10_checklistsPC", "Major_last_10_checklistsPC", "Critical_last_10_checklistsPC", "ProjectExperience", "TeamExperience") count_diff = 0 count_viol = 0 for(i in features) { decision <- tryCatch( { dist_check_new(dataset = general_data, time_feat = 'BackupTime', feature = i, time_frame = 'days', #weeks/days selected_time = as.POSIXct(features_intervals[length(features_intervals)]), # as.POSIXct selected_lag_time = as.POSIXct(features_intervals[length(features_intervals)-1])) # as.POSIXct }, error=function(cond) { #message(cond) count_viol = count_viol + 1 return("Statistical assumptions for Chi-Squared homogenity tests 
were violated") }) if (grepl("Statistically significant difference between", decision) == T) { count_diff = count_diff + 1 } else if (grepl("assumptions for Chi-Squared homogenity tests were violated", decision) == T) { count_viol = count_viol + 1 } } if(count_viol > 0.2 * length(features)) { out1 <- HTML(paste("<font color=\"#eb9234\"><b>Chi-square / Kolmogorov Smirnovow test assumptions violated for ", count_viol, " out of ", length(features), " features in current and lagged period.</b></font>")) } else { #out1 <- HTML("<font color=\"#2ab860\"><b>Chi-square / Kolmogorov Smirnovow tests assumptions satisfied.</b></font>") out1 <- "" } if(count_diff > 0.2 * length(features)) { if(out1 == "") { out2 <- HTML(paste("<font color=\"#db0000\"><b>Detected statistically significant difffenrce in distribution for ", count_diff, " out of ", length(features), " features.</b></font>")) } else { out2 <- HTML("<font color=\"#2ab860\"><b>Features distribution stable between current and lagged periods.</b></font>") } } else { out2 <- "" } valuelist <- list(out1, out2, alert) return(valuelist) }
c0afdc757b830e7f7ab3b4e493f2491a1eaf6a6e
151db985686c4db43f0b910b1b17694a62be5f00
/man/peak_type-set.Rd
ece85505fb77b7891fe9b3a1f594bf52862c49d7
[]
no_license
SPKorhonen/rnmrfit
e58c331ccaa707a9666735cb6a8b5cd124c73e0e
6bcd1b815924cf042e4abb51d8079f7b6ede6b7a
refs/heads/master
2021-02-21T22:53:15.977807
2019-05-15T14:07:56
2019-05-15T14:07:56
null
0
0
null
null
null
null
UTF-8
R
false
true
753
rd
peak_type-set.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NMRScaffold.R, R/NMRScaffold1D.R, R/NMRFit1D.R \docType{methods} \name{peak_type-set} \alias{peak_type-set} \alias{peak_type<-,NMRScaffold-method} \alias{peak_type<-,NMRScaffold1D-method} \alias{peak_type<-,NMRFit1D-method} \title{Replace the "peak_type" slot of an NMRScaffold object} \usage{ peak_type(object) <- value \S4method{peak_type}{NMRScaffold}(object) <- value \S4method{peak_type}{NMRScaffold1D}(object) <- value \S4method{peak_type}{NMRFit1D}(object) <- value } \description{ Generic method to replace the peak_type of NMRScaffold1D or NMRScaffold2D object. This is a convenience function that makes some assumptions, see set_peak_type() for more details. }
b7c790e474d8dfcecf9d8ba19861cf437c4ae0af
34658f9b94484ac01746700eab7935cc2299e2da
/plot2.R
0bef2b28a574f751a4f65e7651cf25fa25cbc597
[]
no_license
mshonman/ExData_Plotting1
f47c377f1c81b8ca9d8f18f6cd3b0218773eea1a
c789b755afef262cdc552f968fe715cca3119dd3
refs/heads/master
2020-12-11T09:08:18.162882
2016-03-07T22:31:00
2016-03-07T22:31:00
53,270,460
0
0
null
2016-03-06T18:51:10
2016-03-06T18:51:09
null
UTF-8
R
false
false
491
r
plot2.R
setwd("data") power <- read.table(file = "household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?") power$Date <- as.Date(power$Date, format = "%d/%m/%Y") power <- power[(power$Date == "2007-02-01" | power$Date == "2007-02-02"), ] power$datetime <- as.POSIXct(paste(power$Date, power$Time), format = "%Y-%m-%d %H:%M:%S") png(filename = "plot2.png") plot(power$Global_active_power ~ power$datetime, type="l", xlab = "", ylab = "Global Active Power (kilowatts)") dev.off()
9395489bbe43787717a5fac7ba0d2020fbd388d7
d43b33efc250140edd1c59a1050ef587921f49e6
/man/analysis-methods.Rd
f3725cb72a04bc1fa4f118695e9cf02409476b2f
[]
no_license
nealrichardson/rcrunch
0ab449afb433e29771d82d9ce493a4c324fe7cba
6cfa14655a05d80baa793e204f22b9388243bfc0
refs/heads/main
2023-02-11T03:16:07.684934
2020-09-30T17:22:12
2020-09-30T17:22:12
328,219,695
0
0
null
2021-01-09T18:25:03
2021-01-09T18:25:02
null
UTF-8
R
false
true
2,838
rd
analysis-methods.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllGenerics.R, R/decks.R, R/slides.R \name{filter} \alias{filter} \alias{filter<-} \alias{filter<-,CrunchDeck,ANY-method} \alias{analyses} \alias{analysis} \alias{analysis<-} \alias{query<-} \alias{cube} \alias{cubes} \alias{analyses,CrunchSlide-method} \alias{analysis,CrunchSlide-method} \alias{analysis<-,CrunchSlide,formula-method} \alias{analysis<-,CrunchSlide,Analysis-method} \alias{filter,CrunchSlide-method} \alias{filter<-,CrunchSlide,ANY-method} \alias{query<-,CrunchSlide,ANY-method} \alias{cubes,CrunchSlide-method} \alias{cube,CrunchSlide-method} \alias{cubes,AnalysisCatalog-method} \alias{query<-,Analysis,formula-method} \alias{cube,Analysis-method} \alias{filter,Analysis-method} \alias{filter,ANY-method} \alias{filter<-,Analysis,CrunchLogicalExpr-method} \alias{filter<-,Analysis,CrunchFilter-method} \alias{filter<-,Analysis,NULL-method} \title{Get and set slide analyses} \usage{ filter(x, ...) filter(x) <- value \S4method{filter}{CrunchDeck,ANY}(x) <- value analyses(x) analysis(x) analysis(x) <- value query(x) <- value cube(x) cubes(x) \S4method{analyses}{CrunchSlide}(x) \S4method{analysis}{CrunchSlide}(x) \S4method{analysis}{CrunchSlide,formula}(x) <- value \S4method{analysis}{CrunchSlide,Analysis}(x) <- value \S4method{filter}{CrunchSlide}(x, ...) \S4method{filter}{CrunchSlide,ANY}(x) <- value \S4method{query}{CrunchSlide,ANY}(x) <- value \S4method{cubes}{CrunchSlide}(x) \S4method{cube}{CrunchSlide}(x) \S4method{cubes}{AnalysisCatalog}(x) \S4method{query}{Analysis,formula}(x) <- value \S4method{cube}{Analysis}(x) \S4method{filter}{Analysis}(x, ...) \S4method{filter}{ANY}(x, ...) 
\S4method{filter}{CrunchSlide,ANY}(x) <- value \S4method{filter}{Analysis,CrunchLogicalExpr}(x) <- value \S4method{filter}{Analysis,CrunchFilter}(x) <- value \S4method{filter}{Analysis,`NULL`}(x) <- value } \arguments{ \item{x}{a \code{CrunchSlide}, \code{AnalysisCatalog}, or \code{Analysis}} \item{...}{ignored} \item{value}{for the setter, a query} } \value{ an \code{AnalysisCatalog}, \code{Analysis}, \code{Cube}, or \code{Filter} } \description{ Slides are composed of analyses, which are effectively \code{CrunchCubes} with some additional metadata. You can get and set a slide's Analysis Catalog with the \code{analyses} method, and access an individual analysis with \code{analysis}. } \details{ You can get the \code{CrunchCube} from a slide or analysis with the \code{cube} method and from a \code{CrunchDeck} with \code{cubes}. Analyses can be changed by assigning a formula into the \code{query} function. } \examples{ \dontrun{ analysis(slide) cube(slide) cubes(deck) query(slide) <- ~ cyl + wt filter(slide) filter(slide) <- NULL # to remove a filter filter(slide) <- filters(ds)[["My filter"]] } }
ea72371a817e72a95f19dbdaf6d8c2304294693b
9e1a1205a77b27a9ce8c607734ae0d63acdee1fe
/cachematrix.R
3fa3fd6b3f07370341e86daee58230e6b05c0756
[]
no_license
ccharles/ProgrammingAssignment2
e4d34d0281984af49824ed53749b837e3e2456fe
d9380932e4deb9b187ca6bbcef327aff3564c99f
refs/heads/master
2021-01-14T14:22:55.124471
2014-12-03T00:39:58
2014-12-03T00:39:58
null
0
0
null
null
null
null
UTF-8
R
false
false
2,922
r
cachematrix.R
# # This package defines "cache matrices", matrix-like objects that wrap around # a native R matrix and can cache their computed inverse. # # The cacheSolve() function defined below should be used in place of solve() # when working with cache matrices. # # --------------------------------------------------------------------------- # # Functions in this package are documented with comments *inside the # function definition*, as suggested in Google's R style guide: # # https://google-styleguide.googlecode.com/svn/trunk/Rguide.xml # makeCacheMatrix <- function(x = matrix()) { # Create a cache matrix, wrapping around the passed R matrix. # # Args: # x: The native R matrix that should be wrapped. # # Returns: # A list of functions that can be used to manipulate the cache matrix: # get(), set(), getInverse(), and setInverse(). cachedInverse <- NULL set <- function(y) { # Set the R matrix wrapped by this object. # # Args: # y: The new R matrix that should be wrapped. # # Returns: # NULL x <<- y cachedInverse <<- NULL } get <- function() { # Get the R matrix wrapped by this object. # # Args: # None # # Returns: # The native R matrix wrapped by this object. x } setInverse <- function(inverse) { # Set this object's cached inverse. # # Args: # The inverse to cache. # # Returns: # The inverse matrix. cachedInverse <<- inverse } getInverse <- function() { # Get this object's cached inverse. # # Args: # None # # Returns: # The cached inverse matrix, or NULL. cachedInverse } # Return references to the four functions defined above so they can be # called externally. list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } cacheSolve <- function(x, ...) { # Get the inverse of a cache matrix, using the cached value if available, # and computing and caching it if not. # # Args: # x: The cache matrix whose inverse should be computed. # ...: Extra arguments to be passed directly into solve(). # # Returns: # The inverse matrix of x. 
Raises an error if the inverse does not # exist. # Get the cached inverse... inverse <- x$getInverse() # ...and return it if it's not null. if (!is.null(inverse)) { message("Using cached data") return(inverse) } # If we get this far there was no cached inverse, so let's compute it # using the regular solve() function... mtrx <- x$get() inverse <- solve(mtrx, ...) # ...and cache it for next time. x$setInverse(inverse) inverse }
89bd981161f26bc72b5972dd555d9d35c4fc5414
154f590295a74e1ca8cdde49ecbb9cbb0992147e
/man/dh20.Rd
38a37d339d4433eef13b67575c07026761a986be
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-public-domain-disclaimer", "CC0-1.0" ]
permissive
klingerf2/EflowStats
2e57df72e154581de2df3d5de3ebd94c3da0dedf
73891ea7da73a274227212a2ca829084149a2906
refs/heads/master
2017-12-07T10:47:25.943426
2016-12-28T20:52:42
2016-12-28T20:52:42
null
0
0
null
null
null
null
UTF-8
R
false
true
876
rd
dh20.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dh20.R \name{dh20} \alias{dh20} \title{Function to return the DH20 hydrologic indicator statistic for a given data frame} \usage{ dh20(qfiletempf) } \arguments{ \item{qfiletempf}{data frame containing a "discharge" column containing daily flow values} } \value{ dh20 list containing DH20 for the given data frame } \description{ This function accepts a data frame that contains a column named "discharge" and calculates DH20; High flow duration. Compute the 75th percentile value for the entire flow record. Compute the average duration of flow events with flows above a threshold equal to the 75th percentile value for the median annual flows. DH20 is the average (or median-Use Preference option) duration of the events (days-temporal). } \examples{ qfiletempf<-sampleData dh20(qfiletempf) }
3d6e8f41bbbc8c7ee4dc3ba1c454b2e7a8ef05ba
1fe137d59453126db52cbdc39ac68c5c6c3719fa
/Boxplot_nb.R
df356acfa3edd67d2a601d62364c5d9d6a7caa30
[]
no_license
nbumkim/R_import
672325d8d5922e0357f30215cfb245df4b1fc1aa
c5a27bbbe9dad01cb99c78c60e0635922dd6d562
refs/heads/master
2020-03-21T12:38:50.856743
2018-08-03T09:25:21
2018-08-03T09:25:21
138,564,319
0
0
null
null
null
null
UHC
R
false
false
1,661
r
Boxplot_nb.R
## Data import ########################### # setwd("D:/kmong/Pointcc/R") # mydata <- read.csv("type_R.csv", header = TRUE, stringsAsFactors = T) # read the csv data # mydata <- mydata[complete.cases(mydata),] # > head(mydata) # 배열타입 각도 측면부 대상종 소요시간 이동거리 # 1 A 45 개방 무당개구리 43 54 # 2 A 45 개방 무당개구리 78 40 # 3 A 45 개방 무당개구리 24 20 # 4 A 45 개방 무당개구리 24 70 # 5 A 45 개방 무당개구리 164 14 # 6 A 45 개방 무당개구리 44 55 ## Color setting library(RColorBrewer) attach(mydata) par(cex = 1, cex.main = 1.5, ps =14, cex.lab = 2) # label size par(mar=c(4.1, 4.1, 4.1, 9.1), xpd=TRUE) # Graph margin size e.g., right side legend cols <- rainbow(3, s=0.7, v=1.0, alpha=0.5) # color set #brewer.pal(n = 1, name = "Set1") boxplot(소요시간 ~ 대상종+배열타입, main = "소요시간", outline = TRUE, las = 1, at = c(1:3, 5:7, 9:11), col=cols, names = c("", "A", "", "", "B", "","", "C", ""), xlab = " ", ylab = "", #xaxs = FALSE, ylim=c(0, max(소요시간)+20) ) stripchart(소요시간 ~ 대상종+배열타입, vertical = TRUE, method = "jitter", pch = 21, col = "maroon", bg = "bisque", at = c(1:3, 5:7, 9:11), add = TRUE) legend("topright", fill = cols, legend = levels(mydata$대상종), horiz = F, bty ="n", inset=c(-0.25,0))
607cc9e4cb76d81963806d987b6d1a4174b7cf1a
1e5fc0d317afb80ae142116af31b8c181e0bd71b
/Course Project 2 - Week 4/plot1.R
8bedf38cc440df5dc91b24bbf4294909954effaa
[]
no_license
anjanaxramesh/Exploratory-Data-Analysis-by-JHU
0f5c5242f71a51c208f39439db78d424b6cf190a
a025e219ae6050a08a6b2c280bba4a9abc60dfca
refs/heads/master
2022-11-18T22:12:32.062867
2020-07-20T17:36:04
2020-07-20T17:36:04
278,869,880
0
0
null
null
null
null
UTF-8
R
false
false
710
r
plot1.R
# Plot 1 path <- getwd() unzip(zipfile = "exdata_data_NEI_data.zip", exdir = path) NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") # Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? # Using the base plotting system, make a plot showing the total PM2.5 emission from all sources # for each of the years 1999, 2002, 2005, and 2008. aggregateTotalEmissions <- aggregate(Emissions ~ year, NEI, sum) png("plot1.png") barplot(height = aggregateTotalEmissions$Emissions, names.arg = aggregateTotalEmissions$year, width = 1, xlab = "Year", ylab = "Total PM2.5 Emission", main = "Total PM2.5 Emissions Over Various Years") dev.off()
6a2965f16bec2754e283751633e96e4d4ee1245c
c66ba8cdf2085e958bed5c9f6fc2d01993c3dd7d
/R/lvclpm.R
de776d81e0ea7ebdac3e0544b0478589069a2fbd
[]
no_license
mkearney/ijpp_osror
40d3ace6aa445d367a90b02d55cf064cf81e7d72
050c287c818c619982aae553b23af71a74d2b99b
refs/heads/master
2021-01-21T22:14:51.217323
2019-04-25T16:02:55
2019-04-25T16:02:55
102,139,225
1
0
null
null
null
null
UTF-8
R
false
false
12,997
r
lvclpm.R
names(Pnl3) Pnl3$PressA <- (Pnl3$PrintA + Pnl3$DigA + Pnl3$CableA)/3 Pnl3$PressB <- (Pnl3$PrintB + Pnl3$DigB + Pnl3$CableB)/3 Pnl3$PressC <- (Pnl3$PrintC + Pnl3$DigC + Pnl3$CableC)/3 describe(Pnl3$PressA) describe(Pnl3$PressB) describe(Pnl3$PressC) Pnl3$PTA <- (Pnl3$PT1a + Pnl3$PT2a + Pnl3$PT3a)/3 Pnl3$PTB <- (Pnl3$PT1b + Pnl3$PT2b + Pnl3$PT3b)/3 Pnl3$PTC <- (Pnl3$PT1c + Pnl3$PT2c + Pnl3$PT3c)/3 describe(Pnl3$PTA) describe(Pnl3$PTB) describe(Pnl3$PTC) #Describe Sample# describe(Pnl3$Age) table(Pnl3$Gender) table(Pnl3$Party7) table(Pnl3$Race) table(Pnl3$BA) table(Pnl3$Edu) table(Pnl3$Inc) table(Pnl3$Ideol) table(Pnl3$PrtSt) describe(Pnl3$PI1a) describe(Pnl3$Age) table(AugR$Gender) table(AugR$Party7) table(Pnl3$Race) table(AugR$BA) table(Pnl3$Edu) table(AugR$Inc) table(Pnl3$Ideol) table(Pnl3$PrtSt) describe(AugR$Party7) describe(AugR$Ideol) describe(AugR$PI_1) describe(Pnl3$Age) table(Pnl$Gender) table(AugR$Party7) table(Pnl$Race) table(Pnl$BA) table(Pnl$Edu) table(Pnl$Inc) table(Pnl3$Ideol) table(Pnl3$PrtSt) describe(Pnl$Party7) describe(Pnl$Ideol) describe(Pnl$PI_1a) #Random Variable Working# Pnl3$Female <- recode(Pnl3$Gender, "1=0;2=1") Pnl3$AfAm <- recode(Pnl3$Race, "1=1;2=0;3=0;4=0;5=0;6=0;7=0;8=0;9=0") Pnl3$Asian <- recode(Pnl3$Race, "1=0;2=0;3=1;4=0;5=0;6=0;7=0;8=0;9=0") Pnl3$Hisp <- recode(Pnl3$Race, "1=0;2=0;3=0;4=0;5=0;6=0;7=0;8=1;9=0") Pnl3$OthRace <- recode(Pnl3$Race, "1=0;2=0;3=0;4=0;5=1;6=1;7=1;8=1;9=0") Pnl3$BA <- recode(Pnl3$BA, "1=1;2=0") Pnl3$Inc <- recode(Pnl3$Inc, "1=1;2=2;3=3;4=4;5=5;6=6;7=7;8=8;9=9;10=NA") Pnl3$PrtSt <- recode(Pnl3$Party7, "1=3;2=2;3=1;4=0;5=1;6=2;7=3") Pnl3$Party3 <- recode(Pnl3$Party7, "1=1;2=1;3=1;4=0;5=2;6=2;7=2") Pnl3$PartyTie <- recode(Pnl3$PartyTie, "1=1;2=2;NA=0") table(Pnl3$Party3) table(Pnl3$PartyTie) Pnl3$Party2 <- Pnl3$Party3 + Pnl3$PartyTie table(Pnl3$Party2) Pnl3$Dem <- recode(Pnl3$Party2, "1=1;2=0") Pnl3$Rep <- recode(Pnl3$Party2, "1=0;2=1") Pnl3$OGftA1 <- Pnl3$FT_3*Pnl3$Rep Pnl3$OGftA2 <- Pnl3$FT_6*Pnl3$Dem 
Pnl3$OGftA <- Pnl3$OGftA1 + Pnl3$OGftA2 describe(Pnl3$OGftA) table(Pnl3$OGftA) #Create Press Variables# Pnl3$PrintA <- (Pnl3$NwsPprLa + Pnl3$NwsPprCa)/2 Pnl3$DigA <- (Pnl3$ConBloga + Pnl3$LibBloga + Pnl3$OnLinea)/3 Pnl3$CableA <- (Pnl3$Foxa + Pnl3$MSNBCa + Pnl3$CNNa)/3 Pnl3$PrintB <- (Pnl3$NwsPprLb + Pnl3$NwsPprCb)/2 Pnl3$DigB <- (Pnl3$ConBlogb + Pnl3$LibBlogb + Pnl3$OnLineb)/3 Pnl3$CableB <- (Pnl3$Foxb + Pnl3$MSNBCb + Pnl3$CNNb)/3 Pnl3$PrintC <- (Pnl3$NwsPprLc + Pnl3$NswPprCc)/2 Pnl3$DigC <- (Pnl3$ConBlogc + Pnl3$LibBlogc + Pnl3$OnLinec)/3 Pnl3$CableC <- (Pnl3$Foxc + Pnl3$MSNBCc + Pnl3$CNNc)/3 ## Null Model ## IJPP0 <- ' ## no change in variances over time, no covariances PrintA ~~ V1*PrintA DigA ~~ V2*DigA CableA ~~ V3*CableA PT1a ~~ V4*PT1a PT2a ~~ V5*PT2a PT3a ~~ V6*PT3a PrintB ~~ V1*PrintB DigB ~~ V2*DigB CableB ~~ V3*CableB PT1b ~~ V4*PT1b PT2b ~~ V5*PT2b PT3b ~~ V6*PT3b PrintC ~~ V1*PrintC DigC ~~ V2*DigC CableC ~~ V3*CableC PT1c ~~ V4*PT1c PT2c ~~ V5*PT2c PT3c ~~ V6*PT3c ## no change in means over time PrintA ~ T1*1 DigA ~ T2*1 CableA ~ T3*1 PT1a ~ T4*1 PT2a ~ T5*1 PT3a ~ T6*1 PrintB ~ T1*1 DigB ~ T2*1 CableB ~ T3*1 PT1b ~ T4*1 PT2b ~ T5*1 PT3b ~ T6*1 PrintC ~ T1*1 DigC ~ T2*1 CableC ~ T3*1 PT1c ~ T4*1 PT2c ~ T5*1 PT3c ~ T6*1 ' fit0.0 <- lavaan(IJPP0, data=Pnl3, orthogonal=T, missing="fiml", , estimator="MLR") summary(fit0.0, fit=T) ## Configural Invariance ## IJPP0.1 <- ' #define latent variables PressA =~ p11*PrintA + p12*DigA + p13*CableA PressB =~ p21*PrintB + p22*DigB + p23*CableB PressC =~ p31*PrintC +p32* DigC + p33*CableC PTA =~ t11*PT1a + t12*PT2a + t13*PT3a PTB =~ t21*PT1b + t22*PT2b + t23*PT3b PTC =~ t31*PT1c + t32*PT2c + t33*PT3c #residuals PrintA ~~ PrintA PrintB ~~ PrintB PrintC ~~ PrintC DigA ~~ DigA DigB ~~ DigB DigC ~~ DigC CableA ~~ CableA CableB ~~ CableB CableC ~~ CableC PT1a ~~ PT1a PT1b ~~ PT1b PT1c ~~ PT1c PT2a ~~ PT2a PT2b ~~ PT2b PT2c ~~ PT2c PT3a ~~ PT3a PT3b ~~ PT3b PT3c ~~ PT3c #correlated residuals accross time PrintA ~~ PrintB 
+ PrintC DigA ~~ DigB + DigC CableA ~~ CableB + CableC PrintB ~~ PrintC DigB ~~ DigC CableB ~~ CableC PT1a ~~ PT1b + PT1c PT2a ~~ PT2b + PT2c PT3a ~~ PT3b + PT3c PT1b ~~ PT1c PT2b ~~ PT2c PT3b ~~ PT3c #intercepts PrintA ~ P1*1 DigA ~ P2*1 CableA ~ P3*1 PT1a ~ T1*1 PT2a ~ T2*1 PT3a ~ T3*1 PrintB ~ P4*1 DigB ~ P5*1 CableB ~ P6*1 PT1b ~ T4*1 PT2b ~ T5*1 PT3b ~ T6*1 PrintC ~ P7*1 DigC ~ P8*1 CableC ~ P9*1 PT1c ~ T7*1 PT2c ~ T8*1 PT3c ~ T9*1 #latent variances PressA ~~ PressA PressB ~~ PressB PressC ~~ PressC PTA ~~ PTA PTB ~~ PTB PTC ~~ PTC #latent co-variances PressA ~~ PressB + PressC + PTA + PTB + PTC PressB ~~ PressC + PTA + PTB + PTC PressC ~~ PTA + PTB + PTC PTA ~~ PTB + PTC PTB ~~ PTC #latent means PressA ~ 1 PressB ~ 1 PressC ~ 1 PTA ~ 1 PTB ~ 1 PTC ~ 1 #constraints for effects coding p11 == 3 - p12 - p13 t11 == 3 - t12 - t13 p21 == 3 - p22 - p23 t21 == 3 - t22 - t23 p31 == 3 - p32 - p33 t31 == 3 - t32 - t33 P1 == 0 - P2 - P3 T1 == 0 - T2 - T3 P4 == 0 - P5 - P6 T4 == 0 - T5 - T6 P7 == 0 - P8 - P9 T7 == 0 - T8 - T9 ' fit0.1 <- lavaan(IJPP0.1, data=Pnl3, std.lv=F, auto.fix.first=F, missing="fiml", estimator="MLR") summary(fit0.1, standardized=T, fit=T) #Adj CFI = .989 #Adj TLI = .980 ## Loading Invariance ## IJPP0.2 <- ' #define latent variables PressA =~ p11*PrintA + p12*DigA + p13*CableA PressB =~ p11*PrintB + p12*DigB + p13*CableB PressC =~ p11*PrintC +p12* DigC + p13*CableC PTA =~ t11*PT1a + t12*PT2a + t13*PT3a PTB =~ t11*PT1b + t12*PT2b + t13*PT3b PTC =~ t11*PT1c + t12*PT2c + t13*PT3c #residuals PrintA ~~ PrintA PrintB ~~ PrintB PrintC ~~ PrintC DigA ~~ DigA DigB ~~ DigB DigC ~~ DigC CableA ~~ CableA CableB ~~ CableB CableC ~~ CableC PT1a ~~ PT1a PT1b ~~ PT1b PT1c ~~ PT1c PT2a ~~ PT2a PT2b ~~ PT2b PT2c ~~ PT2c PT3a ~~ PT3a PT3b ~~ PT3b PT3c ~~ PT3c #correlated residuals accross time PrintA ~~ PrintB + PrintC DigA ~~ DigB + DigC CableA ~~ CableB + CableC PrintB ~~ PrintC DigB ~~ DigC CableB ~~ CableC PT1a ~~ PT1b + PT1c PT2a ~~ PT2b + PT2c PT3a ~~ PT3b + PT3c 
PT1b ~~ PT1c PT2b ~~ PT2c PT3b ~~ PT3c #intercepts PrintA ~ P1*1 DigA ~ P2*1 CableA ~ P3*1 PT1a ~ T1*1 PT2a ~ T2*1 PT3a ~ T3*1 PrintB ~ P4*1 DigB ~ P5*1 CableB ~ P6*1 PT1b ~ T4*1 PT2b ~ T5*1 PT3b ~ T6*1 PrintC ~ P7*1 DigC ~ P8*1 CableC ~ P9*1 PT1c ~ T7*1 PT2c ~ T8*1 PT3c ~ T9*1 #latent variances PressA ~~ PressA PressB ~~ PressB PressC ~~ PressC PTA ~~ PTA PTB ~~ PTB PTC ~~ PTC #latent co-variances PressA ~~ PressB + PressC + PTA + PTB + PTC PressB ~~ PressC + PTA + PTB + PTC PressC ~~ PTA + PTB + PTC PTA ~~ PTB + PTC PTB ~~ PTC #latent means PressA ~ 1 PressB ~ 1 PressC ~ 1 PTA ~ 1 PTB ~ 1 PTC ~ 1 #constraints for effects coding p11 == 3 - p12 - p13 t11 == 3 - t12 - t13 P1 == 0 - P2 - P3 T1 == 0 - T2 - T3 P4 == 0 - P5 - P6 T4 == 0 - T5 - T6 P7 == 0 - P8 - P9 T7 == 0 - T8 - T9 ' fit0.2 <- lavaan(IJPP0.2, data=Pnl3, std.lv=F, auto.fix.first=F, missing="fiml", estimator="MLR") summary(fit0.2, standardized=T, fit=T) ## Intercept Invariance ## IJPP0.3 <- ' #define latent variables PressA =~ p11*PrintA + p12*DigA + p13*CableA PressB =~ p11*PrintB + p12*DigB + p13*CableB PressC =~ p11*PrintC +p12* DigC + p13*CableC PTA =~ t11*PT1a + t12*PT2a + t13*PT3a PTB =~ t11*PT1b + t12*PT2b + t13*PT3b PTC =~ t11*PT1c + t12*PT2c + t13*PT3c #residuals PrintA ~~ PrintA PrintB ~~ PrintB PrintC ~~ PrintC DigA ~~ DigA DigB ~~ DigB DigC ~~ DigC CableA ~~ CableA CableB ~~ CableB CableC ~~ CableC PT1a ~~ PT1a PT1b ~~ PT1b PT1c ~~ PT1c PT2a ~~ PT2a PT2b ~~ PT2b PT2c ~~ PT2c PT3a ~~ PT3a PT3b ~~ PT3b PT3c ~~ PT3c #correlated residuals accross time PrintA ~~ PrintB + PrintC DigA ~~ DigB + DigC CableA ~~ CableB + CableC PrintB ~~ PrintC DigB ~~ DigC CableB ~~ CableC PT1a ~~ PT1b + PT1c PT2a ~~ PT2b + PT2c PT3a ~~ PT3b + PT3c PT1b ~~ PT1c PT2b ~~ PT2c PT3b ~~ PT3c #intercepts PrintA ~ P1*1 DigA ~ P2*1 CableA ~ P3*1 PT1a ~ T1*1 PT2a ~ T2*1 PT3a ~ T3*1 PrintB ~ P1*1 DigB ~ P2*1 CableB ~ P3*1 PT1b ~ T1*1 PT2b ~ T2*1 PT3b ~ T3*1 PrintC ~ P1*1 DigC ~ P2*1 CableC ~ P3*1 PT1c ~ T1*1 PT2c ~ T2*1 PT3c ~ 
T3*1 #latent variances PressA ~~ PressA PressB ~~ PressB PressC ~~ PressC PTA ~~ PTA PTB ~~ PTB PTC ~~ PTC #latent co-variances PressA ~~ PressB + PressC + PTA + PTB + PTC PressB ~~ PressC + PTA + PTB + PTC PressC ~~ PTA + PTB + PTC PTA ~~ PTB + PTC PTB ~~ PTC #latent means PressA ~ 1 PressB ~ 1 PressC ~ 1 PTA ~ 1 PTB ~ 1 PTC ~ 1 #constraints for effects coding p11 == 3 - p12 - p13 t11 == 3 - t12 - t13 P1 == 0 - P2 - P3 T1 == 0 - T2 - T3 ' fit0.3 <- lavaan(IJPP0.3, data=Pnl3, std.lv=F, auto.fix.first=F, missing="fiml", estimator="MLR") summary(fit0.3, standardized=T, fit=T) ## Regressions ## IJPP1.0 <- ' PTC ~ PTB + PressB PressC ~ PressB + PTB PTB ~ PTA + PressA PressB ~ PressA + PTA PressA ~~ PTA PressB ~~ PTB PressC ~~ PTC #define latent variables PressA =~ p11*PrintA + p12*DigA + p13*CableA PressB =~ p11*PrintB + p12*DigB + p13*CableB PressC =~ p11*PrintC +p12* DigC + p13*CableC PTA =~ t11*PT1a + t12*PT2a + t13*PT3a PTB =~ t11*PT1b + t12*PT2b + t13*PT3b PTC =~ t11*PT1c + t12*PT2c + t13*PT3c #residuals PrintA ~~ PrintA PrintB ~~ PrintB PrintC ~~ PrintC DigA ~~ DigA DigB ~~ DigB DigC ~~ DigC CableA ~~ CableA CableB ~~ CableB CableC ~~ CableC PT1a ~~ PT1a PT1b ~~ PT1b PT1c ~~ PT1c PT2a ~~ PT2a PT2b ~~ PT2b PT2c ~~ PT2c PT3a ~~ PT3a PT3b ~~ PT3b PT3c ~~ PT3c #correlated residuals accross time PrintA ~~ PrintB + PrintC DigA ~~ DigB + DigC CableA ~~ CableB + CableC PrintB ~~ PrintC DigB ~~ DigC CableB ~~ CableC PT1a ~~ PT1b + PT1c PT2a ~~ PT2b + PT2c PT3a ~~ PT3b + PT3c PT1b ~~ PT1c PT2b ~~ PT2c PT3b ~~ PT3c #intercepts PrintA ~ P1*1 DigA ~ P2*1 CableA ~ P3*1 PT1a ~ T1*1 PT2a ~ T2*1 PT3a ~ T3*1 PrintB ~ P1*1 DigB ~ P2*1 CableB ~ P3*1 PT1b ~ T1*1 PT2b ~ T2*1 PT3b ~ T3*1 PrintC ~ P1*1 DigC ~ P2*1 CableC ~ P3*1 PT1c ~ T1*1 PT2c ~ T2*1 PT3c ~ T3*1 #latent variances PressA ~~ PressA PressB ~~ PressB PressC ~~ PressC PTA ~~ PTA PTB ~~ PTB PTC ~~ PTC #latent means PressA ~ 1 PressB ~ 1 PressC ~ 1 PTA ~ 1 PTB ~ 1 PTC ~ 1 #constraints for effects coding p11 == 3 - p12 - p13 
t11 == 3 - t12 - t13 P1 == 0 - P2 - P3 T1 == 0 - T2 - T3 ' fit1.0 <- lavaan(IJPP1.0, data=Pnl3, std.lv=F, auto.fix.first=F, missing="fiml", estimator="MLR") summary(fit1.0, standardized=T, fit=T) inspect(fit1.0, "modindices") ## Regression Model ## IJPP2.0 <- ' PTC ~ PTB + PressB PressC ~ PressB + PTB PTB ~ PTA + PressA PressB ~ PressA + PTA PressA ~ Age + Female + AfAm + Asian + Hisp + OthRace + Edu + Inc + BA + Ideol + PrtSt + PI1a PTA ~ Age + Female + AfAm + Asian + Hisp + OthRace + Edu + Inc+ BA + Ideol + PrtSt + PI1a PressA ~~ PTA PressB ~~ PTB PressC ~~ PTC #define latent variables PressA =~ p11*PrintA + p12*DigA + p13*CableA PressB =~ p11*PrintB + p12*DigB + p13*CableB PressC =~ p11*PrintC +p12* DigC + p13*CableC PTA =~ t11*PT1a + t12*PT2a + t13*PT3a PTB =~ t11*PT1b + t12*PT2b + t13*PT3b PTC =~ t11*PT1c + t12*PT2c + t13*PT3c #residuals PrintA ~~ PrintA PrintB ~~ PrintB PrintC ~~ PrintC DigA ~~ DigA DigB ~~ DigB DigC ~~ DigC CableA ~~ CableA CableB ~~ CableB CableC ~~ CableC PT1a ~~ PT1a PT1b ~~ PT1b PT1c ~~ PT1c PT2a ~~ PT2a PT2b ~~ PT2b PT2c ~~ PT2c PT3a ~~ PT3a PT3b ~~ PT3b PT3c ~~ PT3c #correlated residuals accross time PrintA ~~ PrintB + PrintC DigA ~~ DigB + DigC CableA ~~ CableB + CableC PrintB ~~ PrintC DigB ~~ DigC CableB ~~ CableC PT1a ~~ PT1b + PT1c PT2a ~~ PT2b + PT2c PT3a ~~ PT3b + PT3c PT1b ~~ PT1c PT2b ~~ PT2c PT3b ~~ PT3c #intercepts PrintA ~ P1*1 DigA ~ P2*1 CableA ~ P3*1 PT1a ~ T1*1 PT2a ~ T2*1 PT3a ~ T3*1 PrintB ~ P1*1 DigB ~ P2*1 CableB ~ P3*1 PT1b ~ T1*1 PT2b ~ T2*1 PT3b ~ T3*1 PrintC ~ P1*1 DigC ~ P2*1 CableC ~ P3*1 PT1c ~ T1*1 PT2c ~ T2*1 PT3c ~ T3*1 #latent variances PressA ~~ PressA PressB ~~ PressB PressC ~~ PressC PTA ~~ PTA PTB ~~ PTB PTC ~~ PTC #latent means PressA ~ 1 PressB ~ 1 PressC ~ 1 PTA ~ 1 PTB ~ 1 PTC ~ 1 #constraints for effects coding p11 == 3 - p12 - p13 t11 == 3 - t12 - t13 P1 == 0 - P2 - P3 T1 == 0 - T2 - T3 ' fit2.0 <- lavaan(IJPP2.0, data=Pnl3, std.lv=F, auto.fix.first=F, missing="fiml", estimator="MLR") 
summary(fit2.0, standardized=T, fit=T) inspect(fit2.0, "modindices")
4fa94be3da41ab0dc1e1cdc6b9452fb45fe8715d
49a72feeac20b3ae91b2d06f81fa6e0c4070cd75
/R/sphere.R
5c3686aaff227a08c2f643122579ba21530d4326
[]
no_license
euctrl-pru/pruatlas
7056e83dad4a3b080795ebd00105752114bc6c29
6cea86782ba0f7c06dde0e8ecf0042043f9fb833
refs/heads/master
2023-06-25T21:30:46.605603
2023-06-14T16:15:53
2023-06-14T16:15:53
90,118,166
0
0
null
null
null
null
UTF-8
R
false
false
461
r
sphere.R
#' Polygon representing the spherical Earth.
#'
#' @param crs a `proj` projection string
#'
#' @return A Simple Feature representing the spherical contour of the Earth
#'   in the relevant projection.
#' @export
sphere <- function(crs = pruatlas::pru_laea_proj) {
  # Dense graticule over the whole globe; the tiny margin keeps the grid
  # strictly inside the [-180, 180] x [-90, 90] domain.
  grat <- sf::st_graticule(ndiscr = 10000, margin = 10e-6)
  projected <- sf::st_transform(grat, crs = sf::st_crs(crs))
  # Hull of every projected graticule line, merged into a single outline.
  outline <- sf::st_union(sf::st_convex_hull(projected))
  sf::st_sf(geometry = outline, name = 'sphere')
}
0892678d3baa0caf02748adafdc5a25986376d95
bc2371d173306023c5c766fbbb56b6d5c0de2631
/tests/testthat/test_stopping_conditions.R
2c241ad38f01967066dfa75b913c5da9d41f47a9
[]
no_license
cran/cmaesr
83d63af1701ba8f2b74549638fd5818ac2fa552d
0bbe56aba78455d341fab5aec38eef4b5c9706d2
refs/heads/master
2020-04-06T06:55:04.276297
2016-12-04T16:21:54
2016-12-04T16:21:54
49,934,313
0
0
null
null
null
null
UTF-8
R
false
false
1,522
r
test_stopping_conditions.R
context("CMA-ES stopping conditions")

test_that("CMA-ES stopping conditions work fine", {
  # Every stopping condition is exercised on the 2D sphere function.
  objective <- makeSphereFunction(2L)

  # Helper: run the optimizer with a single stopping condition installed.
  run_cmaes <- function(condition) {
    cmaes(objective, control = list(stop.ons = list(condition)), monitor = NULL)
  }

  # stop on maximum iterations reached
  max.iters <- 50L
  out <- run_cmaes(stopOnMaxIters(max.iters))
  expect_true(grepl("iterations", out$message, ignore.case = TRUE))
  expect_equal(out$n.iters, max.iters)

  # stop on exhausted time budget (allow one second of slack)
  max.time <- 2
  out <- run_cmaes(stopOnTimeBudget(max.time))
  expect_true(grepl("budget", out$message, ignore.case = TRUE))
  expect_true((out$past.time - max.time) < 1)

  # stop on low gap to the optimal parameters
  opt.param <- getGlobalOptimum(objective)$param
  out <- run_cmaes(stopOnOptParam(as.numeric(opt.param)))
  expect_true(grepl("parameters", out$message, ignore.case = TRUE))

  # stop on maximum number of function evaluations
  max.evals <- 100L
  out <- run_cmaes(stopOnMaxEvals(max.evals))
  expect_true(grepl("evaluations", out$message, ignore.case = TRUE))

  # stop on low gap to the optimal objective function value
  opt.value <- getGlobalOptimum(objective)$value
  out <- run_cmaes(stopOnOptValue(opt.value))
  expect_true(grepl("function value", out$message, ignore.case = TRUE))
})
f1e2d5bd4af28c4faa684e4e90249049a722d14c
8e94f2b785062eb5042a536615a5d5cd204c48cc
/DiamondPrices/ui.R
16df4d31e2dfb1a54d8b196d2da3d41836f70733
[]
no_license
shibashismukherjee/ddp
2c45fb070f6723eb9ca0a01a7dafa34536541ae2
4fca2a9d0faf0ace595d6dba95dc2e12596e7751
refs/heads/master
2021-01-13T02:44:37.495725
2016-12-25T20:49:26
2016-12-25T20:49:26
77,340,565
0
0
null
null
null
null
UTF-8
R
false
false
1,128
r
ui.R
# UI definition for the diamond-pricing explorer.
# A sidebar holds usage instructions plus a selector for one diamond
# property (clarity/color/cut); the main panel shows the price plot that
# the server renders for that choice.
#
# Find out more about building applications with Shiny here:
#    http://shiny.rstudio.com

library(shiny)

shinyUI(fluidPage(

  # Application title
  titlePanel("Diamonds Pricing Data Analysis"),

  sidebarLayout(
    sidebarPanel(
      helpText(
        h1("Instructions"),
        # BUG FIX: user-facing help text had "Calrity" for "Clarity" and
        # named the package "ggplot"; the diamonds dataset ships in ggplot2.
        p("This is a shiny web application that analyzes Diamond prices by the three properties of diamonds - Clarity, Color and Cut. It uses the Diamond dataset available in the ggplot2 package. The user can choose one of the properties from the drop down below and the application displays a plot of price against the property chosen.")
      ),
      # Selected value (1/2/3) is read by the server as input$var.
      selectInput("var",
                  label = "Select Diamond Property",
                  choices = list("Clarity" = 1, "Color" = 2, "Cut" = 3),
                  selected = 1)
    ),

    # Show a plot of the pricing analysis
    mainPanel(
      plotOutput("pricePlot")
    )
  )
))
69a00983fa2cd7534cf699a39be5216021456db4
38f004dc32a78e058728250e965adfda049f7da7
/man/fars_read.Rd
a160a04248dc15ee2bf19f06ec402b782dcf8de2
[]
no_license
RedTent/farsfunctionsJT
40e8209a14c2d7f9962dc997235157c0fd581bec
cbab36bd8d530192dad15644b4c10372ba126706
refs/heads/master
2021-01-01T16:36:39.300693
2018-03-24T10:36:40
2018-03-24T10:36:40
97,868,964
0
0
null
2018-03-24T10:36:41
2017-07-20T18:58:47
R
UTF-8
R
false
true
791
rd
fars_read.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fars_functions_package0.1.R \name{fars_read} \alias{fars_read} \title{Read a datafile from the Fatality Analysis Reporting System} \usage{ fars_read(filename) } \arguments{ \item{filename}{A character string with the filename. The file should be in csv-format. Zipped csv-files can also be read} } \value{ Returns a tibble with all the data from the csv file } \description{ This function reads a csv datafile. Though designed specifically to read FARS data it can be used to read all kinds of csv files. It uses the \code{readr::read_csv} function } \details{ If the file doesn't exist it will throw an error. } \examples{ \dontrun{ fars_read("accident_2015.csv.bz2") fars_read(make_filename(year=2015)) } }
966896a404c922a574b295c14592ee0edd6dab36
c3d2fb0a02e5eabbd0234860186f246651c9fb39
/R/Archive/R_Coding_Common_Usage/textmining.r
2b9d6678fbeb4d1ed20d38c43da4b8bbce5849f8
[]
no_license
ppbppb001/Snippets
09216417c52af40c947114bc106aee97776674f7
49e254eecd55f5e777d87c3f06c720cb881fb2dd
refs/heads/master
2023-08-14T08:16:07.814866
2023-08-07T11:57:15
2023-08-07T11:57:15
93,360,154
0
1
null
null
null
null
UTF-8
R
false
false
1,796
r
textmining.r
# Basic text-mining workflow with the tm package: load a plain-text corpus,
# clean it, build document-term matrices, and summarise/visualise word
# frequencies.
# NOTE(review): setwd()/rm(list=ls()) at the top of a script is discouraged;
# kept here because DirSource("textmining_data") relies on the relative path.
setwd('../projects/r practice/r_coding_common_usage')
rm(list=ls())

library(tm)
library(SnowballC)
library(wordcloud)
library(ggplot2)

# load files into a Corpus object
docs <- Corpus(DirSource("textmining_data"))

# inspect a particular document
writeLines(as.character(docs[[30]]))

# function that replaces a specified pattern by a space
toSpace <- content_transformer(function(x, pattern) {return(gsub(pattern, " ", x))})

# use toSpace to eliminate colons and hyphens
docs <- tm_map(docs, toSpace, "-")
docs <- tm_map(docs, toSpace, ":")

# remove punctuation
# BUG FIX: the tm transformation is removePunctuation (singular);
# removePunctuations does not exist, so this step used to error out.
docs <- tm_map(docs, removePunctuation)

# transform to lower case
docs <- tm_map(docs, content_transformer(tolower))

# remove digits
docs <- tm_map(docs, removeNumbers)

# remove stopwords such as a, an and the
docs <- tm_map(docs, removeWords, stopwords("english"))

# stem document, reduce related words to their common root
docs <- tm_map(docs, stemDocument)

# further clean up: unify stem variants of "organize"
docs <- tm_map(docs, content_transformer(gsub), pattern='organiz', replacement='organ')

# create document term matrix (DTM), documents by rows and words by columns
dtm <- DocumentTermMatrix(docs)

# inspect DTM
inspect(dtm[1:2, 1000:1005])

# frequency of occurrence of each word
freq <- colSums(as.matrix(dtm))
ord <- order(freq, decreasing=TRUE)
freq[head(ord)]
freq[tail(ord)]

# create restricted DTM: words with length 4~20 occurring in 3~27 documents
dtmr <- DocumentTermMatrix(docs, control=list(wordLengths=c(4, 20), bounds=list(global=c(3, 27))))

# BUG FIX: freqr was used by wordcloud() below but never computed;
# derive the per-term frequencies from the restricted DTM.
freqr <- colSums(as.matrix(dtmr))

# terms that occur at least 80 times
findFreqTerms(dtmr, lowfreq=80)
findAssocs(dtmr, "project", 0.6)

# word cloud of the restricted vocabulary
wordcloud(names(freqr), freqr, min.freq=70, colors=brewer.pal(6, "Dark2"))
650e056d56982207af371f1a3f6b71c02ba07f75
558cbef99ead5c7712cbeacca5f01afc8ba59922
/Code/FeedForward/FeedForwardTrain.R
9ce58a84f6ecba9de4e28d68659cadde21dfd4e7
[]
no_license
rPromptt/Neural_Networks_R
3ab19c01a5e67bb8ff3cebb3fbe546c750b56fdd
3bb99272f344f4b9f4f4cb41b1c303744f445c8f
refs/heads/master
2020-03-17T05:33:23.482864
2017-10-20T08:30:48
2017-10-20T08:30:48
null
0
0
null
null
null
null
UTF-8
R
false
false
663
r
FeedForwardTrain.R
# Train a feed-forward neural network for classification.
#
# NOTE: this is an unimplemented stub -- the body only documents the
# intended interface, so calling it currently returns NULL.
#
# Arguments:
#   T_input       - training features, matrix (#samples x #feature types)
#   T_output      - expected classification, matrix (#samples x #classes)
#   nodes         - nodes per hidden layer; length(nodes) fixes the number
#                   of hidden layers to implement
#   learning_rate - rate at which the weights change during training
#   reg_type      - regularization type ("None" by default)
#   reg_factor    - regularization strength
#   epochs        - number of training iterations
NN_classification <- function(T_input, T_output, nodes, learning_rate = 0.3,
                              reg_type = "None", reg_factor = 0.0,
                              epochs = 7000) {
  # TODO: implement the training loop.
}
7a90513f19da91b5f5b8f024919b3a5730999858
159fd40a1ccc4dc373fa258c4950605bcb8918fa
/12_map_per_capita_stop_rates_by_race.R
8b368b1114b28ab07114809e900461206d19d69b
[]
no_license
greatjack1/stop-question-frisk
e55cf9433356dada5547e48ff93a22a031ef0c13
4e0887b7ba00d41ed27028ab99bd3878727eefdc
refs/heads/master
2020-09-23T03:13:02.716740
2019-11-28T02:36:10
2019-11-28T02:36:10
null
0
0
null
null
null
null
UTF-8
R
false
false
3,808
r
12_map_per_capita_stop_rates_by_race.R
# Map per-capita NYPD stop-question-frisk rates by race and police precinct.
# Produces two interactive leaflet maps (white vs. black stop rates) and
# saves each as an HTML widget plus a PNG screenshot.
library(pacman)
library(here)
library(tidyverse)
library(leaflet)
library(tigris)
library(tmap)
library(maptools)
library(tmaptools)
library(sp)
library(webshot)
library(htmlwidgets)

# Load stop and frisk data for 2003-2013
# (assumes the .RData file creates `sf_data1` -- TODO confirm)
load(here("clean_data", "sqf_03_13.RData"))
# Load census data with race distributions on the precinct level
# (assumes it creates `census_race_dist` -- TODO confirm)
load(here("clean_data", "census_race_data.RData"))
# Load precinct shapefiles (assumes it creates `police_precincts`)
load(here('clean_data', 'precinct_shape_file.RData'))

# Rename and summarize SQF data similarly: drop blank/unknown race codes,
# collapse race codes into five labels, and count stops per precinct+race.
sqf_race_dist <- sf_data1 %>%
  select(addrpct, race) %>%
  filter(race != " " & race != "U" & race != "X") %>%
  # First recode folds P into B and I into Z; the second maps codes to labels.
  mutate(race = recode_factor(race,"P" = "B", "I" = "Z"),
         race = recode_factor(race, "W" = "White", "B" = "Black", "Q" ="Hispanic", "A" = "Asian", "Z" = "Other")) %>%
  rename("precinct" = "addrpct") %>%
  group_by(precinct, race) %>%
  summarize(sqf_count = n()) %>%
  ungroup()

# Join the data frames: per-capita stop rate = stops / census population
joint <- left_join(census_race_dist, sqf_race_dist) %>%
  mutate(stop_rate = sqf_count/census_count)

# Create separate data frames with only White and Black race data
# (precinct 22 -- Central Park -- is excluded; presumably it has no
# resident population, so rates there are undefined -- TODO confirm)
white_rates <- joint %>% filter(race == "White") %>% filter(precinct != 22)
black_rates <- joint %>% filter(race == "Black") %>% filter(precinct != 22)

# Join stop rate ratios with precinct shape data
white_precinct_rates <- geo_join(police_precincts, white_rates, "Precinct", "precinct")
black_precinct_rates <- geo_join(police_precincts, black_rates, "Precinct", "precinct")

# Map the results: per-precinct popups showing the raw (unlogged) stop rate
mypopupW <- paste0("Precinct: ", white_precinct_rates$Precinct, "<br>", "Stop Rate: ", white_precinct_rates$stop_rate)
mypopupB <- paste0("Precinct: ", black_precinct_rates$Precinct, "<br>", "Stop Rate: ", black_precinct_rates$stop_rate)

# Shared palette over a symmetric log10 domain so both maps are comparable.
mypal <- colorNumeric(
  palette = "YlOrRd",
  domain = c(-log10(35), log10(35))
)

# Create a map of NYC with the color of each precinct indicating the
# probability of being stopped there for a white person
# Note: Coloring is on a log scale, but the popups and legend are not
# (This was done for increased human-readability)
white_stop_rates <- leaflet(white_precinct_rates) %>%
  addTiles() %>%
  addPolygons(fillColor = ~mypal(log10(white_precinct_rates$stop_rate)),
              fillOpacity = .9,
              weight = 1,
              popup = mypopupW) %>%
  addProviderTiles("CartoDB.Positron") %>%
  # labFormat back-transforms the log scale so legend labels show real rates.
  addLegend(pal = mypal,
            values = c(-1.5,1.5),
            labFormat = labelFormat(transform = function(x) signif(10^x, 1)),
            position = "topleft",
            title = "White<br>Stop Rate")
white_stop_rates

# Save the interactive widget, then screenshot it to PNG.
saveWidget(white_stop_rates,
           here("figures", "white_stop_rates_by_precinct.html"),
           selfcontained = FALSE)
webshot(here("figures", "white_stop_rates_by_precinct.html"),
        file = here("figures", "white_stop_rates_by_precinct.png"),
        cliprect = "viewport")

# Same as above, but for a black person
black_stop_rates <- leaflet(black_precinct_rates) %>%
  addTiles() %>%
  addPolygons(fillColor = ~mypal(log10(black_precinct_rates$stop_rate)),
              fillOpacity = .9,
              weight = 1,
              popup = mypopupB) %>%
  addProviderTiles("CartoDB.Positron") %>%
  addLegend(pal = mypal,
            values = c(-1.5,1.5),
            labFormat = labelFormat(transform = function(x) signif(10^x, 1)),
            position = "topleft",
            title = "Black<br>Stop Rate")
black_stop_rates

saveWidget(black_stop_rates,
           here("figures", "black_stop_rates_by_precinct.html"),
           selfcontained = FALSE)
webshot(here("figures", "black_stop_rates_by_precinct.html"),
        file = here("figures", "black_stop_rates_by_precinct.png"),
        cliprect = "viewport")

# Record package versions used for this run.
sessionInfo()
2f23da8b8f2e3ddc3217eb691e79e3b6ae52761a
c416b94f77e5d3f52d1cf4231136e31a3ca3e514
/cachematrix.R
1175bee7a80218512bb2bbd583554b52745f7beb
[]
no_license
zlamp/ProgrammingAssignment2
2e3bdf6ec8c2af34ef118e55f9dd9c20d4e0368f
d7e7ac32895b0e7793042f1ede161f423ee91e9d
refs/heads/master
2020-12-11T09:03:42.848492
2017-03-14T09:54:11
2017-03-14T10:39:19
53,131,078
0
0
null
2016-03-06T09:58:42
2016-03-04T11:31:52
R
UTF-8
R
false
false
1,130
r
cachematrix.R
## makeCacheMatrix: wrap a matrix in an environment that can cache its
## inverse. Returns a list of four accessor closures:
##   set(y)      - replace the matrix (and invalidate the cached inverse)
##   get()       - return the current matrix
##   setinv(inv) - store a computed inverse in the cache
##   getinv()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the old inverse
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inverse <<- inv
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## wrapper. The inverse is computed with solve() on the first call and
## served from the cache (with a message) on every subsequent call, until
## set() replaces the underlying matrix. Extra arguments are forwarded to
## solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
0941773b3b5096bde9db0098528356380f316df3
213a7c7b301537e173bb099e58799a3caf46ec53
/R/airflow.R
0774d3f3448d899fb00b859e8ebb7ea0da451bde
[ "MIT" ]
permissive
misha-lisovich/laminar
07c2ff105f613cd4cb80fdcce2d90291e81666b6
b0a04c601ff09c236f8ae5fbc53dca5c2b75a806
refs/heads/master
2020-04-20T17:21:13.369235
2019-02-10T16:45:52
2019-02-10T16:45:52
168,986,398
5
0
null
null
null
null
UTF-8
R
false
false
2,040
r
airflow.R
#' Get Airflow csrf token
#'
#' Get the csrf token by scraping Airflow's queryview page.
#' @param airflow_url base url for the airflow instance
#' @return csrf token string (NA if the hidden input is not found)
#' @export
get_csrf_token <- function(airflow_url){
  # Fetch the queryview admin page and pull the hidden CSRF token out of
  # the form's <input name="_csrf_token"> element.
  airflow_url %>%
    paste0("/admin/queryview") %>%
    httr::GET() %>%
    httr::content() %>%
    xml2::xml_find_first('.//input[@name="_csrf_token"]') %>%
    xml2::xml_attr('value')
}

# date conversion: S3 method picked up by reticulate's py_to_r() dispatch
# for Python pendulum Date objects.
py_to_r.pendulum.date.Date <- function(x) {lubridate::as_datetime(as.character(x))}

# timedelta conversion: Python datetime.timedelta -> its character form
py_to_r.datetime.timedelta <- function(x) {as.character(x)}

# recursively (re)convert a list of reticulate objects into R equivalents
# Example: py_run_string("from datetime import timedelta; x = {'a' : 1, 'b' : timedelta(minutes=5)}")
# py_to_r_reconvert(py$x)
py_to_r_reconvert <- function(x){
  # rapply walks the (possibly nested) list; only reticulate-wrapped Python
  # objects are converted, all other elements are kept untouched.
  rapply(x, function(object) {
    if (inherits(object, "python.builtin.object"))
      reticulate::py_to_r(object)
    else object
  }, how = 'replace')
}

#' Get airflow dag args
#'
#' Get specified args (attributes) from all Airflow dags contained in dag_dir
#' @param dag_dir airflow dag directory
#' @param args attributes to extract. NOTE: currently only extracts
#'   start_date and schedule_interval regardless of this argument - needs
#'   generalization.
#' @return data frame of the form (dag_id, schedule_interval, start_date)
#' @export
get_af_dag_args <- function(dag_dir = config::get()$dag_dir, args = c('start_date', 'schedule_interval')){
  # Locate and import the Python helper module 'dag' bundled with this
  # package (loaded through reticulate).
  pydag_filepath <- system.file(package = 'laminar')
  pydag <- reticulate::import_from_path('dag', path = pydag_filepath)
  # Enumerate the dags in dag_dir and extract the requested attributes;
  # results come back as Python objects, hence the reconversion below.
  af_dags <- pydag$list_dir_dags(dag_dir)
  af_dag_args_lst <- py_to_r_reconvert(pydag$get_dag_args(af_dags, args))
  # NOTE(review): the columns below are hard-coded, so any extra entries in
  # `args` are silently dropped. `data_frame()` is the deprecated dplyr
  # constructor (superseded by tibble()) -- kept as-is here.
  af_dag_args <- af_dag_args_lst %>%
    {data_frame(dag_id = names(.),
                schedule_interval = purrr::map_chr(., 'schedule_interval', .null = NA_character_),
                start_date = purrr::map_df(., 'start_date') %>%
                  tidyr::gather(dag_id, start_date) %>%
                  .$start_date
    )}
  af_dag_args
}
cf598cf416490d95fff32a385e3d7e99031b78c4
8866b741411e2edfa61972369143de26fde5f821
/man/CheckInput.i_MMC.Rd
36413749fde8ff3253fbbd2858b54015dfdda5f7
[]
no_license
cran/queueing
6232577c0eb67cae7c716ef1432cc54194fb26d4
7712782a0d82d73599f128f68e94536b0cf8d4e5
refs/heads/master
2020-12-25T17:36:19.513391
2019-12-08T21:10:02
2019-12-08T21:10:02
17,698,913
0
2
null
null
null
null
UTF-8
R
false
false
998
rd
CheckInput.i_MMC.Rd
% File man/CheckInput.i_MMC.Rd \name{CheckInput.i_MMC} \alias{CheckInput.i_MMC} \title{Checks the input params of a M/M/c queueing model} \description{ Checks the input params of a M/M/c queueing model } \usage{ \method{CheckInput}{i_MMC}(x, \dots) } \arguments{ \item{x}{an object of class i_MMC} \item{\dots}{additional arguments} } \details{Checks the input params of a M/M/c queueing model. The input params are created by previously calling \link{NewInput.MMC}} \references{ [Sixto2004] Sixto Rios Insua, Alfonso Mateos Caballero, M Concepcion Bielza Lozoya, Antonio Jimenez Martin (2004).\cr \emph{Investigacion Operativa. Modelos deterministicos y estocasticos}.\cr Editorial Centro de Estudios Ramon Areces. } \seealso{ \code{\link{NewInput.MMC}}. } \examples{ ## See example 10.9 in reference [Sixto2004] for more details. ## create input parameters i_mmc <- NewInput.MMC(lambda=5, mu=10, c=2, n=0, method=0) ## check the parameters CheckInput(i_mmc) } \keyword{M/M/c}
4fc616d851b922b2bd2f356f37da0f9d1b48093d
97aa63070e0046ce8812afad052fd03dfb8bc72a
/server.R
20326eb864ed4534e51326e7455d6e30318a0a25
[]
no_license
caniraban/Coursera-Trees
183b2ed9ebc4ab912af2c71c137f30dae428c777
f926e88ae2561d128a4249431a469504e9830dee
refs/heads/main
2023-01-22T09:16:44.906227
2020-12-02T14:30:02
2020-12-02T14:30:02
317,887,922
0
0
null
null
null
null
UTF-8
R
false
false
1,022
r
server.R
# Server logic for the tree-height app: fit a linear model of Height on
# Girth (datasets::trees) and show/plot the prediction for the girth value
# chosen with the slider.
#
# Find out more about building applications with Shiny here:
#    http://shiny.rstudio.com/

library(shiny)

shinyServer(function(input, output) {

  # The model is fit once per session; reactives below reuse it.
  fit <- lm(Height ~ Girth, data = trees)

  # Reactive prediction for the girth currently selected on the slider.
  heightpred <- reactive({
    predict(fit, newdata = data.frame(Girth = input$sliderGirth))
  })

  output$plot <- renderPlot({
    girth <- input$sliderGirth
    # Scatter of the raw data, with the prediction highlighted in red.
    plot(trees$Girth, trees$Height,
         xlab = "Girth of the tree",
         ylab = "Height of the tree",
         bty = "n", pch = 16,
         xlim = c(8, 25), ylim = c(60, 90))
    if (input$showmodel) {
      # Optionally overlay the fitted regression line.
      abline(fit, col = "red", lwd = 2)
    }
    points(girth, heightpred(), col = "red", pch = 16, cex = 2)
  })

  output$pred <- renderText({
    heightpred()
  })
})