blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ef0f7de27e4687a687777588f7482adf7ab5bb66 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/prospectr/examples/movav.Rd.R | 44f289b53f37be7661d9650fbf24534c8b1a69d8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 462 | r | movav.Rd.R | library(prospectr)
### Name: movav
### Title: Moving average
### Aliases: movav
### ** Examples
data(NIRsoil)
wav <- as.numeric(colnames(NIRsoil$spc))
spc <- 1/10^NIRsoil$spc # conversion to reflectance
spc <- spc + rnorm(length(spc),0,0.001) # adding some noise
matplot(wav,t(spc[1:10,]),type='l',xlab='Wavelength /nm',ylab='Reflectance')
mov <- movav(spc,w=11) # window size of 11 bands
matlines(as.numeric(colnames(mov)),t(mov[1:10,])) # smoothed data
|
ab1c7f82ec291157478adb2bfa478df9248cb307 | 917683f603471a3e9c0ddf2244c798e42543456a | /R/proportion.R | c8e985688ef836719fcd2f593072747df0704276 | [] | no_license | rramadeu/Paternity-Functions | 615aaa9e67dd60fe28e2bd79ea38251a57fa80e9 | 78c8bca17f405ddd935d688266173ab471148c47 | refs/heads/main | 2023-04-26T05:36:05.628209 | 2021-05-27T20:03:56 | 2021-05-27T20:03:56 | 356,289,937 | 0 | 0 | null | 2021-04-09T13:59:12 | 2021-04-09T13:59:12 | null | UTF-8 | R | false | false | 1,765 | r | proportion.R | #########################################################################
#
# Package:
#
# File: proportion.R
# Contains: proportion
#
# Written by Samuel Beazley
#
# First version: March-2021
# Last update: 5-Apr-2021
#
#########################################################################
#'
#' Test parentage of individual
#'
#' Given individual and a vectors of possible parents, function returns dataframe of proportion of pedigree conflict with each possible trio
#'
#' @param parents a vector with strings related to the name of the suspected parents
#' @param individual a string value with the individual name you are testing
#' @param data the dataframe from which the data is from
#'
#' @return A dataframe of different combinations of parents and individual with the proportion of pedigree conflicts in each trio
#'
#' @examples
#' data(potato.data)
#' proportion(parents = c("W6511.1R","VillettaRose","W9914.1R"),
#' individual = "W15268.1R",
#' data = potato.data)
#'
#' @export
proportion <- function(parents, individual, data)
{
table <- gtools::combinations(n = length(parents), r = 2, repeats.allowed = F, v = parents) #unique combinations of parents
table <- cbind(table, rep(individual, dim(table)[1]) ) #creating table of parents to test
vec <- c() #initializing vector
for(i in 1:dim(table)[1])
{
vec <- cbind(vec, paternity(cbind(data[[ table[i,1] ]], data[[ table[i,3] ]], data[[ table[i,2] ]]) )) #vector of statistic values
}
table <- cbind(table, t(vec)) #adding statistic column
colnames(table) <- c("Parent1", "Parent2", "Individual", "Statistic") #labelling columns
DF <- as.data.frame(subset(table, select = c("Parent1", "Parent2", "Statistic"))) #final dataframe
return(DF)
}
|
94eb13582c4e55b9fd7529756286f047d6505b8c | 6408034915bfada6fa84cc78a1180690f5ced9e7 | /scripts/plot_entropy_tile.R | aa74b822b5e63166208ae29cf3078420d77050a0 | [] | no_license | pangenome/chromosome_communities | 04ad781faebfcf8dfa5bdc9686453deda2c7b408 | 546645820c37e5984f4e77be5ba1e81021e182e1 | refs/heads/main | 2023-07-15T17:57:22.955437 | 2023-06-28T12:51:12 | 2023-06-28T12:51:12 | 358,575,774 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,068 | r | plot_entropy_tile.R | args <- commandArgs()
path_entropy_tsv <- args[6]
title <- args[7]
x_max <- as.numeric(args[8])
height <- as.numeric(args[9])
library(ggplot2)
library(ggforce)
x <- read.delim(path_entropy_tsv)
x <- x[x$shannon_div_index != -1, ]
p <- ggplot(x, aes(x = start + (end - start) / 2, width = end - start, y = query, alpha=shannon_div_index, fill = ground.target)) +
geom_tile() +
ggtitle(title) +
facet_col(~ground.target, scales = "free_y", space = "free", labeller = labeller(variable = labels)) +
theme(
plot.title = element_text(hjust = 0.5),
text = element_text(size = 32),
axis.text.x = element_text(size = 12),
axis.text.y = element_text(size = 12),
legend.title = element_text(size = 32),
legend.text = element_text(size = 32),
legend.position = "top",
#axis.title.y=element_blank()
) +
labs(alpha="Shannon Diversity Index", fill="Target") +
xlim(0, x_max)
ggsave(plot = p, paste0(path_entropy_tsv, '.entropy.pdf'), width = 120, height = height, units = "cm", dpi = 100, bg = "transparent", limitsize = FALSE)
|
4948b95c6f42d95ab11d5948a2d00afbb651c92e | 59f69aa5e4aea094ac12357627c55102a5a2d5cd | /Experiment_Design/lmm/pr_lmm.r | 9ebecf5c7b37ff5bd48f44a7a776c122772d7a31 | [] | no_license | marianaw/Applied_Statistics_assignments | 3f27b067c282cfa470f7150b6d6ac7f2b07ee6af | b482ec4980063831e4cea71bfe54684f0b8e7bf2 | refs/heads/master | 2021-01-20T18:57:49.468318 | 2017-11-16T04:38:31 | 2017-11-16T04:38:31 | 65,623,100 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,889 | r | pr_lmm.r | setwd("/home/mariana/Documents/MestriaUNC/Applied_Statistics_assignments/Experiment_Design/lmm/")
require(ggplot2)
library('nlme')
#------------------------------------------------------------------------------------------------
#-------------------------------------- Ejercicio 1 ---------------------------------------------
#------------------------------------------------------------------------------------------------
df = read.csv('ex_1.csv')
df$Cobayo = factor(df$Cobayo)
# a) Diagrama de dispersión de peso vs. tiempo separado por vitamina:
attach(df)
plot(Tiempo, Peso, col = c('red', 'blue', 'green')[VitE]);
detach(df)
# Se observan distintos comportamientos. Puede verse cómo con la alta dosis de vitamina el crecimiento en el peso es mayor.
#-------------------------------------------------------------------
# b) Diagrama de puntos y boxplot para cada tiempo:
plot = ggplot(df, aes(x = Tiempo, y= Peso, fill = VitE)) + xlab('Tiempo') + ylab('Peso')
plot + geom_boxplot()
medias_tiempo_porVit = aggregate(df$Peso, by = list(df$Tiempo, df$VitE), mean)
names(medias_tiempo_porVit) = c('Tiempo', 'VitE', 'Media_Peso')
plot(medias_tiempo_porVit$Tiempo, medias_tiempo_porVit$Media_Peso, col = c('red', 'blue', 'green')[medias_tiempo_porVit$VitE])
# Se observa un comportamiento cúbico.
# Se observa como la media de peso para la dosis alta de vitamina es mayor, y cómo la media para los que no reciben vitamina es
# menor.
# Hacemos un diagrama para mirar la interacción de tiempo y vitamina para cada cobayo:
attach(df)
interaction.plot(VitE, Cobayo, Peso)
detach(df)
#-------------------------------------------------------------------
# c) Ajustamos un modelo lineal mixto:
# # El siguiente modelo ajusta una linea sin distinción de tipos de vitamina (sólo estamos experimentando) con la misma pendiente
# # para todos los tiempos y errores aleatorios para los cobayos:
# modelo_1 = lme(Peso ~ Tiempo, data = df, random = ~ 1|Cobayo)
# summary(modelo_1)
# # R arroja que tanto el intercepto como el slope son significativos.
#
# # Agregamos un efecto aleatorio sobre la pendiente del tiempo:
# modelo_2 = update(modelo_1, random = ~ Tiempo | Cobayo)
# summary(modelo_2)
#
# # Comparemos ambos modelos:
# anova(modelo_1, modelo_2)
# # El p-valor muestra que el modelo_2 es mejor que el modelo_1.
# # Notar que aún no hemos considerado el factor vitamina, sólo analizamos el peso en función del tiempo.
#
# # Ahora agregamos la interacción entre vitamina y tiempo:
# modelo_3 = update(modelo_2, fixed. = ~ Tiempo * VitE)
# Como el comportamiento es cúbico ajustamos un modelo lineal cúbico en el tiempo:
modelo = lme(Peso ~ poly(Tiempo, 3) * VitE, data = df, random = ~ Tiempo|Cobayo, method = 'ML')
summary(modelo)
# Vemos que con respecto a la base E0 del nivel VitE los niveles E1 y E2 no son significativos, es decir, no hacen diferencia.
# Este es un contraste de E1 y E2 con respecto a E0.
# Ahora vamos a correr otro modelo en el que consieramos E0 y E1 y E2, juntos, como otro nivel.
modelo_2 = lme(Peso ~ poly(Tiempo, 3) * E0vsE12, data = df, random = ~ Tiempo|Cobayo)
summary(modelo_2)
#-------------------------------------------------------------------
# d) Conclusiones sobre el modelo anterior:
# Vemos que el factor vitamina no es significativo, y que la correlación entre el nivel E1 de este factor y el tiempo, tampoco.
#-------------------------------------------------------------------
# e) Agregamos correlación:
# Correlación de simetría compuesta:
modelo_3_simcomp = lme(Peso ~ poly(Tiempo, 3) * VitE, data = df, random = ~Tiempo|Cobayo, correlation = corCompSymm())
summary(modelo_3_simcomp)
# Correlación sin estructura:
modelo_3_sinestr = lme(Peso ~ poly(Tiempo, 3) * VitE, data = df, random = ~Tiempo|Cobayo, correlation = corSymm(),
control = lmeControl(maxIter = 1000, opt = 'optim'))
summary(modelo_3_sinestr)
# Correlación autorregresiva de primer orden:
modelo_3_autoreg = lme(Peso ~ poly(Tiempo, 3) * VitE, data = df, random = ~Tiempo|Cobayo, correlation = corAR1(),
control = lmeControl(maxIter = 1000, opt = 'optim'))
summary(modelo_3_autoreg)
#-------------------------------------------------------------------
# f) Miramos la heterocedasticidad:
par(mfrow = c(2,1))
plot(modelo_3_autoreg)
qqnorm(modelo_3_autoreg)
plot(modelo)
# Todo luce homocedástico.
# g) Pruebas a posteriori:
# # Tukey:
#
# library(multcomp)
# glht(modelo, linfct=mcp(Tiempo='Tukey'))
#
# df$ints = interaction(df$VitE, df$Tiempo)
# modelo_posthoc = lme(Peso ~ 1 - ints, data = df, random = ~ Tiempo|Cobayo)
# comp = glht(modelo_posthoc, linfct = mcp(ints = 'Tukey'), test = adjusted(type = 'bonferroni'))
#------------------------------------------------------------------------------------------------
#-------------------------------------- Ejercicio 2 ---------------------------------------------
#------------------------------------------------------------------------------------------------
semillas_db = read.csv('ex_2_Semillas.csv')
semillas_db$cajas = factor(semillas_db$cajas)
# a) Gráfico de dispersión de biomasa vs. tiempo separado por tamaño de semilla:
plot = ggplot(semillas_db, aes(x = tiempo, y = biomasa, shape = semillas, color = semillas)) +
geom_point() +
geom_smooth(method=lm)
plot
plot + facet_grid(. ~ semillas)
#------------------------------------------------------
# b) Dispersión de biomasa vs. tiempo particionado por semillas y cajas:
plot2 = ggplot(semillas_db, aes(x = tiempo, y = biomasa, shape = cajas, color = semillas,
group = interaction(semillas, cajas))) +
geom_point() +
geom_line()
plot2
plot2 + facet_grid(. ~ semillas + cajas)
# Observar que en las semillas grandes hay poca variabilidad entre caja y caja, mientras que en las semillas pequeñas la caja
# cinco parece tener un comportamiento diferenciado, y la uno también. Las cajas 2, 3, y 4 se comportan de manera similar.
#------------------------------------------------------
# Modelo con estructura de correlación compuesta:
# Probamos un modelo con efectos aleatorios sólo en el intercepto:
sem_corr_comp_sim = corCompSymm(value = 0.2, form = ~ 1 | cajas)
sem_corr_comp_sim = Initialize(sem_corr_comp_sim, data = semillas_db)
corMatrix(sem_corr_comp_sim)
sem_cc_1 = gls(biomasa ~ semillas * tiempo, data = semillas_db, correlation = corCompSymm(form = ~1|cajas))
summary(sem_cc_1)
# sem_cc_1 = lme(biomasa ~ semillas * tiempo, data = semillas_db, random = ~ 1|cajas, correlation = corCompSymm(form = ~1|cajas))
# summary(sem_cc_1)
#
# sem_cc_1_a = lme(biomasa ~ semillas * tiempo, data = semillas_db, random = ~ 1|cajas)
# summary(sem_cc_1_a)
#
# anova(sem_cc_1, sem_cc_1_a)
# Heterocedasticidad del modelo que convergió:
plot(sem_cc_1)
qqnorm(sem_cc_1)
qqline(sem_cc_1)
#------------------------------------------------------
# Modelo de parcelas divididas:
sem_pardiv = lme(biomasa ~ semillas * tiempo, data = semillas_db, random = ~1|cajas)
summary(sem_pardiv)
plot(sem_pardiv)
qqnorm(sem_pardiv)
# Como hemos agregado un efecto aleatorio en este segundo modelo que, a diferencia del primero, es mixto, podemos ver
# cómo los errores lucen mucho mejor no obstante tener los mismos coeficientes.
# Prueba a posteriori: Tukey,
lsmeans(sem_pardiv, pairwise ~ semillas * tiempo, adjust='')
# Intento correr tukey con multcomp:
library(multcomp)
# Agregamos a los datos un término de interacción y corremos el modelo de nuevo:
semillas_db$semTiem = interaction(semillas_db$semillas, semillas_db$tiempo, drop = TRUE)
sem_pardiv_int = lme(biomasa ~ semTiem, data = semillas_db, random = ~1|cajas)
summary(sem_pardiv_int)
tukey = glht(sem_pardiv_int, linfct=mcp(semTiem = 'Tukey'))
summary(tukey)
# Fisher:
summary(tukey,test=univariate())
# Bonferroni:
summary(tukey, test=adjusted(type="bonferroni")) |
c9af92d62a0484d5126fbe5360400ec5f2dd2504 | 0d406b4ff1cb7b0975d0c767f503518505859fa6 | /R/search-series.R | 9d20f034bd9cdaa5aef087dc4826dd23f077db7d | [] | no_license | iverworld/PortfolioSelectionPackage | 4ccd98dc4e4f1e5692110c8d6f9a59578a93e237 | 7e952d0d3201d952724f82aa34677078a72f16f7 | refs/heads/master | 2020-03-29T20:15:47.401020 | 2015-11-03T22:32:45 | 2015-11-03T22:32:45 | 34,119,410 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 264 | r | search-series.R | #' Search series name from Quandl.
#'
#' @param keyword String
#' @param page Integer, number of pages
#' @param source String, the source filtering
search_series <- function(keyword, page = 1, source = NULL){
Quandl.search(keyword, page = 1, source=NULL);
} |
ca0140c0c2c789abb48cd2d649e250a4df0150f0 | 70020f492cba464adfc0a520ae6cdeb036cb717d | /man/calc_tidy.Rd | 84e57d9523c7300aa2246f0bbcd77f970bd62756 | [] | no_license | jsm19-tds-demo/simpr | 8107083de6f2cbc5a38b8c4aac4f5a20467a3281 | ee193068ac4de9aa45d1018e4a1e818e7a98fc8c | refs/heads/master | 2020-09-19T18:54:10.114751 | 2019-11-25T20:37:07 | 2019-11-25T20:37:07 | 224,268,635 | 0 | 0 | null | 2019-11-26T19:21:19 | 2019-11-26T19:21:19 | null | UTF-8 | R | false | true | 536 | rd | calc_tidy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simpr_fxns.R
\name{calc_tidy}
\alias{calc_tidy}
\title{Tidy the simulated model results output into tibble of components (broom analogue)}
\usage{
calc_tidy(simpr_mod)
}
\arguments{
\item{simpr_mod}{simulated model results (output of simpr::fit)}
}
\value{
tidied model components of fitted simulated data
}
\description{
Turn fitted model of simulated data (output of simpr::fit) into a tidy tibble of model components.
}
\examples{
calc_tidy(simpr_mod)
}
|
96d1ef8ffd469f0bd692efe947470370633e5484 | 44cf65e7ab4c487535d8ba91086b66b0b9523af6 | /data/Newspapers/2000.09.13.editorial.57569.0438.r | d0e6791502bf9ea0de8d2c75155c1a097df65d84 | [] | no_license | narcis96/decrypting-alpha | f14a746ca47088ec3182d610bfb68d0d4d3b504e | 5c665107017922d0f74106c13d097bfca0516e66 | refs/heads/master | 2021-08-22T07:27:31.764027 | 2017-11-29T12:00:20 | 2017-11-29T12:00:20 | 111,142,761 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,555 | r | 2000.09.13.editorial.57569.0438.r | de multa vreme , telespectatorii fideli ai zgomotosului post de televiziune n - au mai avut parte de o seara in care sa fie purtati din chifla in chifla .
pentru ca toata Romania vuieste din cauza crimei de la Iasi , PRO TV a zis ca sparge piata daca il aduce in direct , la o ora de maxima audienta , pe autorul emisiunilor despre infractiuni .
si astfel , la ora fixa , in Piata din Iasi ni s - a aratat Emanuel Isopescu , cel reconditionat , trimis special in capitala Moldovei .
tocmai ziceam noi ca avem noroc si aflam de la fata locului dedesubturile acestei crime .
am fost naivi !
Emanuel Isopescu se afla la Iasi nu pentru a face o ancheta jurnalistica , nu pentru a patrunde in necunoscutele afacerii .
el transmitea din piata centrala a orasului unde indeplinea rolul de Nufarul - RASUB - retusuri - remaieri . Microfonul sau avea rolul unui sapun folosit pentru a spala autoritati care nu si - au facut datoria .
evident , dupa curatenia efectuata , am asistat la incercarea reporterului PRO TV de a aseza cuvintele pe post de caramizi pentru un maret monument inchinat politiei romane .
si , ca pensionarii la coada la castraveti , generalii si coloneii din politie asteptau la rind sa le puna domnul Isopescu cite o intrebare istorica .
n - apucau ei sa zica bla - bla - ul de rigoare , ca Emanuel Isopescu le lua sapunul si zicea , lasa , dom' le , avem alte chestiuni mai importante .
si , astfel , reconditionatul reporter al TVR ne - a prezentat la PRO TV un fel de spectacol jurnalistic de doi lei .
a adus ofiteri , a adus sociologi , ba ni l - a adus si pe Fanfan " Rechinul Puscariilor " sa ne declame o poezie de ocna ( taiata si ea pe la jumatate ) .
de fapt , Emanuel Isopescu a jucat lamentabil rolul unui " spalator de cadavre " .
pe linga faptul ca a incercat sa faca un fel de " Incoruptibilii " din niste generali si colonei ( pusi intr - un context nefericit , acestia aratau chiar inodori si incolori ) n - am aflat mare lucru despre cazul Sahleanu .
abia tirziu ne - am dat seama ca reporterul PRO TV nu voia sa afle nimic , nici despre criminali , nici despre cei din spatele lor , nici despre mort , nici macar despre politisti .
el trebuia sa convinga telespectatorul ca n - a fost o crima politica .
presedintia si guvernul s - au speriat de moarte ca asasinatul de la Iasi ar putea fi trecut in contul lor , mai ales ca Ion Solcanu ( si el un fel de minte cu cioc crescut direct din crestet ) a lansat afirmatia respectiva in speranta ca ii ingroapa pe adversarii PDSR - ului .
in loc sa ne aduca informatii , dovezi , documente si puncte de vedere , Emanuel Isopescu ne - a tocat nervii cu ce i - au spus sefii lui : te duci acolo si zici ca n - a fost crima politica !
dar sa nu faci transmisia din circiuma ca te dam inapoi la TVR !
n - au apucat sa - l scoata din emisie pe " urmaritul general " ca telespectatorul a fost luat in brate de Andrei Gheorghe , promovat in scaun de moderator TV .
pentru ca la masa era si Traian Basescu ( iar unde se afla acesta , e cel putin un spectacol , daca nu si o executie ) , multi au ramas in fata micului ecran .
un duel intre primar si tarabagii e oricind o sursa de scintei , mai ales ca saptaminile trecute subiectul tinea capul de afis .
si ce - am vazut ?
Andrei Gheorghe se purta la PRO TV ca la o emisiune de radio pentru pusti si pentru prosti .
habar nu avea de subiect .
nici de argumentele celor doua parti , nici de slabiciunile lor , drept pentru care , in loc sa limpezeasca o situatie , a ingropat - o de tot .
s - a facut de bacanie si in fata telespectatorilor , i - a bagat in ceata si pe tarabagii si i - a prilejuit lui Traian Basescu remarca de baza a emisiunii .
Sprintarul comentator de radio era prea tinar pentru a fi pus Gheorghe la PRO TV .
cit despre tagma tarabagiilor , oameni si ei , daca mai defileaza mult cu Dumitru Dinca risca sa se trezeasca nu doar cu buldozerele trimise de primar , dar si cu o mina de ajutor din partea populatiei . La demolat , evident .
pe slabiciunile lui Andrei Gheorghe ( inca departe de a stapini subiecte care depasesc birfa lejera ) si pe incapacitatea chioscarilor de a se apara , Traian Basescu a mai inscris citeva puncte , importante in sondaje .
pro TV a inceput grila de toamna cu mare tam - tam , dar , dupa ce s - a dus zgomotul spectacolului americanesc , ne - a lasat sa vedem doua sfecle intr - un peisaj sarac nu doar in idei , dar si in materie de " ingrasaminte " .
postul pare mai degraba preocupat sa vada pe unde scoate camasa decit de calitatea programelor .
|
23eef49d2203cee405014720cc5ee9523c9b4f8d | 205e1e0a2e23f362b7987804ebe8e17a23ac6010 | /man/pie_opts.Rd | 65142b9f3c0236448b6f1537bde0e5c38914407a | [
"MIT"
] | permissive | dreamRs/apexcharter | 97f93ec61d2ad96f8bf2446fe50e2cb22f4824df | 11d244e9922a9abe41aee90124224d8f5cababa9 | refs/heads/master | 2023-06-22T11:11:57.709837 | 2023-06-14T12:05:06 | 2023-06-14T12:05:06 | 142,926,526 | 135 | 15 | NOASSERTION | 2023-03-22T15:30:53 | 2018-07-30T20:47:09 | R | UTF-8 | R | false | true | 1,433 | rd | pie_opts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apex-options.R
\name{pie_opts}
\alias{pie_opts}
\title{Pie options}
\usage{
pie_opts(
size = NULL,
donut = NULL,
customScale = NULL,
offsetX = NULL,
offsetY = NULL,
dataLabels = NULL,
...
)
}
\arguments{
\item{size}{Numeric. Custom size of the pie which will override the default size calculations.}
\item{donut}{List with two fields \code{size} (Donut / ring size in percentage relative to the total pie area.)
and \code{background} (The background color of the pie).}
\item{customScale}{Numeric. Transform the scale of whole pie/donut overriding the default calculations.}
\item{offsetX}{Numeric. Sets the left offset of the whole pie area.}
\item{offsetY}{Numeric. Sets the top offset of the whole pie area.}
\item{dataLabels}{List with field \code{offset} (Numeric, Offset by which labels will move outside / inside of the donut area)}
\item{...}{Additional parameters.}
}
\value{
A \code{list} of options that can be used in \code{\link[=ax_plotOptions]{ax_plotOptions()}}.
}
\description{
Use these options in \code{\link[=ax_plotOptions]{ax_plotOptions()}}.
}
\note{
See \url{https://apexcharts.com/docs/options/plotoptions/pie/}.
}
\examples{
data("mpg", package = "ggplot2")
apex(mpg, aes(cyl), type = "donut") \%>\%
ax_plotOptions(
pie = pie_opts(
donut = list(size = "90\%", background = "#BABABA")
)
)
}
|
31d159c2322ac5a6a62021996b517ae215434c6d | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/yakmoR/R/orthoKMeansPredict.R | 4fa9ae67c2b5534e3636a6672722a98902b4351e | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 912 | r | orthoKMeansPredict.R |
#' orthogonal kmeans prediction function
#'
#' @param x data to assign clusters
#' @param obj an object returned by orthoKMeansTrain
#' @param verbose show verbose messages?
#' @return a matrix with as many colums as rounds trained
#' @examples
#' obj = yakmoR::orthoKMeansTrain (x = as.matrix(iris[seq(1,150,2),1:4]),
#' k = 3, rounds = 3)
#' predictions = yakmoR::orthoKMeansPredict (x = as.matrix(iris[seq(2, 150, 2),1:4]),
#' obj = obj)
#' @export
orthoKMeansPredict <- function (x, obj = NULL, verbose = FALSE) {
# checkmate checks
checkmate::assertClass (obj, "yakmoR")
checkmate::assertMatrix(x, min.rows = 1)
checkmate::assertFlag (verbose)
# if multiple orthogonal rounds have been trained,
# we return a matrix of all predictions
# # call
r = .Call('yakmoR_orthoKMeansPredictCpp', PACKAGE = 'yakmoR',
x = x,
obj$centers,
obj$nf,
obj$k,
verbose = verbose)
}
|
7e307f197019f7e2ab9cfcfc68311e843d2c05e7 | 83b6cfc21dfb6bab9e2b184b1d9ced973a9aa719 | /R/get_features.R | 380f80d0847e8d8d1df5843dbef03d5d3cb29240 | [] | no_license | sottorivalab/revolver | ee38c07df09da77c5c3359e6f6bd4e583f7bf5f7 | 3e4de13316b39b65a297cc09415106bd8c12b664 | refs/heads/master | 2022-03-28T22:59:26.667493 | 2020-01-12T10:03:17 | 2020-01-12T10:03:17 | 258,231,923 | 1 | 0 | null | 2020-04-23T14:27:40 | 2020-04-23T14:27:39 | null | UTF-8 | R | false | false | 4,392 | r | get_features.R | #' Return summary features for the cohort.
#'
#' @description
#'
#' Computes a set of summary features for the cohort, in the
#' form of a matrix. The fits must be available inside the cohort
#' to retrieve some of these features, which are:
#'
#' \itemize{
#' \item the matrix of driver events with mean CCF or binary value;
#' \item the matrix of the drivers occurrence acrross all the cohort;
#' \item the matrix of the clonal drivers occurrence acrross all the cohort;
#' \item the matrix of the subclonal drivers occurrence acrross all the cohort;
#' \item the matrix of the occurrence of all evolutionary trajectories across patients
#' }
#'
#' The function returns a named list, so that names can be used
#' to access the matrices.
#'
#' @param x A \code{REVOLVER} cohort.
#' @param patients A vector of patient ids for which the features are extracted.
#'
#' @return A list of matrices.
#'
#' @family Getters
#'
#' @export
#'
#' @examples
#' # Data released in the 'evoverse.datasets'
#' data('TRACERx_NEJM_2017_REVOLVER', package = 'evoverse.datasets')
#'
#' features = get_features(TRACERx_NEJM_2017_REVOLVER)
#'
#' print(names(features))
#'
#' print(features)
get_features = function(x, patients = x$patients)
{
Np = length(patients)
Nd = x$n$drivers
# =-=-=-=-=-=-=-=-
# Get all data that we need for driver calls across patients
# =-=-=-=-=-=-=-=-
All_drivers = lapply(patients, function(p) {
samples = Samples(x, p)
drivers = Drivers(x, p)
drivers$mean_CCF = apply(drivers[, samples], 1, mean)
drivers
})
All_drivers = Reduce(bind_rows, All_drivers) %>%
select(variantID, patientID, is.clonal, mean_CCF) %>%
mutate(value = 1)
# =-=-=-=-=-=-=-=-
# Matrix of mean CCF
# =-=-=-=-=-=-=-=-
Matrix_mean_CCF = All_drivers %>%
select(variantID, patientID, mean_CCF) %>%
spread(variantID, mean_CCF) %>%
replace(is.na(.), 0)
# =-=-=-=-=-=-=-=-
# 0/1 drivers matrices, all mutations and then split by clonality status
# =-=-=-=-=-=-=-=-
# All together is trivial
Matrix_drivers = All_drivers %>%
select(variantID, patientID, value) %>%
spread(variantID, value) %>%
replace(is.na(.), 0)
# Only clonal ones
Matrix_clonal_drivers = All_drivers %>%
filter(is.clonal) %>%
select(variantID, patientID, value) %>%
spread(variantID, value) %>%
replace(is.na(.), 0)
# Only subclonal
Matrix_subclonal_drivers = All_drivers %>%
filter(!is.clonal) %>%
select(variantID, patientID, value) %>%
spread(variantID, value) %>%
replace(is.na(.), 0)
# Now we want to make them all the same dimension
# to standardize this output... we create a template matrix and use it
# to complement missing columns in each of the clonal/ subclonal ones
complement = function(M, N)
{
# missing patients and driver genes
miss_Pat = setdiff(M$patientID, N$patientID)
miss_drv = setdiff(colnames(M), colnames(N))
# Add template 0-ed matrices with the right rows/ columns
if(length(miss_Pat) > 0)
{
empty = M %>% filter(patientID %in% !!miss_Pat)
empty[, 2:ncol(empty)] = 0
N = bind_rows(N, empty)
}
if(length(miss_drv) > 0)
N = bind_cols(N,
M %>%
select(!!miss_drv) %>%
replace(TRUE, 0)
)
N[, colnames(M)]
}
Matrix_clonal_drivers = complement(Matrix_drivers, Matrix_clonal_drivers) %>%
replace(is.na(.), 0)
Matrix_subclonal_drivers = complement(Matrix_drivers, Matrix_subclonal_drivers) %>%
replace(is.na(.), 0)
# =-=-=-=-=-=-=-=-
# Get all data that we need for trajectories
# =-=-=-=-=-=-=-=-
All_trajectories = lapply(patients, function(p) {
ITransfer(x, p, rank = 1, type = 'drivers', data = 'fits') %>%
mutate(patientID = p)
})
All_trajectories = Reduce(bind_rows, All_trajectories) %>%
mutate(
trajectory = paste0(from, ' --> ', to),
value = 1
) %>%
select(trajectory, patientID, value)
Matrix_trajectories = All_trajectories %>%
spread(trajectory, value) %>%
replace(is.na(.), 0)
return(
list(
Matrix_mean_CCF = Matrix_mean_CCF,
Matrix_drivers = Matrix_drivers,
Matrix_clonal_drivers = Matrix_clonal_drivers,
Matrix_subclonal_drivers = Matrix_subclonal_drivers,
Matrix_trajectories = Matrix_trajectories
)
)
}
|
140e9af68c8af2435555533117060eec3605db90 | 54d860f5c2f1c799dcf3c75428dc981bfa4d21e8 | /Unit 12/NYT NB Classifier/NTY_NB_Classifier/ui.R | bc2292f3e6b60efc6ffd5c2f3befb0b711a5f58f | [] | no_license | BivinSadler/MSDS_6306_Doing-Data-Science | baac443976978e5da1573063f6acd2e2cd299572 | 896d2b1517ac93e0ae64f537c4b74c8348606f52 | refs/heads/Master | 2023-09-01T03:11:21.754687 | 2023-08-17T13:03:30 | 2023-08-17T13:03:30 | 202,908,565 | 37 | 124 | null | 2021-06-08T19:12:31 | 2019-08-17T16:55:01 | HTML | UTF-8 | R | false | false | 877 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("NYT Article Classifier!"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
dateInput("bdate", label = h3("Begin Date"), value = "2014/01/01"),
dateInput("edate", label = h3("End Date"), value = "2015/01/01")
),
# Show a plot of the generated distribution
mainPanel(
column(12,
verbatimTextOutput("bdateText"),
verbatimTextOutput("edateText"),
verbatimTextOutput("ConfusionMatrix")
)
)
)
))
|
38e9654720d48e4a3b3066cd62cad674bf343e3a | e707b78faa593cf76b8cc5f13f7bbbe56fcd5862 | /man/ggs_caterpillar.Rd | f88d12cd3896a03380062bdebb3a0eb1adf9761b | [] | no_license | jrnold/ggmcmc | 39986e8facda5e1e397bbf2c93aec5d427d80b4f | ae6a46ea65c7fdb2f81436d6ea14064666ab7ebe | refs/heads/master | 2021-01-17T23:43:40.592484 | 2012-10-18T04:26:45 | 2012-10-18T04:26:45 | 6,309,864 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,173 | rd | ggs_caterpillar.Rd | \name{ggs_caterpillar}
\alias{ggs_caterpillar}
\title{Caterpillar plot with thick and thin CI}
\usage{
ggs_caterpillar(D, parameter.family,
thick.ci = c(0.05, 0.95), thin.ci = c(0.025, 0.975),
line = NA, horizontal = TRUE)
}
\arguments{
\item{D}{data frame whith the simulations}
\item{parameter.family}{Name of the family of parameters
to plot. A family of parameters is considered to be any
group of parameters with the same name but different
numerical value between square brackets (as beta[1],
beta[2], etc). Not implemented.}
\item{thick.ci}{vector of length 2 with the quantiles of
the thick band for the credible interval}
\item{thin.ci}{vector of length 2 with the quantiles of
the thin band for the credible interval}
\item{line}{plot a line indicating a concrete position,
usually used to mark where zero is. By default do not
plot any line.}
\item{horizontal}{logical value for the plot being
horizontal, which is the default}
}
\value{
a ggplot object
}
\description{
Caterpillar plots are plotted combining all chains for
each parameter.
}
\examples{
data(samples)
ggs_caterpillar(ggs(S, parallel=FALSE))
}
|
cd1ff3f94a545c82f32510b6b401c3256f088926 | e73b3595adef874f7473bffdfbd16c3cb04a211b | /man/new_mirvie_learning_curve_cv.Rd | 0e91ed411ef1b0743468056a1d4a8bd98aaeb90a | [
"MIT"
] | permissive | mirvie/mirmodels | bf0cf66948e1213e7082c2f37bbb1ef72848a180 | afe784a0b3257ed089ce7ffa7b3c367874e3fc68 | refs/heads/main | 2023-04-13T20:21:38.580623 | 2022-01-11T22:15:20 | 2022-01-11T22:15:20 | 420,176,840 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 846 | rd | new_mirvie_learning_curve_cv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learning-curve.R
\name{new_mirvie_learning_curve_cv}
\alias{new_mirvie_learning_curve_cv}
\title{Construct a \code{mirvie_learning_curve} object.}
\usage{
new_mirvie_learning_curve_cv(tib)
}
\arguments{
\item{tib}{A tibble with columns \code{training_samples} (integer, the number of
training samples on that iteration) and \code{score} (double, a model metric
score).}
}
\value{
A \code{mirvie_learning_curve_cv} object.
}
\description{
A \code{mirvie_learning_curve_cv} object is what is output by \code{\link[=learn_curve_cv]{learn_curve_cv()}}.
}
\details{
This just tacks \code{"mirvie_learning_curve_cv"} onto the front of the \code{class}
attribute of an appropriate object. This should only be used inside of
\code{\link[=learn_curve_cv]{learn_curve_cv()}}.
}
|
0efacdfcfc9e7de8c9b86f43e55087ff028b6764 | 57ac42a473e68a13e7461c009ba9cb7f499d4726 | /cachematrix.R | 557346273c3d31749b758c49cd6dec3e2a963c17 | [] | no_license | rbgm/ProgrammingAssignment2 | 74973e26036092a2d32b6070bbc8222e41569033 | 9bcdeec68b13961d7fb3921876f7a4225cffa87e | refs/heads/master | 2021-01-18T05:29:23.534550 | 2014-10-25T18:34:13 | 2014-10-25T18:34:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,567 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## The functions makeCacheMatrix and cacheSolve are intended to show
## how to make use of environment management in R through the implementation
## of a cache functionality to compute the inverse of a matrix
## Write a short comment describing this function
## This function creates a special "matrix" object (as a list) that can
## cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve
## the inverse from the cache.
## For this assignment, it's assumed that the matrix supplied is always
## invertible
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
## Example calls
#a <- matrix(1:4, nrow = 2, ncol = 2)
#b <- makeCacheMatrix(a)
#cacheSolve(b)
## Calling it again it retrieves cached data
#cacheSolve(b)
|
1ad127bd0b6272c6984e46190f850f0aa204eee7 | 52a1a626f0b4e16c061b7e3f2b960870fc8810cb | /t12_data_integration/data_integration_visualisations.R | 16e024df79e8559aed77489e77fdbdc392cbbcb8 | [] | no_license | pboesu/moult_phenology | a42acddaa7793e26a16130fbf4cea1ee5118abd8 | e9b637f73cb29afdad12bdcfbe6d5ad7922b765c | refs/heads/main | 2023-04-16T08:38:22.674392 | 2023-02-15T17:17:21 | 2023-02-15T17:17:21 | 585,987,274 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,974 | r | data_integration_visualisations.R | library(dplyr)
library(moultmcmc)
library(ggplot2)
backfits <- lapply(list.files('simulation_outputs/euring', pattern = 'integrated', full.names = T), readRDS)
names(backfits) <- stringr::str_extract(list.files('simulation_outputs/euring', pattern = 'integrated', full.names = T), '[0-9]+')
reference_level = tibble(parameter = c("mean_(Intercept)", "duration_(Intercept)", "sd_(Intercept)"),
sim_value = c(167, 75, 29))
pretty_names = tibble(parameter = c("mean_(Intercept)", "duration_(Intercept)",
"sd_(Intercept)"),
pretty_par = c("Start date", "Duration", "Start date std. deviation"))
# bind_rows(backfits) %>% group_by(model, parameter) %>% summarise(estimate = mean(estimate), lci = mean(lci), uci = mean(uci)) %>% filter(parameter %in% c("mean_(Intercept)", "duration_(Intercept)", "sd_(Intercept)")) %>%
# ungroup() %>%
# left_join(pretty_names) %>%
# ggplot(aes(x = model, y = estimate, ymin = lci, ymax = uci, col = parameter)) + geom_pointrange() + facet_wrap(~pretty_par, scales = 'free_y') +
# xlab("Proportion of birds with feather scores in active moult category") +
# ylab("Posterior mean +/- 95% CI") +
# theme_classic(base_size = 20, base_family = "Formata-CondensedLight") + theme(legend.position = "none") +
# scale_colour_manual(values = c("#D54B0B","#A2559D", "#A6053F"))
# ggsave('figures/euring_poster_integration.png', dpi = 600, width = 12, height = 7)
#+ geom_hline(aes(yintercept = sim_value), data = reference_level, lty = 2)
#+
bind_rows(backfits[1:18]) %>% group_by(model, parameter) %>% summarise(estimate = mean(estimate), lci = mean(lci), uci = mean(uci)) %>%
ungroup() %>%
left_join(pretty_names) %>%
ggplot(aes(x = model, y = estimate, ymin = lci, ymax = uci, col = parameter)) + geom_pointrange() + facet_wrap(~parameter, scales = 'free_y') +
xlab("Proportion of birds with feather scores in active moult category") +
ylab("Posterior mean +/- 95% CI") +
theme(legend.position = "none") + geom_hline(aes(yintercept = sim_value), data = reference_level, lty = 2)
bind_rows(backfits) %>%
left_join(pretty_names) %>%
ggplot(aes(x = model, y = estimate, ymin = lci, ymax = uci, col = parameter)) + geom_pointrange() + facet_wrap(~parameter, scales = 'free_y') +
xlab("Proportion of birds with feather scores in active moult category") +
ylab("Posterior mean +/- 95% CI") +
theme(legend.position = "none") + geom_hline(aes(yintercept = sim_value), data = reference_level, lty = 2)
backfits_df <- bind_rows(backfits, .id = 'set') %>%
tidyr::separate(col = model, into = c("model_type", "rej_rate"), sep = '_', remove = FALSE) %>%
mutate(rej_rate = ifelse(is.na(rej_rate), ifelse(model_type == 'uz1', 100, 0), as.numeric(rej_rate))) %>%
mutate(prop_scored = (100-rej_rate)/100)
backfits_df %>%
left_join(pretty_names) %>%
filter(parameter %in% c("mean_(Intercept)", "duration_(Intercept)", "sd_(Intercept)")) %>%
ggplot(aes(x = prop_scored, y = estimate, ymin = lci, ymax = uci, col = model_type)) + geom_pointrange(position = position_dodge(0.15)) + facet_wrap(~parameter, scales = 'free_y') +
xlab("Proportion of moult records with feather scores") +
ylab("Posterior mean +/- 95% CI") +
theme(legend.position = "bottom") + geom_hline(aes(yintercept = sim_value), data = filter(reference_level, parameter %in% c("mean_(Intercept)", "duration_(Intercept)", "sd_(Intercept)")), lty = 2) + theme_classic()
ggsave('figures/euring_intercepts_results_raw.png')
backfits_df %>%
left_join(pretty_names) %>%
left_join(reference_level) %>%
filter(parameter %in% c("mean_(Intercept)", "duration_(Intercept)", "sd_(Intercept)")) %>%
group_by(pretty_par,model_type,prop_scored) %>%
summarize(mean_estimate = mean(estimate), lci = quantile(estimate, prob=0.025), uci = quantile(estimate, prob=0.975), sim_value = unique(sim_value)) %>%
ggplot(aes(x = prop_scored, y = mean_estimate, ymin = lci, ymax = uci, col = model_type)) + geom_pointrange(position = position_dodge(0.05)) + facet_wrap(~pretty_par, scales = 'free_y') +
xlab("Proportion of moult records with feather scores") +
ylab("Posterior mean +/- 95% CI") + theme_classic() +
theme(legend.position = "bottom") + geom_hline(aes(yintercept = sim_value), lty = 2)
bias_plot <- backfits_df %>%
left_join(pretty_names) %>%
left_join(reference_level) %>%
filter(parameter %in% c("mean_(Intercept)", "duration_(Intercept)", "sd_(Intercept)")) %>%
mutate(sim_value_adj = case_when(parameter == 'mean_(Intercept)' ~ 75,
TRUE ~ sim_value)) %>%
group_by(pretty_par,model_type,prop_scored) %>%
summarize(mean_bias= mean(estimate-sim_value)/sim_value_adj, lci = quantile((estimate-sim_value)/sim_value_adj, prob=0.025), uci = quantile((estimate-sim_value)/sim_value_adj, prob=0.975)) %>%
ggplot(aes(x = prop_scored, y = mean_bias, ymin = lci, ymax = uci, col = model_type)) +
geom_hline(aes(yintercept = 0), lty = 2, col = 'grey') +
geom_pointrange(position = position_dodge(0.075)) +
facet_wrap(~pretty_par) +
xlab("Proportion of moult records with continuous scores") +
ylab("Relative bias") + theme_classic() +
scale_color_manual(name = 'Model', labels = c('T1','T12','T2','T2S'),
values = unname(palette.colors(n=4))) +
theme(legend.position = "bottom")
bias_plot
cv_plot <- backfits_df %>%
left_join(pretty_names) %>%
left_join(reference_level) %>%
filter(parameter %in% c("mean_(Intercept)", "duration_(Intercept)", "sd_(Intercept)")) %>%
mutate(sim_value_adj = case_when(parameter == 'mean_(Intercept)' ~ 75,
TRUE ~ sim_value)) %>%
group_by(pretty_par,model_type,prop_scored) %>%
summarize(mean_cv= mean(sd/sim_value_adj), lci = quantile(sd/sim_value_adj, prob=0.025), uci = quantile(sd/sim_value_adj, prob=0.975)) %>%
ggplot(aes(x = prop_scored, y = mean_cv, ymin = lci, ymax = uci, col = model_type)) + geom_pointrange(position = position_dodge(0.075)) + facet_wrap(~pretty_par) +
xlab("Proportion of moult records with continuous scores") +
ylab("CV") +
theme_classic() +
scale_color_manual(name = 'Model', labels = c('T1','T12','T2','T2S'),
values = unname(palette.colors(n=4))) +
theme(legend.position = "bottom")
cv_plot
cowplot::save_plot('figures/t12_data_integration_bias_precision.png',
cowplot::plot_grid(bias_plot + theme(legend.position = 'none'),
cv_plot+ theme(legend.position = 'none'),
cowplot::get_legend(bias_plot),
nrow = 3,
rel_heights = c(1,1,0.2),
labels = c('A','B',''),
axis = 'l',
align = 'v'),
base_height = 5, base_width = 5
)
|
69a3542a56bd346d37ba9f124e29609e003c36f1 | 2b7bb0a817d293a007c1597b57ad9a083c4c614a | /R/TradeVolumeVol.R | 7dd4380aa18b818e22b2b312c34cba098fca6eae | [] | no_license | helenristov/aCompiler | 777585a77ada30fbbb750339fd28dfe439d0cf1e | cc0a0146c7dd20c17829190c9eac3e65ad71d940 | refs/heads/master | 2021-01-23T07:33:51.970028 | 2018-11-13T03:22:08 | 2018-11-13T03:22:08 | 102,508,631 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,696 | r | TradeVolumeVol.R | #'
#'
#' Adjust Volatility based on Liquidity (Daily Traded Volume)
#'
#' Calculates a volatility adjustment factor controlling for potentially
#' limited liquidity.
#'
#' For each requested combination type, compares the recent average daily
#' traded volume to a minimally-accepted daily volume and returns a
#' multiplicative adjustment in (0, 1]: 1 when the minimum volume is met,
#' proportionally smaller otherwise. For synthetic instruments whose volume
#' is not observed directly (packs, bundles, etc.) the adjustment is derived
#' by smoothing the adjustments of the underlying outrights or spreads.
#'
#' NOTE(review): relies on helpers defined elsewhere in the package /
#' attached packages: \code{GetWeekDays}, \code{Pull.Algo.Data},
#' \code{SMA} (presumably TTR) and \code{na.trim} (presumably zoo) --
#' confirm they are in scope before calling.
#'
#' @param OR_Contracts Character vector of outright contracts to liquidity
#'   test. All other combination types are derived from this vector.
#' @param StartTime Beginning date of the liquidity analysis.
#' @param EndTime Ending date of the liquidity analysis.
#' @param Types Character vector of combination types to examine for
#'   liquidity (e.g. 'OR', 'CS1'..'CS24', 'FL1'..'FL12', 'PK', 'PS', 'PB',
#'   'FB2'..'FB5', 'DF', 'CN', 'DC'). Anything else raises an error.
#' @param MinTrdVolume Numeric vector (parallel to \code{Types}) of minimum
#'   daily volumes needed to avoid a liquidity risk adjustment. For derived
#'   types, values < 1 act as a scaling multiplier instead (<= 0 means no
#'   scaling).
#' @param ID Column identifier, defaults to 'X.RIC'.
#'   NOTE(review): only referenced in commented-out Pull.TRS.Data code, so
#'   currently unused by the active code path.
#' @param DataSource Market data source, defaults to 'TRS'.
#'   NOTE(review): also only referenced in commented-out code.
#'
#' @return A named list (one element per entry of \code{Types}) of one-column
#'   matrices of adjustment factors, with contract names as row names.
#'
#' @author Nicholas Dregne
#'
#' @export
TradeVolumeVol <- function (OR_Contracts, StartTime, EndTime, Types, MinTrdVolume, ID = 'X.RIC', DataSource = 'TRS'){

  # directory <- "/data/tick/"
  #
  # KnownTypes <- c('OR','PK','PS','FB2','FB3','FB4','FB5','DF'
  #                 'CS1' ,'CS2' ,'CS3' ,'CS4' ,'CS5' ,'CS6' ,'CS7' ,'CS8' ,'CS9' ,'CS10','CS11','CS12',
  #                 'CS13','CS14','CS15','CS16','CS17','CS18','CS19','CS20','CS21','CS22','CS23','CS24',
  #                 'FL1' ,'FL2' ,'FL3' ,'FL4' ,'FL5' ,'FL6' ,'FL7' ,'FL8' ,'FL9' ,'FL10','FL11','FL12')

  # Contract root symbol: everything except the 2-character expiry suffix
  Root <- substr(OR_Contracts[1], 1, nchar(OR_Contracts[1]) - 2)
  RL <- nchar(Root)
  # Number of weekdays in the window; drop the first day when the window
  # starts in the afternoon (partial trading day)
  NumOfDays <- length(GetWeekDays(StartTime, EndTime)) - ifelse(as.POSIXlt(StartTime)$hour > 12, 1, 0)

  VolAdj <- list()
  for(Type in Types){
    StringNum <- which(Type == Types)   # position of Type in Types / MinTrdVolume
    TL <- nchar(Type)

    if('OR' %in% Type){
      # Outrights: measure traded volume directly per contract
      Contracts <- OR_Contracts
      names(Contracts) <- OR_Contracts

      ContractData <- lapply(Contracts, function(x){ Pull.Algo.Data('Volume', x, StartTime, EndTime) })
      # if(DataSource == 'MDR'){
      #   ContractData <- Pull.MDR.Data(Contracts, StartTime, EndTime, TOB = TRUE)
      # }else{
      #   ContractData <- Pull.TRS.Data(Contracts, StartTime, EndTime, incr = 0, ID = ID)
      # }

      # Average daily volume, then cap the ratio to the minimum at 1
      Avg.Daily.Vols <- unlist(lapply(Contracts, function(x) {sum(ContractData[[x]][,paste0(x,".Volume.1sec")], na.rm = TRUE)/NumOfDays}))
      VolAdj[[Type]] <- as.matrix(sapply(Avg.Daily.Vols, function(x) {min(x, MinTrdVolume[StringNum])/MinTrdVolume[StringNum]}))
      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- 'OR_Vols'

    }else if('CS1' %in% Type){
      # 1-period calendar spreads: front contract + next expiry suffix
      n <- length(OR_Contracts) - 1
      Contracts <- paste0(OR_Contracts[-(n+1)], ".", substr(OR_Contracts[-1], RL + 1, RL + 2))
      names(Contracts) <- Contracts

      ContractData <- lapply(Contracts, function(x){ Pull.Algo.Data('Volume', x, StartTime, EndTime) })
      # if(DataSource == 'MDR'){
      #   ContractData <- Pull.MDR.Data(Contracts, StartTime, EndTime, TOB = TRUE)
      # }else{
      #   ContractData <- Pull.TRS.Data(Contracts, StartTime, EndTime, incr = 0, ID = ID)
      # }

      Avg.Daily.Vols <- unlist(lapply(Contracts, function(x) {sum(ContractData[[x]][,paste0(x,".Volume.1sec")], na.rm = TRUE)/NumOfDays}))
      VolAdj[[Type]] <- as.matrix(sapply(Avg.Daily.Vols, function(x) {min(x, MinTrdVolume[StringNum])/MinTrdVolume[StringNum]}))
      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- 'CS1_Vols'

    }else if('CS' %in% substr(Type, 1, 2) && length(which(c(2:24) %in% substr(Type, 3, TL))) > 0){
      # m-period calendar spreads (CS2..CS24)
      m <- as.numeric(substr(Type, 3, TL))
      n <- length(OR_Contracts) - m
      Contracts <- paste0(OR_Contracts[-c((n+1):(n+m))], ".", substr(OR_Contracts[-c(1:m)], RL + 1, RL + 2))
      names(Contracts) <- Contracts

      if(MinTrdVolume[StringNum] >= 1){
        # Measure traded volume directly
        ContractData <- lapply(Contracts, function(x){ Pull.Algo.Data('Volume', x, StartTime, EndTime) })
        # if(DataSource == 'MDR'){
        #   ContractData <- Pull.MDR.Data(Contracts, StartTime, EndTime, TOB = TRUE)
        # }else{
        #   ContractData <- Pull.TRS.Data(Contracts, StartTime, EndTime, incr = 0, ID = ID)
        # }

        Avg.Daily.Vols <- unlist(lapply(Contracts, function(x) {sum(ContractData[[x]][,paste0(x,".Volume.1sec")], na.rm = TRUE)/NumOfDays}))
        VolAdj[[Type]] <- as.matrix(sapply(Avg.Daily.Vols, function(x) {min(x, MinTrdVolume[StringNum])/MinTrdVolume[StringNum]}))
      }else{
        # Derive from the CS1 adjustments via an m-period moving average.
        # NOTE(review): requires 'CS1' to appear earlier in Types.
        VolAdj[[Type]] <- na.trim(as.matrix(SMA(VolAdj[['CS1']], m))) * ifelse(MinTrdVolume[StringNum] <= 0, 1, MinTrdVolume[StringNum])
      }

      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- paste0(Type, '_Vols')

    }else if('FL' %in% substr(Type, 1, 2) && length(which(c(1:12) %in% substr(Type, 3, TL))) > 0){
      # m-period butterflies (FL1..FL12): three expiries m periods apart
      m <- as.numeric(substr(Type, 3, TL))
      n <- length(OR_Contracts) - m * 2
      Contracts <- paste0(Root, "BF", substr(OR_Contracts[1:n]         , RL + 1, RL + 2), ".",
                          substr(OR_Contracts[(1+m):(n+m)]    , RL + 1, RL + 2), ".",
                          substr(OR_Contracts[(1+m*2):(n+m*2)], RL + 1, RL + 2))
      names(Contracts) <- Contracts

      if(MinTrdVolume[StringNum] >= 1){
        ContractData <- lapply(Contracts, function(x){ Pull.Algo.Data('Volume', x, StartTime, EndTime) })
        # if(DataSource == 'MDR'){
        #   ContractData <- Pull.MDR.Data(Contracts, StartTime, EndTime, TOB = TRUE)
        # }else{
        #   ContractData <- Pull.TRS.Data(Contracts, StartTime, EndTime, incr = 0, ID = ID)
        # }

        Avg.Daily.Vols <- unlist(lapply(Contracts, function(x) {sum(ContractData[[x]][,paste0(x,".Volume.1sec")], na.rm = TRUE)/NumOfDays}))
        VolAdj[[Type]] <- as.matrix(sapply(Avg.Daily.Vols, function(x) {min(x, MinTrdVolume[StringNum])/MinTrdVolume[StringNum]}))
      }else{
        # Two legs of m-period spreads -> 2m-period smoothing of CS1
        VolAdj[[Type]] <- na.trim(as.matrix(SMA(VolAdj[['CS1']], m*2))) * ifelse(MinTrdVolume[StringNum] <= 0, 1, MinTrdVolume[StringNum])
      }

      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- paste0(Type, '_Vols')

    }else if('PK' %in% Type){
      # Packs (4 consecutive outrights): smooth the OR adjustments over 4.
      # NOTE(review): requires 'OR' to appear earlier in Types.
      n <- length(OR_Contracts) - 3
      Contracts <- paste0(Root, ":PK 01Y ", substr(OR_Contracts[1:n], RL + 1, RL + 2))

      VolAdj[[Type]] <- na.trim(as.matrix(SMA(VolAdj[['OR']], 4))) * ifelse(MinTrdVolume[StringNum] <= 0, 1, MinTrdVolume[StringNum])
      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- paste0(Type, '_Vols')

    }else if('PS' %in% Type){
      # Pack spreads: derived from 4-period calendar spread adjustments.
      # NOTE(review): requires 'CS4' to appear earlier in Types.
      n <- length(OR_Contracts) - 7
      Contracts <- paste0(Root, ":PS ", substr(OR_Contracts[1:n], RL + 1, RL + 2), "-", substr(OR_Contracts[5:(n+4)], RL + 1, RL + 2))

      VolAdj[[Type]] <- na.trim(as.matrix(SMA(VolAdj[['CS4']], 4))) * ifelse(MinTrdVolume[StringNum] <= 0, 1, MinTrdVolume[StringNum])
      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- paste0(Type, '_Vols')

    }else if('PB' %in% Type){
      # Pack butterflies: derived from CS4 adjustments smoothed over 8.
      # NOTE(review): requires 'CS4' to appear earlier in Types.
      n <- length(OR_Contracts) - 11
      Contracts <- paste0(Root, ":PB ", substr(OR_Contracts[1:n], RL + 1, RL + 2), "-", substr(OR_Contracts[5:(n+4)], RL + 1, RL + 2), "-", substr(OR_Contracts[9:(n+8)], RL + 1, RL + 2))

      VolAdj[[Type]] <- na.trim(as.matrix(SMA(VolAdj[['CS4']], 8))) * ifelse(MinTrdVolume[StringNum] <= 0, 1, MinTrdVolume[StringNum])
      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- paste0(Type, '_Vols')

    }else if('FB' %in% substr(Type, 1, TL - 1) && length(which(c(2:5) %in% substr(Type, TL, TL))) > 0){
      # m-year bundles (FB2..FB5, 4 outrights per year): smooth OR over 4m.
      # NOTE(review): requires 'OR' to appear earlier in Types.
      m <- as.numeric(substr(Type, TL, TL))
      n <- length(OR_Contracts) - m * 4 + 1
      Contracts <- paste0(Root, ":FB 0", m, "Y ", substr(OR_Contracts[1:n], RL + 1, RL + 2))

      VolAdj[[Type]] <- na.trim(as.matrix(SMA(VolAdj[['OR']], m*4))) * ifelse(MinTrdVolume[StringNum] <= 0, 1, MinTrdVolume[StringNum])
      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- paste0(Type, '_Vols')

    }else if(substr(Type, 1, 2) %in% c('DF', 'CN', 'DC')){
      # Double flies / condors / double condors: 4-leg combinations named by
      # their four expiry suffixes; derived from CS1 adjustments.
      # NOTE(review): requires 'CS1' to appear earlier in Types.
      m <- as.numeric(substr(Type, 3, TL))
      if(substr(Type, 1, 2) == 'DC'){ n <- length(OR_Contracts) - 4 * m }else{ n <- length(OR_Contracts) - 3 * m }

      if(substr(Type, 1, 2) == 'DC'){
        # Double condor legs skip the middle expiry (1,2,4,5 pattern)
        Contracts <- paste0(Root, ":", substr(Type, 1, 2), " ", substr(OR_Contracts[1:n     ], RL + 1, RL + 2),
                            substr(OR_Contracts[2:(n+1)], RL + 1, RL + 2),
                            substr(OR_Contracts[4:(n+3)], RL + 1, RL + 2),
                            substr(OR_Contracts[5:(n+4)], RL + 1, RL + 2))
      }else{
        # DF/CN legs use four consecutive expiries (1,2,3,4 pattern)
        Contracts <- paste0(Root, ":", substr(Type, 1, 2), " ", substr(OR_Contracts[1:n     ], RL + 1, RL + 2),
                            substr(OR_Contracts[2:(n+1)], RL + 1, RL + 2),
                            substr(OR_Contracts[3:(n+2)], RL + 1, RL + 2),
                            substr(OR_Contracts[4:(n+3)], RL + 1, RL + 2))
      }

      VolAdj[[Type]] <- na.trim(as.matrix(SMA(VolAdj[['CS1']], ifelse(substr(Type, 1, 2) == 'DC', 4, 3)))) * ifelse(MinTrdVolume[StringNum] <= 0, 1, MinTrdVolume[StringNum])
      rownames(VolAdj[[Type]]) <- Contracts
      colnames(VolAdj[[Type]]) <- paste0(Type, '_Vols')

    }else{
      stop(paste0(Type, " Doesn't Exist Yet!"))
    }
  }

  return(VolAdj)
}
0e7507fac1649f8592dccf3459c40fbd422fc5f7 | 109734b597c2d760725a1a050174a5d11b3c1a9b | /man/plot.linnet.Rd | c6a0d550f5dc962512702f2660e7f4fee3c36868 | [] | no_license | rubak/spatstat | c293e16b17cfeba3e1a24cd971b313c47ad89906 | 93e54a8fd8276c9a17123466638c271a8690d12c | refs/heads/master | 2020-12-07T00:54:32.178710 | 2020-11-06T22:51:20 | 2020-11-06T22:51:20 | 44,497,738 | 2 | 0 | null | 2020-11-06T22:51:21 | 2015-10-18T21:40:26 | R | UTF-8 | R | false | false | 1,236 | rd | plot.linnet.Rd | \name{plot.linnet}
\alias{plot.linnet}
\title{
Plot a linear network
}
\description{
Plots a linear network
}
\usage{
\method{plot}{linnet}(x, ..., main=NULL, add=FALSE,
vertices=FALSE, window=FALSE,
do.plot=TRUE)
}
\arguments{
\item{x}{
Linear network (object of class \code{"linnet"}).
}
\item{\dots}{
Arguments passed to \code{\link{plot.psp}}
controlling the plot.
}
\item{main}{
Main title for plot. Use \code{main=""} to suppress it.
}
\item{add}{
    Logical. If \code{TRUE}, superimpose the graphics
over the current plot. If \code{FALSE}, generate a new plot.
}
\item{vertices}{
Logical. Whether to plot the vertices as well.
}
\item{window}{
Logical. Whether to plot the window containing the linear network.
}
\item{do.plot}{
Logical. Whether to actually perform the plot.
}
}
\details{
This is the plot method for class \code{"linnet"}.
}
\value{
An (invisible) object of class \code{"owin"}
giving the bounding box of the network.
}
\author{
Ang Qi Wei \email{aqw07398@hotmail.com} and
\adrian
}
\seealso{
\code{\link{linnet}}
}
\examples{
plot(simplenet)
}
\keyword{spatial}
|
aa1b6a095908c7209b742bd85720d055d4471bd3 | 02fa21dea9505bbec73eafc0b219466c2f97d4a8 | /man/bootstrapml.Rd | 124f0216c1f09d8d515f66cfe1c21aa52124e5fa | [
"MIT"
] | permissive | JonasMoss/univariateML | 56bd6957bab05f71e2cb12fe8c5217b0378ad8fd | 2ae9f2e9422c0268ae9a39f9d886d046c6c33824 | refs/heads/master | 2022-01-27T04:44:12.105751 | 2022-01-25T13:29:50 | 2022-01-25T13:29:50 | 200,917,156 | 8 | 6 | NOASSERTION | 2022-01-23T17:07:15 | 2019-08-06T20:06:58 | R | UTF-8 | R | false | true | 2,934 | rd | bootstrapml.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap.R
\name{bootstrapml}
\alias{bootstrapml}
\title{Parametric Bootstrap on Distributions Fitted with Maximum Likelihood}
\usage{
bootstrapml(
object,
reps = 1000,
map = identity,
reducer = stats::quantile,
...
)
}
\arguments{
\item{object}{A \code{univariateML} object.}
\item{reps}{Positive integer. The number of bootstrap samples.}
\item{map}{A function of the parameters of the \code{univariateML} object.
Defaults to the identity.}
\item{reducer}{A reducer function. Defaults to \code{stats::quantile} with
default argument \code{probs = c(0.025, 0.975)}.}
\item{...}{Passed to \code{reducer}.}
}
\value{
The transposed map-reduced bootstrap samples.
}
\description{
The parametric bootstrap is a resampling technique using random variates
from a known parametric distribution. In this function the distribution
of the random variates is completely determined by the \code{univariateML}
object \code{object}.
}
\details{
For each bootstrap iteration a maximum likelihood estimate is calculated
using the \verb{ml***} function specified by \code{object}. The resulting
numeric vector is then passed to \code{map}. The values returned by
\code{map} is collected in an array and the \code{reducer} is called on
each row of the array.
By default the \code{map} function is the identity and the default
\code{reducer} is the quantile function taking the argument \code{probs},
which defaults to \code{c(0.025, 0.975)}. This corresponds to a 95\\%
basic percentile confidence interval and is also reported by
\code{\link[=confint]{confint()}}
\emph{Note:} The default confidence intervals are percentile intervals,
not empirical intervals. These confidence intervals will in some cases
have poor coverage as they are not studentized, see e.g. Carpenter,
J., & Bithell, J. (2000).
}
\examples{
\donttest{
set.seed(1)
object <- mlgamma(mtcars$qsec)
## Calculate c(0.025, 0.975) confidence interval for the gamma parameters.
bootstrapml(object)
# 2.5\% 97.5\%
# shape 68.624945 160.841557
# rate 3.896915 9.089194
## The mean of a gamma distribution is shape/rate. Now we calculate a
## parametric bootstrap confidence interval for the mean with confidence
## limits c(0.05, 0.95)
bootstrapml(object, map = function(x) x[1] / x[2], probs = c(0.05, 0.95))
# 5\% 95\%
# 17.33962 18.31253
## Print a histogram of the bootstrapped estimates from an exponential.
object <- mlexp(mtcars$qsec)
hist(bootstrapml(object, reducer = identity))
}
}
\references{
Efron, B., & Tibshirani, R. J. (1994). An introduction to the bootstrap.
CRC press.
Carpenter, J., & Bithell, J. (2000). Bootstrap confidence intervals:
when, which, what? A practical guide for medical statisticians.
Statistics in medicine, 19(9), 1141-1164.
}
\seealso{
\code{\link[=confint]{confint()}} for an application of \code{bootstrapml}.
}
|
a143df62d7e3aff12f2bf59336aed46e38ffb0c1 | c804ec169952554c879540d4241a4068443dd44d | /Rscripts/generate_figures.R | cf68a1265938bebcb29a350cabf7ae98f890ddc6 | [] | no_license | akleinhesselink/indirect_effects | 69b8060c0b4316259ebb1a24a41a0df9189902e9 | 2039fc96f5fc0b49d198c18c0693d006c01a721f | refs/heads/master | 2019-01-21T17:22:32.749141 | 2015-08-26T22:20:13 | 2015-08-26T22:20:13 | 35,643,323 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,256 | r | generate_figures.R | # Generate figures for main text
# R-code supporting 'Indirect effects of environmental change in resource competition models'
# Written on R version 3.2.1 (2015-06-18)
# Author: Andrew R. Kleinhesselink
# Contact: arklein@aggiemail.usu.edu
# Requires loading the essential_resource_functions.R and the
# substitutable_resource_functions.R scripts for function definitions
rm(list = ls())
source('essential_resource_functions.R')
source('substitutable_resource_functions.R')
# Basic figure settings and labels --------------------------------------------
par(mfrow = c(1, 1), oma = c(2, 2, 2, 2), mar = c(5, 8, 2, 2), las = 1, cex.axis = 0.8)
rho_UTF <- '\u03C1' # UTF code for Greek letter rho for use in plots
yl1 <- expression("Indirect effects" ~ ~~italic(frac("dN"["F indirect"]^"*", "dS"[1])))
yl2 <- expression("Indirect effects" ~ ~~italic(frac("dN"["F indirect"]^"*", "dS"[2])))
xl <- substitute( "Niche overlap" ~ "(" * italic(rho_UTF) * ")", list(rho_UTF = rho_UTF))
xldirect <- expression("Direct effects" ~ ~"(" * italic(frac(partialdiff * "N"["F"]^"*", partialdiff * "S"[1])) * ")")
xldirect2 <- expression("Focal species sensitivity" ~ "(" * italic(frac("a"[1], "m"[1] * "q"["F2"])) * ")")
title1 <- expression(italic(frac(partialdiff * "N"["F"]^"*", partialdiff * "S"[1]) ~ "="))
title2 <- expression(italic(frac("a"[1], "m"[1] * "q"["F2"])) ~ "=")
title3 <- as.expression( substitute("" * italic(rho_UTF) ~ "=", list( rho_UTF = rho_UTF)))
# ------------------------------------------------------------------------------
# Figure 3A Indirect effect strength as a function of niche overlap ------------
curve(expr = calc_essential_ifx1(x, dfxN1 = 0.5), from = 0, to = 1, n = 100, ylim = c(0, 5),
xlab = "", ylab = yl1, lty = 3, lwd = 2, cex.lab = 1)
mtext(xl, side = 1, line = 3, cex.lab = 1)
curve(expr = calc_essential_ifx1(x, dfxN1 = 2), add = TRUE, lty = 2, lwd = 2)
curve(expr = calc_essential_ifx1(x, dfxN1 = 5), add = TRUE, lty = 1)
legend(x = 0.1, y = 4.2, legend = c("0.5", "2", "5"), lty = c(3, 2, 1), lwd = c(2, 2, 1),
cex = 0.9, bty = "n", title = title1)
mtext("Figure 3A", side =3, line = 1 )
# Figure 3B Indirect effect strength as a function of direct effects ------------
curve(expr = calc_essential_ifx1(rho = 0.75, dfxN1 = x), from = 0, to = 5, n = 10,
xlab = "", ylab = yl1, lty = 3, lwd = 2, cex.lab = 1, ylim = c(0, 5))
mtext(xldirect, side = 1, line = 4, cex.lab = 1)
curve(expr = calc_essential_ifx1(rho = 0.5, dfxN1 = x), add = TRUE, lty = 2, lwd = 2)
curve(expr = calc_essential_ifx1(rho = 0.25, dfxN1 = x), add = TRUE, lty = 1)
legend(x = 0.4, y = 4.8, legend = c("0.75", "0.5", "0.25"), lty = c(3, 2, 1), lwd = c(2, 2, 1), cex = 0.9, bty = "n", title = title3)
mtext("Figure 3B", side =3, line = 1 )
# Figure 4A Indirect effects of change in non-limiting resource as a function of rho.
fSense <- c(0.5, 2, 5)
curve(expr = calc_essential_ifx2(rho = x, focalSensitivity = fSense[1]), from = 0, to = 1, n = 100,
xlab = '', ylab = yl2, ylim = c(-5, 0), lty = 3, lwd = 2, cex.lab = 1)
mtext(xl, side = 1, line = 3, cex.lab = 1)
curve(expr = calc_essential_ifx2(rho = x, focalSensitivity = fSense[2]), add = TRUE, lty = 2, lwd = 2)
curve(expr = calc_essential_ifx2(rho = x, focalSensitivity = fSense[3]), add = TRUE, lty = 1, lwd = 1)
legend(x = 0, y = -3, legend = as.character(fSense), lty = c(3, 2, 1), lwd = c(2, 2, 1), cex = 0.9,
bty = "n", title = title2)
mtext("Figure 4A", side =3, line = 1 )
# Figure 4B Indirect effects of a change in non-limiting resource as a function of
# direct effects on the focal species.
rhos <- c(0.75, 0.5, 0.25)
curve(expr = calc_essential_ifx2(rho = rhos[1], focalSensitivity = x), from = 0, to = 5, n = 10, xlab = "", ylab = yl2, lty = 3, lwd = 2, cex.lab = 1, ylim = c(-5, 0))
curve(expr = calc_essential_ifx2(rho = rhos[2], focalSensitivity = x), add = TRUE, lty = 2, lwd = 2)
curve(expr = calc_essential_ifx2(rho = rhos[3], focalSensitivity = x), add = TRUE, lty = 1)
legend(x = 0, y = -3, legend = as.character(rhos), lty = c(3, 2, 1), lwd = c(2, 2, 1), cex = 0.9, bty = "n", title = title3)
mtext(xldirect2, side = 1, line = 4, cex.lab = 1)
mtext("Figure 4B", side =3, line = 1 )
## Figure 5 Indirect effects as a function of rho in a substitutable resource model
curve(expr = calc_substitutable_ifx(dirN1 = 4, dirN2 = 0, rho = x, Beta = 1),
from = 0, to = 1, n = 100, xlab = '', ylab = yl1, ylim = c(-10, 10), lty = 1, lwd = 1, cex.lab = 1)
mtext(xl, side = 1, line = 3, cex.lab = 1)
curve(expr = calc_substitutable_ifx(dirN1 = 2, dirN2 = 1, rho = x, Beta = 1), add = TRUE, lty = 2, lwd = 1)
curve(expr = calc_substitutable_ifx(dirN1 = 1, dirN2 = 1, rho = x, Beta = 1), add = TRUE, lty = 1, lwd = 1)
curve(expr = calc_substitutable_ifx(dirN1 = 1, dirN2 = 2, rho = x, Beta = 1), add = TRUE, lty = 2, lwd = 1)
curve(expr = calc_substitutable_ifx(dirN1 = 0, dirN2 = 4, rho = x, Beta = 1), add = TRUE, lty = 1, lwd = 1)
text(0.75, 9, label = expression(+4), cex = 0.9)
text(0.75, -9, label = expression(-4), cex = 0.9)
text(0.99, 6.6, label = expression(+1), cex = 0.9)
text(0.99, 1, label = expression(0), cex = 0.9)
text(0.99, -6.6, label = expression(-1), cex = 0.9)
mtext("Figure 5", side =3, line = 1 )
|
6c89cb2ccd919fcbf0f58309bd4703e8c458d56d | 932dba523258a20ba386695ed34a6f91da4688c7 | /man/tidy_counts.Rd | dc2d0ddd21ee7f301165fe1421f44f2709ce78b1 | [] | no_license | trinker/termco | 7f4859a548deb59a6dcaee64f76401e5ff616af7 | aaa460e8a4739474f3f242c6b2a16ea99e1304f5 | refs/heads/master | 2022-01-18T23:43:46.230909 | 2022-01-05T19:06:43 | 2022-01-05T19:06:43 | 39,711,923 | 27 | 4 | null | 2018-02-02T05:57:38 | 2015-07-26T03:17:09 | R | UTF-8 | R | false | true | 1,586 | rd | tidy_counts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_counts.R
\name{tidy_counts}
\alias{tidy_counts}
\title{Convert `term_count` & `token_count` to Tidy Form}
\usage{
tidy_counts(x, ...)
}
\arguments{
\item{x}{A `term_count` object.}
\item{\ldots}{ignored.}
}
\value{
Returns a tibble with tags and counts in long form (retains all other
variables in the `term_count` object.
}
\description{
Converts a wide matrix of counts to tidy form (tags are stretched long-wise
with corresponding counts of tags).
}
\note{
\code{n.words} or \code{n.tokens} will be repeated for each row element
id (\code{element_id}) and thus are nested.
}
\examples{
## On term counts
discoure_markers <- list(
AA__response_cries = c("\\\\boh", "\\\\bah", "\\\\baha", "\\\\bouch", "yuk"),
AA__back_channels = c("uh[- ]huh", "uhuh", "yeah"),
BB__summons = "\\\\bhey",
CC__justification = "because"
)
terms1 <- with(presidential_debates_2012,
term_count(dialogue, TRUE, discoure_markers)
)
tidy_counts(terms1)
terms2 <- with(presidential_debates_2012,
term_count(dialogue, list(person, time), discoure_markers)
)
tidy_counts(terms2)
## On token count
library(dplyr)
token_list <- lexicon::nrc_emotions \%>\%
textshape::column_to_rownames() \%>\%
t() \%>\%
textshape::as_list()
token1 <- presidential_debates_2012 \%>\%
with(token_count(dialogue, TRUE, token_list))
tidy_counts(token1)
token2 <- presidential_debates_2012 \%>\%
with(token_count(dialogue, list(person, time), token_list))
tidy_counts(token2)
}
\keyword{tidy}
|
6d5ffee5bf7ee6ad0a327ebe2b4dfc55e12b8f69 | ddf3a62e275230dfc6930c6f11e8a825a6933e73 | /Ejercicio 25.R | d76704a0ba2f27e55d833b6c85f9b16c64459030 | [] | no_license | ErickRojas19/R-Ejercicio | 5ba97db9ad772c51960012c13d9e65ba85017cf9 | b3451dbe49c753389d85d3a24057c92f1533ae11 | refs/heads/master | 2020-07-27T13:22:07.364475 | 2019-10-31T19:56:55 | 2019-10-31T19:56:55 | 209,104,076 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 203 | r | Ejercicio 25.R | a =as.integer(readline(prompt="Digite 4 digitos: "))
if (a>10000) {
break
}
b = as.integer(a/1000)
c = a-(b*1000)
d=as.integer(c/100)
e =c-(d*100)
f = as.integer(e/10)
g=e-(f*10)
paste(g,f,d,b) |
6f66bbf6d9a3eb6bc820b1381ecc8881a330adaa | f7d8d87f13420a57d9191eed1bd187f2673e5147 | /man/outbreak_velocity.Rd | 0255dd71eccd13a1aae26c204df609b369c1f7a9 | [] | no_license | kathryntmorrison/outbreakvelocity | 011f530af15aebe28b341ea073a4cb73449258df | 3917615cef8d3b10c4ddfe32200eab34b9846bff | refs/heads/master | 2021-01-17T12:36:08.269821 | 2017-07-28T03:55:28 | 2017-07-28T03:55:28 | 62,245,446 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 746 | rd | outbreak_velocity.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outbreak_velocity.R
\name{outbreak_velocity}
\alias{outbreak_velocity}
\title{The front-wave velocity}
\usage{
outbreak_velocity(ds, max.order = 10, measure = "r2",
manual.order = FALSE)
}
\arguments{
\item{ds}{Dataframe providing the date of outbreak and X and Y coordinates}
\item{max.order}{Integer of highest order polynomial to attempt; defaults to 10}
\item{measure}{The measure of model fit, defaults to R-squared: 'r2', can be AIC: 'aic' or BIC: 'bic'}
\item{manual.order}{If TRUE, the max.order integer is the polynomial model used, regardless of best fit measures; defaults to FALSE}
}
\description{
Calculates the front-wave velocity of an outbreak
}
|
f6242dcaa326a68b7d97e69d7a2cbf89b1d1e48d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BalancedSampling/examples/hlpm.Rd.R | 1f5ffa21aa894b9cfd66562384e4c1192efe0832 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 556 | r | hlpm.Rd.R | library(BalancedSampling)
### Name: hlpm
### Title: Hierarchical local pivotal method
### Aliases: hlpm
### ** Examples
## Not run:
##D ############
##D ## Example with two subsamples
##D ############
##D
##D N = 100; # population size
##D X = cbind(runif(N),runif(N)); # auxiliary variables
##D n = 10; # size of initial sample
##D p = rep(n/N,N); # inclusion probabilities of initial sample
##D sizes = c(7,3); # sizes of the two subsamples
##D hlpm(p,X,sizes) # selection of samples using hierarchical local pivotal method
##D
## End(Not run)
|
ae148656637fedfbe36623d6b82af6c63a57a23f | da725622bc962b639e1eb6df535b433e4366bcc5 | /funding/ui.r | 8c3d22d2f4ce840c51b793559f5663a48bf98d21 | [] | no_license | bekahdevore/rKW | 5649a24e803b88aa51a3e64020b232a23bd459fa | 970dcf8dc93d4ec0e5e6a79552e27ddc0f850b91 | refs/heads/master | 2020-04-15T12:41:49.567456 | 2017-07-25T16:29:31 | 2017-07-25T16:29:31 | 63,880,311 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 723 | r | ui.r | library(shiny)
library(shinythemes)
library(streamgraph)
library(grid)
library(gridBase)
library(gridExtra)
shinyUI(fluidPage(
br(),
h1("KentuckianaWorks"),
theme = shinytheme("united"),
# Show a plot of the generated distribution
fluidRow(
br(),
br(),
column(12,
h3("Funding (in millions)"),
h5("2003 - 2019 (projected)"),
align = "center",
streamgraphOutput("fundingStream"),
br(),
br()),
column(12,
br(),
br(),
align = "center",
plotOutput("fundingTree2003")
#plotlyOutput("laborForcePlot")
),
column(12,
align = "center"
)
)))
|
c3ce84938c3876bed922c59815cf6317dde28a2f | 8731ee3a10374614cb3949a9c8edb8f2415788e5 | /진보보수_ver12040932.R | b955b21243334d2e83bdab42bee23d7e39fdc49a | [] | no_license | iamhch24/R-Basics | c3c3a981faede712e04794b375a100d9f4e87b66 | 0eb1113fa7cd2cc3177f439e48310954c24a86c3 | refs/heads/main | 2023-02-05T14:52:43.131008 | 2020-12-30T05:18:31 | 2020-12-30T05:18:31 | 325,461,352 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,319 | r | 진보보수_ver12040932.R | library(dplyr)
library(ggplot2)
# 1. Data preparation
data = read.csv("진보보수_기본데이터.csv")
data
table(is.na(data)) # check for NA values
# Compute the number of subscribers (people) per age group for each channel
# by multiplying the channel total by each age-group share, in new columns.
data2 = data
data2$age_13_17 = data2$total * data2$a_13_17
data2$age_18_24 = data2$total * data2$a_18_24
data2$age_25_34 = data2$total * data2$a_25_34
data2$age_35_44 = data2$total * data2$a_35_44
data2$age_45_54 = data2$total * data2$a_45_54
data2$age_55_64 = data2$total * data2$a_55_64
data2$age_65_up = data2$total * data2$a_65_
data3 = data2[,10:17] # keep only the new columns (columns 10 through 17)
data3
# Sum subscribers per age group across the conservative channels
bosu = data3 %>% filter(group == 1) # conservative channels only (group == 1)
bosu = bosu[,-1] # drop column 1 (group)
bosu
bosu_t = t(bosu) # transpose rows/columns to suit the pie chart
bosu_t # column names do not exist yet at this point
colnames(bosu_t) = c("V1","V2","V3","V4","V5","V6","V7","V8","V9","V10") # assign column names
bosu_df = data.frame(bosu_t) # convert from matrix to data frame
bosu2 = bosu_df %>%
  mutate(subscriber = V1+V2+V3+V4+V5+V6+V7+V8+V9+V10) # sum subscribers across channels per age group
# Add the columns needed for visualization and drop the unnecessary ones
bosu2$age_group = rownames(bosu_df) # age-group column used on the chart's x axis
bosu2$camp = "보수" # camp column used as the bar chart's fill (hue)
bosu3 = bosu2[,11:13] # keep only the columns needed for plotting
bosu_sum = sum(bosu3$subscriber)
bosu3$rate = round(bosu3$subscriber/bosu_sum, 3) # age-group share (group subscribers / total)
bosu3
# Same transformation for the progressive channels
jinbo = data3 %>% filter(group == 2)
jinbo = jinbo[,-1]
jinbo_t = t(jinbo)
colnames(jinbo_t) = c("V1","V2","V3","V4","V5","V6","V7","V8","V9","V10")
jinbo_df = data.frame(jinbo_t)
jinbo2 = jinbo_df %>%
  mutate(subscriber = V1+V2+V3+V4+V5+V6+V7+V8+V9+V10)
jinbo2$age_group = rownames(jinbo_df)
jinbo2$camp = "진보"
jinbo3 = jinbo2[,11:13]
jinbo_sum = sum(jinbo3$subscriber)
jinbo3$rate = round(jinbo3$subscriber/jinbo_sum, 3)
jinbo3
# 2. Visualization
# Labels and colors used for the pie charts
youtu_label = c('13~17세', '18~24세', '25~34세', '35~44세',
                '45~54세', '55~64세', '65세 이상')
bosu_col = c('#F0E4D4', '#F9D9CA','#D18063','#917B56','#E4A99B',
             '#838BB2','#CACFE3')
jinbo_col = c('#C0D8F0', '#EDB2BC', '#A2CDE8', '#6F94A6', '#96E3D6',
              '#E6939D', '#F2E0B6')
# Conservative-channel pie chart
pie(bosu3$subscriber,labels=paste(youtu_label,"\n", bosu3$rate*100,"%"),
    radius = 0.9, col = bosu_col, border = "#FFBFC1",
    main = "보수 채널의 연령대 구독자 비율")
# Progressive-channel pie chart
pie(jinbo3$subscriber,labels=paste(youtu_label,"\n", jinbo3$rate*100,"%"),
    radius = 0.9, col = jinbo_col, border = "#BFBFFF",
    main = "진보 채널의 연령대 구독자 비율")
# Bar chart comparing the two camps
youtu= bind_rows(bosu3,jinbo3) # row-bind the two data frames
youtu
ggplot(youtu, aes(x = age_group, y = rate, fill = camp)) + # fill corresponds to Python's hue
  geom_col(position = "dodge", alpha=0.8) + # dodge separates the two bars; alpha sets transparency
  ggtitle("보수, 진보 채널의 연령대별 구독자 비율 비교") +
  xlab("연령대") + ylab("구독자(%)") +
  theme(legend.title=element_blank())
# 3. Validation
# Build the two variables compared by the t-test
gum_df = read.csv("진보보수_검증데이터.csv")
gum_df
gum_df$구독자 = gum_df$구독자 * 1000 # subscriber count: convert thousands to persons
gum_df$age_55up = gum_df$구독자 * gum_df$X55세.이상.비율 # keep only the 55-and-over segment
gum_bosu = gum_df %>% filter(진영 == "보수") %>% select(age_55up)
gum_jinbo = gum_df %>% filter(진영 == "진보") %>% select(age_55up)
gum_bosu
t.test(gum_bosu, gum_jinbo, var.equal = T)
# p-value = 0.0296, below the 0.05 significance level, so the null hypothesis
# is rejected: the difference in mean subscribers aged 55+ between progressive
# and conservative channels can be considered statistically significant.
|
e62b9fea393ddc64ea04e040a989bf1dc409219d | 2af28e8d57448daa6b4207ba212963a9b2264f8f | /funcoes_v2.R | f0b8c2bad1e98c7983dc5a2cf88c65da6bb0f512 | [] | no_license | viniciuskm/Financial_Series | 1fac2c1f8d954c07076c263ad7f8ccad2ecccd7f | f1e9e8c64117d0e25041551b64ce8bfed0647584 | refs/heads/main | 2023-01-21T13:03:23.993399 | 2020-11-28T11:37:40 | 2020-11-28T11:37:40 | 315,301,653 | 0 | 1 | null | 2020-11-26T01:00:09 | 2020-11-23T12:01:54 | HTML | UTF-8 | R | false | false | 2,920 | r | funcoes_v2.R | library(quantmod)
importa_retorno <- function(nome) {
  # Download the price series for the given ticker symbol.
  raw_prices <- getSymbols(nome, auto.assign = FALSE)
  # The last column of the downloaded object holds the adjusted price.
  adjusted <- raw_prices[, ncol(raw_prices)]
  # Fill missing values by carrying the last observation forward.
  adjusted <- na.locf(adjusted)
  # One-period log return; drop the leading NA produced by diff().
  log_returns <- na.omit(diff(log(adjusted), lag = 1))
  # Return both the cleaned adjusted-price series and its log returns.
  list(adjusted, log_returns)
}
"%ni%" <- Negate("%in%")
datas_comuns <- function(serie1, serie2) {
  # Dates at which each series has an observation.
  dates1 <- index(serie1)
  dates2 <- index(serie2)
  # Keep only the observations of serie1 whose date also occurs in serie2.
  serie1[dates1 %in% dates2]
}
datas_comuns_5 <- function(serie1, serie2, serie3, serie4, serie5) {
  # Restrict serie1 to the dates shared by all five series.
  #
  # The original repeated the same filter four times with copy-pasted index
  # variables; this loop performs the identical successive intersection:
  # after each pass, `result` keeps only the observations whose date also
  # occurs in the current `other` series.
  result <- serie1
  for (other in list(serie2, serie3, serie4, serie5)) {
    result <- result[index(result) %in% index(other)]
  }
  result
}
datas_N_comuns_5 <- function(serie1, serie2, serie3, serie4, serie5) {
  # Dates of serie1 that are NOT common to all five series, i.e. dates at
  # which serie1 has an observation but at least one of serie2..serie5
  # does not.
  #
  # Bug fix: the original unioned the *logical* membership masks returned by
  # `%ni%` (vectors of TRUE/FALSE) instead of the dates those masks select,
  # so it returned a subset of c(TRUE, FALSE) rather than dates. Here each
  # mask is applied to the dates of serie1 before taking the union.
  dates1 <- index(serie1)
  missing_dates <- dates1[dates1 %ni% index(serie2)]
  for (other in list(serie3, serie4, serie5)) {
    missing_dates <- union(missing_dates, dates1[dates1 %ni% index(other)])
  }
  unique(missing_dates)
}
bc75c496bfbc15e0ac991c19109dc92adba14f6a | a4bd05fdf74fa9a6172d5902f588b643a75d33c9 | /Inference/VI/Berlin/1.preprocessing-berlin.R | d5547d288774d629aca57f8cc33c93785b280610 | [] | no_license | Fanfanyang/Projects | 53abfd6ee02105aa5cc1b9d04a21a7fcba783375 | 615c3ca5e751fa699a710a5ec91e743b090d401f | refs/heads/master | 2020-03-28T09:50:35.763635 | 2018-10-29T02:23:05 | 2018-10-29T02:23:05 | 148,063,753 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,987 | r | 1.preprocessing-berlin.R | load("~/Google Drive/Online/Lab/@multi-agent simulation/update/benchmark with berlin dataset/obs.matrix.RData")
# Per-person location time series: rows = time steps, columns = persons.
load("~/Google Drive/Online/Lab/@multi-agent simulation/update/benchmark with berlin dataset/person.state.d.RData")
n.row=nrow(person.state.d)
n.col=ncol(person.state.d)
# All distinct location codes observed anywhere in the matrix.
locations=sort(unique(as.vector(person.state.d)))
n.locations=length(locations)
# Re-code every entry as its index into `locations` (1..n.locations).
person.state.d=matrix(match(as.vector(person.state.d), locations),nrow = n.row,ncol = n.col)
# Occupancy counts: for each time step, how many persons are at each location.
# NOTE(review): person.state.d now holds indices 1..n.locations while the
# factor levels are the *original* codes in `locations`; unless those coincide
# with 1..n.locations the counts will all be zero -- confirm intent.
loc.d = t(apply(person.state.d, 1, function(x) table(factor(x, levels=locations ))))
# Split the time axis into 4 roughly equal windows (5 breakpoints).
td=seq(from=1,to=nrow(person.state.d),length.out = 5)
td=c(1,round(td[2:(length(td)-1)]),nrow(person.state.d))
# One row-normalised location-transition matrix per time window.
m.time=list()
for(i in 1:4){
  p.s.d=person.state.d[td[i]:td[i+1],]
  # Transition counts between consecutive time steps, then normalise rows.
  m=table(c(head(p.s.d,-1)), c(tail(p.s.d,-1)))
  m=sweep(m,1,rowSums(m),"/")
  m.time[[i]]=m
}
#only consider neighbors, but rate_in rate_out should have length=length(locations)
# For every window and location, keep the non-zero in/out transition rates and
# the indices of the neighbouring locations they connect to.
rate_in=list()
rate_out=list()
loc_in=list()
loc_out=list()
# NOTE(review): m.time was just built as a *list*, so dim(m.time)[3] is NULL
# and m.time[,,i] is 3-d array indexing; this loop looks like a leftover from
# a version where m.time was an array and should error as written. Confirm
# which representation is intended (m.time[[i]] would match the list above).
for(i in 1:dim(m.time)[3]){
  m=m.time[,,i]
  diag(m)=0
  rownames(m)=1:length(locations)
  colnames(m)=1:length(locations)
  rate_in[[i]]=lapply(1:ncol(m), function(n) {
    m[,n][m[,n]!=0]
  })
  loc_in[[i]]=lapply(1:ncol(m), function(n) {
    as.integer(names(rate_in[[i]][[n]]))
  })
  rate_out[[i]]=lapply(1:ncol(m), function(n) {
    m[n,][m[n,]!=0]
  })
  loc_out[[i]]=lapply(1:ncol(m), function(n) {
    as.integer(names(rate_out[[i]][[n]]))
  })
}
# Map an absolute time step i to the rate/neighbour tables of its window.
rate_in_f=function(i) rate_in[[ceiling((i)/(nrow(loc.d))*length(rate_in))]]
rate_out_f=function(i) rate_out[[ceiling((i)/(nrow(loc.d))*length(rate_out))]]
loc_in_f=function(i) loc_in[[ceiling((i)/(nrow(loc.d))*length(loc_in))]]
loc_out_f=function(i) loc_out[[ceiling((i)/(nrow(loc.d))*length(loc_out))]]
# Per-location capacity: observed maximum occupancy plus some head-room.
maxloc.d=apply(loc.d,2, max )
max.person=ifelse(maxloc.d<=10,maxloc.d+5,maxloc.d+10)
# obs.matrix is loaded from obs.matrix.RData; exp() suggests it is stored on
# the log scale -- TODO confirm.
obs.matrix=exp(obs.matrix)
# Uniform message tables: per time step and location, one vector over the
# possible occupancy counts 0..max.person (hence length max.person+1).
dataempty=lapply(1:nrow(loc.d), function(n){
  lapply(1:length(locations), function(m){
    rep(1,max.person[m]+1)
  })
})
sliceempty=lapply(1:length(locations), function(m){
  rep(0,max.person[m]+1)
})
# Point-mass distributions at the observed counts of the first / last step.
start=sliceempty
for( i in 1:length(locations)) start[[i]][loc.d[1,i]+1]=1
end=sliceempty
for( i in 1:length(locations)) end[[i]][loc.d[nrow(loc.d),i]+1]=1
# Forward (la) and backward (lb) message containers, anchored at the ends.
la=dataempty
la[[1]]=start
lb=dataempty
lb[[length(lb)]]=end
# Double a message container's storage once it is full, preserving the 't'
# (time stamps) and 'c' (direction tag) attributes that `length<-` would
# otherwise drop.
alloc <- function(x) {
  saved_t <- attr(x, "t")
  saved_c <- attr(x, "c")
  # The container is full when every slot carries a time stamp; grow by
  # doubling (new slots are filled with NULL).
  if (length(saved_t) == length(x)) {
    length(x) <- length(x) * 2
  }
  attr(x, "t") <- saved_t
  attr(x, "c") <- saved_c
  x
}
# Read the slice whose time stamp is nearest to `t`: forward containers
# (attribute c == "a") use the latest stamp <= t; backward containers
# (c == "b") use the earliest stamp >= t. Any other tag is an error
# (`out` is never assigned).
getSlice <- function(x, t ){
  stamps <- attr(x, "t")
  if (attr(x, "c") == "a") {
    slot <- which(stamps == max(stamps[stamps <= t]))
    out <- x[[slot]]
  }
  if (attr(x, "c") == "b") {
    slot <- which(stamps == min(stamps[stamps >= t]))
    out <- x[[slot]]
  }
  out
}
# Pointwise product of forward and backward messages, normalised per location:
# the marginal occupancy distribution at each time step.
lg=la
for(i in 1:length(lg)){
  lg[[i]]=lapply(1:length(locations), function(n) la[[i]][[n]]*lb[[i]][[n]]/sum(la[[i]][[n]]*lb[[i]][[n]]))
}
# Time stamps and direction tags consumed by getSlice()/alloc().
attr(la,'t') =attr(lb,'t') = attr(lg,'t') = 1:nrow(loc.d)
attr(la,'c')="a"
attr(lb,'c')="b"
attr(lg,'c')="a"
# Observe roughly 1/obs.scale of all locations.
obs.scale=10
# NOTE(review): `obs` is only assigned a few lines below (obs=loc.d); at this
# point it must already exist in the loaded workspace or this call errors --
# confirm.
grep("road",colnames(obs))
observable=sort(sample(1:length(locations),ceiling(length(locations)/obs.scale) ) ) # setdiff(dimnames(obs.prob)$location,c("h","w"))
unobservable=setdiff(1:length(locations),observable)
observable_nominal=as.character(observable)
#obs = t(apply(person.state.d[,observable], 1, function(x) table(factor(x, levels=locations) )))
#obs=sapply(1:ncol(obs),function(n) pmin(max.person[n],round(obs[,n]*obs.scale)))
obs=loc.d
# One observed count series per observable location, keyed by its index.
observation=lapply(observable, function(n) obs[,n])
names(observation)=observable_nominal
# Keep only the objects needed downstream and snapshot them for inference.
remove(list = setdiff(ls(),c('observation','obs.matrix','lg','loc.d','rate_in','obs','person.state.d',
                             'rate_out','rate_in_f','rate_out_f',
                             'loc_in','loc_out','loc_in_f','loc_out_f',
                             'la','lb','m.time','max.person','observable_nominal','unobservable','observable','alloc','getSlice','locations')))
save.image(file = "benchmark with berlin dataset/inference.RData")
83daf34c1a1ccff0b0f4b46b0a3dec6262e537c5 | 6849478a9c7de705de7dc23ded3bc8828b9ade37 | /app/server.R | 1af2ac07d9f76405bb73b51edc318bc631515f4b | [] | no_license | sunandagarg/shiny_traffic_app | 578307112568fcd1c5015203ea91054c17369448 | b52d4b653925eb9c86acdd2fa134f28520dbffd6 | refs/heads/master | 2020-04-06T06:55:50.798594 | 2014-07-08T10:50:01 | 2014-07-08T10:50:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,738 | r | server.R |
## @knitr server.R
## server.R
require(rCharts)
require(dplyr)
########Server Code
shinyServer(function(input, output) {
# Generating the dropdows -------------------------------------------------
##creating the dynamic dropdown for sites based on selected network
output$sitecontrol <- renderUI({
site_choice <- unique(results[results$net.name == input$network,"site.name"])
site_choice <- site_choice[order(site_choice)]
selectInput("sites","Sites",choices=site_choice,multiple=F)
})
##creating the function for appropriate data
dataset <- reactive({
if(input$all_sites) {
data <- subset(mltd_traffic_net,net.name == input$network)
} else {
data <- subset(mltd_results,net.name == input$network & site.name == input$sites)
}
})
data_table <- reactive({
if(input$all_sites) {
data <- subset(agg_traffic,net.name == input$network)
} else {
data <- subset(results,net.name == input$network & site.name == input$sites)
}
})
output$myChart <- renderChart2({
mytooltip = "#! function(item){return item.Traffic + ' in ' + item.week} !#"
p1 <- rPlot(Traffic ~ week|variable,
data = dataset(),type = 'bar',tooltip = mytooltip,color='variable')
p1$set(height = 450)
return(p1)
})
# Generate an HTML table view of the data
output$table <- renderTable({
data.frame(x=data_table())
})
#Downloading the data
output$downloadData <- downloadHandler(
filename = function() { paste(input$network, '.csv',sep = "") },
content = function(file) {
write.csv(data_table(), file)
}
)
})
|
40305b23a0e086ba56720c4ac7fee069960a9511 | 740ca7f39bba1f2471efdd816319198c600a9cbd | /man/call_hs_hmrc_ots_with_key_word_search.Rd | 199ac3a4607df7e5468c091789708db87b698eb3 | [] | no_license | FruzsiG/hmrcwrappER | f4e4492d2ffc189c2e16a6df70d4a52e68ce4ef4 | 6e7e9e08440680021da6acc76469349882ee2a99 | refs/heads/main | 2023-06-27T00:21:58.365157 | 2021-08-05T14:12:43 | 2021-08-05T14:12:43 | 318,568,719 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,642 | rd | call_hs_hmrc_ots_with_key_word_search.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HMRC_API.R
\name{call_hs_hmrc_ots_with_key_word_search}
\alias{call_hs_hmrc_ots_with_key_word_search}
\title{HMRC OTS query base on key word and HS}
\usage{
call_hs_hmrc_ots_with_key_word_search(
time_window = NULL,
key_word = NULL,
filter_agg = "none",
country_filter = NULL,
flow_filter = NULL,
groupby = NULL
)
}
\arguments{
\item{time_window}{like "MonthId gt 202000 and MonthId lt 202100" which is everything from 2020 or "MonthId gt 201908 and MonthId lt 202009" last 12 month up to end of August}
\item{key_word}{anything like "cheese" or "whisky" or "sprout" stick to single words}
\item{filter_agg}{the aggregation level at which you are looking for the keyword like "Cn8" or "hs6"}
\item{country_filter}{use ISO-2-alpha code e.g. "FR" check HMRC for inconsistencies such as former Yugoslavian countries.}
\item{flow_filter}{choose from "export" "import" or "EU"}
\item{groupby}{takes a vector of keywords the full range is c("country", "year", "month", "HS2", "HS4", "HS6", "CN8")}
}
\description{
Querying the HMRC OTS data using HS nomenclature. The arguments are not case-sensitive. The API itself is still in BETA, so we continue to report issues.
}
\examples{
call_hs_hmrc_ots_with_key_word_search(filter_agg = "cn8", time_window = "MonthId eq 201901", groupby = c("hs4", "country"))
call_hs_hmrc_ots_with_key_word_search(key_word = "cheese", filter_agg = "hs4", time_window = "MonthId gt 201906 and MonthId lt 202008", country_filter = "FR", flow_filter = "EXport", groupby = c("CN8", "COUNTRY"))
}
\keyword{API}
\keyword{HMRC}
|
2da1cfae6d9f40b4f456d727d904c26881a0bd91 | be99370107699d2d02a39ccc4e79523241925ebc | /Asinara_marzo_R_code.r | c0b6bed12eda7503ccacbc4e169a1a573f08423e | [] | no_license | FedericoTossani/Asinara | 186a95d695fafce0e65b6238dbee7e84088f879f | 38ea413fbdcfd795c355e7f8ad592c223fd2abd5 | refs/heads/main | 2023-06-12T10:28:01.125445 | 2021-07-05T06:56:52 | 2021-07-05T06:56:52 | 374,436,760 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,515 | r | Asinara_marzo_R_code.r | require(raster)
require(rgdal)
require(RStoolbox)
require(rasterdiv)
require(rasterVis)
require(ggplot2)
require(gridExtra)
require(viridis)
require(purrr)
# March Landsat imagery directory; spectralrao.r provides Rao's Q helpers.
setwd("/Users/federicotossani/Asinara/L_image_mar")
source("/Users/federicotossani/Asinara/spectralrao.r")
### Import
# NOTE(review): these single-layer raster() imports are immediately
# overwritten by the multi-band brick() imports below, so they appear
# redundant.
p87<-raster("Asi_p193r32_1987.grd")
p88<-raster("Asi_p193r32_1988.grd")
p90<-raster("Asi_p193r32_1990.grd")
p94<-raster("Asi_p193r32_1994.grd")
p99<-raster("Asi_p193r32_1999.grd")
p02<-raster("Asi_p193r32_2002.grd")
p09<-raster("Asi_p193r32_2009.grd")
p14<-raster("Asi_p193r32_2014.grd")
p17<-raster("Asi_p193r32_2017B.grd")
p19<-raster("Asi_p193r32_2019.grd")
p21<-raster("Asi_p193r32_2021.grd")
# Multi-band import: one brick per year (Landsat path 193, row 32).
p87<-brick("Asi_p193r32_1987.grd")
p88<-brick("Asi_p193r32_1988.grd")
p90<-brick("Asi_p193r32_1990.grd")
p94<-brick("Asi_p193r32_1994.grd")
p99<-brick("Asi_p193r32_1999.grd")
p02<-brick("Asi_p193r32_2002.grd")
p09<-brick("Asi_p193r32_2009.grd")
p14<-brick("Asi_p193r32_2014.grd")
p17<-brick("Asi_p193r32_2017B.grd")
p19<-brick("Asi_p193r32_2019.grd")
p21<-brick("Asi_p193r32_2021.grd")
### NDVI
# Near-infrared (band 4) layer for each year.
nir87<-p87$P193R32_1987_SR_B4
nir88<-p88$P193R32_1988_SR_B4
nir90<-p90$P193R32_1990_SR_B4
nir94<-p94$P193R32_1994_SR_B4
nir99<-p99$P193R32_1999_SR_B4
nir02<-p02$P193R32_2002_SR_B4
nir09<-p09$P193R32_2009_SR_B4
nir14<-p14$P193R32_2014_SR_B4
nir17<-p17$P193R32_2017B_SR_B4
nir19<-p19$P193R32_2019_SR_B4
nir21<-p21$P193R32_2021_SR_B4
# Red (band 3) layer for each year.
red87<-p87$P193R32_1987_SR_B3
red88<-p88$P193R32_1988_SR_B3
red90<-p90$P193R32_1990_SR_B3
red94<-p94$P193R32_1994_SR_B3
red99<-p99$P193R32_1999_SR_B3
red02<-p02$P193R32_2002_SR_B3
red09<-p09$P193R32_2009_SR_B3
red14<-p14$P193R32_2014_SR_B3
red17<-p17$P193R32_2017B_SR_B3
red19<-p19$P193R32_2019_SR_B3
red21<-p21$P193R32_2021_SR_B3
# NDVI = (NIR - red) / (NIR + red), computed cell-wise per year.
ndvi87 <- (nir87-red87)/(nir87+ red87)
ndvi88 <- (nir88-red88)/(nir88+ red88)
ndvi90 <- (nir90-red90)/(nir90+ red90)
ndvi94 <- (nir94-red94)/(nir94+ red94)
ndvi99 <- (nir99-red99)/(nir99+ red99)
ndvi02 <- (nir02-red02)/(nir02+ red02)
ndvi09 <- (nir09-red09)/(nir09+ red09)
ndvi14 <- (nir14-red14)/(nir14+ red14)
ndvi17 <- (nir17-red17)/(nir17+ red17)
ndvi19 <- (nir19-red19)/(nir19+ red19)
ndvi21 <- (nir21-red21)/(nir21+ red21)
### PCA
# Principal component analysis of each yearly brick.
p87pca <- rasterPCA(p87)
p88pca <- rasterPCA(p88)
p90pca <- rasterPCA(p90)
p94pca <- rasterPCA(p94)
p99pca <- rasterPCA(p99)
p02pca <- rasterPCA(p02)
p09pca <- rasterPCA(p09)
p14pca <- rasterPCA(p14)
p17pca <- rasterPCA(p17)
p19pca <- rasterPCA(p19)
p21pca <- rasterPCA(p21)
# Keep only the first principal component map per year.
p87pc1<-p87pca$map$PC1
p88pc1<-p88pca$map$PC1
p90pc1<-p90pca$map$PC1
p94pc1<-p94pca$map$PC1
p99pc1<-p99pca$map$PC1
p02pc1<-p02pca$map$PC1
p09pc1<-p09pca$map$PC1
p14pc1<-p14pca$map$PC1
p17pc1<-p17pca$map$PC1
p19pc1<-p19pca$map$PC1
p21pc1<-p21pca$map$PC1
### PDF creator
# Write one viridis-coloured NDVI map per year into NDVI.pdf (one page each).
pdf("NDVI.pdf")
ggplot()+
  geom_raster(ndvi87, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 1987")
ggplot()+
  geom_raster(ndvi88, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 1988")
ggplot()+
  geom_raster(ndvi90, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 1990")
ggplot()+
  geom_raster(ndvi94, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 1994")
ggplot()+
  geom_raster(ndvi99, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 1999")
ggplot()+
  geom_raster(ndvi02, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 2002")
ggplot()+
  geom_raster(ndvi09, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 2009")
ggplot()+
  geom_raster(ndvi14, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 2014")
ggplot()+
  geom_raster(ndvi17, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 2017")
ggplot()+
  geom_raster(ndvi19, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 2019")
ggplot()+
  geom_raster(ndvi21, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("NDVI Asinara 2021")
# Close the NDVI.pdf device.
dev.off()
# Write one viridis-coloured PC1 map per year into PCA.pdf (one page each).
pdf("PCA.pdf")
ggplot()+
  geom_raster(p87pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 1987")
ggplot()+
  geom_raster(p88pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 1988")
ggplot()+
  geom_raster(p90pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 1990")
ggplot()+
  geom_raster(p94pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 1994")
ggplot()+
  geom_raster(p99pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 1999")
ggplot()+
  geom_raster(p02pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 2002")
ggplot()+
  geom_raster(p09pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 2009")
ggplot()+
  geom_raster(p14pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 2014")
ggplot()+
  geom_raster(p17pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 2017")
ggplot()+
  geom_raster(p19pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 2019")
ggplot()+
  geom_raster(p21pc1, mapping=aes(x = x, y = y, fill = layer))+
  scale_fill_viridis()+
  ggtitle("PCA Asinara 2021")
# Close the PCA.pdf device.
dev.off()
|
99f9b30ea9694d0701038f7013e10f3c867ecb8b | 7fc48750caa47f04beb0c6a76d750d497c95aa51 | /man/getNodeData.Rd | 37aac3e876c3a2800b93bb80c83eeda5862ae6f0 | [] | no_license | anna-neufeld/splinetree | bc614ef797de03285ba9059406a441b4d66e1e39 | 352a5bc29e04ecc12f7c0499830d77004ddabd88 | refs/heads/master | 2020-03-24T11:34:04.509134 | 2019-08-26T18:38:00 | 2019-08-26T18:38:00 | 142,689,128 | 4 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,544 | rd | getNodeData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/other_useful_fns.R
\name{getNodeData}
\alias{getNodeData}
\title{Retrieve the subset of the data found at a given terminal node}
\usage{
getNodeData(tree, node, dataType = "all")
}
\arguments{
\item{tree}{a model returned from splineTree()}
\item{node}{The number of the node to retrieve data from. Must be a valid
terminal node number. Node numbers can be seen using stPrint(tree)
or treeSummary(tree).}
\item{dataType}{If "all", the data returned is from the original dataset (one row per individual observation
with original response values). If "flat", the data returned is the flattened data (one row per person/unit),
with individual spline coefficients instead of response values.}
}
\value{
A dataframe which holds all the data that falls into this node of the tree.
}
\description{
Given a terminal node number, this function returns the data belonging to
this terminal node. If the dataType argument is 'all', returns all rows of data from the
original dataset that fall in this node. Otherwise, the flattened data that belongs to
this node is returned (one row of data per ID, original responses replaced by spline coefficients).
}
\examples{
\dontrun{
split_formula <- BMI ~ HISP + WHITE + BLACK + SEX +
Num_sibs + HGC_FATHER + HGC_MOTHER
tree <- splineTree(split_formula, BMI~AGE, 'ID', nlsySample, degree=1,
df=3, intercept=TRUE, cp=0.006, minNodeSize=20)
}
node6data <- getNodeData(tree, 6, dataType = 'all')
plot(BMI~AGE, data=node6data)
}
|
5b565de20cd57da8b236e1765ff499fb6ff50207 | d24d4310a57b8c188bbc24348dc1469699a76a61 | /oilwine/modal_summary_tab.R | 307047a29565ad1b87cd557e27cc6e1be2656338 | [] | no_license | jan2nov/oxrep | 6932d31f20add8d5b1b381b6a471016f47e51448 | b541b30094d7af4318ff2cfcfdb3b03019b12e96 | refs/heads/master | 2020-03-23T19:39:04.843265 | 2018-08-05T18:32:57 | 2018-08-05T18:32:57 | 141,993,141 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,833 | r | modal_summary_tab.R | info_reference <- function(data) {
# Body of info_reference (its opening line lies just above): builds one
# citation string per publication row as "Author. Year. \"Title\"", then
# appends journal, issue, start page and "-end." only to the rows where
# each optional field is present.
data_return <- paste0(data$author,". ",data$year,". \"",data$title,"\"") %>% as.data.frame(stringsAsFactors=FALSE)
row_na <- which(!is.na(data$journal))
data_return[row_na,] <- str_c(data_return[row_na,],paste("",data[row_na,]$journal))
row_na <- which(!is.na(data$issue))
data_return[row_na,] <- str_c(data_return[row_na,],paste0(" ",data[row_na,]$issue,","))
row_na <- which(!is.na(data$start))
data_return[row_na,] <- str_c(data_return[row_na,],paste("",data[row_na,]$start))
row_na <- which(!is.na(data$end))
data_return[row_na,] <- str_c(data_return[row_na,],paste0("-",data[row_na,]$end,"."))
# Single display column consumed by renderTable.
colnames(data_return) <- "References"
data_return
}
info_renderTable <- function(data, ...) {
  # Prepare a data frame for display: map internal column names to
  # human-readable labels via the global `display_table_labels` lookup,
  # and replace NA cells with each column's configured placeholder text.
  nice_col_headings2 <-
    plyr::mapvalues(
      colnames(data),
      from = display_table_labels$data.name,
      to = display_table_labels$display.name,
      warn_missing = FALSE
    ) %>%
    tools::toTitleCase() %>% trimws()
  # Per-column NA placeholder, keyed by the original column names.
  the_na_labels <- display_table_labels %>%
    filter(data.name %in% colnames(data)) %>%
    select(label.for.null.fields) %>%
    .[[1]] %>%
    as.list()
  # NOTE(review): this pairing assumes the filtered display_table_labels rows
  # come back in the same order as colnames(data); if not, placeholders get
  # attached to the wrong columns -- confirm.
  the_na_labels <- setNames(the_na_labels, colnames(data))
  data <- data %>%
    replace_na(the_na_labels)
  colnames(data) <- nice_col_headings2
  data
}
############### map ##################
# Leaflet map centred on the selected site, with one labelled circle marker.
output$summary_leaflet_map <- renderLeaflet({
  modal_row_data <- modal_row_data()
  # Coordinates used to centre the map view.
  centre_latitude <- modal_row_data %>%
    select(latitude) %>%
    .[[1]]
  centre_longitude <- modal_row_data %>%
    select(longitude) %>%
    .[[1]]
  modal_row_data %>%
    collect() %>%
    leaflet() %>%
    addTiles() %>%
    addCircleMarkers(
      lat = ~ latitude,
      lng = ~ longitude,
      fillOpacity = 0.7,
      stroke = TRUE,
      color = "#756bb1",
      weight = 2,
      label = ~ site,
      popup = ~ map_point_labeller(site, province, country)
    ) %>%
    setView(zoom = 6,
            lat = centre_latitude,
            lng = centre_longitude)
})
######################################
# Summary table: site identity fields.
output$renderTable_Summary <- renderTable({
  modal_row_data <- modal_row_data()
  modal_row_data %>%
    select(site, location, province, country) %>%
    info_renderTable()
},
width = '100%', align = 'l', na = 'missing')
# Location table: coordinates and their accuracy.
output$renderTable_location <- renderTable({
  modal_row_data <- modal_row_data()
  modal_row_data %>%
    select(longitude, latitude, coordinatesAccuracy) %>%
    info_renderTable()
},
width = '100%', align = 'l', na = 'missing')
# Timeline table: construction / abandonment date bounds.
output$renderTable_timeline <- renderTable({
  modal_row_data <- modal_row_data()
  modal_row_data %>%
    select(notBeforeConstructionDate, notAfterConstructionDate, notBeforeAbandonmentDate, notAfterAbandonmentDate) %>%
    info_renderTable()
},
width = '100%', align = 'l', na = 'missing')
# Press counts table: oil / wine / undefined / total.
output$renderTable_presses <- renderTable({
  modal_row_data <- modal_row_data()
  modal_row_data %>%
    select(countOfOilPresses, countOfWinePresses, countOfUndefinedPresses, totalPresses) %>%
    info_renderTable()
},
width = '100%', align = 'l', na = 'missing')
# Free-text notes for the site.
output$notes <- renderTable({
  modal_row_data <- modal_row_data()
  modal_row_data %>%
    select(notes) %>%
    info_renderTable()
},
width = '100%', align = 'l', na = 'missing')
# Bibliographic references: site ID -> reference links -> publications,
# formatted by info_reference(); renders nothing when no publication matches.
output$references <- renderTable({
  modal_row_data <- modal_row_data()
  ref_data <- modal_row_data %>%
    select(ID)
  # NOTE(review): ref_data is a one-column data frame, not a vector; relying
  # on %in% to coerce it is fragile -- ref_data[["ID"]] would be explicit.
  ref_data <- filter(dt_references_data,pressesSiteRes %in% ref_data) %>%
    select(publicationRef)
  ref_data <- filter(dt_pub_data,pubID %in% ref_data)
  if (nrow(ref_data) != 0) {
    info_reference(ref_data)
  }
},
width = '100%', align = 'l', na = 'missing')
############## timeline plot ###################
# Timeline strip: construction-date range padded by a quarter of the combined
# absolute span on each side, with axis breaks at roughly a tenth of the span.
output$summary_timeline <- renderPlot({
  modal_row_data <- modal_row_data()
  year_min <- modal_row_data$notBeforeConstructionDate
  year_max <- modal_row_data$notAfterConstructionDate
  year_plus <- ceiling((abs(year_min)+abs(year_max))/4)
  year_min <- year_min - year_plus
  year_max <- year_max + year_plus
  set_break <- floor((abs(year_min)+abs(year_max))/10)
  gg_timeline_plot(
    start = modal_row_data$notBeforeConstructionDate,
    end = modal_row_data$notAfterConstructionDate,
    minyear = year_min,
    maxyear = year_max,
    breaks = set_break
  )
})
# Layout of the modal's summary tab: tables + timeline on the left,
# map on the right, notes and references full-width below.
output$modal_summaryTab <- renderUI({
  fluidPage(
    fluidRow(
      column(
        br(),
        tableOutput("renderTable_Summary"),
        tableOutput("renderTable_location"),
        tableOutput("renderTable_timeline"),
        plotOutput("summary_timeline", height = "50px"),
        tableOutput("renderTable_presses"),
        width = 8),
      column(leafletOutput("summary_leaflet_map"),width = 4)
    ),
    p(),
    tableOutput("notes"),
    tableOutput("references")
  )
})
63644396a730f757ba9caa936b4ebf75b5c4d7e2 | eb9e60d992409a096fb40a01812331942b603f91 | /man/sparseBC-package.Rd | 195211d6467e216b4dc1f5e98acec295ddcf67ab | [] | no_license | cran/sparseBC | 617a7fb7be6c27a3024e0f408f62927c76213a0b | 423615956acb9d8516fbd6d986eeb98ab464c1f7 | refs/heads/master | 2020-05-29T21:28:45.062936 | 2019-03-19T04:40:05 | 2019-03-19T04:40:05 | 17,699,851 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,969 | rd | sparseBC-package.Rd | \name{sparseBC-package}
\alias{sparseBC-package}
\docType{package}
\title{
Fit sparse biclustering and matrix-variate normal biclustering
}
\description{
This package is called sparseBC, for "Sparse biclustering". It implements two methods: Sparse biclustering and matrix-variate normal biclustering. Both are described in the paper "Sparse biclustering of transposable data", by KM Tan and D Witten (2014), \emph{Journal of Computational and Graphical Statistics}.
The main functions are as follows:
(1) sparseBC
(2) matrixBC
The first function, sparseBC, performs sparse biclustering. matrixBC performs matrix-variate normal biclustering. There are also cross-validation functions for the tuning parameter that controls the sparsity level of the estimated mean matrix: sparseBC.BIC and matrixBC.BIC. A function that chooses the number of biclusters K and R is also included for sparseBC, called sparseBC.choosekr.
}
\details{
\tabular{ll}{
Package: \tab sparseBC\cr
Type: \tab Package\cr
Version: \tab 1.1\cr
Date: \tab 2015-02-09\cr
License: \tab GPL (>=2.0) \cr
LazyLoad: \tab yes \cr
}
The package includes the following functions:
\tabular{ll}{
\code{\link{sparseBC}}: \tab Perform sparse biclustering\cr
\code{\link{sparseBC.choosekr}}: \tab Cross-validation to select the number of row and column clusters \cr
\code{\link{sparseBC.BIC}}: \tab Select sparsity tuning parameter for sparseBC \cr
\code{\link{summary.sparseBC}}: \tab Display information for the object sparseBC \cr
\code{\link{image.sparseBC}}: \tab Image plot for the estimated bicluster mean matrix \cr
\code{\link{matrixBC}}:\tab Perform matrix-variate normal biclustering \cr
\code{\link{matrixBC.BIC}}:\tab Select sparsity tuning parameter for matrixBC\cr
}
}
\author{
Kean Ming Tan
Maintainer: Kean Ming Tan <keanming@u.washington.edu>
}
\references{
KM Tan and D Witten (2014) Sparse biclustering of transposable data. \emph{Journal of Computational and Graphical Statistics} 23(4):985-1008.
}
\seealso{
\code{\link{sparseBC}}
\code{\link{matrixBC}}
}
\keyword{ package }
\examples{
# An example that violates the assumption of contiguous biclusters
# Create mean matrix and the data matrix
#set.seed(5)
#u<-c(10,9,8,7,6,5,4,3,rep(2,17),rep(0,75))
#v<-c(10,-10,8,-8,5,-5,rep(3,5),rep(-3,5),rep(0,34))
#u<-u/sqrt(sum(u^2))
#v<-v/sqrt(sum(v^2))
#d<-50
#mus<-d*tcrossprod(u,v)
#binaryX<-(mus!=0)*1
#X<-mus+matrix(rnorm(100*50),100,50)
#X<-X-mean(X)
# The number of biclusters are chosen automatically
# Commented out for short run-time
#KR<-sparseBC.choosekr(X,1:6,1:6,0,0.1,trace=TRUE)
#k<-KR$estimated_kr[1]
#r<-KR$estimated_kr[2]
# The value of lambda is chosen automatically
#lambda<-sparseBC.BIC(X,k,r,c(0,10,20,30,40,50))$lambda
# Perform sparse biclustering using the K, R, and lambda chosen
#biclustering<-sparseBC(X,k,r,lambda)
# Display some information on the object sparseBC
#summary(biclustering)
# Plot the estimated mean matrix from sparseBC
#image(biclustering)
}
|
103dbb532ad4f82aff730142a02ab4dbcb1e3453 | 41499d1033c02c113ffb5b22e9f7f9d39d085f02 | /CallOfData_EdithClarke/Jose/Jose.R | a6c384b428f521eedf09f27ada49b77558fe4fb5 | [
"MIT"
] | permissive | rladies/CallOfData_Madrid_2017 | 3ed78ee0abc0bfdebe6283160cec9566f08ceb91 | 467684060d182c926465eee24622554a18e03df4 | refs/heads/master | 2021-01-25T06:44:54.890199 | 2017-06-15T11:17:25 | 2017-06-15T11:17:25 | 93,601,373 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,883 | r | Jose.R | library(data.table)
# Load the World Bank "ODA received per capita" table.
oda_per_capita <- data.table::fread("~/call-of-data_2017/data/Banco Mundial/ODA recibida per Capita.csv")
View(oda_per_capita)
# Convert every yearly column (1960-2015, read as character) to numeric in
# place. This replaces 56 hand-written per-year `$"YYYY" <- as.numeric(...)`
# statements and the earlier loop whose set() call was commented out.
for (cpo in as.character(1960:2015)) {
  data.table::set(oda_per_capita, j = cpo, value = as.numeric(oda_per_capita[[cpo]]))
}
oda_per_capita_4_13 <- oda_per_capita[,c(colnames(oda_per_capita)[1:2],as.character(2004:2013)),with=F]
colnames(oda_per_capita_4_13)[1:2] <- c('pais', 'cod_pais')
for(j in colnames(oda_per_capita_4_13)) set(oda_per_capita_4_13, which(is.na(oda_per_capita_4_13[[j]])), j, 0) # NAs a cero
View(oda_per_capita_4_13)
summary(oda_per_capita_4_13)
unique(oda_per_capita_4_13$`Country Name`)
fichname = "~/call-of-data_2017/data/Banco Mundial/oda_per_capita_4_13"
save(oda_per_capita_4_13, file=paste0(fichname, ".RData"))
write.table(oda_per_capita_4_13, file = paste0(fichname, ".csv"), append = F, row.names=F, col.names=T, quote=T, sep=",")
melt(oda_per_capita_4_13, 1:2, 3:12, variable.name='anyo', value.name = 'aod_per_capita_recibida')
poblacion <- fread("~/call-of-data_2017/data/poblacion.csv", header = T)
summary(poblacion)
length(colnames(poblacion))
sapply(poblacion, class)
poblacion2 <- cbind(poblacion[,c(1:2, 59),with=F], sapply(poblacion[,c(3:58),with=F], as.numeric))
poblacion2 <- melt(poblacion2, id.vars = c(1:3), measure.vars = (4:59), variable.name='anyo', value.name = 'poblacion')
View(poblacion2)
fichname <- '~/call-of-data_2017/data/poblacion_v2'
write.table(poblacion2, file = paste0(fichname, ".csv"), append = F, row.names=F, col.names=T, quote=T, sep=",")
|
2951e884fe49611d83579186bbc16dfc9b5dd985 | 502d6b35f42b36119d8c23c05ced2b7abbdd91bd | /run_analysis.R | 30b2c54c0631b68f2554666bb3aa2fb7174548cf | [] | no_license | and-ber/GetData_CourseProject | 49edb3761918f6794a5789247ce4d244a6af9a3f | ad6fc8ec8a733b6b7f34f7c689f0fef943e439d8 | refs/heads/master | 2021-01-22T11:41:16.987522 | 2015-02-10T01:08:17 | 2015-02-10T01:08:17 | 30,563,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,003 | r | run_analysis.R | ## The following code loads the data "Human Activity Recognition Using Smartphones Dataset, Version 1.0"
## Builds a tidy summary of the "Human Activity Recognition Using Smartphones"
## dataset (v1.0):
##  1. loads and merges the training and test sets,
##  2. attaches subject ids and descriptive activity names
##     (from "activity_labels.txt"),
##  3. keeps only the mean() and std() measurements
##     (named as per "features.txt"),
##  4. averages each variable per subject and activity,
##  5. writes the result to TidyData.csv and TidyData.txt.
## NOTE: the code works as long as the data directory "UCI HAR Dataset" is in
## the working directory.
# Feature (column) names for the measurement files.
features <- read.table("./UCI HAR Dataset/features.txt")
# Mapping from activity id to descriptive activity name.
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# Loads one partition ("train" or "test") and returns a data frame of
# SubjectId, ActivityId, Activity plus the named measurement columns.
# (Replaces the previous copy-pasted train/test loading blocks.)
load_har_partition <- function(partition) {
  base <- file.path(".", "UCI HAR Dataset", partition)
  measurements <- read.table(file.path(base, paste0("X_", partition, ".txt")))
  colnames(measurements) <- features[, 2]
  subject <- read.table(file.path(base, paste0("subject_", partition, ".txt")))
  colnames(subject) <- "SubjectId"
  activity <- read.table(file.path(base, paste0("y_", partition, ".txt")))
  # Translate every activity id into its description.
  activity[, 2] <- activity_labels[activity[, 1], 2]
  colnames(activity) <- c("ActivityId", "Activity")
  cbind(subject, activity, measurements)
}
# Merges training and test data sets.
data = rbind(load_har_partition("train"), load_har_partition("test"))
# Extracts only the measurements on the mean and standard deviation for each
# measurement (keeping the three id columns).
a <- sort(c(1:3, grep("mean()",names(data),fixed=T), grep("std()",names(data),fixed=T)))
data <- data[,a]
library(dplyr)
data <- tbl_df(data)
# Average every measurement for each subject/activity pair.
# NOTE(review): tbl_df/summarise_each/funs are deprecated in current dplyr;
# kept for compatibility with the dplyr version this script was written for.
FinalData <- data %>%
  group_by(SubjectId, ActivityId, Activity) %>%
  summarise_each(funs(mean))
# Writes the final data set in csv and text format, omitting the row names.
write.csv(FinalData, file="./TidyData.csv",row.names=FALSE)
write.table(FinalData, file="./TidyData.txt",row.names=FALSE)
|
b3bb58bfc529a5134a6e7810d56599f823e075f1 | 4d9b883d69da5b37f3787ccdff2caa0d3246eef3 | /Biochips/Biochips.r | cd50411ca01ac40722e71414792e853846e9699e | [] | no_license | maxkuan/Johnson | 41431f5648a220fdad7a6ee1a69c54a16766a830 | 08f79e2874709dc1b8740734800464835d9bdcb8 | refs/heads/master | 2020-05-23T21:41:12.923558 | 2019-12-17T11:45:19 | 2019-12-17T11:45:19 | 186,958,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,143 | r | Biochips.r | #Distance between two sets.
#Version:0.1.2
#Last Update:6/26/2019
#
#
#Packages required: igraph.
#
#Initialization steps in R:
#1. library("igraph")
#2. source(file = file.choose()) (e.g. Biochips.r)
#3. select pajek network file (e.g. Biochips_Cited_Citing.net)
#4. select assignee file (e.g. Biochips_Assignee_Pajek.csv)
#
#
#initialize packages
library(igraph)
library(compare)
#initialize the citation network from a Pajek .net file (interactive picker)
graph <- read.graph(file.choose(), format = "pajek")
#dat <- read.csv(file.choose())
#m <- as.matrix(dat)
#m <- m[,-1] #delete first column
#net <- graph.adjacency(m,mode="directed",weighted=TRUE)
#w <- E(net)$weight
#td <- 0
#initialize the assignee lookup table; columns referenced elsewhere in this
#file: PN (patent number), ASSIGNEE, value (pajek vertex label)
assignee <- read.csv(file.choose())
#initialize set A
#a <- read.csv(file.choose())
#initialize set B
#b <- read.csv(file.choose())
#hop count (number of edges) on the shortest path from vertex intA to vertex
#intB in the global igraph object `graph`
find_shortest_path_between_two_sets <- function(intA, intB) {
  shortest <- get.shortest.paths(graph, intA, intB)
  lengths(shortest[[1]]) - 1
}
#total pairwise shortest-path distance (directed, A -> B) between two vertex
#sets; setA/setB are one-column data.frames of vertex ids.
#Fixed: the old `1:lengths(setA)` misused lengths() (a per-column vector) and
#iterated c(1, 0) for an empty set; seq_along() handles zero vertices safely.
find_distance_between_two_sets <- function(setA, setB) {
  vertsA <- setA[[1]]
  vertsB <- setB[[1]]
  td <- 0
  for (i in seq_along(vertsA)) {
    for (j in seq_along(vertsB)) {
      sp <- get.shortest.paths(graph, vertsA[i], vertsB[j])
      td <- td + lengths(sp[[1]]) - 1
    }
  }
  return(td)
}
#total pairwise shortest-path distance between two vertex sets (A -> B).
#Fixed: the previous mapply() call zipped the two data-frame columns (passing
#whole vectors to a single call) instead of summing over the full cross
#product like find_distance_between_two_sets(); and compare() was misused for
#the data.frame check -- is.data.frame() is the idiomatic test.
find_distance_between_two_sets_with_apply <- function(setA, setB) {
  if (!is.data.frame(setA)) {
    setA <- as.data.frame(setA)
  }
  if (!is.data.frame(setB)) {
    setB <- as.data.frame(setB)
  }
  td <- 0
  # Sum hop counts over every (a, b) pair of the first columns.
  for (i in seq_along(setA[[1]])) {
    for (j in seq_along(setB[[1]])) {
      td <- td + find_shortest_path_between_two_sets(setA[[1]][i], setB[[1]][j])
    }
  }
  print(paste("setA is ", setA, sep=""))
  print(paste("setB is ", setB, sep=""))
  return(td)
}
#Calculate a total distance between two vectors.
#Computes the total pairwise shortest-path distance in both directions
#(A -> B and B -> A, which can differ in a directed graph) and prints both
#totals, labelled with the input column names.
#Side effects: assigns the per-pair tables to the globals `a_to_b` and
#`b_to_a` (columns: source vertex, target vertex, hop count).
#setA/setB are one-column data.frames of vertex ids.
calculate_distance_between_two_sets <- function(setA, setB) {
  td <- 0
  # One row per (source, target) pair; columns: from, to, distance.
  r <- matrix(nrow = lengths(setA) * lengths(setB), ncol = 3)
  #setA to setB
  for(i in 1:lengths(setA)) {
    #print(i)
    for(j in 1:lengths(setB)) {
      #print(j)
      temp <- get.shortest.paths(graph, setA[[1]][i], setB[[1]][j])
      td <- td + lengths(temp[[1]]) - 1
      # Row-major position of pair (i, j) in the result matrix.
      r[(i - 1) * lengths(setB) + j,] <- c(setA[[1]][i], setB[[1]][j], lengths(temp[[1]]) - 1)
      #print((i - 1) * lengths(setB) + j)
    }
    #print(td)
    #print(lengths(setA))
    #print(lengths(setB))
  }
  a_to_b <<- as.data.frame(r)
  print(paste("The total distance from ", colnames(setA), " to ", colnames(setB), " is: ", td, sep=""))
  #setB to setA (the matrix r has the same number of cells, so it is reused)
  td <- 0
  for(i in 1:lengths(setB)) {
    #print(i)
    for(j in 1:lengths(setA)) {
      #print(j)
      temp <- get.shortest.paths(graph, setB[[1]][i], setA[[1]][j])
      td <- td + lengths(temp[[1]]) - 1
      r[(i - 1) * lengths(setA) + j,] <- c(setB[[1]][i], setA[[1]][j], lengths(temp[[1]]) - 1)
      #print((i - 1) * lengths(setB) + j)
    }
    #print(td)
    #print(lengths(setA))
    #print(lengths(setB))
  }
  b_to_a <<- as.data.frame(r)
  print(paste("The total distance from ", colnames(setB), " to ", colnames(setA), " is: ", td, sep=""))
}
#show paths between setA and setB
#For every pair with distance > 0 in the global tables `a_to_b` / `b_to_a`
#(whose third column is named "distance" by
#calculate_distance_between_two_assignees), appends the shortest vertex path
#as a comma-separated string and stores the result in the globals
#`a_to_b_path` / `b_to_a_path` (NULL, plus a message, when no pair in that
#direction is connected). setA/setB are used only for their column names in
#the "no path" messages.
show_paths <- function(setA, setB) {
  #show paths from setA to setB
  path <- NULL
  if(nrow(subset(a_to_b, distance > 0))) {
    # Extra 4th column holds the path string for each connected pair.
    path <- cbind(subset(a_to_b, distance > 0), path = 0)
    for(i in 1:nrow(path)) {
      path[i, 4] <- toString(get.shortest.paths(graph, path[i, 1], path[i, 2])[["vpath"]][[1]])
    }
    a_to_b_path <<- path
  }else {
    a_to_b_path <<- path
    print(paste("There is no path from ", colnames(setA), " to ", colnames(setB), sep=""))
  }
  #show paths from setB to setA
  path <- NULL
  if(nrow(subset(b_to_a, distance > 0))) {
    path <- cbind(subset(b_to_a, distance > 0), path = 0)
    for(i in 1:nrow(path)) {
      path[i, 4] <- toString(get.shortest.paths(graph, path[i, 1], path[i, 2])[["vpath"]][[1]])
    }
    b_to_a_path <<- path
  }else {
    b_to_a_path <<- path
    print(paste("There is no path from ", colnames(setB), " to ", colnames(setA), sep=""))
  }
}
#look up all patents belonging to one assignee
#Returns a one-column data.frame (column named after the assignee) holding
#column 3 of the global `assignee` table for every row that contains `name`.
find_patent = function(name) {
  hits <- which(assignee == name, arr.ind = TRUE)[, 1]
  patents <- as.data.frame(assignee[hits, 3])
  colnames(patents) <- name
  patents
}
#calculate distance between two assignees
#Looks up each assignee's patent set, computes both directed total distances
#(calculate_distance_between_two_sets fills the globals `a_to_b`/`b_to_a`),
#relabels those tables with the assignee names plus a "distance" column that
#show_paths() depends on, then materializes the actual shortest paths.
#Also assigns the patent sets to the globals `set_a`/`set_b`.
calculate_distance_between_two_assignees = function(assigneeA, assigneeB) {
  set_a <<- find_patent(assigneeA)
  set_b <<- find_patent(assigneeB)
  calculate_distance_between_two_sets(set_a, set_b)
  colnames(a_to_b) <<- c(assigneeA, assigneeB, "distance")
  colnames(b_to_a) <<- c(assigneeB, assigneeA, "distance")
  show_paths(set_a, set_b)
}
#detect whether cycles exist in a graph
#For each vertex v1 and each out-neighbor v2, enumerates every simple path
#from v2 back to v1; prepending v1 turns each into a closed walk, so the same
#cycle is reported once per vertex it passes through. Results are stored in
#the global `cycles` (a list of vertex sequences; NULL when acyclic).
#Can be very expensive on dense graphs (simple-path enumeration).
detect_cycle = function(g) {
  cy <- NULL
  for(v1 in V(g)) {
    for(v2 in neighbors(g, v1, mode="out")) {
      cy <- c(cy, lapply(all_simple_paths(g, v2,v1, mode="out"), function(p) c(v1,p)))
    }
  }
  cycles <<- cy
}
#look up the patent number (column 1 of the global `assignee` table) for a
#pajek vertex label
#note: the pajek-label column of `assignee` must be named "value"
find_patent_number = function(pajek_label) {
  assignee[assignee$value == pajek_label, 1]
}
#look up the pajek vertex label (column 3 of the global `assignee` table) for
#a patent number
#note: the patent-number column of `assignee` must be named "PN"
find_pajek_label = function(patent_number) {
  assignee[assignee$PN == patent_number, 3]
}
#find all patents for each unique assignee
#`as` is a data.frame with an ASSIGNEE column; column 2 is passed to
#find_patent() as the lookup name. Returns a flat list with one element per
#unique assignee (the patent column, named after the assignee), or NULL when
#`as` has no rows.
#NOTE: the parameter name `as` shadows base::as here; kept for compatibility.
find_all_patent_with_each_assignee = function(as) {
  assignees <- as[!duplicated(as$ASSIGNEE), ]
  # seq_len() is safe for zero rows (the old 1:assignee_row iterated c(1, 0)),
  # and unlist(recursive = FALSE) reproduces the old c()-accumulator result
  # without growing a list inside a loop.
  unlist(
    lapply(seq_len(nrow(assignees)),
           function(i) as.data.frame(find_patent(toString(assignees[i, 2])))),
    recursive = FALSE
  )
}
#find all pairwise distances among assignees
#Builds the directed n x n data.frame of total set-to-set distances over the
#global `assignee_list`. The previous version filled both [i, j] and [j, i]
#inside a full i x j double loop, computing every cell twice; each cell is
#now computed exactly once (same final matrix, same printed progress lines).
find_all_distance_among_assignees = function() {
  n <- length(assignee_list)
  distance_m <- as.data.frame(matrix(, nrow = n, ncol = n))
  for (i in seq_len(n)) {
    for (j in seq_len(n)) {
      distance_m[i, j] <- find_distance_between_two_sets_with_apply(
        as.data.frame(assignee_list[[i]]), as.data.frame(assignee_list[[j]]))
      print(paste(assignee_list[[i]], assignee_list[[j]], distance_m[i, j], sep = " "))
    }
  }
  return(distance_m)
}
#calculate distance between all assignees
#Populates the global `assignee_list` with one patent set per unique assignee
#in `a`; the all-pairs distance-matrix step is currently left disabled.
calculate_distance_among_all_assignee = function(a) {
  assignee_list <<- find_all_patent_with_each_assignee(a)
  #distance_matrix <<- find_all_distance_among_assignees()
}
|
221dc78e8e6873389af2350e1c8ee3a2bed18746 | 598dc3632b10f2a6e3ad3b8d9bc17126cc985757 | /man-roxygen/args-data_common.R | 804ba5bf40936056b7d490bc061f2741fe6cf89a | [] | no_license | sunfeng98/multinma | 7c5e7b701da8bc6ee518ac684fd6e5f2a01775a8 | aa6c6e6a12ae1081ffe038410960655d9b765ed5 | refs/heads/master | 2021-03-23T13:59:02.426405 | 2019-10-24T14:48:49 | 2019-10-24T14:48:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 463 | r | args-data_common.R | #' @param data a data frame
#' @param trt column of `data` specifying treatments, coded using integers,
#' strings, or factors
#' @param trt_ref reference treatment for the network, as a single integer,
#' string, or factor. By default takes the first treatment in a "natural" sort
#' order.
#' @param study column of `data` specifying the studies, coded using integers,
#' strings, or factors
#' @param y column of `data` specifying a continuous outcome
|
c8b722bd40e6242750a047c2fb1fe0b3c1408aff | 5b722119d1b1ca9df17a2914a4db2d35f73b5490 | /Scripts/environmental_policy/Water-Data-Downloader/gwl_analysis.R | 6e39711e7d94f7343cdf4603370cd0c7f08794f8 | [
"CC-BY-4.0"
] | permissive | vishalbelsare/Public_Policy | 1d459eba9009e7183fa266d3bb9d4dd0d6dacddc | 4f57140f85855859ff2e49992f4b7673f1b72857 | refs/heads/master | 2023-03-29T05:01:10.846030 | 2021-01-13T21:52:45 | 2021-01-13T21:52:45 | 311,356,474 | 0 | 0 | NOASSERTION | 2021-04-04T20:12:17 | 2020-11-09T14:00:21 | null | UTF-8 | R | false | false | 6,760 | r | gwl_analysis.R | library("ggplot2", lib.loc = "/Users/user/Documents/R-packages")
library("reshape2", lib.loc = "/Users/user/Documents/R-packages")
library("plyr", lib.loc = "/Users/user/Documents/R-packages")
library("dplyr", lib.loc = "/Users/user/Documents/R-packages")
library("stringr", lib.loc = "/Users/user/Documents/R-packages")
library("data.table", lib.loc = "/Users/user/Documents/R-packages")
library("gridExtra", lib.loc = "/Users/user/Documents/R-packages")
library("scales", lib.loc = "/Users/user/Documents/R-packages")
library("lazyeval", lib.loc = "/Users/user/Documents/R-packages")
library("labeling", lib.loc = "/Users/user/Documents/R-packages")
options(stringsAsFactors = TRUE)
# Input/output locations (user-specific absolute paths).
input_path = "/Users/user/Documents/California Water Data/Groundwater Level Data"
data_output_path = "/Users/user/Documents/California Water Data/Groundwater Level Data/R-output"
sumstats_output_path = "/Users/user/Documents/California Water Data/Groundwater Level Data/R-output/Sumstats"
graphs_output_path = "/Users/user/Documents/California Water Data/Groundwater Level Data/R-output/Graphs"
# NOTE(review): setwd() makes the script non-portable; kept as-is.
setwd(input_path)
# One groundwater-level CSV per region, named <Region>_gwl_well_data.csv.
regions = c('Central_Coast', 'Colorado_River', 'North_Coast', 'North_Lahontan', 'Sacramento_River', 'San_Francisco_Bay', 'San_Joaquin_River', 'South_Coast', 'South_Lahontan', 'Tulare_Lake')
file_list = paste0(regions,'_gwl_well_data.csv')
# Create single well data file from all of the separate regional well data files
all_well_data = rbindlist(lapply(file_list,fread, na.strings = "NA"))
# Parse dates, coerce id columns to factors, and derive the measurement year.
all_well_data = mutate(all_well_data,
                       Measurement_Date = as.Date(Measurement_Date,"%m-%d-%Y"),
                       Region = as.factor(Region),
                       Basin = as.factor(Basin),
                       Use = as.factor(Use),
                       measurement_year = year(Measurement_Date))
# Build a per-well, per-year "missingness grid" over [start_year, end_year].
# Each row marks one (well, year) as 'non_missing' (a reading with both RPWS
# and GSWS present exists that year) or 'missing'. With mode = 'first_nonmiss',
# a well only counts as missing from its first observed year onward. Townships
# with no usable data at all (Use is NA) are appended as all-missing rows.
# Reads the global `all_well_data`; returns a data.table/data.frame.
get_na_grid = function(start_year, end_year, mode = "All"){
  # Get wells and years for which there are water level data
  wells_nonmiss_uyr = mutate(
    distinct(
      filter(all_well_data, !is.na(RPWS) & !is.na(GSWS)),
      State_Well_Number, measurement_year
    ),
    data_status = 'non_missing'
  )
  # Use this to create an index of wells and years
  wells_nonmiss_all_yrs = rbindlist(
    lapply(
      c(start_year:end_year),
      function(year){
        mutate(distinct(select(wells_nonmiss_uyr, Region, Basin, Township, State_Well_Number)), measurement_year = year)
      }
    )
  )
  # Merge this index with unique-by-year well data to get data for yearly missingness
  well_dat_na_grid = merge(filter(wells_nonmiss_uyr, measurement_year >= start_year & measurement_year <= end_year),
                           wells_nonmiss_all_yrs,
                           by = c('Region', 'Basin', 'Township', 'State_Well_Number', 'measurement_year'),
                           all = TRUE)
  # Index rows without a matching observation become 'missing'.
  well_dat_na_grid = mutate(well_dat_na_grid,
                            data_status = ifelse(is.na(data_status),'missing', data_status),
                            data_status = as.factor(data_status)
  )
  # If specified, get data for yearly missingness that counts a well as missing only after it has been observed
  if(tolower(mode) == 'first_nonmiss'){
    wells_first_obs = summarize(group_by(wells_nonmiss_uyr, State_Well_Number),
                                first_obs_year = min(measurement_year))
    well_dat_na_grid = filter(merge(well_dat_na_grid, wells_first_obs, by = 'State_Well_Number', all = TRUE),
                              measurement_year >= first_obs_year)
  }
  # Get a similar index for townships, basins, and wells for which we know no data are available
  # combine it with the 'well_dat_na_grid' dataset
  missing_data = distinct(filter(all_well_data,is.na(Use)), Region, Basin, Township)
  missing_data_all_yrs = rbindlist(
    lapply(
      c(start_year:end_year),
      function(year){
        mutate(missing_data, data_status = 'missing', measurement_year = year)
      }
    )
  )
  well_dat_na_grid = rbindlist(list(well_dat_na_grid, missing_data_all_yrs), fill = TRUE)
  return(well_dat_na_grid)
}
# Missingness grid for 1950-2010, counting a well as missing only from its
# first observed year onward.
well_dat_na_grid = get_na_grid(1950, 2010, 'first_nonmiss')
# Get summary statistics at a chosen geographic granularity.
#
# geo_units: character vector of grouping columns -- 'Region',
#   c('Region', 'Basin'), or c('Region', 'Basin', 'Township').
#   (A leftover debug line that overwrote this argument with 'Region' has
#   been removed, so the caller's choice is now honored.)
# Reads the globals `well_dat_na_grid` and `all_well_data` (local copies are
# modified; the globals are untouched). Returns a named list:
#   $na_sumstats   - wells tracked, wells observed, share observed per
#                    geography/year
#   $rpws_sumstats - count, median and mean RPWS per geography/year
get_sumstats = function(geo_units){
  if(length(geo_units) == 1){
    well_dat_na_grid = mutate(well_dat_na_grid, geo_unit = Region)
    all_well_data = mutate(all_well_data, geo_unit = Region)
  }
  if(length(geo_units) == 2){
    well_dat_na_grid = mutate(well_dat_na_grid, geo_unit = paste(Region, Basin, sep = '_'))
    all_well_data = mutate(all_well_data, geo_unit = paste(Region, Basin, sep = '_'))
  }
  if(length(geo_units) == 3){
    well_dat_na_grid = mutate(well_dat_na_grid, geo_unit = paste(Region, Basin, Township, sep = '_'))
    all_well_data = mutate(all_well_data, geo_unit = paste(Region, Basin, Township, sep = '_'))
  }
  # Count (well, year) rows per status, then widen so each geography/year has
  # `missing` and `non_missing` columns (absent combinations become 0).
  nonmiss_counts = summarize(group_by(well_dat_na_grid, geo_unit, measurement_year, data_status), n_data_status = n())
  nonmiss_counts_wide = mutate(
    dcast(as.data.frame(nonmiss_counts),
          geo_unit + measurement_year ~ data_status,
          value.var = "n_data_status"
    ),
    missing = ifelse(is.na(missing), 0, missing),
    non_missing = ifelse(is.na(non_missing), 0, non_missing),
    n_wells = non_missing + missing,
    n_observed = non_missing,
    mean_nonmissing = n_observed/n_wells
  )
  # Split the compound geo_unit key back into its component columns.
  # NOTE(review): geography names containing '_' would be mis-split here.
  geo_sep = colsplit(nonmiss_counts_wide$geo_unit, '_', names = geo_units)
  na_sumstats = cbind(geo_sep, select(nonmiss_counts_wide, measurement_year, n_wells, n_observed, mean_nonmissing))
  # Get summary stats for the proxy for the water level (RPWS) in each region
  yrly_rpws = as.data.frame(
    summarize(
      group_by(all_well_data, geo_unit, measurement_year),
      n_observed = n(),
      median_level = median(RPWS, na.rm = TRUE),
      mean_level = mean(RPWS, na.rm = TRUE)
    )
  )
  geo_sep = colsplit(yrly_rpws$geo_unit, '_', names = geo_units)
  rpws_sumstats = cbind(geo_sep, select(yrly_rpws, measurement_year, n_observed, median_level, mean_level))
  sumstats_list = list(na_sumstats, rpws_sumstats)
  names(sumstats_list) = c('na_sumstats', 'rpws_sumstats')
  return(sumstats_list)
}
sumstats = get_sumstats(c('Region', 'Basin', 'Township'))
# Plots!
# NOTE(review): `regions_sumstats` is not defined anywhere in this script --
# this plot presumably meant a region-level `sumstats$na_sumstats` (it uses
# the n_wells / mean_nonmissing columns of that table); confirm and fix.
yearly_freqs_plot = ggplot(regions_sumstats, aes(measurement_year, mean_nonmissing, colour = Region))+
  geom_point(aes(size = n_wells))+
  geom_line(aes(colour = Region))+
  scale_x_continuous(limits = c(1950,2010), breaks = c(1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010))
yearly_freqs_plot
|
edb7e6444a291cd71ab1ce2cfc398558e0050071 | e13f912dedfcb12756b517a27f566c7b537140ef | /02_ukb/src/03_univariate_analyses/01_hippocampus_gamm/render.R | 07f0acfe33e14ca877aed4576cc770878b74bdca | [
"MIT"
] | permissive | Lifebrain/p025_education_brain | db3901ea3d21059a39d81e2f3b764fb19e0e0e92 | 507cca3514b8ddbf65df7a047dba5bae1295badf | refs/heads/main | 2023-07-03T02:06:12.532051 | 2021-08-03T10:24:29 | 2021-08-03T10:24:29 | 323,602,627 | 0 | 0 | MIT | 2020-12-22T12:29:25 | 2020-12-22T11:09:50 | Jupyter Notebook | UTF-8 | R | false | false | 139 | r | render.R | library(rmarkdown)
# Render the hippocampus GAMM analysis notebook to PDF.
render("UKB_hippo_analyses_GAMM.Rmd","pdf_document")
render("UKB_hippo_analyses_GAMM.Rmd","rmarkdown::github_document") |
b7cddc1abc49e7bca1fdeff6bf19a918ac434288 | 36fc4a86af475ce6d33c2aa63afbf23addae3996 | /lasso_subtype_coad.R | 954a14b4455f296df3dc86d20cc81eedca729d4d | [
"BSD-3-Clause"
] | permissive | jaclyn-taroni/TDMresults | 5501b70d99261c4f457d32f6df5b42755460e7c6 | 1415856a19bf47e6b9e4edb169028b6228ba4b91 | refs/heads/master | 2021-01-13T06:25:32.981547 | 2016-05-26T18:42:33 | 2016-05-26T18:42:33 | 59,760,406 | 0 | 0 | null | 2016-05-26T15:13:31 | 2016-05-26T15:13:31 | null | UTF-8 | R | false | false | 14,723 | r | lasso_subtype_coad.R | # Author: Jeffrey Thompson
# Purpose: Performs multiclass lasso logistic regression to classify cancer subtypes.
# Get command line arguments.
#args = commandArgs(trailingOnly = T)
# package_loader.R is expected to define load_it() used below (and presumably
# the `normalized_data` / `output` paths referenced in `args`) -- TODO confirm.
source("package_loader.R")
# Experiment constants: CV folds, number of repeated runs, base RNG seed.
NFOLDS = 100
NSEEDS = 10
INITIAL_SEED = 2
# Output file names for plots/tables produced later in the script.
ACCURACY_PLOT_FILENAME = "coadread_accuracies.pdf"
KAPPA_PLOT_FILENAME = "coadread_kappas.pdf"
ACCURACY_TABLE_FILENAME = "coadread_accuracies.tsv"
BALANCED_PLOT_FILENAME = "coadread_balanced_accuracies.pdf"
# Hard-coded stand-in for the commented-out commandArgs() call above:
# input dir, then expression files paired with their clinical annotation
# files, then the output dir.
args = c(normalized_data,
         'COADREAD_ZEROONE.pcl',
         'COADREADClin.tsv',
         'COADREAD_TDM_ZEROONE.pcl',
         'COADREADRNASeqClin.tsv',
         'COADREAD_QN_ZEROONE.pcl',
         'COADREADRNASeqClin.tsv',
         'COADREAD_LOG_ZEROONE.pcl',
         'COADREADRNASeqClin.tsv',
         'COADREAD_NPN_ZEROONE.pcl',
         'COADREADRNASeqClin.tsv',
         'COADREAD_REF_NPN_ZEROONE.pcl',
         'COADREADClin.tsv',
         'COADREAD_UN_ZEROONE.pcl',
         'COADREADRNASeqClin.tsv',
         output)
input_dir = args[1]
ref_input = args[2]
ref_clin = args[3]
tdm_input = args[4]
tdm_clin = args[5]
qn_input = args[6]
qn_clin = args[7]
log_input = args[8]
log_clin = args[9]
npn_input = args[10]
npn_clin = args[11]
npn_ref_input = args[12]
npn_ref_clin = args[13]
un_input = args[14]
un_clin = args[15]
output_dir = args[16]
# Load required modeling/utility packages via the project loader.
load_it(c("glmnet", "caret", "e1071", "stringr", "plyr", "huge"))
# This function creates dataframes to hold the accuracies for each class across runs.
# Returns an empty (0-column) data.frame whose rows are the four subtype
# classes; per-run per-class accuracies are cbind-ed onto it later.
# The original `df` argument was immediately overwritten and never used --
# callers pass still-undefined names, which only worked because R arguments
# are lazy promises. The argument is now optional and ignored, which keeps
# those existing calls valid.
setup_acc_df = function(df = NULL) {
  acc = data.frame(matrix(nrow=4, ncol=0))
  rownames(acc) = c("CIMP", "CIMPL", "NCIMP", "Normal")
  return (acc)
}
# Define data.frames to hold classification results.
# NOTE: the arguments below (refdf, tdmdf, ...) are not defined at this point;
# these calls only work because setup_acc_df never evaluates its argument
# (R arguments are lazy promises).
refdf = setup_acc_df(refdf)
tdmdf = setup_acc_df(tdmdf)
qndf = setup_acc_df(qndf)
logdf = setup_acc_df(logdf)
npndf = setup_acc_df(npndf)
undf = setup_acc_df(undf)
tdmnotshareddf = setup_acc_df(tdmnotshareddf)
qnnotshareddf = setup_acc_df(qnnotshareddf)
lognotshareddf = setup_acc_df(lognotshareddf)
npnnotshareddf = setup_acc_df(npnnotshareddf)
unnotshareddf = setup_acc_df(unnotshareddf)
# Define random seeds (fixed INITIAL_SEED keeps the whole run reproducible).
set.seed(INITIAL_SEED)
seeds = sample(1:10000, NSEEDS)
message(paste("Random seeds:", paste(seeds, collapse=", ")), appendLF=TRUE)
# Pre-processes test data to ready it for the LASSO.
# Returns a list with first entry the pre-processed dataset and second entry
# the classes for those data.
#
# dataFile: tab-separated expression matrix under `input_dir` (genes x samples).
# clinFile: matching clinical file; column 1 is sample type, column 2 the class.
# Returns list(data = samples-x-genes matrix, response = factor of classes).
preprocessTCGA = function(dataFile, clinFile) {
  data = read.table(paste0(input_dir, dataFile),
                    sep='\t',
                    row.names=1,
                    header=T)
  # Samples as rows, genes as columns.
  dataT = t(data)
  # Remove duplicate samples from the data (keep only .01/.11 sample codes).
  dataT = dataT[str_detect(substr(rownames(dataT),1,15), "(\\.01)|(\\.11)"),]
  # Read clinical data.
  clin = read.table(paste0(input_dir, clinFile), sep='\t', row.names=1, header=T)
  # Make normal samples into their own class.
  clin[,2] = as.character(clin[,2])
  clin[clin[,1]=="Solid Tissue Normal",2] = "NORMAL"
  clin[,2] = as.factor(clin[,2])
  # Remove samples without cancer labels.
  clin = subset(clin, clin[,1] != "")
  clin[,1] = droplevels(clin[,1])
  # Remove samples without class labels.
  clin = subset(clin, clin[,2] != "")
  clin[,2] = droplevels(clin[,2])
  # Remove samples with unused class.
  clin = subset(clin, clin[,2] != "Low purity c2")
  clin[,2] = droplevels(clin[,2])
  # Map long class names to simple class names.
  # NOTE(review): this mapping is disabled here but IS applied to the training
  # labels later in the script -- confirm the asymmetry is intended.
  # old_names = c("COADREAD non-CIMP c11", "COADREAD CIMPL c10", "COADREAD CIMP c12", "NORMAL")
  # new_names = c("NCIMP", "CIMPL", "CIMP", "NORMAL")
  # map = setNames(new_names, old_names)
  # clin[,2] = sapply(clin[,2], function(x) map[as.character(x)])
  # Filter clinical data to include only those samples with expression data.
  clin.response = clin[,2][match(substr(chartr('-', '.', rownames(dataT)), 1, 15), chartr('-', '.', rownames(clin)))]
  clin.response = na.omit(clin.response)
  # Filter expression data to include only those samples with ids also in clinical data.
  dataT = dataT[substr(chartr('-', '.', rownames(dataT)), 1, 15) %in% chartr('-', '.', rownames(clin)),]
  # Filter any data with missing clinical annotation for tumor class.
  dataT = dataT[!is.na(clin[,2][match(substr(chartr('-', '.', rownames(dataT)),1,15),
                                      substr(chartr('-', '.', rownames(clin)),1,15))]),]
  return (list(data=dataT, response=clin.response))
} # end function preprocessTCGA
# Predict subtypes on TCGA data using a previously trained model and
# summarize prediction quality.
#
# title: label describing the run (currently unused; kept for compatibility).
# data:  list with $data (expression matrix) and $response (true classes),
#        as produced by preprocessTCGA().
# model: a fitted cv.glmnet multinomial model.
# Returns the caret::confusionMatrix of predicted vs. true classes.
predictTCGA = function(title, data, model) {
  # Class predictions at the one-standard-error lambda.
  predicted = predict(model, data$data, s = "lambda.1se", type = "class")
  # Force the full level set so the contingency table always has all classes.
  predicted = factor(predicted, c("CIMP", "CIMPL", "NCIMP", "NORMAL"))
  contingency = table(pred = predicted, true = data$response, exclude="none")
  # Accuracy/kappa/per-class statistics derived from the table.
  return(confusionMatrix(contingency))
}
# Load the training data (reference expression matrix, genes x samples).
train = read.table(paste0(input_dir, ref_input), sep='\t', row.names=1, header=T)
# Transpose rows and columns (samples become rows).
trainT = t(train)
# Remove duplicate samples from the data (keep only .01/.11 sample codes).
trainT = trainT[str_detect(substr(rownames(trainT),1,15), "(\\.01)|(\\.11)"),]
# Load the clinical features.
clin = read.table(paste0(input_dir, ref_clin), sep='\t', row.names=1, header=T)
# Make normal samples into their own class.
clin[,2] = as.character(clin[,2])
clin[clin[,1]=="Solid Tissue Normal",2] = "NORMAL"
clin[,2] = as.factor(clin[,2])
# Remove samples without cancer labels.
clin = subset(clin, clin[,1] != "")
clin[,1] = droplevels(clin[,1])
# Remove samples without class labels.
clin = subset(clin, clin[,2] != "")
clin[,2] = droplevels(clin[,2])
# Remove samples with unused class.
clin = subset(clin, clin[,2] != "Low purity c2")
clin[,2] = droplevels(clin[,2])
# Map long class names to simple class names.
old_names = c("COADREAD non-CIMP c11", "COADREAD CIMPL c10", "COADREAD CIMP c12", "NORMAL")
new_names = c("NCIMP", "CIMPL", "CIMP", "NORMAL")
map = setNames(new_names, old_names)
clin[,2] = sapply(clin[,2], function(x) map[as.character(x)])
# Filter the samples to remove those without classification.
trainT = trainT[substr(rownames(trainT),1,15) %in% substr(chartr('-', '.', rownames(clin)),1,15),]
# Filter clinical data to include only those entries that also have expression data.
trainclin.classes = clin[,2][match(substr(chartr('-', '.', rownames(trainT)),1,15), substr(chartr('-', '.', rownames(clin)),1,15))]
trainclin.classes = na.omit(trainclin.classes)
# Persist the reference class labels for later inspection.
write.table(trainclin.classes, file=paste0(output_dir, "coad_ref_classes.txt"))
# Filter any expression data with missing clinical annotation for tumor class.
trainT = trainT[!is.na(clin[,2][match(substr(chartr('-', '.', rownames(trainT)),1,15), substr(chartr('-', '.', rownames(clin)),1,15))]),]
# Load the training data (NPN-normalized reference matrix).
npn_train = read.table(paste0(input_dir, npn_ref_input), sep='\t', row.names=1, header=T)
# Transpose rows and columns.
npntrainT = t(npn_train)
# Remove duplicate samples from the data.
npntrainT = npntrainT[str_detect(substr(rownames(npntrainT),1,15), "(\\.01)|(\\.11)"),]
# Filter the samples to remove those without classification.
npntrainT = npntrainT[substr(rownames(npntrainT),1,15) %in% substr(chartr('-', '.', rownames(clin)),1,15),]
# Pre-process the TCGA data (one normalization variant per dataset).
tdm = preprocessTCGA(tdm_input, tdm_clin)
qn = preprocessTCGA(qn_input, qn_clin)
# NOTE(review): `log` shadows base::log for the rest of the script.
log = preprocessTCGA(log_input, log_clin)
npn = preprocessTCGA(npn_input, npn_clin)
un = preprocessTCGA(un_input, un_clin)
# Determine which samples are shared between training and test data.
# (npn is compared against its own npn-normalized training matrix.)
tdm_shared = substr(chartr('-', '.', rownames(tdm$data)),1,15) %in% substr(chartr('-', '.', rownames(trainT)),1,15)
qn_shared = substr(chartr('-', '.', rownames(qn$data)),1,15) %in% substr(chartr('-', '.', rownames(trainT)),1,15)
log_shared = substr(chartr('-', '.', rownames(log$data)),1,15) %in% substr(chartr('-', '.', rownames(trainT)),1,15)
npn_shared = substr(chartr('-', '.', rownames(npn$data)),1,15) %in% substr(chartr('-', '.', rownames(npntrainT)),1,15)
un_shared = substr(chartr('-', '.', rownames(un$data)),1,15) %in% substr(chartr('-', '.', rownames(trainT)),1,15)
# Filter data to include only those samples not shared between training and test.
tdm$data = tdm$data[!tdm_shared,]
tdm$response = tdm$response[!tdm_shared]
qn$data = qn$data[!qn_shared,]
qn$response = qn$response[!qn_shared]
log$data = log$data[!log_shared,]
log$response = log$response[!log_shared]
npn$data = npn$data[!npn_shared,]
npn$response = npn$response[!npn_shared]
un$data = un$data[!un_shared,]
un$response = un$response[!un_shared]
# Pre-allocate vectors for classification results (one entry per seed).
tdm_accs = vector(mode="list", length=length(seeds))
qn_accs = vector(mode="list", length=length(seeds))
log_accs = vector(mode="list", length=length(seeds))
npn_accs = vector(mode="list", length=length(seeds))
un_accs = vector(mode="list", length=length(seeds))
# Perform a number of iterations equal to the number of random seeds.
# At each iteration, perform n-fold cross validation to build a model on the training data,
# then use that model to make predictions on the test data.
for(seednum in 1:length(seeds)) {
# Train a model for classifying cancer subtype.
set.seed(seeds[seednum])
lasso.model=cv.glmnet(trainT, trainclin.classes, family="multinomial", parallel = F, type.measure="class", nfolds=NFOLDS)
plot(lasso.model)
set.seed(seeds[seednum])
npn.lasso.model=cv.glmnet(npntrainT, trainclin.classes, family="multinomial", parallel=F, type.measure="class", nfolds=NFOLDS)
acc = predictTCGA("TDM Results", tdm, lasso.model)
tdm_accs[[seednum]] = acc
tdmdf = cbind(tdmdf,as.vector(acc$byClass[,8]))
acc = predictTCGA("QN Results", qn, lasso.model)
qn_accs[[seednum]] = acc
qndf = cbind(qndf,as.vector(acc$byClass[,8]))
acc = predictTCGA("LOG Results", log, lasso.model)
log_accs[[seednum]] = acc
logdf = cbind(logdf,as.vector(acc$byClass[,8]))
acc = predictTCGA("NPN Results", npn, npn.lasso.model)
npn_accs[[seednum]] = acc
npndf = cbind(npndf, as.vector(acc$byClass[,8]))
acc = predictTCGA("Untransformed Results", un, lasso.model)
un_accs[[seednum]] = acc
undf = cbind(undf,as.vector(acc$byClass[,8]))
}
# Build a table of accuracies across all datasets.
accuracies = data.frame(NCIMP=numeric(0), CIMPL=numeric(0), CIMP=numeric(0), NORMAL=numeric(0))
accuracies = rbind(accuracies, apply(tdmdf,1,function(x) mean(x)))
accuracies = rbind(accuracies, apply(qndf,1,function(x) mean(x)))
accuracies = rbind(accuracies, apply(logdf,1,function(x) mean(x)))
accuracies = rbind(accuracies, apply(npndf,1,function(x) mean(x)))
accuracies = rbind(accuracies, apply(undf,1,function(x) mean(x)))
rownames(accuracies) = c("TDM", "QN", "LOG", "NPN", "UNTR")
colnames(accuracies) = c("CIMP", "CIMPL", "NCIMP", "NORMAL")
# write.table(accuracies, file=paste0(output_dir,"/COADlassoSubtypeAccuracies.txt"), sep='\t', row.names=T, col.names=T)
# Aggregate accuracies:
tdm_tables = lapply(tdm_accs, function(x) x$table)
qn_tables = lapply(qn_accs, function(x) x$table)
log_tables = lapply(log_accs, function(x) x$table)
npn_tables = lapply(npn_accs, function(x) x$table)
un_tables = lapply(un_accs, function(x) x$table)
tdm_reduced = Reduce("+", tdm_tables) / length(tdm_tables)
qn_reduced = Reduce("+", qn_tables) / length(qn_tables)
log_reduced = Reduce("+", log_tables) / length(log_tables)
npn_reduced = Reduce("+", npn_tables) / length(npn_tables)
un_reduced = Reduce("+", un_tables) / length(un_tables)
tdm_reduced[1:4,1:4] = t(apply(tdm_reduced, 1, function(x) as.integer(round(x))))[1:4,1:4]
qn_reduced[1:4,1:4] = t(apply(qn_reduced, 1, function(x) as.integer(round(x))))[1:4,1:4]
log_reduced[1:4,1:4] = t(apply(log_reduced, 1, function(x) as.integer(round(x))))[1:4,1:4]
npn_reduced[1:4,1:4] = t(apply(npn_reduced, 1, function(x) as.integer(round(x))))[1:4,1:4]
un_reduced[1:4,1:4] = t(apply(un_reduced, 1, function(x) as.integer(round(x))))[1:4,1:4]
tdm_cm = confusionMatrix(tdm_reduced)
qn_cm = confusionMatrix(qn_reduced)
log_cm = confusionMatrix(log_reduced)
npn_cm = confusionMatrix(npn_reduced)
un_cm = confusionMatrix(un_reduced)
saveRDS(log_cm, file=paste0(output_dir, "coad_log_cm.RDS"))
print(tdm_cm)
print(qn_cm)
print(log_cm)
print(npn_cm)
print(un_cm)
all_accs = data.frame(rbind(tdm_cm$overall, qn_cm$overall, log_cm$overall, npn_cm$overall, un_cm$overall))
all_accs = cbind(all_accs, method=c("TDM", "QN", "LOG", "NPN", "UNTR"))
all_accs$method = factor(all_accs$method, levels=all_accs$method)
nir = all_accs[1,]$AccuracyNull
# Plot accuracies
ci = aes(ymin=AccuracyLower, ymax=AccuracyUpper)
cbPalette = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#cbPalette = c("#56B4E9", "#E69F00", "#000000", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
ggplot(all_accs, aes(x=factor(method), y=Accuracy, color=method)) +
geom_point(size=3) +
geom_errorbar(ci, width=.3) +
ylab("Total Accuracy") +
xlab("Normalization") +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
geom_hline(aes(yintercept = nir), linetype="longdash") +
scale_color_manual(values=cbPalette)
ggsave(paste0(output_dir, ACCURACY_PLOT_FILENAME), plot=last_plot(), width=3, height=2.5)
cbPalette = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
ggplot(data = all_accs) +
ylab("Kappa") +
xlab("Normalization") +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
geom_point(aes(x=factor(method), y=Kappa, color=method), shape=18, size=4) +
scale_color_manual(values=cbPalette)
ggsave(paste0(output_dir, KAPPA_PLOT_FILENAME), plot=last_plot(), width=3, height=2.5)
coad_all = rbind(tdm_cm$byClass[,8],
qn_cm$byClass[,8],
log_cm$byClass[,8],
npn_cm$byClass[,8],
un_cm$byClass[,8])
coad_melted = melt(coad_all)
colnames(coad_melted) = c("Dataset", "Subtype", "Accuracy")
coad_melted$Dataset = c("TDM", "QN", "LOG", "NPN", "UNTR")
coad_melted$Dataset = factor(coad_melted$Dataset, levels=c("TDM", "QN", "LOG", "NPN", "UNTR"))
cbPalette = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
ggplot(data=coad_melted, aes(x=Dataset, y=Accuracy, ymax=1)) +
ylim(0.45,1) +
coord_flip() +
geom_point(size=3, aes(colour=Dataset)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
theme(plot.title=element_text(hjust=0)) +
ylab("Balanced Accuracy") + theme_bw() + facet_wrap(~Subtype, ncol=5) +
theme(legend.position="none") +
scale_color_manual(values=cbPalette)
ggsave(paste0(output_dir, BALANCED_PLOT_FILENAME), plot=last_plot(), height=2.5, width=6)
write.table(all_accs, file=paste0(output_dir, ACCURACY_TABLE_FILENAME), sep='\t', row.names=T, col.names=T)
|
33fc3301f1faeea2c4c6060051144f13d47d4626 | 1f1440e7130ef24625b1538b761d468d7fe3cc70 | /cachematrix.R | c1858aa433509784165655b147e2ac8b57b4b38d | [] | no_license | efcjunior/ProgrammingAssignment2 | f10b4040f4a1539a6716911b0d7cab6fc9bc6492 | 20077aaca80c50bfcf041ef4b7aae5654157aeb2 | refs/heads/master | 2021-01-18T12:49:56.898813 | 2016-09-12T02:17:10 | 2016-09-12T02:17:10 | 67,937,584 | 0 | 0 | null | 2016-09-11T15:27:17 | 2016-09-11T15:27:16 | null | UTF-8 | R | false | false | 1,556 | r | cachematrix.R | ## These functions calculate the inverse of a matrix and cache the inverse.
## Once the matrix inverse is computed (by calling the "cacheSolve" function),
## the value of the inverse is cached until the values in the matrix are changed.
## makeCacheMatrix creates a special "matrix", which is really a list containing
## the following functions:
## 1. set(): set the value of the matrix
## 2. get(): get the value of the matrix
## 3. setinv(): set the value of the matrix inverse
## 4. getinv(): get the value of the matrix inverse.
##
## While calling makeCacheMatrix, always make sure to assign it a variable,
## and input a square matrix, e.g.,
## > my_matrix <- makeCacheMatrix(matrix(1:4, 2, 2))
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(inv) i <<- inv
getinv <- function() i
list(set = set,
get = get,
setinv = setinv,
getinv = getinv)
}
## This function calculates the inverse of matrix in x. If the matrix inverse
## of x already exists, the function returns the cached value.
## Always make sure to run this function using the variable to which the
## makeCacheMatrix was assigned, e.g.,
## > my_matrix <- makeCacheMatrix(matrix(1:4, 2, 2))
## > cacheSolve(my_matrix)
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
m <- x$get()
i <- solve(m, ...)
x$setinv(i)
i
}
|
55200cce06d3893bd3e2cfc5516413ea5045a8d8 | 5c83a208cb5c9a7a7826a77639c36c591879bc9c | /pomegranate_cytospora.R | e25e6b083cbed13c04464e9f3ba34560099bd406 | [] | no_license | robchoudhury/pomegranate_cytospora | 3dfce3f56fdbb557f903f249823e7105d8d12dc2 | baca251af17925ec35ec68596f53b8645d46720f | refs/heads/master | 2020-03-27T09:16:11.534991 | 2018-08-27T17:05:05 | 2018-08-27T17:05:05 | 146,326,473 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,416 | r | pomegranate_cytospora.R | library(tidyverse)
conidia<-read_csv("data/conidia.csv") %>%
drop_na(isolate) %>%
mutate(lw_ratio=length/width) %>%
mutate(isolate=as.factor(isolate))
culture_rate<- read_csv("data/culture_rate.csv") %>%
mutate(rate=size/days)%>%
mutate(isolate=as.factor(isolate))
pycnidia<- read_csv("data/pycnidia.csv")
ggplot(conidia, aes(lw_ratio, group=isolate, fill=isolate))+
geom_density(alpha=0.5) +
theme_minimal()+
xlab("Conidial Length to Width Ratio")+
ylab("Density")+
theme(legend.position = c(0.2, 0.8),
axis.text = element_text(size=18),
axis.title=element_text(size=20))+
viridis::scale_fill_viridis(begin = 0, end = 0.8, discrete = T)
ggplot(culture_rate, aes(isolate, rate, group=isolate, color=isolate))+
geom_jitter(alpha=0.7)+
geom_boxplot(alpha=0.3, outlier.shape = NA)+
theme_minimal() +
ylab("Growth Rate (cm/day)")+
xlab("Isolate")+
theme(legend.position = c(0.6, 0.2),
axis.text = element_text(size=18),
axis.title=element_text(size=20))
ggplot(pycnidia, aes(size))+
geom_density(size=2)+
geom_vline(xintercept = mean(pycnidia$size),
linetype="dotted",
color="red",
size=1)+
theme_minimal() +
ylab("Density of # of Pycnidia")+
xlab("Size (um)")+
theme(
axis.text = element_text(size=18),
axis.title=element_text(size=20))
|
0343c3daf735c41e58d5fa85425bdd2162ea0bea | 7356b450592781768672f02984072349e6d2fb0e | /project/human-RBP-analysis/find_binding_sites.R | 7b623ce83e2a50f20146ac10a188a9a29fdb5691 | [] | no_license | bpickett/covid-gene-expression | be330d256a90126e7086595cd6154586f8d47cb0 | e6934d00c98c303d5fe39bdf8bb60c7712a0562b | refs/heads/master | 2021-05-26T18:00:20.918924 | 2020-04-07T17:07:20 | 2020-04-07T17:07:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,093 | r | find_binding_sites.R | ##############################################
### Modify this section for your local environment!!###
# Set working directory
setwd("~/ngc-workspaces/mnt-covid-omics/")
# List input data files
## RBPDB
exp_table = "rbpdb/RBPDB_v1.3.1_experiments_human_2012-11-21.tdt"
protexp_table = "rbpdb/RBPDB_v1.3.1_protExp_human_2012-11-21.tdt"
prot_table = "rbpdb/RBPDB_v1.3.1_proteins_human_2012-11-21.tdt"
## The above three files were downloaded from http://rbpdb.ccbr.utoronto.ca/downloads/RBPDB_v1.3.1_human_2012-11-21_TDT.zip. Unzip the downloaded directory to get the tables.
pwm_dir = "rbpdb/matrices_human/PWMDir"
# The pwm_dir contains PWM files downloaded from http://rbpdb.ccbr.utoronto.ca/downloads/matrices_human.zip.
## NCBI
ref_file="ncbi/ref/sequence.fasta.txt"
# This file is the reference genome for SARS-CoV-2. It was downloaded from https://www.ncbi.nlm.nih.gov/nuccore/NC_045512.
################################################
# Import requirements
library(data.table)
library(TFBSTools)
library(seqinr)
library(Biostrings)
# Define functions
# Funation to load PWMs
loadPWM = function(PWMfile, id, name){
mat=as.matrix(read.table(PWMfile))
pwm = PWMatrix(profileMatrix=matrix(c(mat), byrow=TRUE, nrow=4, dimnames=list(c("A", "C", "G", "T"))), ID=as.character(id), name=name)
return(pwm)
}
# Function to scan sequence with PWM
scanGenome = function(ref_file, pwm, expID, protID, min.score="80%", strand="*"){
# Read the genome sequence from FASTA file
ref = read.fasta(ref_file, as.string = T, forceDNAtolower = F)
refString=DNAString(ref[[1]][[1]])
# Search the genome with the given PWM
siteset = searchSeq(pwm, refString, min.score=min.score, strand=strand, seqname = names(ref))
# Format the results as a table
siteset = as.data.table(writeGFF3(siteset))
# Add attributes to the table
siteset_attributes = siteset[, tstrsplit(attributes, split=";|=", perl=T)]
siteset[, tf:= siteset_attributes[, V2]]
siteset[, seq:= siteset_attributes[, V6]]
# Add experiment and protein IDs to the table
siteset[, expID:= expID]
siteset[, protID:= protID]
return(siteset)
}
# Load tables from RBPDB
exp = fread(exp_table, header=F, col.names = c("id", "pmID", "exptype", "notes", "sequence_motif", "SELEX_file",
"aligned_SELEX_file", "aligned_motif_file", "PWM_file", "PFM_file",
"logo_file", "invivo_notes", "invivo_file", "secondary_structure", "flag"),
na.strings = "\\N")
protexp = fread(protexp_table, header=F, col.names = c("protID", "expID", "homolog", "id"), na.strings = "\\N")
prot = fread(prot_table, header=F, col.names = c("id", "annotID", "createDate", "updateDate", "geneName", "geneDesc",
"species", "taxID", "domains", "aliases", "flag", "flagNote", "PDBIDs"),
na.strings = "\\N")
# Select only the experiments for which PWM files are available
exp_with_pwm = exp[!is.na(PWM_file)]
# What types of experiments are these?
exp_with_pwm[, .N, by=exptype]
# Match these experiment to their protein IDs
exp_with_pwm = merge(exp_with_pwm, protexp, by.x="id", by.y="expID", all.x=T)
# Match these experiment to their protein names and other protein information
exp_with_pwm = merge(exp_with_pwm, prot, by.x="protID", by.y="id", all.x=T)
# For each experiment, load the matched PWM file
pwms = list()
for(i in 1:nrow(exp_with_pwm)){
path_to_pwm_file = paste0(pwm_dir, "/", exp_with_pwm[i, PWM_file])
expID = exp_with_pwm[i, id]
protName = exp_with_pwm[i, geneName]
pwms[[i]] = loadPWM(PWMfile = path_to_pwm_file, id = expID, name = protName)
}
# Scan the virus genome with each PWM to identify RBP-binding sites on the viral genome
sitesets = list()
for(i in 1:nrow(exp_with_pwm)){
pwm = pwms[[i]]
expID = exp_with_pwm[i, id]
protID = exp_with_pwm[i, protID]
sitesets[[i]] = scanGenome(ref_file = ref_file, pwm = pwm, expID=expID, protID=protID)
}
sitesets = rbindlist(sitesets)
# Save sitesets
save(sitesets, file="sitesets.RData")
|
c2fe166498583144162ed97200de755519aa033d | ab9315145932e1d0edcb1b52df1cc3ed078a317c | /scratch_work/master.r | b4e86d9fc09bcc6b15fdcf57783b0ec26252798f | [] | no_license | johnchower/flashreport | cfcbefc1afe5d77da6857ae21323d48ed3a6db96 | c23dca38331dc9632c251983a2db59cf95dae664 | refs/heads/master | 2020-07-28T12:31:42.252620 | 2017-05-03T16:39:10 | 2017-05-03T16:39:10 | 73,411,648 | 0 | 0 | null | 2017-05-03T20:53:11 | 2016-11-10T18:54:18 | R | UTF-8 | R | false | false | 5,903 | r | master.r | library(RPostgreSQL)
# Parse arguments
optionList <- list(
optparse::make_option(
opt_str = '--host'
, type = 'character'
, default = 'localhost'
, help = 'Hostname for database connection'
) ,
optparse::make_option(
opt_str = '--port'
, type = 'character'
, default = '5441'
, help = 'Port for database connection'
) ,
optparse::make_option(
opt_str = '--user'
, type = 'character'
, default = NULL
, help = 'User name for database connection'
) ,
optparse::make_option(
opt_str = '--pass'
, type = 'character'
, default = NULL
, help = 'Password for database connection'
) ,
optparse::make_option(
opt_str = '--rundate'
, type = 'character'
, default = as.character(Sys.Date())
, help = 'The most recent date to include in the analysis.
Must be entered in the form yyyy-mm-dd. Defaults to current date.'
) ,
optparse::make_option(
opt_str = '--minweek'
, type = 'integer'
, default = 1
, help = 'The latest week to include in the analysis.
If set to 1, then the most recent week in the analysis will be the week
preceding the rundate (not inclusive). If set to 2, then the most recent
week in the analysis will be the week before the week preceding the rundate.
[default = %default]'
) ,
optparse::make_option(
opt_str = '--maxweek'
, type = 'integer'
, default = 1
, help = 'The earliest week to include in the analysis.
Works the same way as minweek. Together, rundate, minweek,
and maxweek determine the overall date range reported in the results.
For example, rundate = 2016-12-09, minweek = 1, maxweek = 2 will give
results for the weeks (2016-12-02 - 2016-12-08) and (2016-11-25 - 2016-12-07).
[default = %default]'
) ,
optparse::make_option(
opt_str = '--yearbeginning'
, type = 'character'
, default = '2016-01-01'
, help = 'User name for database connection'
) ,
optparse::make_option(
opt_str = '--outloc'
, type = 'character'
, default = NULL
, help = 'Location to save the output.
Enter as /path/to/output not /path/to/output/'
) ,
optparse::make_option(
opt_str = '--outname'
, type = 'character'
, default = NULL
, help = 'Name of output csv file. Enter as name_of_output not name_of_output.csv'
)
)
opt_parser <- optparse::OptionParser(option_list = optionList)
opt <- optparse::parse_args(opt_parser)
# Connect to redshift
glootility::connect_to_redshift()
# driver <- DBI::dbDriver("PostgreSQL")
# connection <- RPostgreSQL::dbConnect(
# driver
# , dbname = 'insightsbeta'
# , host = opt$host
# , port = opt$port
# , user = opt$user
# , password = opt$pass
# )
# assign("redshift_connection"
# , list(drv = driver, con = connection)
# , envir = .GlobalEnv)
# Define temporary tables that future queries will use.
dbSendQuery(redshift_connection$con,
flashreport::query_user_flash_cat
)
dbSendQuery(redshift_connection$con,
flashreport::query_pa_flash_cat
)
# Define date ranges and query types to get results for.
run_date <- as.Date(opt$rundate)
weeks_back <- as.numeric(opt$minweek):as.numeric(opt$maxweek)
start_dates <- run_date - 7*weeks_back
end_dates <- start_dates + 6
year_beginning <- as.Date(opt$yearbeginning)
date_ranges <- data.frame(
range_types =
c(
rep('week', times = length(weeks_back))
, rep('ytd', times = length(weeks_back))
)
, max_dates = rep(end_dates, times = 2)
, stringsAsFactors = F
)
query_types <- paste0(c('au', 'pa', 'notifications'), 'Query')
# Run queries and put results into a long data frame.
long_flash_report <- flashreport::get_results(date_ranges, query_types)
# Postprocess results.
long_flash_report_dates_formatted <-
flashreport::format_LFR_dates(long_flash_report )
long_flash_report_2 <-
flashreport::curate_user_groups(long_flash_report_dates_formatted)
long_flash_report_subaggregate <-
flashreport::summarise_by_subaggregate(long_flash_report_2)
long_flash_report_aggregate <-
flashreport::summarise_in_aggregate(long_flash_report_2)
long_flash_report_3 <- rbind(long_flash_report_2
, long_flash_report_subaggregate
, long_flash_report_aggregate)
# Calculate WAU percentage for each user group, subaggregate, and aggregate,
# and for each date range.
long_flash_report_WAU_pct <-
flashreport::calculate_WAU_percentage(long_flash_report_3)
# Calculate total actions for each user group, subaggregate, and aggregate,
# and for each date range.
long_flash_report_total_actions <-
flashreport::calculate_total_actions(long_flash_report_3)
# Calculate average actions per WAU for each user group, subaggregate, and aggregate,
# and for each date range.
long_flash_report_actions_per_AU <-
flashreport::calculate_actions_per_AU(
long_flash_report_3
, long_flash_report_total_actions
)
# Calculate notifications_response_rate for each user group, subaggregate, and aggregate,
# and for each date range.
long_flash_report_NRR <-
flashreport::calculate_NRR(long_flash_report_3)
long_flash_report_final <- rbind(long_flash_report_3
, long_flash_report_WAU_pct
, long_flash_report_total_actions
, long_flash_report_actions_per_AU
, long_flash_report_NRR)
write.csv(long_flash_report_final
, file = paste0(opt$outloc, "/", opt$outname, ".csv")
, row.names = F)
dbDisconnect(redshift_connection$con)
|
83588147c4f3b09800a6bd3dfe1ea9e7066dc62c | 10e2f579a7e84ef8f7186265fb1fc12c9db62bde | /R/kml.R | c2995ae96a7359c83cdea4ed74f8a2ef69e8d159 | [] | no_license | cran/plotKML | bddd88464e2fa5b0c981086a4f8a33a4fdbeac37 | 068aaaf06a1976d202222142a95f2e951da0f604 | refs/heads/master | 2022-06-30T19:58:42.092133 | 2022-06-07T13:00:02 | 2022-06-07T13:00:02 | 17,698,575 | 8 | 8 | null | null | null | null | UTF-8 | R | false | false | 735 | r | kml.R |
kml.Spatial <- function(
obj,
folder.name = normalizeFilename(deparse(substitute(obj, env=parent.frame()))),
file.name = paste(normalizeFilename(deparse(substitute(obj, env=parent.frame()))), ".kml", sep=""),
kmz = get("kmz", envir = plotKML.opts),
...
){
kml_open(folder.name = folder.name, file.name = file.name)
kml_layer(obj = obj, ...)
kml_close(file.name = file.name)
if (kmz == TRUE){
kml_compress(file.name = file.name)
}
}
setMethod("kml", "Spatial", kml.Spatial)
setMethod("kml", "Raster", kml.Spatial)
setMethod("kml", "SoilProfileCollection", kml.Spatial)
setMethod("kml", "SpatialPhotoOverlay", kml.Spatial)
setMethod("kml", "STIDF", kml.Spatial)
# end of script; |
38ca587c62248a8741499f643bba238f3cb27e21 | c996829410125ef1f9e5193b0e98ef765661b57a | /tests/testthat/test-morphology.R | 7df22899f4c5cab5132c9a064910f98bfe366a9c | [
"MIT"
] | permissive | heavywatal/rtumopp | 9602096728611b75013f3bb936969edf3316ac92 | f3008188d41674967a573bdf987067aba536ebbb | refs/heads/master | 2023-07-08T19:35:07.951377 | 2023-07-06T14:53:08 | 2023-07-06T14:53:40 | 54,698,944 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,395 | r | test-morphology.R | test_that("add_surface works", {
result = tumopp("-D3 -Chex -N256 -Llinear")
surface_df = result$population[[1L]] |>
filter_extant() |>
add_surface(result$coord, result$dimensions)
surface_df |>
dplyr::count(surface) |>
dplyr::pull(n) |>
expect_length(2L)
})
test_that("add_phi works", {
result = tumopp("-D3 -Cmoore -N32")
pop = result$population[[1L]] |>
filter_extant() |>
add_phi(result$coord, result$dimensions)
max_phi = sum(structuring_element(result$coord, result$dimensions)) - 1L
expect_true(all(pop[["phi"]] %in% seq.int(0L, max_phi)))
result = tumopp("-D3 -Cneumann -N32")
pop = result$population[[1L]] |>
filter_extant() |>
add_phi(result$coord, result$dimensions)
max_phi = sum(structuring_element(result$coord, result$dimensions)) - 1L
expect_true(all(pop[["phi"]] %in% seq.int(0L, max_phi)))
result = tumopp("-D3 -Chex -N32")
pop = result$population[[1L]] |>
filter_extant() |>
add_phi(result$coord, result$dimensions)
max_phi = sum(structuring_element(result$coord, result$dimensions)) - 1L
expect_true(all(pop[["phi"]] %in% seq.int(0L, max_phi)))
result = tumopp("-D2 -Cmoore -N32")
pop = result$population[[1L]] |>
filter_extant() |>
add_phi(result$coord, result$dimensions)
max_phi = sum(structuring_element(result$coord, result$dimensions)) - 1L
expect_true(all(pop[["phi"]] %in% seq.int(0L, max_phi)))
result = tumopp("-D2 -Cneumann -N32")
pop = result$population[[1L]] |>
filter_extant() |>
add_phi(result$coord, result$dimensions)
max_phi = sum(structuring_element(result$coord, result$dimensions)) - 1L
expect_true(all(pop[["phi"]] %in% seq.int(0L, max_phi)))
result = tumopp("-D2 -Chex -N32")
pop = result$population[[1L]] |>
filter_extant() |>
add_phi(result$coord, result$dimensions)
max_phi = sum(structuring_element(result$coord, result$dimensions)) - 1L
expect_true(all(pop[["phi"]] %in% seq.int(0L, max_phi)))
})
test_that("detect_surface works", {
cells = expand_xyz(seq.int(-1L, 1L)) |> tibble::new_tibble()
cuboid = as_cuboid(cells, expand = 1L)
se = structuring_element("moore", 3L)
surface = filter_surface(cuboid, se) |>
expect_s3_class(c("cuboid", "array")) |>
expect_type("integer") |>
expect_length(5L**3L) |>
expect_setequal(c(0L, 1L))
expect_identical(sum(surface), sum(cuboid) - 1L)
# mmand::erode() does not erodes from edges
as_cuboid(cells, expand = 0L) |>
filter_surface(se) |>
expect_setequal(0L)
surface_df = cells |>
detect_surface(se) |>
expect_s3_class("data.frame")
surface_df |> dplyr::count(surface)
cells |>
dplyr::slice(0L) |>
detect_surface(se) |>
expect_s3_class("data.frame")
})
test_that("structuring_element works", {
se = structuring_element("moore", 3L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_type("integer") |>
expect_length(27L) |>
expect_setequal(1L)
expect_identical(dim(se), c(x = 3L, y = 3L, z = 3L))
expect_identical(sum(se), 27L)
se = structuring_element("neumann", 3L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_type("integer") |>
expect_length(27L) |>
expect_setequal(c(0L, 1L))
expect_identical(dim(se), c(x = 3L, y = 3L, z = 3L))
expect_identical(sum(se), 7L)
se = structuring_element("hex", 3L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_type("integer") |>
expect_length(27L) |>
expect_setequal(c(0L, 1L))
expect_identical(dim(se), c(x = 3L, y = 3L, z = 3L))
expect_identical(sum(se), 13L)
se = structuring_element("moore", 2L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_type("integer") |>
expect_length(9L) |>
expect_setequal(1L)
expect_identical(dim(se), c(x = 3L, y = 3L, z = 1L))
expect_identical(sum(se), 9L)
se = structuring_element("neumann", 2L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_type("integer") |>
expect_length(9L) |>
expect_setequal(c(0L, 1L))
expect_identical(dim(se), c(x = 3L, y = 3L, z = 1L))
expect_identical(sum(se), 5L)
se = structuring_element("hex", 2L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_type("integer") |>
expect_length(9L) |>
expect_setequal(c(0L, 1L))
expect_identical(dim(se), c(x = 3L, y = 3L, z = 1L))
expect_identical(sum(se), 7L)
})
test_that("cuboid class works", {
grid = expand_xyz(seq_len(4L), seq_len(3L), seq_len(2L)) |>
expect_s3_class("data.frame")
expect_identical(dim(grid), c(24L, 3L))
cuboid = as_cuboid(grid) |>
expect_s3_class(c("cuboid", "array")) |>
expect_length(24L) |>
expect_setequal(1L)
expect_identical(sum(cuboid), 24L)
expanded = as_cuboid(grid, expand = 1L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_length(120L) |>
expect_setequal(c(0L, 1L))
expect_identical(sum(expanded), sum(cuboid))
center = expanded[seq_len(4L) + 1L, seq_len(3L) + 1L, seq_len(2L) + 1L]
expect_identical(center, cuboid, ignore_attr = TRUE)
cuboid_df = expanded |>
as.data.frame() |>
expect_s3_class("data.frame")
expect_identical(dim(cuboid_df), c(120L, 4L))
expect_identical(cuboid_df |> as.array(), expanded)
expect_identical(cuboid_df |> as.data.frame(), cuboid_df)
cuboid_df |>
dplyr::filter(state > 0L) |>
dplyr::select(x:z) |>
expect_identical(grid, ignore_attr = TRUE)
})
test_that("cuboid class 2D works", {
grid = expand_xyz(seq_len(4L), seq_len(3L), 0L) |>
expect_s3_class("data.frame")
expect_identical(dim(grid), c(12L, 3L))
cuboid = as_cuboid(grid) |>
expect_s3_class(c("cuboid", "array")) |>
expect_length(12L) |>
expect_setequal(1L)
expect_identical(sum(cuboid), 12L)
expanded = as_cuboid(grid, expand = 1L) |>
expect_s3_class(c("cuboid", "array")) |>
expect_length(30L) |>
expect_setequal(c(0L, 1L))
expect_identical(sum(expanded), sum(cuboid))
center = expanded[seq_len(4L) + 1L, seq_len(3L) + 1L, 1L, drop = FALSE]
expect_identical(center, cuboid, ignore_attr = TRUE)
cuboid_df = expanded |>
as.data.frame() |>
expect_s3_class("data.frame")
expect_identical(dim(cuboid_df), c(30L, 4L))
expect_identical(cuboid_df |> as.array(), expanded)
expect_identical(cuboid_df |> as.data.frame(), cuboid_df)
cuboid_df |>
dplyr::filter(state > 0L) |>
dplyr::select(x:z) |>
expect_identical(grid, ignore_attr = TRUE)
})
|
ec06eb01665c4b076664edaa66bf1dcfe6822784 | 22ad2af5206643829c70b65a63b331891ed5af8a | /2019_Nature_Communications_Chloropicon_primus/Synteny/vsCHROMOSOMES/R/OLUCIvsMCOMMODA.matrix.R | aeb22180c13811bf98af34c68c0231fd914ae8be | [
"MIT"
] | permissive | PombertLab/Publication_scripts | ef93c8ede6be8ab3d57313ae65eca57478227a1b | 0cc4c36a5522525e4a9c7853a755e09af1c3b491 | refs/heads/master | 2023-06-22T11:13:38.866526 | 2023-06-14T15:02:45 | 2023-06-14T15:02:45 | 56,014,929 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,008 | r | OLUCIvsMCOMMODA.matrix.R | #!/usr/bin/Rscript
library(ComplexHeatmap)
library(RColorBrewer)
library(methods)
message ("Plotting OLUCIvsMCOMMODA.matrix...")
colors <- colorRampPalette(c("white", "blue", "magenta"))(n = 300)
ht_global_opt(heatmap_row_names_gp = gpar(fontsize = 8, fontface = "italic"), heatmap_column_names_gp = gpar(fontsize = 8), heatmap_column_title_gp = gpar(fontsize = 12))
pdf(file="OLUCIvsMCOMMODA.matrix.pdf", useDingbats=FALSE, width=5, height=4)
OLUCIvsMCOMMODA <- read.csv("OLUCIvsMCOMMODA.matrix", header=TRUE)
rownames(OLUCIvsMCOMMODA) <- OLUCIvsMCOMMODA[,1]
colnames(OLUCIvsMCOMMODA)
data_OLUCIvsMCOMMODA <- data.matrix(OLUCIvsMCOMMODA[,2:ncol(OLUCIvsMCOMMODA)])
ht_OLUCIvsMCOMMODA = Heatmap(data_OLUCIvsMCOMMODA, name = "OLUCIvsMCOMMODA", width = unit(51, "mm"), cluster_rows = FALSE, cluster_columns = FALSE, rect_gp = gpar(col = "white", lty = 1, lwd = 1), column_title = "OLUCIvsMCOMMODAmatrix", col = colors)
class(ht_OLUCIvsMCOMMODA)
draw(ht_OLUCIvsMCOMMODA, heatmap_legend_side = "right")
dev.off()
|
4ee244003d2ce70cc17f38a3b6e73a7bff485ac7 | b4d3e44e7da647defaf290cc219a0011e9bab5f9 | /R/compare_apsim_met.R | 4f2cf6ec0a6749c9ded90103090b453a74b897f9 | [] | no_license | femiguez/apsimx | 81313570fbbbb915ba1519ad0cd95ac0d38dc4df | 7df3b0a34d273e8035dbe22d457ed3911d07d7bc | refs/heads/master | 2023-08-10T13:49:57.777487 | 2023-05-22T14:53:51 | 2023-05-22T14:53:51 | 194,395,452 | 34 | 21 | null | 2023-07-28T15:27:39 | 2019-06-29T10:54:27 | R | UTF-8 | R | false | false | 28,896 | r | compare_apsim_met.R | #'
#' @title Compare two or more metfiles
#' @name compare_apsim_met
#' @rdname compare_apsim_met
#' @description Helper function which allows for a simple comparison among \sQuote{met} objects
#' @param ... met file objects. Should be of class \sQuote{met}
#' @param met.var meteorological variable to use in the comparison. Either \sQuote{all},
#' \sQuote{radn}, \sQuote{maxt}, \sQuote{mint}, \sQuote{rain}, \sQuote{rh},
#' \sQuote{wind_speed} or \sQuote{vp}.
#' @param labels labels for plotting and identification of \sQuote{met} objects.
#' @param check whether to check \sQuote{met} objects using \sQuote{check_apsim_met}.
#' @param verbose whether to print agreement stats to console (default is FALSE).
#' @note I have only tested this for 2 or 3 objects. The code is set up to be able to
#' compare more, but I'm not sure that would be all that useful.
#' @export
#' @return object of class \sQuote{met_mrg}, which can be used for further plotting
#' @examples
#' \dontrun{
#' require(nasapower)
#' ## Specify the location
#' lonlat <- c(-93, 42)
#' ## dates
#' dts <- c("2017-01-01","2017-12-31")
#' ## Get pwr
#' pwr <- get_power_apsim_met(lonlat = lonlat, dates = dts)
#' ## Get data from IEM
#' iem <- get_iem_apsim_met(lonlat = lonlat, dates = dts)
#' ## Compare them
#' cmet <- compare_apsim_met(pwr[,1:6], iem, labels = c("pwr","iem"))
#' ## Visualize radiation
#' plot(cmet, met.var = "radn")
#' plot(cmet, plot.type = "diff")
#' plot(cmet, plot.type = "ts")
#' ## Visualize maxt
#' plot(cmet, met.var = "maxt")
#' plot(cmet, met.var = "maxt", plot.type = "diff")
#' plot(cmet, met.var = "maxt", plot.type = "ts")
#' ## Cumulative rain
#' plot(cmet, met.var = "rain", plot.type = "ts", cumulative = TRUE)
#' }
#'
compare_apsim_met <- function(...,
                              met.var = c("all", "radn", "maxt",
                                          "mint", "rain", "rh",
                                          "wind_speed", "vp"),
                              labels,
                              check = FALSE,
                              verbose = FALSE){

  mets <- list(...)
  n.mets <- length(mets)
  met.var <- match.arg(met.var)

  if(n.mets < 2) stop("you should provide at least two met objects", call. = FALSE)

  met1 <- mets[[1]]
  if(!inherits(met1, "met"))
    stop("The first object should be of class 'met'", call. = FALSE)

  ## Optional labels are carried as an attribute for later plotting
  m.nms <- NULL
  if(!missing(labels)){
    m.nms <- labels
    if(length(labels) != n.mets)
      stop(" 'labels' length should be the same as the number of 'met' objects", call. = FALSE)
  }

  ## Check for any issues
  if(check) check_apsim_met(met1)

  ## Create the 'dates' column used as the merge key.  Dates are synthesized
  ## from the first year in the object, one row per day.
  nms1 <- names(met1)
  met.mrg <- as.data.frame(met1)
  yr <- as.character(met1$year[1])
  met.mrg$dates <- as.Date(0:(nrow(met1) - 1), origin = as.Date(paste0(yr, "-01-01")))
  names(met.mrg) <- c(paste0(names(met1), ".1"), "dates")

  for(i in 2:n.mets){
    if(!inherits(mets[[i]], "met")){
      ## BUG FIX: the message previously referenced 'met.i' before it was
      ## assigned, which raised "object 'met.i' not found" instead of the
      ## intended error message.
      stp.mssg <- paste("Object in position:", i, "is of class:", class(mets[[i]])[1],
                        ". Was expecting an object of class 'met'.")
      stop(stp.mssg, call. = FALSE)
    }
    met.i <- as.data.frame(mets[[i]])
    if(ncol(met1) != ncol(met.i)) stop("met objects should have the same number of columns", call. = FALSE)
    ## BUG FIX: the old condition all(!names %in% ...) only failed when *no*
    ## names matched; require every column of the first object to be present.
    if(!all(names(met1) %in% names(met.i))) stop("met objects should have the same column names", call. = FALSE)
    if(check) check_apsim_met(met.i)
    yr <- as.character(met.i$year[1])
    met.i$dates <- as.Date(0:(nrow(met.i) - 1), origin = as.Date(paste0(yr, "-01-01")))
    names(met.i) <- c(paste0(names(met1), ".", i), "dates")
    ## Merge on the synthetic dates; the year.i and day.i columns are kept
    met.mrg <- merge(met.mrg, met.i, by = "dates")
  }

  ## Skeleton of the agreement table: one row per (variable, adjacent pair)
  if(met.var == "all"){
    vrs <- rep(setdiff(names(met1), c("year", "day")), each = n.mets - 1)
    ans <- data.frame(variable = vrs,
                      vs = NA, labels = NA,
                      bias = NA, slope = NA, corr = NA)
    if(missing(labels)) ans$labels <- NULL
  }else{
    ans <- data.frame(variable = rep(met.var, n.mets - 1),
                      vs = NA, labels = NA,
                      bias = NA, slope = NA, corr = NA)
    if(missing(labels)) ans$labels <- NULL
  }

  ## Calculate agreement statistics (bias = intercept, slope, correlation,
  ## residual sum of squares, residual standard error) for all variables
  if(met.var == "all"){
    met.var.sel <- nms1[!(nms1 %in% c("year", "day", "dates"))]
    gvar.sel <- paste0(met.var.sel, collapse = "|")
    idx.met.mrg <- grep(gvar.sel, names(met.mrg))
    met.mrg.s <- met.mrg[,idx.met.mrg]
    k <- 1
    ## Compute agreement statistics for each variable and each adjacent pair
    for(i in met.var.sel){
      if(verbose) cat("Variable: ", i, "\n")
      ans$variable[k] <- i
      tmp <- met.mrg.s[, grep(i, names(met.mrg.s))]
      if(ncol(tmp) < 2) stop("merged selected variables should be at least of length 2", call. = FALSE)
      for(j in 2:ncol(tmp)){
        if(verbose) cat(names(tmp)[j - 1], " vs. ", names(tmp)[j], "\n")
        ans$vs[k] <- paste(names(tmp)[j - 1], "vs.", names(tmp)[j])
        if(!missing(labels)){
          if(verbose) cat("labels:", labels[j - 1], " vs. ", labels[j], "\n")
          ans$labels[k] <- paste(labels[j - 1], "vs.", labels[j])
        }
        fm0 <- lm(tmp[, j - 1] ~ tmp[, j])
        if(verbose) cat(" \t Bias: ", coef(fm0)[1], "\n")
        ans$bias[k] <- coef(fm0)[1]
        if(verbose) cat(" \t Slope: ", coef(fm0)[2], "\n")
        ans$slope[k] <- coef(fm0)[2]
        if(verbose) cat(" \t Corr: ", cor(tmp[,j - 1], tmp[, j]), "\n")
        ans$corr[k] <- cor(tmp[,j - 1], tmp[, j])
        if(verbose) cat(" \t RSS: ", deviance(fm0), "\n")
        ans$rss[k] <- deviance(fm0)
        if(verbose) cat(" \t RMSE: ", sigma(fm0), "\n")
        ans$rmse[k] <- sigma(fm0)
        k <- k + 1
      }
    }
  }

  if(met.var != "all"){
    ## Just select the appropriate variable
    idx.met.mrg <- grep(met.var, names(met.mrg))
    met.mrg.s <- met.mrg[, idx.met.mrg]
    if(verbose) cat("Variable ", met.var, "\n")
    ans$variable[1] <- met.var
    tmp <- met.mrg.s
    for(j in 2:ncol(tmp)){
      if(verbose) cat(names(tmp)[j - 1], " vs. ", names(tmp)[j], "\n")
      ans$vs[j - 1] <- paste(names(tmp)[j - 1], "vs.", names(tmp)[j])
      if(!missing(labels)){
        if(verbose) cat("labels", labels[j - 1], " vs. ", labels[j], "\n")
        ans$labels[j - 1] <- paste(labels[j - 1], "vs.", labels[j])
      }
      fm0 <- lm(tmp[, j - 1] ~ tmp[, j])
      if(verbose) cat(" \t Bias: ", coef(fm0)[1], "\n")
      ans$bias[j - 1] <- coef(fm0)[1]
      if(verbose) cat(" \t Slope: ", coef(fm0)[2], "\n")
      ans$slope[j - 1] <- coef(fm0)[2]
      if(verbose) cat(" \t Corr: ", cor(tmp[,j - 1], tmp[, j]), "\n")
      ans$corr[j - 1] <- cor(tmp[,j - 1], tmp[, j])
      if(verbose) cat(" \t RSS: ", deviance(fm0), "\n")
      ans$rss[j - 1] <- deviance(fm0)
      if(verbose) cat(" \t RMSE: ", sigma(fm0), "\n")
      ## BUG FIX: was 'ans$rmse <- sigma(fm0)', which overwrote the whole
      ## column on every iteration, so with more than two objects every row
      ## ended up holding the RMSE of the *last* pair only.
      ans$rmse[j - 1] <- sigma(fm0)
    }
  }

  attr(met.mrg, "met.names") <- m.nms
  attr(met.mrg, "length.mets") <- n.mets

  met.mrg <- structure(list(met.mrg = met.mrg, index.table = ans),
                       class = "met_mrg")
  invisible(met.mrg)
}
#' print method for met_mrg
#' @rdname compare_apsim_met
#' @description print method for \sQuote{met_mrg}
#' @param x object of class \sQuote{met_mrg}
#' @param ... additional arguments passed to print
#' @param digits digits to print (default is 2)
#' @export
#' @return it prints the index.table data.frame
print.met_mrg <- function(x, ..., digits = 2){
  ## Show only the agreement table; delegate formatting to print.data.frame.
  idx_tab <- x$index.table
  print(idx_tab, digits = digits)
}
#' Plotting function for weather data
#' @rdname compare_apsim_met
#' @description plotting function for compare_apsim_met, it requires ggplot2
#' @param x object of class \sQuote{met_mrg}
#' @param ... met file objects. Should be of class \sQuote{met}
#' @param plot.type either \sQuote{vs}, \sQuote{diff}, \sQuote{ts} - for time series or \sQuote{density}
#' @param pairs pair of objects to compare, defaults to 1 and 2 but others are possible
#' @param cumulative whether to plot cumulative values (default FALSE)
#' @param met.var meteorological variable to plot
#' @param id identification (not implemented yet)
#' @param span argument to be passed to \sQuote{geom_smooth}
#' @return it produces a plot
#' @export
#'
plot.met_mrg <- function(x, ..., plot.type = c("vs", "diff", "ts", "density"),
                         pairs = c(1, 2),
                         cumulative = FALSE,
                         met.var = c("radn", "maxt", "mint", "rain"),
                         id, span = 0.75){
  ## ggplot2 is a soft dependency: bail out gracefully if it is not installed.
  if(!requireNamespace("ggplot2", quietly = TRUE)){
    warning("ggplot2 is required for this plotting function")
    return(NULL)
  }
  ## Unpack the merged table built by compare_apsim_met(); 'met.names' and
  ## 'length.mets' were attached there as attributes.
  x <- x$met.mrg
  m.nms <- attr(x, "met.names")
  if(max(pairs) > attr(x, "length.mets")) stop("pairs index larger than length of mets")
  x <- as.data.frame(unclass(x))
  plot.type <- match.arg(plot.type)
  met.var <- match.arg(met.var)
  if(cumulative && plot.type != "ts")
    stop("cumulative is only available for plot.type = 'ts' ")
  ## NOTE(review): met.var is limited to the four choices above, so the
  ## 'met.var != "all"' guards below are always TRUE; eval(parse(text = ...))
  ## is used to reference the suffixed columns (e.g. "radn.1") inside aes().
  ## Scatter of object pairs[1] vs pairs[2], with OLS fit and 1:1 line.
  if(plot.type == "vs" && met.var != "all" && !cumulative){
    tmp <- x[, grep(met.var, names(x))]
    prs <- paste0(met.var, ".", pairs)
    gp1 <- ggplot2::ggplot(data = tmp, ggplot2::aes(x = eval(parse(text = eval(prs[1]))),
                                                    y = eval(parse(text = eval(prs[2]))))) +
      ggplot2::geom_point() +
      ggplot2::xlab(paste(m.nms[pairs[1]], prs[1])) +
      ggplot2::ylab(paste(m.nms[pairs[2]], prs[2])) +
      ggplot2::geom_smooth(method = "lm") +
      ggplot2::geom_abline(intercept = 0, slope = 1, color = "orange")
    print(gp1)
  }
  ## Bland-Altman-style plot: difference (second - first) against the first.
  if(plot.type == "diff" && met.var != "all" && !cumulative){
    prs0 <- paste0(met.var, ".", pairs)
    prs <- paste0(prs0, collapse = "|")
    tmp <- x[, grep(prs, names(x))]
    ## x Variable is prs[1]
    ## y Variable is prs[2] - prs[1]
    dff <- tmp[,prs0[2]] - tmp[,prs0[1]]
    gp1 <- ggplot2::ggplot(data = tmp, ggplot2::aes(x = eval(parse(text = eval(prs0[1]))),
                                                    y = dff)) +
      ggplot2::geom_point() +
      ggplot2::xlab(paste(m.nms[pairs[1]], prs0[1])) +
      ggplot2::ylab(paste("Difference", prs0[2], "-", prs0[1])) +
      ggplot2::geom_smooth(method = "lm", ...) +
      ggplot2::geom_hline(yintercept = 0, color = "orange")
    print(gp1)
  }
  ## Time series of both objects with loess smoothers ('span' controls the
  ## smoothing window).
  if(plot.type == "ts" && met.var != "all" && !cumulative){
    prs0 <- paste0(met.var, ".", pairs)
    prs <- paste0(prs0, collapse = "|")
    tmp <- x[, grep(prs, names(x))]
    tmp$dates <- x$dates ## Put it back in - kinda dumb
    gp1 <- ggplot2::ggplot(data = tmp, ggplot2::aes(x = .data[["dates"]],
                                                    y = eval(parse(text = eval(prs0[1]))),
                                                    color = paste(m.nms[pairs[1]], prs0[1]))) +
      ggplot2::geom_point() +
      ggplot2::geom_smooth(span = span, ...) +
      ggplot2::geom_point(ggplot2::aes(y = eval(parse(text = eval(prs0[2]))),
                                       color = paste(m.nms[pairs[2]], prs0[2]))) +
      ggplot2::geom_smooth(ggplot2::aes(y = eval(parse(text = eval(prs0[2]))),
                                        color = paste(m.nms[pairs[2]], prs0[2])),
                           span = span, ...) +
      ggplot2::xlab("Date") +
      ggplot2::ylab(met.var) +
      ggplot2::theme(legend.title = ggplot2::element_blank())
    print(gp1)
  }
  ## Cumulative time series (e.g. cumulative rain) for both objects.
  if(plot.type == "ts" && met.var != "all" && cumulative){
    prs0 <- paste0(met.var, ".", pairs)
    prs <- paste0(prs0, collapse = "|")
    tmp <- x[, grep(prs, names(x))]
    tmp$dates <- x$dates
    tmp$cum_var1 <- cumsum(tmp[, prs0[1]])
    tmp$cum_var2 <- cumsum(tmp[, prs0[2]])
    gp1 <- ggplot2::ggplot(data = tmp, ggplot2::aes(x = .data[["dates"]],
                                                    y = .data[["cum_var1"]],
                                                    color = paste(m.nms[pairs[1]], prs0[1]))) +
      ggplot2::geom_line() +
      ggplot2::geom_line(ggplot2::aes(y = .data[["cum_var2"]],
                                      color = paste(m.nms[pairs[2]], prs0[2]))) +
      ggplot2::xlab("Date") +
      ggplot2::ylab(paste("Cumulative ", met.var)) +
      ggplot2::theme(legend.title = ggplot2::element_blank())
    print(gp1)
  }
  ## Overlaid density curves for the selected variable in both objects.
  if(plot.type == "density" && met.var != "all" && !cumulative){
    prs0 <- paste0(met.var, ".", pairs)
    prs <- paste0(prs0, collapse = "|")
    tmp <- x[, grep(prs, names(x))]
    gp1 <- ggplot2::ggplot(data = tmp, ggplot2::aes(x = eval(parse(text = eval(prs0[1]))),
                                                    color = paste(m.nms[pairs[1]], prs0[1]))) +
      ggplot2::geom_density() +
      ggplot2::geom_density(ggplot2::aes(x = eval(parse(text = eval(prs0[2]))),
                                         color = paste(m.nms[pairs[2]], prs0[2]))) +
      ggplot2::xlab(met.var) +
      ggplot2::theme(legend.title = ggplot2::element_blank())
    print(gp1)
  }
  ## Return the last ggplot object invisibly so callers can modify/save it.
  invisible(gp1)
}
#' The frost-free period is computed by first splitting each year (or year interval)
#' in two halves. The first and last frosts in the first and second period are found.
#' For the Northern hemisphere calendar days are used (1-365).
#' For the Southern hemisphere the year is split in two halves, but the second half of
#' the year is used as the first part of the growing season.
#' If frost is not found a zero is returned.
#'
#' @title Summary for an APSIM met file
#' @name summary.met
#' @description Create a data.frame summarizing an object of class \sQuote{met}
#' @param object object of class \sQuote{met}
#' @param ... optional argument (none used at the moment)
#' @param years optional argument to subset years
#' @param months optional argument to subset by months. If an integer, it should
#' be between 1 and 12. If a character, it can be in the format, for example,
#' \sQuote{jan} or \sQuote{Jan}.
#' @param days optional argument to subset by days. It should be an integer
#' between 1 and 31.
#' @param julian.days optional argument to subset by julian days. It
#' should be a vector of integers between 1 and 365. Either use \sQuote{days} or
#' \sQuote{julian.days} but not both.
#' @param compute.frost logical (default FALSE). Whether to compute
#' frost statistics.
#' @param frost.temperature value to use for the calculation of the frost
#' period (default is zero).
#' @param check logical (default FALSE). Whether to \sQuote{check} the \sQuote{met} object.
#' @param verbose whether to print additional information to the console
#' @param na.rm whether to remove missing values. Passed to \sQuote{aggregate}
#' @param digits digits for rounding (default is 2).
#' @return an object of class \sQuote{data.frame} with attributes
#' @export
#' @examples
#'
#' extd.dir <- system.file("extdata", package = "apsimx")
#' ames <- read_apsim_met("Ames.met", src.dir = extd.dir)
#'
#' summary(ames, years = 2014:2016)
#'
summary.met <- function(object, ..., years, months, days, julian.days,
                        compute.frost = FALSE,
                        frost.temperature = 0,
                        check = FALSE, verbose = FALSE,
                        na.rm = FALSE, digits = 2){
  x <- object
  if(check) check_apsim_met(x)
  if(!missing(days) && !missing(julian.days))
    stop("Either use days or julian.days but not both", call. = TRUE)
  ## Summarize information by year
  if(!missing(years)) x <- x[x$year %in% years,]
  if(!missing(months)){
    ## FIX: coerce any numeric input (not just length-one) to integer.  The
    ## old code ran as.integer() on a single *character* month too, turning
    ## it into NA and breaking the character branch below.
    if(is.numeric(months)) months <- as.integer(months)
    if(!inherits(months, "integer") && !inherits(months, "character"))
      stop("months should be either an integer or a character", call. = FALSE)
    ## Select months that fit the criteria.
    ## 2012 is just a convenient (leap) year used to map months to julian days.
    date.range <- seq(as.Date("2012-01-01"), as.Date("2012-12-31"), by = "day")
    dat <- data.frame(month = as.numeric(format(date.range, "%m")),
                      Month = format(date.range, "%b"),
                      day = as.numeric(format(date.range, "%j")))
    if(inherits(months, "integer")){
      ## FIX: use any() so that month *vectors* do not hit the scalar '||'
      ## (which errors on length > 1 inputs in R >= 4.3)
      if(any(months < 1) || any(months > 12))
        stop("months should be between 1 and 12", call. = FALSE)
      wch.months <- which(dat$month %in% months)
      x <- x[x$day %in% dat[wch.months, "day"],]
    }
    if(inherits(months, "character")){
      ## Months might be in upper or lower case
      Months <- format(as.Date(paste0(1, months, 2012), "%d%b%Y"), "%b")
      wch.months <- which(dat$Month %in% Months)
      x <- x[x$day %in% dat[wch.months, "day"],]
    }
  }else{
    months <- "1:12"
  }
  if(!missing(days)){
    ## FIX: accept plain numeric input (e.g. 5 instead of 5L), as documented
    if(is.numeric(days)) days <- as.integer(days)
    if(!inherits(days, "integer"))
      stop("days should be of class integer", call. = FALSE)
    ## FIX: any() for vector-safe range check (see months above)
    if(any(days < 1) || any(days > 31))
      stop("days should be between 1 and 31")
    ## Select days that fit the criteria
    date.range <- seq(as.Date("2012-01-01"), as.Date("2012-12-31"), by = "day")
    dat <- data.frame(month = as.numeric(format(date.range, "%m")),
                      Month = format(date.range, "%b"),
                      Day = as.numeric(format(date.range, "%d")),
                      day = as.numeric(format(date.range, "%j")))
    if(is.numeric(days)){
      wch.days <- which(dat$Day %in% days)
      x <- x[x$day %in% dat[wch.days, "day"],]
    }
  }else{
    days <- 1:31
  }
  if(!missing(julian.days)){
    x <- x[x$day %in% julian.days, ]
    days <- range(julian.days)
  }
  n.years <- length(unique(x$year))
  ## Columns 13-16 hold frost statistics and exist only when requested
  if(compute.frost){
    ans <- matrix(nrow = n.years, ncol = 16)
  }else{
    ans <- matrix(nrow = n.years, ncol = 12)
  }
  ans[,1] <- sort(unique(x$year))
  x <- add_column_apsim_met(x, value = as.factor(x$year), name = "year", units = "()")
  if(verbose){
    cat("First year:", x$year[1], "\n")
    cat("Last year:", x$year[nrow(x)], "\n")
    cat("Total number of years:", length(unique(x$year)), "\n")
  }
  ## Store high, mean and low maxt/mint temperature by year
  if(all(is.na(x$maxt))){
    ans[,4] <- NA
    ans[,6] <- NA
    ans[,8] <- NA
  }else{
    ans[,4] <- round(stats::aggregate(maxt ~ year, data = x, FUN = max, na.rm = na.rm)$maxt, digits)
    ans[,6] <- round(stats::aggregate(maxt ~ year, data = x, FUN = mean, na.rm = na.rm)$maxt, digits)
    ans[,8] <- round(stats::aggregate(maxt ~ year, data = x, FUN = min, na.rm = na.rm)$maxt, digits)
  }
  if(all(is.na(x$mint))){
    ans[,5] <- NA
    ans[,7] <- NA
    ans[,9] <- NA
  }else{
    ans[,5] <- round(stats::aggregate(mint ~ year, data = x, FUN = max, na.rm = na.rm)$mint, digits)
    ans[,7] <- round(stats::aggregate(mint ~ year, data = x, FUN = mean, na.rm = na.rm)$mint, digits)
    ans[,9] <- round(stats::aggregate(mint ~ year, data = x, FUN = min, na.rm = na.rm)$mint, digits)
  }
  ## Total precipitation
  if(all(is.na(x$rain))){
    ans[,10] <- NA
  }else{
    ans[,10] <- round(stats::aggregate(rain ~ year, data = x, FUN = sum, na.rm = na.rm)$rain, digits)
  }
  ## Total and mean radiation
  if(all(is.na(x$radn))){
    ans[,11] <- NA
    ans[,12] <- NA
  }else{
    ans[,11] <- round(stats::aggregate(radn ~ year, data = x, FUN = sum, na.rm = na.rm)$radn, digits)
    ans[,12] <- round(stats::aggregate(radn ~ year, data = x, FUN = mean, na.rm = na.rm)$radn, digits)
  }
  ## Frost statistics: the hemisphere (sign of the latitude attribute)
  ## decides how the year is split into two halves
  if(compute.frost){
    lat0 <- strsplit(attr(x, "latitude"), "=")[[1]][2]
    lat <- as.numeric(strsplit(lat0, "(", fixed = TRUE)[[1]][1])
    ans[,13] <- rep(0, length(unique(x$year)))
    ans[,14] <- rep(0, length(unique(x$year)))
    ## This should work regardless of the subset, but I haven't tested
    if(lat >= 0){
      ## Northern hemisphere
      ## Last frost in the spring
      length.days.by.year <- stats::aggregate(day ~ year, data = x, FUN = length)$day
      half.point <- floor(mean(length.days.by.year)/2)
      x.first <- x[x$day < half.point, ]
      if(length(unique(x.first$year)) != length(unique(x$year))){
        stop("At least one year has incomplete days. Spring frost cannot be computed.", call. = FALSE)
      }
      frosts <- which(x.first$mint < frost.temperature)
      if(length(frosts) != 0){
        x.spring.frosts <- x.first[frosts,]
        last.spring.frost <- stats::aggregate(day ~ year, data = x.spring.frosts, FUN = max)
        zero.days <- data.frame(year = sort(unique(x$year)), day = 0)
        mrg1 <- merge(zero.days, last.spring.frost, all.x = TRUE, by = "year")
        mrg1[is.na(mrg1$day.y), "day.y"] <- 0
        ans[,13] <- mrg1[["day.y"]]
      }
      ## First frost in the fall
      x.last <- x[x$day > half.point, ]
      if(length(unique(x.last$year)) != length(unique(x$year))){
        stop("At least one year has incomplete days. Fall frost cannot be computed.", call. = FALSE)
      }
      frosts <- which(x.last$mint < frost.temperature)
      if(length(frosts) != 0){
        x.fall.frosts <- x.last[frosts,]
        first.fall.frost <- stats::aggregate(day ~ year, data = x.fall.frosts, FUN = min)
        zero.days <- data.frame(year = sort(unique(x$year)), day = 0)
        mrg2 <- merge(zero.days, first.fall.frost, all.x = TRUE, by = "year")
        mrg2[is.na(mrg2$day.y), "day.y"] <- 0
        ans[,14] <- mrg2[["day.y"]]
      }
      ## Frost days
      ## NOTE(review): frost days use a fixed 0 threshold, not
      ## 'frost.temperature' -- confirm whether that is intentional
      tmp0 <- x[x$mint < 0,]
      if(nrow(tmp0) > 0){
        all.frost.days <- stats::aggregate(day ~ year, data = tmp0, FUN = length)
        zero.days <- data.frame(year = sort(unique(x$year)), day = 0)
        mrg3 <- merge(zero.days, all.frost.days, all.x = TRUE, by = "year")
        mrg3[is.na(mrg3$day.y), "day.y"] <- 0
        ans[,16] <- mrg3[["day.y"]]
      }else{
        ## BUG FIX: was 'x$years' -- no such column, so the replacement had
        ## length zero and assignment into ans[,16] failed
        ans[,16] <- rep(0, length(unique(x$year)))
      }
      ## Frost-free period
      if(sum(ans[,13]) == 0 && sum(ans[,14]) == 0){
        ans[,15] <- stats::aggregate(day ~ year, data = x, FUN = length)$day
      }else{
        if(all(ans[,13] > 0) && all(ans[,14] > 0)){
          ans[,15] <- ans[,14] - ans[,13]
        }else{
          ## Need to compute this by year
          yrs <- sort(unique(x$year))
          for(j in seq_along(yrs)){
            ## If first half has a zero
            if(ans[j, 13] == 0 && ans[j, 14] != 0){
              tmp <- x.fall.frosts[x.fall.frosts$year == yrs[j],]
              if(nrow(tmp) == 0){
                ans[j, 15] <- length.days.by.year[j]
              }else{
                last.fall.frost <- max(tmp$day)
                fall.frost.days <- last.fall.frost - ans[j, 14]
                ans[j, 15] <- fall.frost.days
              }
            }
            ## If second half has a zero
            if(ans[j, 14] == 0 && ans[j, 13] != 0){
              tmp <- x.spring.frosts[x.spring.frosts$year == yrs[j],]
              ##print(tmp)
              if(nrow(tmp) == 0){
                ans[j, 15] <- length.days.by.year[j]
              }else{
                first.spring.frost <- min(tmp$day)
                spring.frost.days <- ans[j, 13] - first.spring.frost
                ans[j, 15] <- spring.frost.days
              }
            }
            ## Both are zero
            if(ans[j, 14] == 0 && ans[j, 13] == 0){
              ans[j, 15] <- length.days.by.year[j]
            }
            ## Both are not zero
            if(ans[j, 14] != 0 && ans[j, 13] != 0){
              ans[j, 15] <- ans[j,14] - ans[j,13]
            }
          }
        }
      }
    }
    if(lat < 0){
      ## Southern hemisphere: the second half of the calendar year is treated
      ## as the start of the growing season
      ## Last frost in the fall
      length.days.by.year <- stats::aggregate(day ~ year, data = x, FUN = length)$day
      half.point <- floor(mean(length.days.by.year)/2)
      x.first <- x[x$day > half.point, ]
      if(length(unique(x.first$year)) != length(unique(x$year))){
        stop("At least one year has incomplete days. Spring frost cannot be computed.", call. = FALSE)
      }
      frosts <- which(x.first$mint < frost.temperature)
      if(length(frosts) != 0){
        x.spring.frosts <- x.first[frosts,]
        last.spring.frost <- stats::aggregate(day ~ year, data = x.spring.frosts, FUN = max)
        zero.days <- data.frame(year = sort(unique(x$year)), day = 0)
        mrg1 <- merge(zero.days, last.spring.frost, all.x = TRUE, by = "year")
        mrg1[is.na(mrg1$day.y), "day.y"] <- 0
        ans[,13] <- mrg1[["day.y"]]
      }
      ## First frost in the fall
      x.last <- x[x$day < half.point, ]
      if(length(unique(x.last$year)) != length(unique(x$year))){
        stop("At least one year has incomplete days. Fall frost cannot be computed.", call. = FALSE)
      }
      frosts <- which(x.last$mint < frost.temperature)
      if(length(frosts) != 0){
        x.fall.frosts <- x.last[frosts,]
        first.fall.frost <- stats::aggregate(day ~ year, data = x.fall.frosts, FUN = min)
        zero.days <- data.frame(year = sort(unique(x$year)), day = 0)
        mrg2 <- merge(zero.days, first.fall.frost, all.x = TRUE, by = "year")
        mrg2[is.na(mrg2$day.y), "day.y"] <- 0
        ans[,14] <- mrg2[["day.y"]]
      }
      ## Frost days (fixed 0 threshold; see note in the Northern branch)
      tmp0 <- x[x$mint < 0,]
      if(nrow(tmp0) > 0){
        all.frost.days <- stats::aggregate(day ~ year, data = tmp0, FUN = length)
        zero.days <- data.frame(year = sort(unique(x$year)), day = 0)
        mrg3 <- merge(zero.days, all.frost.days, all.x = TRUE, by = "year")
        mrg3[is.na(mrg3$day.y), "day.y"] <- 0
        ans[,16] <- mrg3[["day.y"]]
      }else{
        ans[,16] <- rep(0, length(unique(x$year)))
      }
      ## Frost-free period
      if(sum(ans[,13]) == 0 && sum(ans[,14]) == 0){
        ans[,15] <- stats::aggregate(day ~ year, data = x, FUN = length)$day
      }else{
        if(all(ans[,13] > 0) && all(ans[,14] > 0)){
          ##last.day <- ifelse(is_leap_year(sort(unique(x$year))), 366, 365)
          ans[,15] <- (length.days.by.year - ans[,13]) + ans[,14]
        }else{
          ## Need to compute this by year
          yrs <- sort(unique(x$year))
          for(j in seq_along(yrs)){
            ## If first half has a zero
            if(ans[j, 13] == 0 && ans[j, 14] != 0){
              tmp <- x.fall.frosts[x.fall.frosts$year == yrs[j],]
              if(nrow(tmp) == 0){
                ans[j, 15] <- length.days.by.year[j]
              }else{
                last.fall.frost <- max(tmp$day)
                fall.frost.days <- ans[j, 14] - last.fall.frost
                ans[j, 15] <- fall.frost.days
              }
            }
            ## If second half has a zero
            if(ans[j, 14] == 0 && ans[j, 13] != 0){
              tmp <- x.spring.frosts[x.spring.frosts$year == yrs[j],]
              ##print(tmp)
              if(nrow(tmp) == 0){
                ans[j, 15] <- length.days.by.year[j]
              }else{
                first.spring.frost <- min(tmp$day)
                spring.frost.days <- ans[j, 13] - first.spring.frost
                ans[j, 15] <- length.days.by.year[j] - spring.frost.days
              }
            }
            ## Both are zero
            if(ans[j, 14] == 0 && ans[j, 13] == 0){
              ans[j, 15] <- length.days.by.year[j]
            }
            ## Both are not zero
            if(ans[j, 14] != 0 && ans[j, 13] != 0){
              ans[j, 15] <- (length.days.by.year[j] - ans[j,13]) + ans[j,14]
            }
          }
        }
      }
    }
  }
  if(compute.frost){
    colnames(ans) <- c("year", "months", "days", ## 1, 2, 3
                       "high_maxt", "high_mint", ## 4 and 5
                       "avg_maxt", "avg_mint", ## 6 and 7
                       "low_maxt", "low_mint", ## 8 and 9
                       "rain_sum", "radn_sum", "radn_avg", ## 10, 11, 12
                       "first_half_frost", "second_half_frost", ## 13, 14
                       "frost_free_period", "frost_days") ## 15, 16
  }else{
    colnames(ans) <- c("year", "months", "days", ## 1, 2, 3
                       "high_maxt", "high_mint", ## 4 and 5
                       "avg_maxt", "avg_mint", ## 6 and 7
                       "low_maxt", "low_mint", ## 8 and 9
                       "rain_sum", "radn_sum", "radn_avg") ## 10, 11, 12
  }
  ## Replace the numeric 'months'/'days' placeholders with readable labels
  ansd <- as.data.frame(ans)
  if(inherits(months, "integer")){
    if(grepl(":", deparse(months), fixed = TRUE)){
      ansd$months <- rep(paste(range(months), collapse = ":"), n.years)
    }else{
      ansd$months <- rep(paste(months, collapse = ","), n.years)
    }
  }else{
    ansd$months <- rep(paste(months, collapse = ","), n.years)
  }
  ansd$days <- rep(deparse(days), n.years)
  return(ansd)
}
|
749b63c335202cef01338f85538cab6112a99fcb | ac24b52eeb09b6097e1f1674d1d2f8215f4f82de | /complete01.R | a893325f8f7882b62eb539ff566827db47eb3f28 | [] | no_license | politov01/R-examples | d0a31370fb17567dea95d11e5c3ae47bb61da301 | e84ade3d43b2e251aa94fe374562ecf86fc747f2 | refs/heads/master | 2021-01-19T06:48:12.973809 | 2016-06-24T03:12:11 | 2016-06-24T03:12:11 | 61,855,584 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,192 | r | complete01.R | complete <-
complete <- function (directory = "specdata", id = 1:332)
{
  ## Return a data frame with one row per requested monitor: the monitor 'id'
  ## and 'nobs', the number of fully observed (no-NA) rows in its CSV file.
  ##
  ## directory: folder holding the monitor files ("001.csv", "002.csv", ...).
  ##            list.files() sorts the names, so the k-th file in the listing
  ##            corresponds to monitor k (assumption inherited from the
  ##            original files[id[i]] indexing -- TODO confirm naming scheme).
  ## id:        integer vector of monitor ids to summarise.
  files <- list.files(directory, full.names = TRUE)
  ## FIX: preallocate instead of growing a data frame started from NA rows,
  ## and drop the hard-coded debug read.csv("001.csv") that ran (and could
  ## fail) on every loop iteration.
  complete_files <- data.frame(id = integer(length(id)),
                               nobs = integer(length(id)))
  ## seq_along() (not 1:length(id)) is safe when 'id' is empty.
  for (i in seq_along(id)) {
    complete_files[i, 1] <- id[i]
    ## complete.cases() flags rows with no missing value in any column
    complete_files[i, 2] <- sum(complete.cases(read.csv(files[id[i]])))
  }
  complete_files
}
|
38424d3467f4810903bf5a8f3130761ee0b4dd88 | be595da8174d9d8248a43a5e9cdb0b0e835abf01 | /src/regression.R | 066272967c2c1297d374812570935adf93795ca5 | [] | no_license | garlandxie/env_filt_bees | e209bd62871b379684cd843680c696a20d0bf416 | da0c3d3e1e2d95db63e3a7b2e5b8501b86b26a16 | refs/heads/master | 2020-09-26T05:01:10.469863 | 2020-01-19T19:42:22 | 2020-01-19T19:42:22 | 226,171,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,742 | r | regression.R | # calculating regression models ------------------------------------------------
# author(s): Nicholas Sookhan, Garland Xie
# libraries --------------------------------------------------------------------
library(here) # for creating relative file-paths
library(dplyr) # for manipulating data
library(readr) # for reading csv files
library(sp) # for analysing geospatial data
library(tibble) # for converting row names to columns (+ vice versa)
library(spdep) # for running spatial autocorrelation tests
library(ggplot2) # for visualising data
library(purrr) # for creating iterations
# import -----------------------------------------------------------------------

# relative file-paths (here() resolves against the project root)
site_path <- here("data/original", "site_info_coords.csv")
mpd_path <- here("data/working", "ses_mpd.csv")
mfd_path <- here("data/working", "ses_mfd.csv")
met_250_path <- here("data/original", "land_use_metrics_250.csv")
met_500_path <- here("data/original", "land_use_metrics_500.csv")

# import
# "X1" below is the column name readr assigns to an unnamed first column;
# it is used as the site identifier throughout this script
site <- read_csv(site_path)
ses_mpd <- read_csv(mpd_path)
ses_mfd <- read_csv(mfd_path)
l_met_250 <- read_csv(met_250_path)
l_met_500 <- read_csv(met_500_path)

# check packaging --------------------------------------------------------------
# Quick interactive inspection of each table (structure + first/last rows);
# these calls print to the console and have no side effects.

# site
str(site)
head(site, n = 5)
tail(site, n = 5)

# ses mpd
str(ses_mpd)
head(ses_mpd, n = 5)
tail(ses_mpd, n = 5)

# ses mfd
str(ses_mfd)
head(ses_mfd, n = 5)
tail(ses_mfd, n = 5)

# met 250
str(l_met_250)
head(l_met_250, n = 5)
tail(l_met_250, n = 5)

# met 500
str(l_met_500)
head(l_met_500, n = 5)
tail(l_met_500, n = 5)

# data cleaning ----------------------------------------------------------------
# Join all tables on the site id ("X1") and rename to analysis-friendly names.
# NOTE(review): "ntaxa.y" relies on the join suffix, i.e. the ntaxa column
# coming from ses_mfd (the later join) -- confirm this is the intended source.
full_df <- site %>%
  full_join(l_met_250, by = "X1") %>%
  full_join(l_met_500, by = "X1") %>%
  full_join(ses_mpd, by = "X1") %>%
  full_join(ses_mfd, by = "X1") %>%
  select(ID = "X1",
         num_sp = "ntaxa.y",
         mtm3deg_nor,
         mtm3deg_eas,
         grass_250_percent = "prop.landscape_250_grass",
         tree_250_percent = "prop.landscape_250_tree_canopy",
         urban_250_percent = "prop.landscape_250_urban",
         grass_500_percent = "prop.landscape_500_grass",
         tree_500_percent = "prop.landscape_500_tree_canopy",
         urban_500_percent = "prop.landscape_500_urban",
         ses_mfd = "mfd.obs.z",
         ses_mpd = "mpd.obs.z")
# linear models: model fitting -------------------------------------------------
# OLS fits of the standardized effect sizes (ses.MPD, ses.MFD) against
# % impervious ("urban") cover at the 250 m and 500 m landscape scales.
# Rows with a missing cover value at the given scale are dropped first.

# ses mpd
lm_250_mpd <- full_df %>%
  filter(!is.na(urban_250_percent)) %>%
  lm(ses_mpd ~ urban_250_percent, data = .)

lm_500_mpd <- full_df %>%
  filter(!is.na(urban_500_percent)) %>%
  lm(ses_mpd ~ urban_500_percent, data = .)

# ses mfd
lm_250_mfd <- full_df %>%
  filter(!is.na(urban_250_percent)) %>%
  lm(ses_mfd ~ urban_250_percent, data = .)

# BUG FIX: this model previously piped from 'full_sdf', an undefined object
lm_500_mfd <- full_df %>%
  filter(!is.na(urban_500_percent)) %>%
  lm(ses_mfd ~ urban_500_percent, data = .)

# linear models: model summary -------------------------------------------------

# mpd
summary(lm_250_mpd)
summary(lm_500_mpd)

# mfd
summary(lm_250_mfd)
summary(lm_500_mfd)
# linear models: spatial autocorrelation ---------------------------------------
# Test the model residuals for spatial autocorrelation with Moran's I, using
# an 8-nearest-neighbour spatial weights matrix built from site coordinates.

# PROJ.4 definition (MTM, NAD27) shared by both point sets.
# FIX: the original built this string across continuation lines, which left
# literal newline characters and padding spaces inside the CRS string.
mtm_crs <- CRS(paste("+proj=tmerc +lat_0=0 +lon_0=-79.5",
                     "+k=0.9999 +x_0=304800 +y_0=0 +datum=NAD27",
                     "+units=m +no_defs +ellps=clrk66",
                     "+nadgrids=@conus,@alaska,@ntv2_0.gsb,@ntv1_can.dat"))

# 250 m spatial scale
# BUG FIX: coordinates previously piped from 'df', which is undefined here
# (it would resolve to stats::df, the F-distribution density function)
coords_250 <- full_df %>%
  filter(!is.na(urban_250_percent)) %>%
  select(ID, mtm3deg_nor, mtm3deg_eas) %>%
  column_to_rownames(var = "ID")

coordinates(coords_250) <- ~mtm3deg_eas + mtm3deg_nor
proj4string(coords_250) <- mtm_crs

# 500m spatial scale
coords_500 <- full_df %>%
  filter(!is.na(urban_500_percent)) %>%
  select(ID, mtm3deg_nor, mtm3deg_eas) %>%
  column_to_rownames(var = "ID")

coordinates(coords_500) <- ~mtm3deg_eas + mtm3deg_nor
proj4string(coords_500) <- mtm_crs

# moran i test for spatial autocorrelation in residuals
# purrr::partial() pre-binds the neighbour weights so each helper only needs
# the fitted model.  FIX: removed the trailing commas, which handed partial()
# an empty argument.
lm_morantest_250 <- partial(
  lm.morantest,
  listw = nb2listw(knn2nb(knearneigh(coords_250, 8)), style = "W")
)

lm_morantest_500 <- partial(
  lm.morantest,
  listw = nb2listw(knn2nb(knearneigh(coords_500, 8)), style = "W")
)

# mpd
lm_morantest_250(lm_250_mpd)
lm_morantest_500(lm_500_mpd)

# mfd
lm_morantest_250(lm_250_mfd)
lm_morantest_500(lm_500_mfd)
# linear models: model adequacy ------------------------------------------------
# Standard lm diagnostic plots (residuals vs fitted, Q-Q, scale-location,
# leverage) plus residual histograms to eyeball normality.

# mpd
plot(lm_250_mpd)
plot(lm_500_mpd)
hist(resid(lm_250_mpd))
hist(resid(lm_500_mpd))

# mfd
plot(lm_250_mfd)
plot(lm_500_mfd)
hist(resid(lm_250_mfd))
hist(resid(lm_500_mfd))
# plots ------------------------------------------------------------------------
# Scatterplots of diversity metrics vs impervious cover.  The dashed line at
# zero marks the null expectation for a standardized effect size.  The first
# two assignments are wrapped in parentheses so the plots also print when the
# script is sourced; the MFD plots are only assigned (they are saved below).
#
# BUG FIX (all four plots): data previously piped from 'df', an undefined
# object here (it would resolve to stats::df); use the assembled 'full_df'.

(plot_lm_mpd_250 <- full_df %>%
  filter(!is.na(urban_250_percent) & !is.na(ses_mpd)) %>%
  ggplot(aes(x = urban_250_percent, y = ses_mpd)) +
  geom_smooth(method = "lm") +
  geom_jitter() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(y = "ses.MPD",
       x = "% Impervious Cover (250m scale)") +
  theme_minimal())

(plot_lm_mpd_500 <- full_df %>%
  filter(!is.na(urban_500_percent) & !is.na(ses_mpd)) %>%
  ggplot(aes(x = urban_500_percent, y = ses_mpd)) +
  geom_smooth(method = "lm") +
  geom_jitter() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(y = "ses.MPD",
       x = "% Impervious Cover (500m scale)") +
  theme_minimal())

# MFD plots carry no lm smoother (matching the original script)
plot_lm_mfd_250 <- full_df %>%
  filter(!is.na(urban_250_percent) & !is.na(ses_mfd)) %>%
  ggplot(aes(x = urban_250_percent, y = ses_mfd)) +
  geom_jitter() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(y = "ses.MFD",
       x = "% Impervious Cover (250m scale)") +
  theme_minimal()

plot_lm_mfd_500 <- full_df %>%
  filter(!is.na(urban_500_percent) & !is.na(ses_mfd)) %>%
  ggplot(aes(x = urban_500_percent, y = ses_mfd)) +
  geom_jitter() +
  labs(y = "ses.MFD",
       x = "% Impervious Cover (500m scale)") +
  theme_minimal()

# Save the data! ---------------------------------------------------------------

# ses MPD vs % urban (250m scale)
ggsave(filename = here("output/figures", "fig2-crit2-ses_MPD.png"),
       plot = plot_lm_mpd_250,
       units = "in",
       width = 5,
       height = 4,
       device = "png")

# ses MFD vs % urban (250m scale)
ggsave(filename = here("output/figures", "fig2-crit2-ses_MFD.png"),
       plot = plot_lm_mfd_250,
       units = "in",
       width = 5,
       height = 4,
       device = "png")
|
821bde628ec605319965fcaaafe25d196088c107 | f4d569066b9766dbf0c59215d616891796653819 | /tests/testthat/test-exported-funs.R | 316b1b4ea4850e60ddbe2cb307e3501c14276574 | [] | no_license | kesimmons/panelPomp | 2edef54c2f1a1336bd216041c8dc0d67585633d0 | 4409ac6c1d518ee2dc14dd30583f8e7b6f4c2165 | refs/heads/master | 2020-07-03T14:20:20.934581 | 2016-09-30T16:10:11 | 2016-09-30T16:10:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,527 | r | test-exported-funs.R | library(panelPomp)
context("For now, the context is the name of the function being tested (along with the file it is in)")
# A guided tour of the main testthat expectations, using trivial inputs.
test_that("functionality X is being implemented as intended", {
# When "functionality X is being implemented as intended," then one should ...
# expect_equal() tolerates tiny numeric differences (all.equal semantics)
expect_equal(
object = 10,
expected = 10 + 1e-7)
# expect_identical() demands exact equality -- no numeric tolerance
expect_identical(# THIS FAILS #expect_identical(10, 10 + 1e-7)
object = 10,
expected = 10); string <- "Testing is fun!"
# expect_match() tests a regular expression against a character value
expect_match(
object = string,
regexp = "Testing is fun!")
expect_match(# additional arguments are passed to grepl
object = string,
regexp = "testing",
ignore.case = TRUE); a <- list(1:10, letters)
# expect_output() matches against the printed output of an expression
expect_output(
object = str(a),
regexp = "List of 2")
expect_output(# additional arguments are passed to grepl
object = str(a),
regexp = "int [1:10]",
fixed = TRUE)
#expect_message(
# object = pomp::pompExample("panelGompertz"),
# "Newly created object(s):") # The message is part of the ... argument
# expect_warning() asserts that a warning with the given text is raised
expect_warning(# leaving the second argument blank will produce an error with the actual message/warning
"NaNs produced", # The message is part of the ... argument
object = log(-1)); pomp::pompExample("panelGompertz")
# class and truth expectations
expect_is(
object = panelGompertz,
class = "panelPomp")
expect_true(
object = TRUE==TRUE)
expect_false(
object = TRUE==FALSE)
#expect_equal_to_reference(
# object = coef(panelGompertz),
# file = "exported-funs.rds") # must be an .rds file
})
|
aec9cac4fdfa078fe708cf38fbcb79bf37dff593 | 738fbf8ed6e37e7b9b4df850afcba2879715533c | /blackFriday.R | df0a1855d0e083dc868170f1450af6d0b346c9fb | [] | no_license | VIgneshGV91/Black-Friday-sales-Consumer-Behavior-Analysis | c683aee434ce54fffa16154eaee4c8ab484b3d00 | 62e03d1514057891fd423f187ff997802e8dba28 | refs/heads/master | 2020-08-07T19:37:41.569147 | 2019-10-08T06:42:37 | 2019-10-08T06:42:37 | 213,566,982 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,342 | r | blackFriday.R | setwd("C:/Vignesh/Studies/Fall 2018/Adv Stats IDS 575/Project/Black Friday")
BlackFriday <- read.csv(file="BlackFriday.csv", header=TRUE, sep=",")
# lets see the structure of dataset
str(BlackFriday)
nrow(BlackFriday)
ncol(BlackFriday)
# check the missing values
sapply(BlackFriday, function(x) sum(is.na(x)))
# check the datatype for each column
typeof(BlackFriday$User_ID)
typeof(BlackFriday$Product_ID)
typeof(BlackFriday$Gender)
typeof(BlackFriday$Age)
typeof(BlackFriday$Occupation)
typeof(BlackFriday$City_Category)
typeof(BlackFriday$Stay_In_Current_City_Years)
typeof(BlackFriday$Marital_Status)
typeof(BlackFriday$Product_Category_1)
typeof(BlackFriday$Marital_Status)
typeof(BlackFriday$Product_Category_1)
typeof(BlackFriday$Product_Category_2)
typeof(BlackFriday$Product_Category_3)
typeof(BlackFriday$Purchase)
# Converting columns to Respective datatype
BlackFriday$Gender<-as.factor(BlackFriday$Gender)
BlackFriday$User_ID<-as.numeric(BlackFriday$User_ID)
BlackFriday$Age<- as.factor(BlackFriday$Age)
BlackFriday$City_Category<-as.factor(BlackFriday$City_Category)
BlackFriday$Stay_In_Current_City_Years<-as.factor(BlackFriday$Stay_In_Current_City_Years)
BlackFriday$Marital_Status<-as.factor(BlackFriday$Marital_Status)
BlackFriday$Product_Category_1<-as.factor(BlackFriday$Product_Category_1)
BlackFriday$Product_Category_2<-as.factor(BlackFriday$Product_Category_2)
summary(BlackFriday)
# Filling the empty values with Zeros
i <- sapply(BlackFriday, is.factor)
BlackFriday[i] <- lapply(BlackFriday[i], as.character) # Convert factors to character variables
BlackFriday[is.na(BlackFriday)] <- 0 # Replace NA with 0, as shown in Example 1
BlackFriday[i] <- lapply(BlackFriday[i], as.factor) # Convert character columns back to factors
str(BlackFriday)
View(BlackFriday)
# Finding Duplicate data
unique(BlackFriday)
# Get unique values in each column
rapply(BlackFriday,function(x)length(unique(x)))
# Group the age values into 7 groups
# Recode the seven age brackets to group codes 1..7 in a single
# `levels<-` assignment (the numeric vector is coerced to the same
# character levels the original seven repetitive statements produced).
levels(BlackFriday$Age) <- 1:7
#check the levels of Age
levels(BlackFriday$Age)
# Lets check the stay in current City, replace +4 with 4
levels(BlackFriday$Stay_In_Current_City_Years)
levels(BlackFriday$Stay_In_Current_City_Years)[5] <- 4
levels(BlackFriday$Stay_In_Current_City_Years)
# Lets do statistics
mean(BlackFriday$Purchase)
median(BlackFriday$Purchase)
sd(BlackFriday$Purchase)
plot(BlackFriday$Purchase)
# Lets do some Bivariate analysis
# Relationship Between Gender Vs Purchase
GenderTable<-table(BlackFriday$Gender)
GenderTable
# Lets determine the relationship between input and output variable
# Purchase and Gender
GenderPurchaseAov<-aov(BlackFriday$Purchase~BlackFriday$Gender, data=BlackFriday)
summary(GenderPurchaseAov)
# The result shows the P-value is less than 0.5 so gender influences the Purchase cost
# Purchase and Age
AgePurchaseAov <-aov(BlackFriday$Purchase~BlackFriday$Age, data=BlackFriday)
summary(AgePurchaseAov)
# The result shows the P-value is less than 0.5 so Age influences the Purchase cost
# Purchase and Martial status
MartialStatus <-aov(BlackFriday$Purchase~BlackFriday$Marital_Status, data=BlackFriday)
summary(MartialStatus)
# The result shows the P-value is less than 0.5 so Martial Status influences the Purchase cost
# Purchase and Occupation
OccupationAov <- aov(BlackFriday$Purchase~BlackFriday$Occupation , data=BlackFriday)
summary(OccupationAov)
# The result shows the P-value is less than 0.5 so Occupation influences the Purchase cost
# Purchase and currentstay in city
CurrentCityAov <- aov(BlackFriday$Purchase~BlackFriday$Stay_In_Current_City_Years, data=BlackFriday)
summary(CurrentCityAov)
# The result shows the P-value is less than 0.5 so Martial Status influences the Purchase cost
# Purchase and CityCategory
Cityaov <- aov(BlackFriday$Purchase~BlackFriday$City_Category, data=BlackFriday)
summary(Cityaov)
# Purchase and category 1
Category1aov <- aov(BlackFriday$Purchase~BlackFriday$Product_Category_1, data=BlackFriday)
summary(Category1aov)
# Purchase and category 2
Category2aov <- aov(BlackFriday$Purchase~BlackFriday$Product_Category_2, data=BlackFriday)
summary(Category2aov)
# Purchase and category 3
Category3aov <- aov(BlackFriday$Purchase~BlackFriday$Product_Category_3, data=BlackFriday)
summary(Category3aov)
#######################################################################################################
########################################################################################################
library(ggplot2)
# check if any NA
sapply(BlackFriday, function(x)all(any(is.na(x))))
# check number of NA
apply(BlackFriday, 2, function(x) sum(is.na(x)))
# check other columns to get a feel for the data
summary(BlackFriday)
# table(unlist(df$Age))
# table(unlist(df$Gender))
# table(unlist(df$City_Category))
# table(unlist(df$Stay_In_Current_City_Years))
# table(unlist(df$Marital_Status))
# graph individual purchases
ggplot(BlackFriday, aes(x = BlackFriday$Purchase)) + geom_histogram(binwidth = 100)
# aggregate purchases by User_ID and graph
aggpur <- aggregate(Purchase ~ User_ID, BlackFriday, sum)
ggplot(aggpur, aes(x = aggpur$Purchase)) + geom_histogram(binwidth = 10000) + xlim(0,5000000)
# purchases per user
pur_per_usr <- data.frame(table(BlackFriday$User_ID))
ggplot(pur_per_usr, aes(x = pur_per_usr$Freq)) + geom_histogram(binwidth = 1) + xlim(0,300)
# create new dataframe with total purchases only
tot_pur <- merge(x = aggpur, y = BlackFriday, by = "User_ID", all = TRUE)
View(tot_pur)
tot_pur_BF <- tot_pur[-which(duplicated(tot_pur$User_ID)), ]
View(tot_pur_BF)
tot_pur_BF[ ,c('User_ID', 'Product_ID', 'Product_Category_1', 'Product_Category_2', 'Product_Category_3', 'Purchase.y')] <- list(NULL)
colnames(tot_pur_BF)[colnames(tot_pur_BF)=="Purchase.x"] <- "Purchased"
# Occupation and Marital_Status should be factors - the Occupation is a fixed integer from 1 to 20 indicating the occupation of the customer
tot_pur_BF$Occupation <- as.factor(tot_pur_BF$Occupation)
tot_pur_BF$Marital_Status <- as.factor(tot_pur_BF$Marital_Status)
View(tot_pur_BF)
# get training and test data (80/20)
rownum <- sample(1:nrow(tot_pur_BF),size = 0.8*nrow(tot_pur_BF))
train <- tot_pur_BF[rownum,]
test <- tot_pur_BF[-rownum,]
# Baseline model - predict the mean of the training data
base_mean <- mean(train$Purchased)
base_mean
# Evaluate RMSE and MAE on the testing data
RMSE_base <- sqrt(mean((base_mean-test$Purchased)^2))
RMSE_base
MAE_base <- mean(abs(base_mean-test$Purchased))
MAE_base
# linear model
lm_model <- lm(Purchased ~., data = train)
summary(lm_model)
plot(lm_model)
## there are some non linear relations between Purchase amount and other two variables
## The residuals are not completely normally distributed is another concern. We can remove the tails of these outliers which have high variance
## Our prediction may vary as the scale location plot is not horizontal
## Even though there is no point beyond Cook's distance there are some rows which are affecting the
#regression model, So we neede to remove these outliers
lm_pred <- predict(lm_model, test)
lm_model_rmse <- sqrt(mean((lm_pred-test$Purchased)^2))
lm_model_rmse
lm_model_mae <- mean(abs(lm_pred-test$Purchased))
lm_model_mae
# random forest
library(randomForest)
rf_model <- randomForest(Purchased ~., data = train)
plot(rf_model)
rf_pred <- predict(rf_model, test)
rf_model_rmse <- sqrt(mean((rf_pred-test$Purchased)^2))
rf_model_mae <- mean(abs(rf_pred-test$Purchased))
# stepwise regression
base_model <- glm(Purchased ~ 1, data = train)
whole_model <- glm(Purchased ~ ., data = train)
step_model <- step(base_model, scope = list(lower = base_model, upper = whole_model), direction = "forward")
stepwise_purchase_prediction <- predict(step_model)
swr_rmse <- sqrt(mean((stepwise_purchase_prediction-train$Purchased)^2))
swr_mae <- mean(abs(stepwise_purchase_prediction-train$Purchased))
# rpart Decision Tree
library(rpart)
rpart_model <- rpart(Purchased ~., train)
plot(rpart_model)
rpart_pred <- predict(rpart_model, test)
rpart_rmse <- sqrt(mean((rpart_pred-test$Purchased)^2))
rpart_mae <- mean(abs(rpart_pred-test$Purchased))
# gxboost
# SVM
# caret package and running k-fold cross validation
#
# trControl = trainControl(method = "cv", number = 10, verboseIter = TRUE)
#
# fitControl <- trainControl(## 10-fold CV
# method = "cv",
# number = 10,
# verboseIter = TRUE)
#
# set.seed(825)
# gbmFit1 <- train(Purchased ~ ., data = tot_pur_BF,
# method = "lm",
# trControl = fitControl)
# gbmFit1
#
# kfold_model <- train(Purchased ~ ., tot_pur_BF,method = "lm",trControl)
#
# kfold_prediction <- predict(kfold_model, tot_pur_BF)
# kfold_model_rmse <- sqrt(mean((kfold_prediction-tot_pur_BF$Purchased)^2))
# kfold_model_mae <- mean(abs(kfold_prediction-tot_pur_BF$Purchased))
#
# ggplot(test, aes(x = lm_model, y = Purchased)) + geom_point(color = "blue", alpha = 0.7) + geom_abline(color = "red") + ggtitle("Linear Model Prediction vs. Real values")
# ggplot(test, aes(x = rf_prediction, y = Purchased)) + geom_point(color = "blue", alpha = 0.7) + geom_abline(color = "red") + ggtitle("Random Forest Prediction vs. Real values")
# ggplot(train, aes(x = stepwise_purchase_prediction, y = Purchased)) + geom_point(color = "blue", alpha = 0.7) + geom_abline(color = "red") + ggtitle("Stepwise Regression Prediction vs. Real values")
# ggplot(test, aes(x = rpart_prediction, y = Purchased)) + geom_point(color = "blue", alpha = 0.7) + geom_abline(color = "red") + ggtitle("rpart Prediction vs. Real values")
# boxplots
# library(ggplot2)
# ggplot2(aes(y = Purchased, x = Gender), data = tot_pur_BF) + geom_boxplot()
# ggplot2(aes(y = Purchased, x = Age), data = tot_pur_BF) + geom_boxplot()
# ggplot2(aes(y = Purchased, x = Occupation), data = tot_pur_BF) + geom_boxplot()
# ggplot2(aes(y = Purchased, x = City_Category), data = tot_pur_BF) + geom_boxplot()
# ggplot2(aes(y = Purchased, x = Stay_In_Current_City_Years), data = tot_pur_BF) + geom_boxplot()
# ggplot2(aes(y = Purchased, x = Marital_Status), data = tot_pur_BF) + geom_boxplot()
# ggplot2(data = tot_pur_BF, aes(x = "", y = Purchased)) + geom_boxplot()
|
d5f35bd2127f01eee3ff178a6689c34a2c2a446f | a4d25f8c1ed20ca09eb11a819992786cf032f063 | /library/ssc/function/coBC.R | 8ffdd526f212f3bcf7ef4a0eb231dd1a91ce7a98 | [] | no_license | delta0726/r-semi_supervised | 93b5290ad9b3785457cd339a6f28c96e910131f0 | cd33c812bd4ea285871007d97be31fce5de5ac04 | refs/heads/master | 2023-02-26T02:01:36.769807 | 2021-02-04T14:40:19 | 2021-02-04T14:40:19 | 335,382,053 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 404 | r | coBC.R | # *********************************************************************************************
# Title : 共訓練モデルのインターフェース
# Function : coBCG
# Created by: Owner
# Created on: 2021/01/26
# URL : https://www.rdocumentation.org/packages/ssc/versions/2.1-0/topics/coBCG
# *********************************************************************************************
|
b8c6f67eead231b78f6c849df427524b6a07c9da | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/LPM/examples/milano.Rd.R | fab841b8fb7afa8528143a3de7b38bb187e9ed2e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 233 | r | milano.Rd.R | library(LPM)
### Name: milano
### Title: Maximum annual rainfall series for different durations
### Aliases: milano
### Keywords: datasets
### ** Examples
data(milano)
## maybe str(series.rainfall) ; plot(series.rainfall) ...
|
75827581007b94c3bf564b5f4ec83c28026a0b22 | 262b88394d6cfdaafeb40242a1467bd41f9d4fb6 | /2021/descriptive statistics.r | 04b09ec73617267d7b4823d18b7d99e516131e92 | [] | no_license | nabilased/Lecture | a71d8bcd3c34e2108fc1508e2c0c4d2d22e14f2b | f27b737a9b92d21007010955461eea59d17d2968 | refs/heads/master | 2023-04-08T20:18:04.353757 | 2021-04-14T04:05:37 | 2021-04-14T04:05:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 587 | r | descriptive statistics.r | data <-read.csv("table2.csv")
# Quick univariate summary of the wage column, one statistic at a time.
head(data)
#Mean
result.mean <-mean(data$wage)
print(result.mean)
#Median
result.median <-median(data$wage)
print(result.median)
#Percentiles (0%, 25%, 50%, 75%, 100% by default)
result.quantile<-quantile(data$wage)
print(result.quantile)
#Minimum
result.min <-min(data$wage)
print(result.min)
#Maximum
result.max <-max(data$wage)
print(result.max)
#Variance (sample variance, denominator n - 1)
result.var <-var(data$wage)
print(result.var)
#Standard deviation
result.sd <-sd(data$wage)
print(result.sd)
#Descriptive statistics: mean, median, 25th and 75th quartiles, min, max
summary(data)
a135c7ab814ef3bd4bb0bc6fce72069c4bde72b1 | f8ad4483b3f35fd6183f61bdc871ea8b707c71a3 | /motivatedarrows/R/compile.R | 2dd66cadb34ae09c54ecf6f77bd84e01c0a8185b | [] | no_license | lupyanlab/motivated-arrows | b954ab357630357c368a1403a8d556ada3a74aa4 | b55e292cc341395296b2ffcfd98cc42b70313432 | refs/heads/master | 2021-01-10T07:42:47.995877 | 2016-04-03T22:42:04 | 2016-04-03T22:42:04 | 45,709,148 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 407 | r | compile.R |
#' Load the comma-separated data files in a directory.
#'
#' @param data_dir The directory of the data files, passed to `file.path`.
#' @param regex_key Pattern passed to `list.files`. Matches are loaded.
#' @return dplyr::data_frame
#' @export
compile <- function(data_dir, regex_key = "*") {
  # full.names = TRUE so each match comes back as a ready-to-read path
  matched <- list.files(data_dir, regex_key, full.names = TRUE)
  # read every matched file with readr and row-bind into one data frame
  plyr::ldply(matched, readr::read_csv)
}
|
fabad0984c6db89b490dd06911b21f5c26caff40 | 88ae470c5142eb53aeb0226eaeed5f72f7b76c23 | /man/View.Rd | c4ae918e1e06b6c94591a8b63563f4fd410e1bb3 | [] | no_license | Tim-Holzapfel/enhancedView | 516a19adf122108819c74adb5d8aafc55d05418a | 9fb6852664003b916ad7da101944ce45b89f6ad8 | refs/heads/master | 2023-07-31T21:41:26.527085 | 2021-09-11T23:14:56 | 2021-09-11T23:14:56 | 282,852,271 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 888 | rd | View.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/View.R
\name{View}
\alias{View}
\title{Enhanced Data Viewer}
\usage{
View(
file,
pageLength = getOption("enhancedView.pageLength", default = 200),
default_view = getOption("enhancedView.default_view", default = FALSE),
theme = getOption("enhancedView.theme", default = "flatly")
)
}
\arguments{
\item{file}{File to be viewed}
\item{pageLength}{default page length.}
\item{default_view}{Request the default RStudio data viewer.}
\item{theme}{Shinytheme to use. Possible values are "cerulean", "cosmo", "cyborg", "darkly", "flatly", "journal", "lumen", "paper", "readable", "sandstone", "simplex", "slate", "spacelab", "superhero", "united" or "yeti". The default is "flatly".}
}
\description{
One of the biggest problems of the built-in data viewer is its
limited capability to expand the columns.
}
|
bf0c3f91a96566dc709b662b2b36a6fb0298f589 | 7959c075b8d8fd90c423863d6cc51cb29ea517c5 | /lab_05.R | 031521f22dfe15860ab1a42114497967727becfe | [] | no_license | salientsoph/Rexam | d8373e7bbfc85fc38cc203add6572574b88c7926 | 0d0b9cb7fc378654c886ca70ba56498770e8b4a8 | refs/heads/master | 2022-12-24T06:02:13.720153 | 2020-09-25T14:18:31 | 2020-09-25T14:18:31 | 293,553,326 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,733 | r | lab_05.R | # 문제1
grade <- sample(1:6, 1)
if(grade == 1 | grade == 2 | grade ==3){
cat(grade, "학년은 저학년 입니다")
}else{
cat(grade, "학년은 고학년 입니다")
}
# grade <- sample(1:6, 1)
# if(grade >= 1 & grade <= 3){
# if(grade >= 1 && grade <= 3) 위와 동일
# cat(grade, "학년은 저학년 입니다")
#}else{
# cat(grade, "학년은 고학년 입니다")
#}
# Problem 2: pick a random arithmetic operation (1..5) and apply it to
# x = 300 and y = 50, then report the result.
choice <- sample(1:5, 1)
x <- 300
y <- 50
# switch() on the operation code replaces the original if/else-if chain;
# a numeric EXPR selects the alternative by position.
result <- switch(choice,
                 x + y,   # 1: addition
                 x - y,   # 2: subtraction
                 x * y,   # 3: multiplication
                 x / y,   # 4: division
                 x %% y)  # 5: modulo
print(paste("결과값:", result))
cat("결과값:", result, "\n")
# result <- switch(EXPR=choice, 300+50, 300-50, 300*50, 300/50, 300%%50)
# result <- switch(EXPR=as.character(choice),
# "1"=300+50, "2"=300-50, "3"=300*50, "4"=300/50, "5"=300%%50)
# 문제3
# r에선 문자열 연산 불가
count <- sample(3:10,1); count
deco <- sample(1:3,1); deco
if(deco==1){
deco <- "*"
}else if(deco==2){
deco <- "$"
}else{
deco <- "#"
}
for(data in (1:count))
cat(deco)
# for(num in 1:count){
#if(deco==1){
# cat("*", sep="")
#}else if(deco==2){
# cat("$", sep="")
#}else{
# cat("#", sep="")
#}}
# Problem 4: draw a random score (0..100) and report its letter grade.
score <- sample(0:100, 1)
# Map score to level 1..5: >=90 A, >=80 B, >=70 C, >=60 D, otherwise F.
if(score>=90){
  level <- 1
}else if(score>=80){ # scores >= 90 were already handled above
  level <- 2
}else if(score>=70){
  level <- 3
}else if(score>=60){
  level <- 4
}else{
  level <- 5
}
# BUG FIX: the failing grade was lower-case "f" while the other grades
# were upper case; use "F" for consistency.
cat(score, "점은",
    switch(EXPR = level,"A","B","C","D","F"), "등급입니다", "\n")
# Problem 5: print the alphabet as paired upper/lower-case letters,
# e.g. "Aa" "Bb" ... "Zz".
# BUG FIX: the original used c(LETTERS[i], letters[i], sep="") -- but
# c() has no `sep` argument, so it silently built a 3-element vector
# including an empty string named "sep". paste0() concatenates correctly.
for (i in 1:26) {
  pair <- paste0(LETTERS[i], letters[i])
  cat('"', pair, '" ', sep = "")
}
|
7870e96270bb89e1789d234431971fc9a637bdd4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/renpow/examples/data.test100.Rd.R | 1cb8725d912c15f5a3c1bfc61887d1199b266b37 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 174 | r | data.test100.Rd.R | library(renpow)
### Name: test100
### Title: Dataset: simple example for 100 data points
### Aliases: test100
### Keywords: datasets
### ** Examples
x100 <- test100$x
|
fb58c53d41fd2ecd148dfa87c38b3eb9ca7bbf46 | b05ff0cb36e1be4f7808b956a0743acc9e0a5d93 | /R/presence_absence_boxplots.R | 73f806d691bca0e427c155a2e1bd061039c1cf4a | [
"CC0-1.0"
] | permissive | dongmeic/climate-space | b649a7a8e6b8d76048418c6d37f0b1dd50512be7 | 7e800974e92533d3818967b6281bc7f0e10c3264 | refs/heads/master | 2021-01-20T02:13:12.143683 | 2020-04-03T16:47:56 | 2020-04-03T16:47:56 | 89,385,878 | 0 | 0 | null | 2020-04-03T16:47:57 | 2017-04-25T17:01:45 | Jupyter Notebook | UTF-8 | R | false | false | 2,132 | r | presence_absence_boxplots.R | # boxplot for host and beetle presence and absence
library(ggplot2)
library(grid)
source("/gpfs/projects/gavingrp/dongmeic/climate-space/R/combine_CRU_Daymet.R")
out <- "/gpfs/projects/gavingrp/dongmeic/beetle/output/plots/"
setwd(out)
indata <- get_data()
indata$hosts <- ifelse(indata$beetles==1 & indata$hosts==0, 1, indata$hosts)
vargrp <- c("OctTmin", "JanTmin", "MarTmin", "Tmin", "OctMin", "JanMin", "MarMin",
"winterMin", "minT", "Acs", "drop5", "max.drop", "maxAugT", "AugMaxT", "AugTmax", "maxT",
"TMarAug", "OptTsum", "summerTmean", "AugTmean", "fallTmean", "TOctSep", "Tmean", "ddAugJul",
"ddAugJun", "Tvar", "PMarAug", "summerP0", "summerP1", "summerP2", "POctSep",
"PcumOctSep", "Pmean", "PPT", "cv.gsp", "mi", "pt.coef", "vpd", "cwd", "wd")
# Horizontal boxplot of climate variable vargrp[i], split by presence
# class (host-absent / beetle-absent / beetle-present). Uses the
# file-level `indata` and `vargrp` objects.
#
# i ....... index into vargrp selecting the variable to plot
# legend .. draw an in-panel legend? (only wanted on one panel)
# returns a ggplot object
presence.boxplot <- function(i, legend = FALSE){
  plot_dat <- indata[, c(vargrp[i], "prs")]
  colnames(plot_dat)[1] <- "var"
  prs_labels <- c("Host-abs", "MPB-abs", "MPB")
  p <- ggplot(plot_dat, aes(as.character(prs), var)) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black")) +
    geom_boxplot(outlier.size = 0.1,
                 aes(color = factor(prs, labels = prs_labels))) +
    coord_flip() +
    scale_colour_manual(name = "", values = c("darkgrey", "#1b9e77", "#d95f02")) +
    labs(x = "", y = vargrp[i])
  if (legend) {
    p + theme(legend.position = c(0.8, 0.8),
              legend.text = element_text(size = 11)) +
      guides(colour = guide_legend(override.aes = list(size = 0.8)))
  } else {
    p + theme(legend.position = "none")
  }
}
n1 <- rep(c(1:5),8); n2 <- c(rep(1,5),rep(2,5),rep(3,5),rep(4,5),rep(5,5),rep(6,5),rep(7,5),rep(8,5))
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
png("cs_boxplot.png", width=15, height=24, units="in", res=300)
grid.newpage()
par(mar=c(2,2,4,2))
pushViewport(viewport(layout = grid.layout(8, 5)))
for(i in 1:length(vargrp)){
if(i==40){
p <- presence.boxplot(i,legend=T)
}else{
p <- presence.boxplot(i)
}
print(p, vp = vplayout(n2[i], n1[i]))
}
dev.off()
print("all done!") |
d790173bfa2179ff34e6dce4b7169d9144e76eda | 6e9698f08f1a67f3c18937df62885c083025f71a | /run_analysis.R | 6e8e249f70fc41093e982037dd5c113eedb6a92d | [] | no_license | chdean/getting-cleaning-data-project | 5420d3e9281aff387b9b9477c3d93dc2276de857 | ae8abce6b3c63bc68115395409f762fdd6d31e05 | refs/heads/master | 2021-01-10T13:32:40.112987 | 2016-04-10T22:50:42 | 2016-04-10T22:50:42 | 55,926,362 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,206 | r | run_analysis.R | ## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard
## deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the
## data set
## 4. Appropriately labels the data set with descriptive variable
## names.
## 5. From the data set in step 4, creates a second, independent tidy
## data set with the average of each variable for each activity and
## each subject.
library(plyr)
library(dplyr)
root <- getwd()
dataDir <- "UCI HAR Dataset"
# Fetch the UCI HAR dataset archive (if not already downloaded) and
# extract it (if not already extracted). Reads the file-level `dataDir`
# variable for the extraction check; each step is a no-op when its
# output already exists on disk.
downloadData <- function() {
    archive <- "data.zip"
    source_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
    if (!file.exists(archive)) {
        download.file(source_url, archive, method = "curl")
    }
    if (!file.exists(dataDir)) {
        unzip(archive)
    }
}
## prepare the environment
downloadData()
setwd(dataDir)
## column names for x
features <- read.table("features.txt", sep=" ")[,2]
## activity labels for y
activityLabels <- read.table("activity_labels.txt")[,2]
## Read one partition ("test" or "train") of the UCI HAR dataset.
## Expects the working directory to be the dataset root and temporarily
## descends into the partition's sub-directory. Relies on the
## file-level `features` (sensor column names) and `activityLabels`
## (activity id -> label lookup) objects.
##
## set: partition name, "test" or "train"
## returns a data.frame of subject, activity, and mean/std sensor columns
readData <- function(set) {
    old_wd <- setwd(set)
    ## restore the working directory even if a read below fails
    ## (the original trailing setwd("..") was skipped on error)
    on.exit(setwd(old_wd), add = TRUE)
    ## load test subjects
    subjects <- read.table(paste0("subject_", set, ".txt"))
    subjects <- rename(subjects, subject = V1)
    ## load sensor data
    x <- read.table(paste0("X_", set, ".txt"))
    colnames(x) <- features
    ## select only mean and standard deviations from sensor data
    featureCols <- grep("mean|std", names(x))
    x <- x[, featureCols]
    ## load activity data, convert ID to human readable labels
    y <- read.table(paste0("y_", set, ".txt"))
    y <- rename(y, activity = V1)
    y$activity <- activityLabels[y$activity]
    ## bind columns: subject id, activity label, then sensor features
    cbind(subjects, y, x)
}
## combine test and training sets
mergedData <- rbind(readData("test"),
readData("train"))
## return to main directory
setwd(root)
## create tidy dataset
dataMeans <- function(data) { colMeans(data[,-c(1,2)]) }
tidy <- ddply(mergedData, .(subject, activity), dataMeans)
names(tidy)[-c(1,2)] <- paste0("mean", names(tidy)[-c(1,2)])
## write file
write.table(tidy, "tidyData.txt", row.names = FALSE)
|
2d47f29b7b2ccb2ad4c233dfcf1f42bdfcc9ce27 | 02a9b547caaf419f5163bd6e4421905f28080b79 | /applyfunctions.R | d371cf2bddc311e8d873224e82de259cd59812d4 | [] | no_license | nunsuch123/RProgramming | b05baeee5059f591bbb0431975f53416698fc991 | bff4e74220b6fc086c231658d7a65a82256e49ff | refs/heads/master | 2020-05-16T01:37:16.286807 | 2019-05-10T17:01:29 | 2019-05-10T17:01:29 | 29,223,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 276 | r | applyfunctions.R | s <- split(iris, iris$Species)
lapply(s, function(x) colMeans(x[, c("Sepal.Length","Sepal.Width")]))
s <- split(airquality, airquality$Month)
sapply(s, function(x) colMeans(x[, c("Ozone", "Solar.R", "Wind")]))
apply(iris[1:4], 2, mean)
with(mtcars, tapply(mpg, cyl, mean)) |
016a39a1dcc226488deaccf712eac9db1dd9f161 | 396df2552224ffcb0294fe6e297b231aa2e59e68 | /_working/0143.wvs.R | e6a6c368887bc2004299be1059e021b343075ee9 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | ellisp/blog-source | d072bed980a5074d6c7fac03be3635f70ab5f098 | 1227f83df23af06da5280214ac7f2e0182be5707 | refs/heads/master | 2023-09-05T07:04:53.114901 | 2023-08-27T21:27:55 | 2023-08-27T21:27:55 | 122,695,494 | 17 | 8 | null | 2023-08-27T21:15:33 | 2018-02-24T02:36:45 | HTML | UTF-8 | R | false | false | 12,206 | r | 0143.wvs.R | library(tidyverse)
library(frs)
library(data.table)
library(odbc)
library(viridis)
library(ggthemes)
library(scales)
# correct citation must be used WV6_Data_R_v20180912; .
# http://www.worldvaluessurvey.org/WVSDocumentationWV6.jsp
wvs <- readRDS("../data/F00007762-WV6_Data_R_v20180912.Rds")
# label code from David Hood @Thoughtfulnz https://gist.github.com/thoughtfulbloke/8d6e016a74039c030ba3871dca36c19c
# Convert a labelled survey vector (haven-style) to its human-readable
# labels. Codes with no matching label fall back to the original value
# as character; vectors with no "labels" attribute are returned as-is.
unlabel <- function(x){
  # "labels" attribute maps numeric codes -> label names
  codes <- attr(x, "labels")
  if(is.null(codes)) {
    # no code table: report which variable and return it unchanged
    print(paste("No labels with",attr(x, "label")))
    return(x)
  }
  # numeric view of the data, joined against the code/label table
  df <- data.frame(y = as.numeric(x), stringsAsFactors = FALSE)
  replacement <- data.frame(
    y = as.numeric(codes),
    newform = names(codes),
    stringsAsFactors = FALSE
  )
  # codes without a label fall back to the original value as character
  df2 <- df %>% left_join(replacement, by="y") %>%
    mutate(newform = ifelse(is.na(newform), as.character(x), newform))
  # sanity check: same length and same number of NAs before/after the
  # join; otherwise report the problem variable and return the input
  if(length(x) == length(df2$newform) &
     sum(is.na(x)) == sum(is.na(df2$newform))){
    return(df2$newform)
  } else {
    print(paste("Problem with",attr(x, "label")))
    return(x)
  }
}
wv <- wvs %>%
mutate_all(unlabel)
labs <- wvs %>%
map_chr(~attributes(.)$label) %>%
make.names(unique = TRUE)
names(wv) <- labs
# End of David Hood's tidying code
#------------turn into a star schema--------------------
wv$respondent_id <- 1:nrow(wv)
respondent_vars <- c("Wave", "Country.Code", "Country.regions..with.split.ups.",
"Interview.number", "Study", "Unified.respondent.number",
"X1000.equilibrated.weight", "X1500.equilibrated.weight",
"Country...wave...study...set...year", "Nation.Wave", "Nation.Year",
"COW.Country.Code", "Weight.for.overal.secular.values",
"Weight.for.Emancipative.values", "Weight", "Weight.with.split.ups",
"Questionnaire.version", "Date.of.interview", "Survey.year", "respondent_id")
#--------long thin version that will be the basis of the main fact table
wvt <- wv[ , c(names(wv)[!names(wv) %in% respondent_vars] , "respondent_id")] %>%
gather(question, response, -respondent_id) %>%
mutate(question = as.factor(question),
response = as.factor(response),
question_id = as.integer(question),
response_id = as.integer(response)) %>%
# one mystery NA to Highest.educational.level.attained
filter(!is.na(response))
# note that this version is 38 million rows long
d_questions <- wvt %>%
distinct(question_id, question) %>%
mutate(question = gsub("..", ": ", question, fixed = TRUE),
question = gsub(".", " ", question, fixed = TRUE),
question = str_squish(question),
question = gsub("country s ", "country's ", question, fixed = TRUE),
question = gsub("Mother s ", "Mother's ", question, fixed = TRUE),
question = gsub("Father s ", "Father's ", question, fixed = TRUE)) %>%
separate(question, into = c("super_broad", "very_broad", "broad_question", "mid_question", "narrow_question"),
fill = "left", remove = FALSE, sep = ":") %>%
mutate(narrow_question = str_squish(narrow_question),
mid_question = str_squish(mid_question),
broad_question = str_squish(broad_question))
# Response dimension table: one row per distinct response string, plus
# derived helper columns:
#   * cleaned text variants (commas/spaces stripped, lower case)
#   * numeric parses (response_numeric, response_any_numerals, response_class)
#   * 0/1 indicator columns (agrees, important, trust, ...) marking which
#     response strings count as a positive answer for each question family
#   * `invalid`, flagging non-substantive responses (no answer, not asked, ...)
d_responses <- wvt %>%
  distinct(response_id, response) %>%
  # cleaning
  mutate(response = str_squish(gsub("Dont ", "Don't ", response))) %>%
  # creating alternative versions of some of the responses:
  mutate(response_no_commas = gsub(",", "", response, fixed = TRUE),
         response_no_spaces = gsub(" ", "", response_no_commas, fixed = TRUE),
         response_lower_case = tolower(response),
         # NA (warning suppressed) when the response is not purely numeric
         response_numeric = suppressWarnings(as.numeric(response_no_spaces)),
         # otherwise fall back to the first run of digits embedded in the text
         response_any_numerals = ifelse(is.na(response_numeric),
                                        str_extract(response_no_spaces, "[0-9]+"),
                                        response_numeric),
         response_class = ifelse(is.na(response_numeric), "character", "numeric"),
         agrees = as.numeric(response_lower_case %in%
                               c("strongly agree", "agree", "completely agree", "agree strongly")),
         important = as.numeric(response_lower_case %in%
                                  c("very important", "important", "absolutely important", "rather important")),
         trust = as.numeric(response_lower_case %in%
                              c("trust somewhat", "trust completely", "most people can be trusted")),
         often = as.numeric(response_lower_case %in%
                              c("fairly often", "very often", "often", "quite frequently", "very frequently", "frequently")),
         like_me = as.numeric(response_lower_case %in%
                                c("very much like me", "like me", "somewhat like me", "a little like")),
         interested = as.numeric(response_lower_case %in%
                                   c("somewhat interested", "very interested", "respondent was somewhat interested",
                                     "respondent was very interested")),
         satisfied = as.numeric(response_lower_case %in%
                                  c("completely satisfied", "fairly satisfied", "very satisfied", "strongly satisfied")),
         happy = as.numeric(response_lower_case %in%
                              c("rather happy", "very happy")),
         respect = as.numeric(response_lower_case %in%
                                c("fairly much respect")),
         # BUG FIX: this flag is matched against the *lower-cased* response,
         # so the literal must be lower case too. The original compared
         # against "Always justifiable" and therefore never matched anything.
         justifiable = as.numeric(response_lower_case %in%
                                    c("always justifiable")),
         invalid = as.numeric(response_lower_case %in%
                                c("no answer") |
                                grepl("not asked", response_lower_case) |
                                grepl("don't know", response_lower_case) |
                                grepl("unsure", response_lower_case) |
                                grepl("unknown", response_lower_case) |
                                grepl("inapplicable", response_lower_case) |
                                grepl("dropped out survey", response_lower_case) |
                                grepl("inappropriate response", response_lower_case) |
                                grepl("missing", response_lower_case) |
                                grepl("not applicable", response_lower_case))
  ) %>%
  select(response_id, everything())
d_responses %>%
filter(grepl("inappropriate", response_lower_case)) %>%
select(response, important, agrees, satisfied, respect, invalid)
d_respondents <- wv[ , respondent_vars] %>%
rename_all(tolower) %>%
rename_all(function(x){gsub("\\.+", "_", x)}) %>%
select(respondent_id, everything()) %>%
mutate(weight = ifelse(weight == "No weighting", 1, as.numeric(weight)))
f_wvs <- wvt %>%
select(respondent_id, question_id, response_id)
#-----------upload to database--------------
fwrite(d_respondents[ , c("respondent_id", "wave", "country_code", "weight", "survey_year")],
"d_respondents.txt", sep = "|", col.names = FALSE)
bcp("localhost", "survey_microdata", "wvs", table = "d_respondents", file = "d_respondents.txt")
fwrite(d_questions[ , c("question_id", "question")],
"d_questions.txt", sep = "|", col.names = FALSE)
bcp("localhost", "survey_microdata", "wvs", table = "d_questions", file = "d_questions.txt")
fwrite(d_responses, "d_responses.txt", sep = "|", col.names = FALSE)
bcp("localhost", "survey_microdata", "wvs", table = "d_responses", file = "d_responses.txt")
# this is astonishingly fast - less than a second to write 38 million rows to disk
fwrite(f_wvs, "f_wvs.txt", sep = "|", col.names = FALSE)
# this takes more like a few minutes. As well as writing to the database server disk,
# it's checking for uniqueness of primary keys and the foreign key constraints.
bcp("localhost", "survey_microdata", "wvs", table = "f_wvs", file = "f_wvs.txt")
#-----------------analysis--------------
# 40 most commonly used responses:
f_wvs %>%
group_by(response_id) %>%
summarise(freq = n()) %>%
arrange(desc(freq)) %>%
slice(1:4000) %>%
inner_join(d_responses, by = "response_id") %>% View
mutate(response = fct_reorder(response, freq)) %>%
ggplot(aes(x = freq, y = response)) +
geom_point()
# Frequency of use of each integer response value from 0 to 30 -- a rough
# check on how the Likert-style numeric codes are distributed:
f_wvs %>%
group_by(response_id) %>%
summarise(freq = n()) %>%
arrange(desc(freq)) %>%
inner_join(filter(d_responses, response_numeric %in% 0:30), by = "response_id") %>%
mutate(response = fct_reorder(response, response_numeric)) %>%
filter(freq > 0) %>%
ggplot(aes(x = freq, y = response)) +
geom_point()
# Number of agreeing responses per question. This of course mostly reflects
# the number of times a question was asked and drew a valid response, not the
# actual amount of agreement (n() counts rows, not proportions).
d_responses %>%
filter(agrees == 1) %>%
inner_join(f_wvs, by = "response_id") %>%
inner_join(d_questions, by = "question_id") %>%
group_by(question) %>%
summarise(freq = n()) %>%
ungroup() %>%
mutate(question = fct_reorder(question, freq)) %>%
ggplot(aes(x = freq, y = question)) +
geom_point()
# More useful: the *proportion* of responses that agree, per question.
# First identify every question that has at least one agreeing response
# recorded against it (i.e. the agree/disagree style questions).
agree_questions <- d_responses %>%
filter(agrees == 1) %>%
inner_join(f_wvs, by = "response_id") %>%
inner_join(d_questions, by = "question_id") %>%
distinct(question, question_id)
# Unweighted mean of the agrees indicator per question (survey weights are
# applied later, in the database).
f_wvs %>%
inner_join(agree_questions, by = "question_id") %>%
inner_join(d_responses, by = "response_id") %>%
group_by(question) %>%
summarise(agrees = mean(agrees)) %>%
mutate(question = fct_reorder(question, agrees)) %>%
ggplot(aes(x = agrees, y = question)) +
geom_point()
# To weight them we need to do an inner join of my 5 million facts (after filtering) with my 90,000 row respondent dimension table
# and this is too much for my computer
# Workaround: push the weighted aggregation down to the database. The CTE
# identifies every agree/disagree question; the outer query then computes,
# for each (question, country) pair, the weight-adjusted share of valid
# answers that agreed or strongly agreed.
sql <- "
WITH agree_questions AS
(SELECT DISTINCT question, c.question_id
FROM wvs.d_responses AS a
INNER JOIN wvs.f_wvs AS b
ON a.response_id = b.response_id
INNER JOIN wvs.d_questions AS c
ON b.question_id = c.question_id
WHERE agrees = 1)
SELECT
sum(g.weight * f.agrees) / sum(g.weight) AS weighted_agree,
question,
country_code
FROM wvs.f_wvs AS d
INNER JOIN agree_questions AS e
ON d.question_id = e.question_id
INNER JOIN wvs.d_responses AS f
ON d.response_id = f.response_id
INNER JOIN wvs.d_respondents AS g
ON d.respondent_id = g.respondent_id
WHERE f.invalid != 1
GROUP BY question, country_code
ORDER by weighted_agree DESC"
# Run the weighted aggregation on the database server and pull back the
# (much smaller) question x country summary.
con <- dbConnect(odbc(), "sqlserver", database = "survey_microdata")
d <- dbGetQuery(con, sql)
# Order both axes of the coming heatmap by overall weighted agreement.
d2 <- d %>%
as_tibble() %>%
mutate(question = fct_reorder(question, weighted_agree)) %>%
mutate(country_code = fct_reorder(country_code, weighted_agree))
# yup = number of distinct questions; used below to place the duplicated
# country labels just above the top row of the heatmap.
yup <- length(unique(d2$question))
p <- d2 %>%
ggplot(aes(x = country_code, y = question)) +
theme_tufte(base_family = "main_font") +
geom_tile(aes(fill = weighted_agree)) +
scale_fill_viridis("Weighted percentage of valid answers that agreed or strongly agreed:", label = percent) +
theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 8),
legend.position = "bottom",
axis.text.y = element_text(size = 7)) +
labs(x = "The countries that 'agree' with more statements are on the right.",
y = "The statements most agreed with (when asked) are at the top.\n",
caption = "Source: http://www.worldvaluessurvey.org/WVSDocumentationWV6.jsp, WV6_Data_R_v20180912. Analysis by http://freerangestats.info") +
# draw the countries along the top as a sort of repeat of the x axis:
geom_text(data = distinct(d2, country_code), aes(label = country_code),
y = yup + 1, angle = 45, hjust = 0, size = 2.7) +
expand_limits(y = yup + 9, x = length(unique(d2$country_code)) + 3) +
ggtitle("What do people of the world agree with?",
"All questions in the World Values survey with an agree/disagree style of response.")
# Render to SVG, then convert to PNG with the helper used elsewhere on the blog.
CairoSVG("../img/0143-heatmap.svg", 18, 9.5)
print(p)
dev.off()
convert_pngs("0143")
|
f7360825f8f78e9dfe95afb71e66876b15c7bc50 | 0e51a3076da4e856590c7bde1d3604b519ee1f99 | /cachematrix.R | f2f8e8c327ceb5da6371ea46fd9781e746d987cc | [] | no_license | Brennui/ProgrammingAssignment2 | 31bc3ca414222f544fe630b83d6c11f6e0190d06 | 7abb5cc0979b422eefb3f57bbbff27586c6ec64f | refs/heads/master | 2020-12-26T00:51:35.229574 | 2015-11-23T01:03:12 | 2015-11-23T01:03:12 | 46,646,988 | 0 | 0 | null | 2015-11-22T04:55:52 | 2015-11-22T04:55:51 | null | UTF-8 | R | false | false | 1,665 | r | cachematrix.R | ## A matrix may be input into the makeCacheMatrix function,
## The closures it returns are used to store and retrieve the matrix and its
## cached inverse. cacheSolve takes the list returned by makeCacheMatrix
## (holding a matrix) and solves for the inverse of that matrix, reusing a
## cached result when one is available.
## Create a cache-enabled wrapper around a matrix.
##
## Returns a list of four closures sharing this call's environment:
##   set(y)       -- store a new matrix and clear the cached inverse
##   get()        -- return the stored matrix
##   setinv(m)    -- cache a (pre-computed) inverse matrix
##   getinv()     -- return the cached inverse, or NULL if none is cached yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y       # replace the matrix in the enclosing environment
      inv <<- NULL  # any previously cached inverse is now stale
    },
    get = function() x,
    setinv = function(invmat) inv <<- invmat,
    getinv = function() inv
  )
}
## Return the inverse of the matrix wrapped by `x` (a list produced by
## makeCacheMatrix). The inverse is computed with solve() only when no cached
## value is available; extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {           # cache hit: skip the computation
    print("data in cache, retrieving")
    return(cached)
  }
  result <- solve(x$get(), ...)     # cache miss: invert the stored matrix
  x$setinv(result)                  # remember the result for next time
  result
}
|
0f1f6fbd1bcc0e0d334b55d8664dad0f478147ce | d44475655dbe61df07aca3c6417d53b88f84ee52 | /s1~s10/s_test6.R | a4b18bcb9f1c3ffbb4b6f692f8a09feb20650cba | [] | no_license | bjh1646/R_data | ad564701e651e3e88a0bd91621dbb39ecc283efc | dd46c37cdd8070a4ddab1c7f376fde010b170d68 | refs/heads/master | 2023-08-21T16:05:22.773594 | 2021-08-29T13:08:46 | 2021-08-29T13:08:46 | 392,208,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 235 | r | s_test6.R | library(shiny)
# Minimal Shiny demo: echo a numeric input straight back as verbatim text.
ui <- fluidPage(
  numericInput("obs", "Observation:", 10, min = 1, max = 100),
  verbatimTextOutput("value")
)
# Server: render the current value of the `obs` input whenever it changes.
server <- function(input, output, session) {
  output$value <- renderText({
    input$obs
  })
}
shinyApp(ui, server)
|
ef6e70de80a5d7854c0ca47b63203d4b13bb30bf | 1aced36f0193b1cd8f5cc6b180d40b60d08ce994 | /inst/network.scripts/insurance.R | 93e5b6fbb35b7362f098db74638a92d12df49a47 | [] | no_license | gasse/bnlearn-clone-3.1 | 28ffd5b50ca97ec7fe54fa6037bc4a7757a81315 | 2bd270be53eafb08e13bdadce448db27442e2978 | refs/heads/master | 2020-12-24T18:03:29.368563 | 2013-11-14T16:34:04 | 2013-11-14T16:34:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 67,549 | r | insurance.R |
n = 20000
AGE = c("Adolescent", "Adult", "Senior")
SEC = c("Prole", "Middle", "UpperMiddle", "Wealthy")
BOOL = c("True", "False")
RISK = c("Psychopath", "Adventurous", "Normal", "Cautious")
HOME = c("Secure", "City", "Suburb", "Rural")
MIL = c("FiveThou", "TwentyThou", "FiftyThou", "Domino")
YEAR = c("Current", "Older")
MODEL = c("SportsCar", "Economy", "FamilySedan", "Luxury", "SuperLuxury")
SKILL = c("SubStandard", "Normal", "Expert")
VALUE = c("FiveThou", "TenThou", "TwentyThou", "FiftyThou", "Million")
RUGGED = c("EggShell", "Football", "Tank")
SKILL2 = c("Poor", "Normal", "Excellent")
HIST = c("Zero", "One", "Many")
ACC = c("None", "Mild", "Moderate", "Severe")
SKILL3 = c("Poor", "Fair", "Good", "Excellent")
VALUE2 = c("Thousand", "TenThou", "HundredThou", "Million")
Age = sample(AGE, n, prob = c(0.2, 0.6, 0.2), replace = TRUE)
Mileage = sample(MIL, n, prob = c(0.1, 0.4, 0.4, 0.1), replace = TRUE)
SocioEcon = Age
SocioEcon[SocioEcon == "Adolescent"] = sample(SEC, length(which(SocioEcon == "Adolescent")), prob = c(0.4, 0.4, 0.19, 0.01), replace = TRUE)
SocioEcon[SocioEcon == "Adult"] = sample(SEC, length(which(SocioEcon == "Adult")), prob = c(0.4, 0.4, 0.19, 0.01), replace = TRUE)
SocioEcon[SocioEcon == "Senior"] = sample(SEC, length(which(SocioEcon == "Senior")), prob = c(0.5, 0.2, 0.29, 0.01), replace = TRUE)
OtherCar = SocioEcon
OtherCar[OtherCar == "Prole"] = sample(BOOL, length(which(OtherCar == "Prole")), prob = c(0.5, 0.5), replace = TRUE)
OtherCar[OtherCar == "Middle"] = sample(BOOL, length(which(OtherCar == "Middle")), prob = c(0.8, 0.2), replace = TRUE)
OtherCar[OtherCar == "UpperMiddle"] = sample(BOOL, length(which(OtherCar == "UpperMiddle")), prob = c(0.9, 0.1), replace = TRUE)
OtherCar[OtherCar == "Wealthy"] = sample(BOOL, length(which(OtherCar == "Wealthy")), prob = c(0.95, 0.05), replace = TRUE)
GoodStudent = apply(cbind(SocioEcon, Age), 1, paste, collapse = ":")
GoodStudent[GoodStudent == "Prole:Adolescent"] = sample(BOOL, length(which(GoodStudent == "Prole:Adolescent")), prob = c(0.1, 0.9), replace = TRUE)
GoodStudent[GoodStudent == "Prole:Adult"] = sample(BOOL, length(which(GoodStudent == "Prole:Adult")), prob = c(0, 1), replace = TRUE)
GoodStudent[GoodStudent == "Prole:Senior"] = sample(BOOL, length(which(GoodStudent == "Prole:Senior")), prob = c(0, 1), replace = TRUE)
GoodStudent[GoodStudent == "Middle:Adolescent"] = sample(BOOL, length(which(GoodStudent == "Middle:Adolescent")), prob = c(0.2, 0.8), replace = TRUE)
GoodStudent[GoodStudent == "Middle:Adult"] = sample(BOOL, length(which(GoodStudent == "Middle:Adult")), prob = c(0, 1), replace = TRUE)
GoodStudent[GoodStudent == "Middle:Senior"] = sample(BOOL, length(which(GoodStudent == "Middle:Senior")), prob = c(0, 1), replace = TRUE)
GoodStudent[GoodStudent == "UpperMiddle:Adolescent"] = sample(BOOL, length(which(GoodStudent == "UpperMiddle:Adolescent")), prob = c(0.5, 0.5), replace = TRUE)
GoodStudent[GoodStudent == "UpperMiddle:Adult"] = sample(BOOL, length(which(GoodStudent == "UpperMiddle:Adult")), prob = c(0, 1), replace = TRUE)
GoodStudent[GoodStudent == "UpperMiddle:Senior"] = sample(BOOL, length(which(GoodStudent == "UpperMiddle:Senior")), prob = c(0, 1), replace = TRUE)
GoodStudent[GoodStudent == "Wealthy:Adolescent"] = sample(BOOL, length(which(GoodStudent == "Wealthy:Adolescent")), prob = c(0.4, 0.6), replace = TRUE)
GoodStudent[GoodStudent == "Wealthy:Adult"] = sample(BOOL, length(which(GoodStudent == "Wealthy:Adult")), prob = c(0, 1), replace = TRUE)
GoodStudent[GoodStudent == "Wealthy:Senior"] = sample(BOOL, length(which(GoodStudent == "Wealthy:Senior")), prob = c(0, 1), replace = TRUE)
RiskAversion = apply(cbind(Age, SocioEcon), 1, paste, collapse = ":")
RiskAversion[RiskAversion == "Adolescent:Prole"] = sample(RISK, length(which(RiskAversion == "Adolescent:Prole")), prob = c(0.02, 0.58, 0.3, 0.1), replace = TRUE)
RiskAversion[RiskAversion == "Adolescent:Middle"] = sample(RISK, length(which(RiskAversion == "Adolescent:Middle")), prob = c(0.02, 0.38, 0.5, 0.1), replace = TRUE)
RiskAversion[RiskAversion == "Adolescent:UpperMiddle"] = sample(RISK, length(which(RiskAversion == "Adolescent:UpperMiddle")), prob = c(0.02, 0.48, 0.4, 0.1), replace = TRUE)
RiskAversion[RiskAversion == "Adolescent:Wealthy"] = sample(RISK, length(which(RiskAversion == "Adolescent:Wealthy")), prob = c(0.02, 0.58, 0.3, 0.1), replace = TRUE)
RiskAversion[RiskAversion == "Adult:Prole"] = sample(RISK, length(which(RiskAversion == "Adult:Prole")), prob = c(0.015, 0.285, 0.5, 0.2), replace = TRUE)
RiskAversion[RiskAversion == "Adult:Middle"] = sample(RISK, length(which(RiskAversion == "Adult:Middle")), prob = c(0.015, 0.185, 0.6, 0.2), replace = TRUE)
RiskAversion[RiskAversion == "Adult:UpperMiddle"] = sample(RISK, length(which(RiskAversion == "Adult:UpperMiddle")), prob = c(0.015, 0.285, 0.5, 0.2), replace = TRUE)
RiskAversion[RiskAversion == "Adult:Wealthy"] = sample(RISK, length(which(RiskAversion == "Adult:Wealthy")), prob = c(0.015, 0.285, 0.4, 0.3), replace = TRUE)
RiskAversion[RiskAversion == "Senior:Prole"] = sample(RISK, length(which(RiskAversion == "Senior:Prole")), prob = c(0.01, 0.09, 0.4, 0.5), replace = TRUE)
RiskAversion[RiskAversion == "Senior:Middle"] = sample(RISK, length(which(RiskAversion == "Senior:Middle")), prob = c(0.01, 0.04, 0.35, 0.6), replace = TRUE)
RiskAversion[RiskAversion == "Senior:UpperMiddle"] = sample(RISK, length(which(RiskAversion == "Senior:UpperMiddle")), prob = c(0.01, 0.09, 0.4, 0.5), replace = TRUE)
RiskAversion[RiskAversion == "Senior:Wealthy"] = sample(RISK, length(which(RiskAversion == "Senior:Wealthy")), prob = c(0.01, 0.09, 0.4, 0.5), replace = TRUE)
AntiTheft = apply(cbind(RiskAversion, SocioEcon), 1, paste, collapse = ":")
AntiTheft[AntiTheft == "Psychopath:Prole"] = sample(BOOL, length(which(AntiTheft == "Psychopath:Prole")), prob = c(0.000001, 0.999999), replace = TRUE)
AntiTheft[AntiTheft == "Psychopath:Middle"] = sample(BOOL, length(which(AntiTheft == "Psychopath:Middle")), prob = c(0.000001, 0.999999), replace = TRUE)
AntiTheft[AntiTheft == "Psychopath:UpperMiddle"] = sample(BOOL, length(which(AntiTheft == "Psychopath:UpperMiddle")), prob = c(0.05, 0.95), replace = TRUE)
AntiTheft[AntiTheft == "Psychopath:Wealthy"] = sample(BOOL, length(which(AntiTheft == "Psychopath:Wealthy")), prob = c(0.5, 0.5), replace = TRUE)
AntiTheft[AntiTheft == "Adventurous:Prole"] = sample(BOOL, length(which(AntiTheft == "Adventurous:Prole")), prob = c(0.000001, 0.999999), replace = TRUE)
AntiTheft[AntiTheft == "Adventurous:Middle"] = sample(BOOL, length(which(AntiTheft == "Adventurous:Middle")), prob = c(0.000001, 0.999999), replace = TRUE)
AntiTheft[AntiTheft == "Adventurous:UpperMiddle"] = sample(BOOL, length(which(AntiTheft == "Adventurous:UpperMiddle")), prob = c(0.2, 0.8), replace = TRUE)
AntiTheft[AntiTheft == "Adventurous:Wealthy"] = sample(BOOL, length(which(AntiTheft == "Adventurous:Wealthy")), prob = c(0.5, 0.5), replace = TRUE)
AntiTheft[AntiTheft == "Normal:Prole"] = sample(BOOL, length(which(AntiTheft == "Normal:Prole")), prob = c(0.1, 0.9), replace = TRUE)
AntiTheft[AntiTheft == "Normal:Middle"] = sample(BOOL, length(which(AntiTheft == "Normal:Middle")), prob = c(0.3, 0.7), replace = TRUE)
AntiTheft[AntiTheft == "Normal:UpperMiddle"] = sample(BOOL, length(which(AntiTheft == "Normal:UpperMiddle")), prob = c(0.9, 0.1), replace = TRUE)
AntiTheft[AntiTheft == "Normal:Wealthy"] = sample(BOOL, length(which(AntiTheft == "Normal:Wealthy")), prob = c(0.8, 0.2), replace = TRUE)
AntiTheft[AntiTheft == "Cautious:Prole"] = sample(BOOL, length(which(AntiTheft == "Cautious:Prole")), prob = c(0.95, 0.05), replace = TRUE)
AntiTheft[AntiTheft == "Cautious:Middle"] = sample(BOOL, length(which(AntiTheft == "Cautious:Middle")), prob = c(0.999999, 0.000001), replace = TRUE)
AntiTheft[AntiTheft == "Cautious:UpperMiddle"] = sample(BOOL, length(which(AntiTheft == "Cautious:UpperMiddle")), prob = c(0.999999, 0.000001), replace = TRUE)
AntiTheft[AntiTheft == "Cautious:Wealthy"] = sample(BOOL, length(which(AntiTheft == "Cautious:Wealthy")), prob = c(0.999999, 0.000001), replace = TRUE)
HomeBase = apply(cbind(RiskAversion, SocioEcon), 1, paste, collapse = ":")
HomeBase[HomeBase == "Psychopath:Prole"] = sample(HOME, length(which(HomeBase == "Psychopath:Prole")), prob = c(0.000001, 0.8, 0.049999, 0.15), replace = TRUE)
HomeBase[HomeBase == "Psychopath:Middle"] = sample(HOME, length(which(HomeBase == "Psychopath:Middle")), prob = c(0.15, 0.8, 0.04, 0.01), replace = TRUE)
HomeBase[HomeBase == "Psychopath:UpperMiddle"] = sample(HOME, length(which(HomeBase == "Psychopath:UpperMiddle")), prob = c(0.35, 0.6, 0.04, 0.01), replace = TRUE)
HomeBase[HomeBase == "Psychopath:Wealthy"] = sample(HOME, length(which(HomeBase == "Psychopath:Wealthy")), prob = c(0.489999, 0.5, 0.000001, 0.01), replace = TRUE)
HomeBase[HomeBase == "Adventurous:Prole"] = sample(HOME, length(which(HomeBase == "Adventurous:Prole")), prob = c(0.000001, 0.8, 0.05, 0.149999), replace = TRUE)
HomeBase[HomeBase == "Adventurous:Middle"] = sample(HOME, length(which(HomeBase == "Adventurous:Middle")), prob = c(0.01, 0.25, 0.6, 0.14), replace = TRUE)
HomeBase[HomeBase == "Adventurous:UpperMiddle"] = sample(HOME, length(which(HomeBase == "Adventurous:UpperMiddle")), prob = c(0.2, 0.4, 0.3, 0.1), replace = TRUE)
HomeBase[HomeBase == "Adventurous:Wealthy"] = sample(HOME, length(which(HomeBase == "Adventurous:Wealthy")), prob = c(0.95, 0.000001, 0.000001, 0.049998), replace = TRUE)
HomeBase[HomeBase == "Normal:Prole"] = sample(HOME, length(which(HomeBase == "Normal:Prole")), prob = c(0.000001, 0.8, 0.05, 0.149999), replace = TRUE)
HomeBase[HomeBase == "Normal:Middle"] = sample(HOME, length(which(HomeBase == "Normal:Middle")), prob = c(0.299999, 0.000001, 0.6, 0.1), replace = TRUE)
HomeBase[HomeBase == "Normal:UpperMiddle"] = sample(HOME, length(which(HomeBase == "Normal:UpperMiddle")), prob = c(0.5, 0.000001, 0.4, 0.099999), replace = TRUE)
HomeBase[HomeBase == "Normal:Wealthy"] = sample(HOME, length(which(HomeBase == "Normal:Wealthy")), prob = c(0.85, 0.000001, 0.001, 0.148999), replace = TRUE)
HomeBase[HomeBase == "Cautious:Prole"] = sample(HOME, length(which(HomeBase == "Cautious:Prole")), prob = c(0.000001, 0.8, 0.05, 0.149999), replace = TRUE)
HomeBase[HomeBase == "Cautious:Middle"] = sample(HOME, length(which(HomeBase == "Cautious:Middle")), prob = c(0.95, 0.000001, 0.024445, 0.025554), replace = TRUE)
HomeBase[HomeBase == "Cautious:UpperMiddle"] = sample(HOME, length(which(HomeBase == "Cautious:UpperMiddle")), prob = c(0.999997, 0.000001, 0.000001, 0.000001), replace = TRUE)
HomeBase[HomeBase == "Cautious:Wealthy"] = sample(HOME, length(which(HomeBase == "Cautious:Wealthy")), prob = c(0.999997, 0.000001, 0.000001, 0.000001), replace = TRUE)
SeniorTrain = apply(cbind(Age, RiskAversion), 1, paste, collapse = ":")
SeniorTrain[SeniorTrain == "Adolescent:Psychopath"] = sample(BOOL, length(which(SeniorTrain == "Adolescent:Psychopath")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Adolescent:Adventurous"] = sample(BOOL, length(which(SeniorTrain == "Adolescent:Adventurous")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Adolescent:Normal"] = sample(BOOL, length(which(SeniorTrain == "Adolescent:Normal")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Adolescent:Cautious"] = sample(BOOL, length(which(SeniorTrain == "Adolescent:Cautious")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Adult:Psychopath"] = sample(BOOL, length(which(SeniorTrain == "Adult:Psychopath")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Adult:Adventurous"] = sample(BOOL, length(which(SeniorTrain == "Adult:Adventurous")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Adult:Normal"] = sample(BOOL, length(which(SeniorTrain == "Adult:Normal")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Adult:Cautious"] = sample(BOOL, length(which(SeniorTrain == "Adult:Cautious")), prob = c(0, 1), replace = TRUE)
SeniorTrain[SeniorTrain == "Senior:Psychopath"] = sample(BOOL, length(which(SeniorTrain == "Senior:Psychopath")), prob = c(0.000001, 0.999999), replace = TRUE)
SeniorTrain[SeniorTrain == "Senior:Adventurous"] = sample(BOOL, length(which(SeniorTrain == "Senior:Adventurous")), prob = c(0.000001, 0.999999), replace = TRUE)
SeniorTrain[SeniorTrain == "Senior:Normal"] = sample(BOOL, length(which(SeniorTrain == "Senior:Normal")), prob = c(0.3, 0.7), replace = TRUE)
SeniorTrain[SeniorTrain == "Senior:Cautious"] = sample(BOOL, length(which(SeniorTrain == "Senior:Cautious")), prob = c(0.9, 0.1), replace = TRUE)
VehicleYear = apply(cbind(SocioEcon, RiskAversion), 1, paste, collapse = ":")
VehicleYear[VehicleYear == "Prole:Psychopath"] = sample(YEAR, length(which(VehicleYear == "Prole:Psychopath")), prob = c(0.15, 0.85), replace = TRUE)
VehicleYear[VehicleYear == "Prole:Adventurous"] = sample(YEAR, length(which(VehicleYear == "Prole:Adventurous")), prob = c(0.15, 0.85), replace = TRUE)
VehicleYear[VehicleYear == "Prole:Normal"] = sample(YEAR, length(which(VehicleYear == "Prole:Normal")), prob = c(0.15, 0.85), replace = TRUE)
VehicleYear[VehicleYear == "Prole:Cautious"] = sample(YEAR, length(which(VehicleYear == "Prole:Cautious")), prob = c(0.15, 0.85), replace = TRUE)
VehicleYear[VehicleYear == "Middle:Psychopath"] = sample(YEAR, length(which(VehicleYear == "Middle:Psychopath")), prob = c(0.3, 0.7), replace = TRUE)
VehicleYear[VehicleYear == "Middle:Adventurous"] = sample(YEAR, length(which(VehicleYear == "Middle:Adventurous")), prob = c(0.3, 0.7), replace = TRUE)
VehicleYear[VehicleYear == "Middle:Normal"] = sample(YEAR, length(which(VehicleYear == "Middle:Normal")), prob = c(0.3, 0.7), replace = TRUE)
VehicleYear[VehicleYear == "Middle:Cautious"] = sample(YEAR, length(which(VehicleYear == "Middle:Cautious")), prob = c(0.3, 0.7), replace = TRUE)
VehicleYear[VehicleYear == "UpperMiddle:Psychopath"] = sample(YEAR, length(which(VehicleYear == "UpperMiddle:Psychopath")), prob = c(0.8, 0.2), replace = TRUE)
VehicleYear[VehicleYear == "UpperMiddle:Adventurous"] = sample(YEAR, length(which(VehicleYear == "UpperMiddle:Adventurous")), prob = c(0.8, 0.2), replace = TRUE)
VehicleYear[VehicleYear == "UpperMiddle:Normal"] = sample(YEAR, length(which(VehicleYear == "UpperMiddle:Normal")), prob = c(0.8, 0.2), replace = TRUE)
VehicleYear[VehicleYear == "UpperMiddle:Cautious"] = sample(YEAR, length(which(VehicleYear == "UpperMiddle:Cautious")), prob = c(0.8, 0.2), replace = TRUE)
VehicleYear[VehicleYear == "Wealthy:Psychopath"] = sample(YEAR, length(which(VehicleYear == "Wealthy:Psychopath")), prob = c(0.9, 0.1), replace = TRUE)
VehicleYear[VehicleYear == "Wealthy:Adventurous"] = sample(YEAR, length(which(VehicleYear == "Wealthy:Adventurous")), prob = c(0.9, 0.1), replace = TRUE)
VehicleYear[VehicleYear == "Wealthy:Normal"] = sample(YEAR, length(which(VehicleYear == "Wealthy:Normal")), prob = c(0.9, 0.1), replace = TRUE)
VehicleYear[VehicleYear == "Wealthy:Cautious"] = sample(YEAR, length(which(VehicleYear == "Wealthy:Cautious")), prob = c(0.9, 0.1), replace = TRUE)
MakeModel = apply(cbind(SocioEcon, RiskAversion), 1, paste, collapse = ":")
MakeModel[MakeModel == "Prole:Psychopath"] = sample(MODEL, length(which(MakeModel == "Prole:Psychopath")), prob = c(0.1, 0.7, 0.2, 0, 0), replace = TRUE)
MakeModel[MakeModel == "Prole:Adventurous"] = sample(MODEL, length(which(MakeModel == "Prole:Adventurous")), prob = c(0.1, 0.7, 0.2, 0, 0), replace = TRUE)
MakeModel[MakeModel == "Prole:Normal"] = sample(MODEL, length(which(MakeModel == "Prole:Normal")), prob = c(0.1, 0.7, 0.2, 0, 0), replace = TRUE)
MakeModel[MakeModel == "Prole:Cautious"] = sample(MODEL, length(which(MakeModel == "Prole:Cautious")), prob = c(0.1, 0.7, 0.2, 0, 0), replace = TRUE)
MakeModel[MakeModel == "Middle:Psychopath"] = sample(MODEL, length(which(MakeModel == "Middle:Psychopath")), prob = c(0.15, 0.2, 0.65, 0, 0), replace = TRUE)
MakeModel[MakeModel == "Middle:Adventurous"] = sample(MODEL, length(which(MakeModel == "Middle:Adventurous")), prob = c(0.15, 0.2, 0.65, 0, 0), replace = TRUE)
MakeModel[MakeModel == "Middle:Normal"] = sample(MODEL, length(which(MakeModel == "Middle:Normal")), prob = c(0.15, 0.2, 0.65, 0, 0), replace = TRUE)
MakeModel[MakeModel == "Middle:Cautious"] = sample(MODEL, length(which(MakeModel == "Middle:Cautious")), prob = c(0.15, 0.2, 0.65, 0, 0), replace = TRUE)
MakeModel[MakeModel == "UpperMiddle:Psychopath"] = sample(MODEL, length(which(MakeModel == "UpperMiddle:Psychopath")), prob = c(0.2, 0.05, 0.3, 0.45, 0), replace = TRUE)
MakeModel[MakeModel == "UpperMiddle:Adventurous"] = sample(MODEL, length(which(MakeModel == "UpperMiddle:Adventurous")), prob = c(0.2, 0.05, 0.3, 0.45, 0), replace = TRUE)
MakeModel[MakeModel == "UpperMiddle:Normal"] = sample(MODEL, length(which(MakeModel == "UpperMiddle:Normal")), prob = c(0.2, 0.05, 0.3, 0.45, 0), replace = TRUE)
MakeModel[MakeModel == "UpperMiddle:Cautious"] = sample(MODEL, length(which(MakeModel == "UpperMiddle:Cautious")), prob = c(0.2, 0.05, 0.3, 0.45, 0), replace = TRUE)
MakeModel[MakeModel == "Wealthy:Psychopath"] = sample(MODEL, length(which(MakeModel == "Wealthy:Psychopath")), prob = c(0.3, 0.01, 0.09, 0.4, 0.2), replace = TRUE)
MakeModel[MakeModel == "Wealthy:Adventurous"] = sample(MODEL, length(which(MakeModel == "Wealthy:Adventurous")), prob = c(0.3, 0.01, 0.09, 0.4, 0.2), replace = TRUE)
MakeModel[MakeModel == "Wealthy:Normal"] = sample(MODEL, length(which(MakeModel == "Wealthy:Normal")), prob = c(0.3, 0.01, 0.09, 0.4, 0.2), replace = TRUE)
MakeModel[MakeModel == "Wealthy:Cautious"] = sample(MODEL, length(which(MakeModel == "Wealthy:Cautious")), prob = c(0.3, 0.01, 0.09, 0.4, 0.2), replace = TRUE)
DrivingSkill = apply(cbind(Age, SeniorTrain), 1, paste, collapse = ":")
DrivingSkill[DrivingSkill == "Adolescent:True"] = sample(SKILL, length(which(DrivingSkill == "Adolescent:True")), prob = c(0.5, 0.45, 0.05), replace = TRUE)
DrivingSkill[DrivingSkill == "Adolescent:False"] = sample(SKILL, length(which(DrivingSkill == "Adolescent:False")), prob = c(0.5, 0.45, 0.05), replace = TRUE)
DrivingSkill[DrivingSkill == "Adult:True"] = sample(SKILL, length(which(DrivingSkill == "Adult:True")), prob = c(0.3, 0.6, 0.1), replace = TRUE)
DrivingSkill[DrivingSkill == "Adult:False"] = sample(SKILL, length(which(DrivingSkill == "Adult:False")), prob = c(0.3, 0.6, 0.1), replace = TRUE)
DrivingSkill[DrivingSkill == "Senior:True"] = sample(SKILL, length(which(DrivingSkill == "Senior:True")), prob = c(0.1, 0.6, 0.3), replace = TRUE)
DrivingSkill[DrivingSkill == "Senior:False"] = sample(SKILL, length(which(DrivingSkill == "Senior:False")), prob = c(0.4, 0.5, 0.1), replace = TRUE)
CarValue = apply(cbind(MakeModel, VehicleYear, Mileage), 1, paste, collapse = ":")
CarValue[CarValue == "SportsCar:Current:FiveThou"] = sample(VALUE, length(which(CarValue == "SportsCar:Current:FiveThou")), prob = c(0, 0.1, 0.8, 0.09, 0.01), replace = TRUE)
CarValue[CarValue == "SportsCar:Current:TwentyThou"] = sample(VALUE, length(which(CarValue == "SportsCar:Current:TwentyThou")), prob = c(0, 0.1, 0.8, 0.09, 0.01), replace = TRUE)
CarValue[CarValue == "SportsCar:Current:FiftyThou"] = sample(VALUE, length(which(CarValue == "SportsCar:Current:FiftyThou")), prob = c(0, 0.1, 0.8, 0.09, 0.01), replace = TRUE)
CarValue[CarValue == "SportsCar:Current:Domino"] = sample(VALUE, length(which(CarValue == "SportsCar:Current:Domino")), prob = c(0, 0.1, 0.8, 0.09, 0.01), replace = TRUE)
CarValue[CarValue == "SportsCar:Older:FiveThou"] = sample(VALUE, length(which(CarValue == "SportsCar:Older:FiveThou")), prob = c(0.03, 0.3, 0.6, 0.06, 0.01), replace = TRUE)
CarValue[CarValue == "SportsCar:Older:TwentyThou"] = sample(VALUE, length(which(CarValue == "SportsCar:Older:TwentyThou")), prob = c(0.16, 0.5, 0.3, 0.03, 0.01), replace = TRUE)
CarValue[CarValue == "SportsCar:Older:FiftyThou"] = sample(VALUE, length(which(CarValue == "SportsCar:Older:FiftyThou")), prob = c(0.4, 0.47, 0.1, 0.02, 0.01), replace = TRUE)
CarValue[CarValue == "SportsCar:Older:Domino"] = sample(VALUE, length(which(CarValue == "SportsCar:Older:Domino")), prob = c(0.9, 0.06, 0.02, 0.01, 0.01), replace = TRUE)
CarValue[CarValue == "Economy:Current:FiveThou"] = sample(VALUE, length(which(CarValue == "Economy:Current:FiveThou")), prob = c(0.1, 0.8, 0.1, 0, 0), replace = TRUE)
CarValue[CarValue == "Economy:Current:TwentyThou"] = sample(VALUE, length(which(CarValue == "Economy:Current:TwentyThou")), prob = c(0.1, 0.8, 0.1, 0, 0), replace = TRUE)
CarValue[CarValue == "Economy:Current:FiftyThou"] = sample(VALUE, length(which(CarValue == "Economy:Current:FiftyThou")), prob = c(0.1, 0.8, 0.1, 0, 0), replace = TRUE)
CarValue[CarValue == "Economy:Current:Domino"] = sample(VALUE, length(which(CarValue == "Economy:Current:Domino")), prob = c(0.1, 0.8, 0.1, 0, 0), replace = TRUE)
CarValue[CarValue == "Economy:Older:FiveThou"] = sample(VALUE, length(which(CarValue == "Economy:Older:FiveThou")), prob = c(0.25, 0.7, 0.05, 0, 0), replace = TRUE)
CarValue[CarValue == "Economy:Older:TwentyThou"] = sample(VALUE, length(which(CarValue == "Economy:Older:TwentyThou")), prob = c(0.7, 0.2999, 0.0001, 0, 0), replace = TRUE)
CarValue[CarValue == "Economy:Older:FiftyThou"] = sample(VALUE, length(which(CarValue == "Economy:Older:FiftyThou")), prob = c(0.99, 0.009999, 0.000001, 0, 0), replace = TRUE)
CarValue[CarValue == "Economy:Older:Domino"] = sample(VALUE, length(which(CarValue == "Economy:Older:Domino")), prob = c(0.999998, 0.000001, 0.000001, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Current:FiveThou"] = sample(VALUE, length(which(CarValue == "FamilySedan:Current:FiveThou")), prob = c(0, 0.1, 0.9, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Current:TwentyThou"] = sample(VALUE, length(which(CarValue == "FamilySedan:Current:TwentyThou")), prob = c(0, 0.1, 0.9, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Current:FiftyThou"] = sample(VALUE, length(which(CarValue == "FamilySedan:Current:FiftyThou")), prob = c(0, 0.1, 0.9, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Current:Domino"] = sample(VALUE, length(which(CarValue == "FamilySedan:Current:Domino")), prob = c(0, 0.1, 0.9, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Older:FiveThou"] = sample(VALUE, length(which(CarValue == "FamilySedan:Older:FiveThou")), prob = c(0.2, 0.3, 0.5, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Older:TwentyThou"] = sample(VALUE, length(which(CarValue == "FamilySedan:Older:TwentyThou")), prob = c(0.5, 0.3, 0.2, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Older:FiftyThou"] = sample(VALUE, length(which(CarValue == "FamilySedan:Older:FiftyThou")), prob = c(0.7, 0.2, 0.1, 0, 0), replace = TRUE)
CarValue[CarValue == "FamilySedan:Older:Domino"] = sample(VALUE, length(which(CarValue == "FamilySedan:Older:Domino")), prob = c(0.99, 0.009999, 0.000001, 0, 0), replace = TRUE)
CarValue[CarValue == "Luxury:Current:FiveThou"] = sample(VALUE, length(which(CarValue == "Luxury:Current:FiveThou")), prob = c(0, 0, 0, 1, 0), replace = TRUE)
CarValue[CarValue == "Luxury:Current:TwentyThou"] = sample(VALUE, length(which(CarValue == "Luxury:Current:TwentyThou")), prob = c(0, 0, 0, 1, 0), replace = TRUE)
CarValue[CarValue == "Luxury:Current:FiftyThou"] = sample(VALUE, length(which(CarValue == "Luxury:Current:FiftyThou")), prob = c(0, 0, 0, 1, 0), replace = TRUE)
# CarValue (continued): remaining Luxury / SuperLuxury strata, keyed by
# "MakeModel:VehicleYear:Mileage".  Each key still present in CarValue is
# replaced by a draw from VALUE using that stratum's CPT row below.  The
# strata are processed in the original order, so the stream of sample()
# calls — and hence seeded reproducibility — is unchanged.
carvalue_cpt <- list(
  "Luxury:Current:Domino"          = c(0, 0, 0, 1, 0),
  "Luxury:Older:FiveThou"          = c(0.01, 0.09, 0.2, 0.7, 0),
  "Luxury:Older:TwentyThou"        = c(0.05, 0.15, 0.3, 0.5, 0),
  "Luxury:Older:FiftyThou"         = c(0.1, 0.3, 0.3, 0.3, 0),
  "Luxury:Older:Domino"            = c(0.2, 0.2, 0.3, 0.3, 0),
  "SuperLuxury:Current:FiveThou"   = c(0, 0, 0, 0, 1),
  "SuperLuxury:Current:TwentyThou" = c(0, 0, 0, 0, 1),
  "SuperLuxury:Current:FiftyThou"  = c(0, 0, 0, 0, 1),
  "SuperLuxury:Current:Domino"     = c(0, 0, 0, 0, 1),
  "SuperLuxury:Older:FiveThou"     = c(0.000001, 0.000001, 0.000001, 0.000001, 0.999996),
  "SuperLuxury:Older:TwentyThou"   = c(0.000001, 0.000001, 0.000001, 0.000001, 0.999996),
  "SuperLuxury:Older:FiftyThou"    = c(0.000001, 0.000001, 0.000001, 0.000001, 0.999996),
  "SuperLuxury:Older:Domino"       = c(0.000001, 0.000001, 0.000001, 0.000001, 0.999996)
)
for (stratum in names(carvalue_cpt)) {
  sel <- CarValue == stratum
  CarValue[sel] <- sample(VALUE, sum(sel), prob = carvalue_cpt[[stratum]], replace = TRUE)
}
# RuggedAuto: 3-level ruggedness (RUGGED) drawn per "MakeModel:VehicleYear"
# stratum.  Keys are built exactly as before (row-wise paste over the two
# parent columns) and replaced in the original order, so the sample() call
# sequence is identical to the line-by-line version.
RuggedAuto <- apply(cbind(MakeModel, VehicleYear), 1, paste, collapse = ":")
ruggedauto_cpt <- list(
  "SportsCar:Current"   = c(0.95, 0.04, 0.01),
  "SportsCar:Older"     = c(0.95, 0.04, 0.01),
  "Economy:Current"     = c(0.5, 0.5, 0),
  "Economy:Older"       = c(0.9, 0.1, 0),
  "FamilySedan:Current" = c(0.2, 0.6, 0.2),
  "FamilySedan:Older"   = c(0.05, 0.55, 0.4),
  "Luxury:Current"      = c(0.1, 0.6, 0.3),
  "Luxury:Older"        = c(0.1, 0.6, 0.3),
  "SuperLuxury:Current" = c(0.05, 0.55, 0.4),
  "SuperLuxury:Older"   = c(0.05, 0.55, 0.4)
)
for (stratum in names(ruggedauto_cpt)) {
  sel <- RuggedAuto == stratum
  RuggedAuto[sel] <- sample(RUGGED, sum(sel), prob = ruggedauto_cpt[[stratum]], replace = TRUE)
}
# Antilock: boolean (BOOL) antilock-brakes indicator drawn per
# "MakeModel:VehicleYear" stratum, CPT rows below, processed in the
# original order so the RNG stream is unchanged.
Antilock <- apply(cbind(MakeModel, VehicleYear), 1, paste, collapse = ":")
antilock_cpt <- list(
  "SportsCar:Current"   = c(0.9, 0.1),
  "SportsCar:Older"     = c(0.1, 0.9),
  "Economy:Current"     = c(0.001, 0.999),
  "Economy:Older"       = c(0, 1),
  "FamilySedan:Current" = c(0.4, 0.6),
  "FamilySedan:Older"   = c(0, 1),
  "Luxury:Current"      = c(0.99, 0.01),
  "Luxury:Older"        = c(0.3, 0.7),
  "SuperLuxury:Current" = c(0.99, 0.01),
  "SuperLuxury:Older"   = c(0.15, 0.85)
)
for (stratum in names(antilock_cpt)) {
  sel <- Antilock == stratum
  Antilock[sel] <- sample(BOOL, sum(sel), prob = antilock_cpt[[stratum]], replace = TRUE)
}
# Airbag: boolean (BOOL) airbag indicator drawn per "MakeModel:VehicleYear"
# stratum; all Current-year models get an airbag with probability 1.
Airbag <- apply(cbind(MakeModel, VehicleYear), 1, paste, collapse = ":")
airbag_cpt <- list(
  "SportsCar:Current"   = c(1, 0),
  "SportsCar:Older"     = c(0.1, 0.9),
  "Economy:Current"     = c(1, 0),
  "Economy:Older"       = c(0.05, 0.95),
  "FamilySedan:Current" = c(1, 0),
  "FamilySedan:Older"   = c(0.2, 0.8),
  "Luxury:Current"      = c(1, 0),
  "Luxury:Older"        = c(0.6, 0.4),
  "SuperLuxury:Current" = c(1, 0),
  "SuperLuxury:Older"   = c(0.1, 0.9)
)
for (stratum in names(airbag_cpt)) {
  sel <- Airbag == stratum
  Airbag[sel] <- sample(BOOL, sum(sel), prob = airbag_cpt[[stratum]], replace = TRUE)
}
# DrivQuality: 3-level driving quality (SKILL2) drawn per
# "DrivingSkill:RiskAversion" stratum; sub-standard drivers are always in
# the first category regardless of risk aversion.
DrivQuality <- apply(cbind(DrivingSkill, RiskAversion), 1, paste, collapse = ":")
drivquality_cpt <- list(
  "SubStandard:Psychopath"  = c(1, 0, 0),
  "SubStandard:Adventurous" = c(1, 0, 0),
  "SubStandard:Normal"      = c(1, 0, 0),
  "SubStandard:Cautious"    = c(1, 0, 0),
  "Normal:Psychopath"       = c(0.5, 0.2, 0.3),
  "Normal:Adventurous"      = c(0.3, 0.4, 0.3),
  "Normal:Normal"           = c(0, 1, 0),
  "Normal:Cautious"         = c(0, 0.8, 0.2),
  "Expert:Psychopath"       = c(0.3, 0.2, 0.5),
  "Expert:Adventurous"      = c(0.01, 0.01, 0.98),
  "Expert:Normal"           = c(0, 0, 1),
  "Expert:Cautious"         = c(0, 0, 1)
)
for (stratum in names(drivquality_cpt)) {
  sel <- DrivQuality == stratum
  DrivQuality[sel] <- sample(SKILL2, sum(sel), prob = drivquality_cpt[[stratum]], replace = TRUE)
}
# DrivHist: 3-level driving history (HIST) drawn per
# "DrivingSkill:RiskAversion" stratum, CPT rows below, in original order.
DrivHist <- apply(cbind(DrivingSkill, RiskAversion), 1, paste, collapse = ":")
drivhist_cpt <- list(
  "SubStandard:Psychopath"  = c(0.001, 0.004, 0.995),
  "SubStandard:Adventurous" = c(0.002, 0.008, 0.99),
  "SubStandard:Normal"      = c(0.03, 0.15, 0.82),
  "SubStandard:Cautious"    = c(0.3, 0.3, 0.4),
  "Normal:Psychopath"       = c(0.1, 0.3, 0.6),
  "Normal:Adventurous"      = c(0.5, 0.3, 0.2),
  "Normal:Normal"           = c(0.9, 0.07, 0.03),
  "Normal:Cautious"         = c(0.95, 0.04, 0.01),
  "Expert:Psychopath"       = c(0.3, 0.3, 0.4),
  "Expert:Adventurous"      = c(0.6, 0.3, 0.1),
  "Expert:Normal"           = c(0.99, 0.009999, 0.000001),
  "Expert:Cautious"         = c(0.999998, 0.000001, 0.000001)
)
for (stratum in names(drivhist_cpt)) {
  sel <- DrivHist == stratum
  DrivHist[sel] <- sample(HIST, sum(sel), prob = drivhist_cpt[[stratum]], replace = TRUE)
}
# Theft: boolean (BOOL) theft indicator drawn per
# "AntiTheft:HomeBase:CarValue" stratum.  Each paste()d key is replaced by
# a draw from BOOL with that stratum's conditional probabilities; theft
# probability rises with car value (except Million — presumably too
# conspicuous to steal) and is highest in the City, lowest when Secure.
Theft = apply(cbind(AntiTheft, HomeBase, CarValue), 1, paste, collapse = ":")
# --- AntiTheft == True strata ---
Theft[Theft == "True:Secure:FiveThou"] = sample(BOOL, length(which(Theft == "True:Secure:FiveThou")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "True:Secure:TenThou"] = sample(BOOL, length(which(Theft == "True:Secure:TenThou")), prob = c(0.000002, 0.999998), replace = TRUE)
Theft[Theft == "True:Secure:TwentyThou"] = sample(BOOL, length(which(Theft == "True:Secure:TwentyThou")), prob = c(0.000003, 0.999997), replace = TRUE)
Theft[Theft == "True:Secure:FiftyThou"] = sample(BOOL, length(which(Theft == "True:Secure:FiftyThou")), prob = c(0.000002, 0.999998), replace = TRUE)
Theft[Theft == "True:Secure:Million"] = sample(BOOL, length(which(Theft == "True:Secure:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "True:City:FiveThou"] = sample(BOOL, length(which(Theft == "True:City:FiveThou")), prob = c(0.0005, 0.9995), replace = TRUE)
Theft[Theft == "True:City:TenThou"] = sample(BOOL, length(which(Theft == "True:City:TenThou")), prob = c(0.002, 0.998), replace = TRUE)
Theft[Theft == "True:City:TwentyThou"] = sample(BOOL, length(which(Theft == "True:City:TwentyThou")), prob = c(0.005, 0.995), replace = TRUE)
Theft[Theft == "True:City:FiftyThou"] = sample(BOOL, length(which(Theft == "True:City:FiftyThou")), prob = c(0.005, 0.995), replace = TRUE)
Theft[Theft == "True:City:Million"] = sample(BOOL, length(which(Theft == "True:City:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "True:Suburb:FiveThou"] = sample(BOOL, length(which(Theft == "True:Suburb:FiveThou")), prob = c(0.00001, 0.99999), replace = TRUE)
Theft[Theft == "True:Suburb:TenThou"] = sample(BOOL, length(which(Theft == "True:Suburb:TenThou")), prob = c(0.0001, 0.9999), replace = TRUE)
Theft[Theft == "True:Suburb:TwentyThou"] = sample(BOOL, length(which(Theft == "True:Suburb:TwentyThou")), prob = c(0.0003, 0.9997), replace = TRUE)
Theft[Theft == "True:Suburb:FiftyThou"] = sample(BOOL, length(which(Theft == "True:Suburb:FiftyThou")), prob = c(0.0003, 0.9997), replace = TRUE)
Theft[Theft == "True:Suburb:Million"] = sample(BOOL, length(which(Theft == "True:Suburb:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "True:Rural:FiveThou"] = sample(BOOL, length(which(Theft == "True:Rural:FiveThou")), prob = c(0.00001, 0.99999), replace = TRUE)
Theft[Theft == "True:Rural:TenThou"] = sample(BOOL, length(which(Theft == "True:Rural:TenThou")), prob = c(0.00002, 0.99998), replace = TRUE)
Theft[Theft == "True:Rural:TwentyThou"] = sample(BOOL, length(which(Theft == "True:Rural:TwentyThou")), prob = c(0.00005, 0.99995), replace = TRUE)
Theft[Theft == "True:Rural:FiftyThou"] = sample(BOOL, length(which(Theft == "True:Rural:FiftyThou")), prob = c(0.00005, 0.99995), replace = TRUE)
Theft[Theft == "True:Rural:Million"] = sample(BOOL, length(which(Theft == "True:Rural:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
# --- AntiTheft == False strata (roughly double the theft probability) ---
Theft[Theft == "False:Secure:FiveThou"] = sample(BOOL, length(which(Theft == "False:Secure:FiveThou")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "False:Secure:TenThou"] = sample(BOOL, length(which(Theft == "False:Secure:TenThou")), prob = c(0.000002, 0.999998), replace = TRUE)
Theft[Theft == "False:Secure:TwentyThou"] = sample(BOOL, length(which(Theft == "False:Secure:TwentyThou")), prob = c(0.000003, 0.999997), replace = TRUE)
Theft[Theft == "False:Secure:FiftyThou"] = sample(BOOL, length(which(Theft == "False:Secure:FiftyThou")), prob = c(0.000002, 0.999998), replace = TRUE)
Theft[Theft == "False:Secure:Million"] = sample(BOOL, length(which(Theft == "False:Secure:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "False:City:FiveThou"] = sample(BOOL, length(which(Theft == "False:City:FiveThou")), prob = c(0.001, 0.999), replace = TRUE)
Theft[Theft == "False:City:TenThou"] = sample(BOOL, length(which(Theft == "False:City:TenThou")), prob = c(0.005, 0.995), replace = TRUE)
Theft[Theft == "False:City:TwentyThou"] = sample(BOOL, length(which(Theft == "False:City:TwentyThou")), prob = c(0.01, 0.99), replace = TRUE)
Theft[Theft == "False:City:FiftyThou"] = sample(BOOL, length(which(Theft == "False:City:FiftyThou")), prob = c(0.01, 0.99), replace = TRUE)
Theft[Theft == "False:City:Million"] = sample(BOOL, length(which(Theft == "False:City:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "False:Suburb:FiveThou"] = sample(BOOL, length(which(Theft == "False:Suburb:FiveThou")), prob = c(0.00001, 0.99999), replace = TRUE)
Theft[Theft == "False:Suburb:TenThou"] = sample(BOOL, length(which(Theft == "False:Suburb:TenThou")), prob = c(0.0002, 0.9998), replace = TRUE)
Theft[Theft == "False:Suburb:TwentyThou"] = sample(BOOL, length(which(Theft == "False:Suburb:TwentyThou")), prob = c(0.0005, 0.9995), replace = TRUE)
Theft[Theft == "False:Suburb:FiftyThou"] = sample(BOOL, length(which(Theft == "False:Suburb:FiftyThou")), prob = c(0.0005, 0.9995), replace = TRUE)
Theft[Theft == "False:Suburb:Million"] = sample(BOOL, length(which(Theft == "False:Suburb:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
Theft[Theft == "False:Rural:FiveThou"] = sample(BOOL, length(which(Theft == "False:Rural:FiveThou")), prob = c(0.00001, 0.99999), replace = TRUE)
Theft[Theft == "False:Rural:TenThou"] = sample(BOOL, length(which(Theft == "False:Rural:TenThou")), prob = c(0.0001, 0.9999), replace = TRUE)
Theft[Theft == "False:Rural:TwentyThou"] = sample(BOOL, length(which(Theft == "False:Rural:TwentyThou")), prob = c(0.0002, 0.9998), replace = TRUE)
Theft[Theft == "False:Rural:FiftyThou"] = sample(BOOL, length(which(Theft == "False:Rural:FiftyThou")), prob = c(0.0002, 0.9998), replace = TRUE)
Theft[Theft == "False:Rural:Million"] = sample(BOOL, length(which(Theft == "False:Rural:Million")), prob = c(0.000001, 0.999999), replace = TRUE)
# Accident: 4-level accident severity (ACC) drawn per
# "Antilock:Mileage:DrivQuality" stratum.  Severity increases with mileage
# and poor driving quality; antilock brakes shift mass toward milder
# outcomes.  NOTE(review): keys assume Mileage levels
# FiveThou/TwentyThou/FiftyThou/Domino and DrivQuality levels
# Poor/Normal/Excellent — defined earlier in the file.
Accident = apply(cbind(Antilock, Mileage, DrivQuality), 1, paste, collapse = ":")
# --- Antilock == True strata ---
Accident[Accident == "True:FiveThou:Poor"] = sample(ACC, length(which(Accident == "True:FiveThou:Poor")), prob = c(0.7, 0.2, 0.07, 0.03), replace = TRUE)
Accident[Accident == "True:FiveThou:Normal"] = sample(ACC, length(which(Accident == "True:FiveThou:Normal")), prob = c(0.99, 0.007, 0.002, 0.001), replace = TRUE)
Accident[Accident == "True:FiveThou:Excellent"] = sample(ACC, length(which(Accident == "True:FiveThou:Excellent")), prob = c(0.999, 0.0007, 0.0002, 0.0001), replace = TRUE)
Accident[Accident == "True:TwentyThou:Poor"] = sample(ACC, length(which(Accident == "True:TwentyThou:Poor")), prob = c(0.4, 0.3, 0.2, 0.1), replace = TRUE)
Accident[Accident == "True:TwentyThou:Normal"] = sample(ACC, length(which(Accident == "True:TwentyThou:Normal")), prob = c(0.98, 0.01, 0.005, 0.005), replace = TRUE)
Accident[Accident == "True:TwentyThou:Excellent"] = sample(ACC, length(which(Accident == "True:TwentyThou:Excellent")), prob = c(0.995, 0.003, 0.001, 0.001), replace = TRUE)
Accident[Accident == "True:FiftyThou:Poor"] = sample(ACC, length(which(Accident == "True:FiftyThou:Poor")), prob = c(0.3, 0.3, 0.2, 0.2), replace = TRUE)
Accident[Accident == "True:FiftyThou:Normal"] = sample(ACC, length(which(Accident == "True:FiftyThou:Normal")), prob = c(0.97, 0.02, 0.007, 0.003), replace = TRUE)
Accident[Accident == "True:FiftyThou:Excellent"] = sample(ACC, length(which(Accident == "True:FiftyThou:Excellent")), prob = c(0.99, 0.007, 0.002, 0.001), replace = TRUE)
Accident[Accident == "True:Domino:Poor"] = sample(ACC, length(which(Accident == "True:Domino:Poor")), prob = c(0.2, 0.2, 0.3, 0.3), replace = TRUE)
Accident[Accident == "True:Domino:Normal"] = sample(ACC, length(which(Accident == "True:Domino:Normal")), prob = c(0.95, 0.03, 0.01, 0.01), replace = TRUE)
Accident[Accident == "True:Domino:Excellent"] = sample(ACC, length(which(Accident == "True:Domino:Excellent")), prob = c(0.985, 0.01, 0.003, 0.002), replace = TRUE)
# --- Antilock == False strata ---
Accident[Accident == "False:FiveThou:Poor"] = sample(ACC, length(which(Accident == "False:FiveThou:Poor")), prob = c(0.6, 0.2, 0.1, 0.1), replace = TRUE)
Accident[Accident == "False:FiveThou:Normal"] = sample(ACC, length(which(Accident == "False:FiveThou:Normal")), prob = c(0.98, 0.01, 0.005, 0.005), replace = TRUE)
Accident[Accident == "False:FiveThou:Excellent"] = sample(ACC, length(which(Accident == "False:FiveThou:Excellent")), prob = c(0.995, 0.003, 0.001, 0.001), replace = TRUE)
Accident[Accident == "False:TwentyThou:Poor"] = sample(ACC, length(which(Accident == "False:TwentyThou:Poor")), prob = c(0.3, 0.2, 0.2, 0.3), replace = TRUE)
Accident[Accident == "False:TwentyThou:Normal"] = sample(ACC, length(which(Accident == "False:TwentyThou:Normal")), prob = c(0.96, 0.02, 0.015, 0.005), replace = TRUE)
Accident[Accident == "False:TwentyThou:Excellent"] = sample(ACC, length(which(Accident == "False:TwentyThou:Excellent")), prob = c(0.99, 0.007, 0.002, 0.001), replace = TRUE)
Accident[Accident == "False:FiftyThou:Poor"] = sample(ACC, length(which(Accident == "False:FiftyThou:Poor")), prob = c(0.2, 0.2, 0.2, 0.4), replace = TRUE)
Accident[Accident == "False:FiftyThou:Normal"] = sample(ACC, length(which(Accident == "False:FiftyThou:Normal")), prob = c(0.95, 0.03, 0.015, 0.005), replace = TRUE)
Accident[Accident == "False:FiftyThou:Excellent"] = sample(ACC, length(which(Accident == "False:FiftyThou:Excellent")), prob = c(0.98, 0.01, 0.005, 0.005), replace = TRUE)
Accident[Accident == "False:Domino:Poor"] = sample(ACC, length(which(Accident == "False:Domino:Poor")), prob = c(0.1, 0.1, 0.3, 0.5), replace = TRUE)
Accident[Accident == "False:Domino:Normal"] = sample(ACC, length(which(Accident == "False:Domino:Normal")), prob = c(0.94, 0.03, 0.02, 0.01), replace = TRUE)
Accident[Accident == "False:Domino:Excellent"] = sample(ACC, length(which(Accident == "False:Domino:Excellent")), prob = c(0.98, 0.01, 0.007, 0.003), replace = TRUE)
# Cushioning: 4-level occupant-protection quality (SKILL3) drawn per
# "RuggedAuto:Airbag" stratum; tanks with airbags always get the top level.
Cushioning <- apply(cbind(RuggedAuto, Airbag), 1, paste, collapse = ":")
cushioning_cpt <- list(
  "EggShell:True"  = c(0.5, 0.3, 0.2, 0),
  "EggShell:False" = c(0.7, 0.3, 0, 0),
  "Football:True"  = c(0, 0.1, 0.6, 0.3),
  "Football:False" = c(0.1, 0.6, 0.3, 0),
  "Tank:True"      = c(0, 0, 0, 1),
  "Tank:False"     = c(0, 0, 0.7, 0.3)
)
for (stratum in names(cushioning_cpt)) {
  sel <- Cushioning == stratum
  Cushioning[sel] <- sample(SKILL3, sum(sel), prob = cushioning_cpt[[stratum]], replace = TRUE)
}
# ThisCarDam: 4-level damage to the insured car (ACC) drawn per
# "Accident:RuggedAuto" stratum; no accident means no damage, and a more
# rugged auto shifts mass toward lighter damage.
ThisCarDam <- apply(cbind(Accident, RuggedAuto), 1, paste, collapse = ":")
thiscardam_cpt <- list(
  "None:EggShell"     = c(1, 0, 0, 0),
  "None:Football"     = c(1, 0, 0, 0),
  "None:Tank"         = c(1, 0, 0, 0),
  "Mild:EggShell"     = c(0.001, 0.9, 0.098, 0.001),
  "Mild:Football"     = c(0.2, 0.75, 0.049999, 0.000001),
  "Mild:Tank"         = c(0.7, 0.29, 0.009999, 0.000001),
  "Moderate:EggShell" = c(0.000001, 0.000999, 0.7, 0.299),
  "Moderate:Football" = c(0.001, 0.099, 0.8, 0.1),
  "Moderate:Tank"     = c(0.05, 0.6, 0.3, 0.05),
  "Severe:EggShell"   = c(0.000001, 0.000009, 0.00009, 0.9999),
  "Severe:Football"   = c(0.000001, 0.000999, 0.009, 0.99),
  "Severe:Tank"       = c(0.05, 0.2, 0.2, 0.55)
)
for (stratum in names(thiscardam_cpt)) {
  sel <- ThisCarDam == stratum
  ThisCarDam[sel] <- sample(ACC, sum(sel), prob = thiscardam_cpt[[stratum]], replace = TRUE)
}
# OtherCarCost: 4-level cost to the other car (VALUE2) drawn per
# "Accident:RuggedAuto" stratum; a more rugged insured auto inflicts more
# damage on the other vehicle.
OtherCarCost <- apply(cbind(Accident, RuggedAuto), 1, paste, collapse = ":")
othercarcost_cpt <- list(
  "None:EggShell"     = c(1, 0, 0, 0),
  "None:Football"     = c(1, 0, 0, 0),
  "None:Tank"         = c(1, 0, 0, 0),
  "Mild:EggShell"     = c(0.99, 0.005, 0.00499, 0.00001),
  "Mild:Football"     = c(0.98, 0.01, 0.009985, 0.00005),
  "Mild:Tank"         = c(0.95, 0.03, 0.01998, 0.00002),
  "Moderate:EggShell" = c(0.6, 0.2, 0.19998, 0.00002),
  "Moderate:Football" = c(0.5, 0.2, 0.29997, 0.00003),
  "Moderate:Tank"     = c(0.4, 0.3, 0.29996, 0.00004),
  "Severe:EggShell"   = c(0.2, 0.4, 0.39996, 0.00004),
  "Severe:Football"   = c(0.1, 0.5, 0.39994, 0.00006),
  "Severe:Tank"       = c(0.005, 0.55, 0.4449, 0.0001)
)
for (stratum in names(othercarcost_cpt)) {
  sel <- OtherCarCost == stratum
  OtherCarCost[sel] <- sample(VALUE2, sum(sel), prob = othercarcost_cpt[[stratum]], replace = TRUE)
}
# ILiCost: 4-level liability cost (VALUE2) conditioned only on Accident
# severity; the Accident values themselves serve as the stratum keys.
ILiCost <- Accident
ilicost_cpt <- list(
  "None"     = c(1, 0, 0, 0),
  "Mild"     = c(0.999, 0.000998, 0.000001, 0.000001),
  "Moderate" = c(0.9, 0.05, 0.03, 0.02),
  "Severe"   = c(0.8, 0.1, 0.06, 0.04)
)
for (stratum in names(ilicost_cpt)) {
  sel <- ILiCost == stratum
  ILiCost[sel] <- sample(VALUE2, sum(sel), prob = ilicost_cpt[[stratum]], replace = TRUE)
}
# MedCost: 4-level medical cost (VALUE2) drawn per
# "Accident:Age:Cushioning" stratum.  No accident always yields the lowest
# cost level; otherwise cost rises with accident severity and age, and
# falls with better cushioning.
MedCost = apply(cbind(Accident, Age, Cushioning), 1, paste, collapse = ":")
# --- Accident == None: deterministic lowest cost for every age/cushioning ---
MedCost[MedCost == "None:Adolescent:Poor"] = sample(VALUE2, length(which(MedCost == "None:Adolescent:Poor")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Adolescent:Fair"] = sample(VALUE2, length(which(MedCost == "None:Adolescent:Fair")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Adolescent:Good"] = sample(VALUE2, length(which(MedCost == "None:Adolescent:Good")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Adolescent:Excellent"] = sample(VALUE2, length(which(MedCost == "None:Adolescent:Excellent")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Adult:Poor"] = sample(VALUE2, length(which(MedCost == "None:Adult:Poor")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Adult:Fair"] = sample(VALUE2, length(which(MedCost == "None:Adult:Fair")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Adult:Good"] = sample(VALUE2, length(which(MedCost == "None:Adult:Good")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Adult:Excellent"] = sample(VALUE2, length(which(MedCost == "None:Adult:Excellent")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Senior:Poor"] = sample(VALUE2, length(which(MedCost == "None:Senior:Poor")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Senior:Fair"] = sample(VALUE2, length(which(MedCost == "None:Senior:Fair")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Senior:Good"] = sample(VALUE2, length(which(MedCost == "None:Senior:Good")), prob = c(1, 0, 0, 0), replace = TRUE)
MedCost[MedCost == "None:Senior:Excellent"] = sample(VALUE2, length(which(MedCost == "None:Senior:Excellent")), prob = c(1, 0, 0, 0), replace = TRUE)
# --- Accident == Mild (adolescents and adults share the same rows) ---
MedCost[MedCost == "Mild:Adolescent:Poor"] = sample(VALUE2, length(which(MedCost == "Mild:Adolescent:Poor")), prob = c(0.96, 0.03, 0.009, 0.001), replace = TRUE)
MedCost[MedCost == "Mild:Adolescent:Fair"] = sample(VALUE2, length(which(MedCost == "Mild:Adolescent:Fair")), prob = c(0.98, 0.019, 0.0009, 0.0001), replace = TRUE)
MedCost[MedCost == "Mild:Adolescent:Good"] = sample(VALUE2, length(which(MedCost == "Mild:Adolescent:Good")), prob = c(0.99, 0.0099, 0.00009, 0.00001), replace = TRUE)
MedCost[MedCost == "Mild:Adolescent:Excellent"] = sample(VALUE2, length(which(MedCost == "Mild:Adolescent:Excellent")), prob = c(0.999, 0.00099, 0.000009, 0.000001), replace = TRUE)
MedCost[MedCost == "Mild:Adult:Poor"] = sample(VALUE2, length(which(MedCost == "Mild:Adult:Poor")), prob = c(0.96, 0.03, 0.009, 0.001), replace = TRUE)
MedCost[MedCost == "Mild:Adult:Fair"] = sample(VALUE2, length(which(MedCost == "Mild:Adult:Fair")), prob = c(0.98, 0.019, 0.0009, 0.0001), replace = TRUE)
MedCost[MedCost == "Mild:Adult:Good"] = sample(VALUE2, length(which(MedCost == "Mild:Adult:Good")), prob = c(0.99, 0.0099, 0.00009, 0.00001), replace = TRUE)
MedCost[MedCost == "Mild:Adult:Excellent"] = sample(VALUE2, length(which(MedCost == "Mild:Adult:Excellent")), prob = c(0.999, 0.00099, 0.000009, 0.000001), replace = TRUE)
MedCost[MedCost == "Mild:Senior:Poor"] = sample(VALUE2, length(which(MedCost == "Mild:Senior:Poor")), prob = c(0.9, 0.07, 0.02, 0.01), replace = TRUE)
MedCost[MedCost == "Mild:Senior:Fair"] = sample(VALUE2, length(which(MedCost == "Mild:Senior:Fair")), prob = c(0.95, 0.04, 0.007, 0.003), replace = TRUE)
MedCost[MedCost == "Mild:Senior:Good"] = sample(VALUE2, length(which(MedCost == "Mild:Senior:Good")), prob = c(0.97, 0.025, 0.003, 0.002), replace = TRUE)
MedCost[MedCost == "Mild:Senior:Excellent"] = sample(VALUE2, length(which(MedCost == "Mild:Senior:Excellent")), prob = c(0.99, 0.007, 0.002, 0.001), replace = TRUE)
# --- Accident == Moderate ---
MedCost[MedCost == "Moderate:Adolescent:Poor"] = sample(VALUE2, length(which(MedCost == "Moderate:Adolescent:Poor")), prob = c(0.5, 0.2, 0.2, 0.1), replace = TRUE)
MedCost[MedCost == "Moderate:Adolescent:Fair"] = sample(VALUE2, length(which(MedCost == "Moderate:Adolescent:Fair")), prob = c(0.8, 0.15, 0.03, 0.02), replace = TRUE)
MedCost[MedCost == "Moderate:Adolescent:Good"] = sample(VALUE2, length(which(MedCost == "Moderate:Adolescent:Good")), prob = c(0.95, 0.02, 0.02, 0.01), replace = TRUE)
MedCost[MedCost == "Moderate:Adolescent:Excellent"] = sample(VALUE2, length(which(MedCost == "Moderate:Adolescent:Excellent")), prob = c(0.99, 0.007, 0.002, 0.001), replace = TRUE)
MedCost[MedCost == "Moderate:Adult:Poor"] = sample(VALUE2, length(which(MedCost == "Moderate:Adult:Poor")), prob = c(0.5, 0.2, 0.2, 0.1), replace = TRUE)
MedCost[MedCost == "Moderate:Adult:Fair"] = sample(VALUE2, length(which(MedCost == "Moderate:Adult:Fair")), prob = c(0.8, 0.15, 0.03, 0.02), replace = TRUE)
MedCost[MedCost == "Moderate:Adult:Good"] = sample(VALUE2, length(which(MedCost == "Moderate:Adult:Good")), prob = c(0.95, 0.02, 0.02, 0.01), replace = TRUE)
MedCost[MedCost == "Moderate:Adult:Excellent"] = sample(VALUE2, length(which(MedCost == "Moderate:Adult:Excellent")), prob = c(0.99, 0.007, 0.002, 0.001), replace = TRUE)
MedCost[MedCost == "Moderate:Senior:Poor"] = sample(VALUE2, length(which(MedCost == "Moderate:Senior:Poor")), prob = c(0.3, 0.3, 0.2, 0.2), replace = TRUE)
MedCost[MedCost == "Moderate:Senior:Fair"] = sample(VALUE2, length(which(MedCost == "Moderate:Senior:Fair")), prob = c(0.5, 0.2, 0.2, 0.1), replace = TRUE)
MedCost[MedCost == "Moderate:Senior:Good"] = sample(VALUE2, length(which(MedCost == "Moderate:Senior:Good")), prob = c(0.9, 0.07, 0.02, 0.01), replace = TRUE)
MedCost[MedCost == "Moderate:Senior:Excellent"] = sample(VALUE2, length(which(MedCost == "Moderate:Senior:Excellent")), prob = c(0.95, 0.03, 0.01, 0.01), replace = TRUE)
# --- Accident == Severe ---
MedCost[MedCost == "Severe:Adolescent:Poor"] = sample(VALUE2, length(which(MedCost == "Severe:Adolescent:Poor")), prob = c(0.3, 0.3, 0.2, 0.2), replace = TRUE)
MedCost[MedCost == "Severe:Adolescent:Fair"] = sample(VALUE2, length(which(MedCost == "Severe:Adolescent:Fair")), prob = c(0.5, 0.2, 0.2, 0.1), replace = TRUE)
MedCost[MedCost == "Severe:Adolescent:Good"] = sample(VALUE2, length(which(MedCost == "Severe:Adolescent:Good")), prob = c(0.9, 0.07, 0.02, 0.01), replace = TRUE)
MedCost[MedCost == "Severe:Adolescent:Excellent"] = sample(VALUE2, length(which(MedCost == "Severe:Adolescent:Excellent")), prob = c(0.95, 0.03, 0.01, 0.01), replace = TRUE)
MedCost[MedCost == "Severe:Adult:Poor"] = sample(VALUE2, length(which(MedCost == "Severe:Adult:Poor")), prob = c(0.3, 0.3, 0.2, 0.2), replace = TRUE)
MedCost[MedCost == "Severe:Adult:Fair"] = sample(VALUE2, length(which(MedCost == "Severe:Adult:Fair")), prob = c(0.5, 0.2, 0.2, 0.1), replace = TRUE)
MedCost[MedCost == "Severe:Adult:Good"] = sample(VALUE2, length(which(MedCost == "Severe:Adult:Good")), prob = c(0.9, 0.07, 0.02, 0.01), replace = TRUE)
MedCost[MedCost == "Severe:Adult:Excellent"] = sample(VALUE2, length(which(MedCost == "Severe:Adult:Excellent")), prob = c(0.95, 0.03, 0.01, 0.01), replace = TRUE)
MedCost[MedCost == "Severe:Senior:Poor"] = sample(VALUE2, length(which(MedCost == "Severe:Senior:Poor")), prob = c(0.2, 0.2, 0.3, 0.3), replace = TRUE)
MedCost[MedCost == "Severe:Senior:Fair"] = sample(VALUE2, length(which(MedCost == "Severe:Senior:Fair")), prob = c(0.3, 0.3, 0.2, 0.2), replace = TRUE)
MedCost[MedCost == "Severe:Senior:Good"] = sample(VALUE2, length(which(MedCost == "Severe:Senior:Good")), prob = c(0.6, 0.3, 0.07, 0.03), replace = TRUE)
MedCost[MedCost == "Severe:Senior:Excellent"] = sample(VALUE2, length(which(MedCost == "Severe:Senior:Excellent")), prob = c(0.9, 0.05, 0.03, 0.02), replace = TRUE)
# ThisCarCost: cost of damage to the insured car, conditioned on three parents.
# Step 1: build per-row keys "Damage:CarValue:Theft" from the parent columns.
ThisCarCost = apply(cbind(ThisCarDam, CarValue, Theft), 1, paste, collapse = ":")
# Step 2: replace each parent configuration with a draw from VALUE2 using its
# conditional distribution. Note each statement mutates ThisCarCost in place,
# so the statement order must be preserved to keep the RNG stream reproducible.
# --- No damage: cost is low unless the car was stolen (Theft == True) ---
ThisCarCost[ThisCarCost == "None:FiveThou:True"] = sample(VALUE2, length(which(ThisCarCost == "None:FiveThou:True")), prob = c(0.2, 0.8, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:FiveThou:False"] = sample(VALUE2, length(which(ThisCarCost == "None:FiveThou:False")), prob = c(1, 0, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:TenThou:True"] = sample(VALUE2, length(which(ThisCarCost == "None:TenThou:True")), prob = c(0.05, 0.95, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:TenThou:False"] = sample(VALUE2, length(which(ThisCarCost == "None:TenThou:False")), prob = c(1, 0, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:TwentyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "None:TwentyThou:True")), prob = c(0.04, 0.01, 0.95, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:TwentyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "None:TwentyThou:False")), prob = c(1, 0, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:FiftyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "None:FiftyThou:True")), prob = c(0.04, 0.01, 0.95, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:FiftyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "None:FiftyThou:False")), prob = c(1, 0, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "None:Million:True"] = sample(VALUE2, length(which(ThisCarCost == "None:Million:True")), prob = c(0.04, 0.01, 0.2, 0.75), replace = TRUE)
ThisCarCost[ThisCarCost == "None:Million:False"] = sample(VALUE2, length(which(ThisCarCost == "None:Million:False")), prob = c(1, 0, 0, 0), replace = TRUE)
# --- Mild damage ---
ThisCarCost[ThisCarCost == "Mild:FiveThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Mild:FiveThou:True")), prob = c(0.15, 0.85, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:FiveThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Mild:FiveThou:False")), prob = c(0.95, 0.05, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:TenThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Mild:TenThou:True")), prob = c(0.03, 0.97, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:TenThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Mild:TenThou:False")), prob = c(0.95, 0.05, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:TwentyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Mild:TwentyThou:True")), prob = c(0.03, 0.02, 0.95, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:TwentyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Mild:TwentyThou:False")), prob = c(0.99, 0.01, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:FiftyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Mild:FiftyThou:True")), prob = c(0.03, 0.02, 0.95, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:FiftyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Mild:FiftyThou:False")), prob = c(0.99, 0.01, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:Million:True"] = sample(VALUE2, length(which(ThisCarCost == "Mild:Million:True")), prob = c(0.02, 0.03, 0.25, 0.7), replace = TRUE)
ThisCarCost[ThisCarCost == "Mild:Million:False"] = sample(VALUE2, length(which(ThisCarCost == "Mild:Million:False")), prob = c(0.98, 0.01, 0.01, 0), replace = TRUE)
# --- Moderate damage ---
ThisCarCost[ThisCarCost == "Moderate:FiveThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:FiveThou:True")), prob = c(0.05, 0.95, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:FiveThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:FiveThou:False")), prob = c(0.25, 0.75, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:TenThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:TenThou:True")), prob = c(0.01, 0.99, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:TenThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:TenThou:False")), prob = c(0.15, 0.85, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:TwentyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:TwentyThou:True")), prob = c(0.001, 0.001, 0.998, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:TwentyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:TwentyThou:False")), prob = c(0.01, 0.01, 0.98, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:FiftyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:FiftyThou:True")), prob = c(0.001, 0.001, 0.998, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:FiftyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:FiftyThou:False")), prob = c(0.005, 0.005, 0.99, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:Million:True"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:Million:True")), prob = c(0.001, 0.001, 0.018, 0.98), replace = TRUE)
ThisCarCost[ThisCarCost == "Moderate:Million:False"] = sample(VALUE2, length(which(ThisCarCost == "Moderate:Million:False")), prob = c(0.003, 0.003, 0.044, 0.95), replace = TRUE)
# --- Severe damage: cost tracks car value almost deterministically ---
ThisCarCost[ThisCarCost == "Severe:FiveThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Severe:FiveThou:True")), prob = c(0.03, 0.97, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:FiveThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Severe:FiveThou:False")), prob = c(0.05, 0.95, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:TenThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Severe:TenThou:True")), prob = c(0.000001, 0.999999, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:TenThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Severe:TenThou:False")), prob = c(0.01, 0.99, 0, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:TwentyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Severe:TwentyThou:True")), prob = c(0.000001, 0.000001, 0.999998, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:TwentyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Severe:TwentyThou:False")), prob = c(0.005, 0.005, 0.99, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:FiftyThou:True"] = sample(VALUE2, length(which(ThisCarCost == "Severe:FiftyThou:True")), prob = c(0.000001, 0.000001, 0.999998, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:FiftyThou:False"] = sample(VALUE2, length(which(ThisCarCost == "Severe:FiftyThou:False")), prob = c(0.001, 0.001, 0.998, 0), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:Million:True"] = sample(VALUE2, length(which(ThisCarCost == "Severe:Million:True")), prob = c(0.000001, 0.000001, 0.009998, 0.99), replace = TRUE)
ThisCarCost[ThisCarCost == "Severe:Million:False"] = sample(VALUE2, length(which(ThisCarCost == "Severe:Million:False")), prob = c(0.000001, 0.000001, 0.029998, 0.97), replace = TRUE)
# PropCost: total property cost, conditioned on the two cost parents.
# Keys are "OtherCarCost:ThisCarCost"; PropCost is roughly the (stochastic)
# maximum/sum of the two, drawn from VALUE2.
# NOTE(review): these keys use level names Thousand/TenThou/HundredThou/Million,
# which implies VALUE2 holds exactly those four levels — confirm against its
# definition earlier in the file.
PropCost = apply(cbind(OtherCarCost, ThisCarCost), 1, paste, collapse = ":")
PropCost[PropCost == "Thousand:Thousand"] = sample(VALUE2, length(which(PropCost == "Thousand:Thousand")), prob = c(0.7, 0.3, 0, 0), replace = TRUE)
PropCost[PropCost == "Thousand:TenThou"] = sample(VALUE2, length(which(PropCost == "Thousand:TenThou")), prob = c(0, 0.95, 0.05, 0), replace = TRUE)
PropCost[PropCost == "Thousand:HundredThou"] = sample(VALUE2, length(which(PropCost == "Thousand:HundredThou")), prob = c(0, 0, 0.98, 0.02), replace = TRUE)
PropCost[PropCost == "Thousand:Million"] = sample(VALUE2, length(which(PropCost == "Thousand:Million")), prob = c(0, 0, 0, 1), replace = TRUE)
PropCost[PropCost == "TenThou:Thousand"] = sample(VALUE2, length(which(PropCost == "TenThou:Thousand")), prob = c(0, 0.95, 0.05, 0), replace = TRUE)
PropCost[PropCost == "TenThou:TenThou"] = sample(VALUE2, length(which(PropCost == "TenThou:TenThou")), prob = c(0, 0.6, 0.4, 0), replace = TRUE)
PropCost[PropCost == "TenThou:HundredThou"] = sample(VALUE2, length(which(PropCost == "TenThou:HundredThou")), prob = c(0, 0, 0.95, 0.05), replace = TRUE)
PropCost[PropCost == "TenThou:Million"] = sample(VALUE2, length(which(PropCost == "TenThou:Million")), prob = c(0, 0, 0, 1), replace = TRUE)
PropCost[PropCost == "HundredThou:Thousand"] = sample(VALUE2, length(which(PropCost == "HundredThou:Thousand")), prob = c(0, 0, 0.98, 0.02), replace = TRUE)
PropCost[PropCost == "HundredThou:TenThou"] = sample(VALUE2, length(which(PropCost == "HundredThou:TenThou")), prob = c(0, 0, 0.8, 0.2), replace = TRUE)
PropCost[PropCost == "HundredThou:HundredThou"] = sample(VALUE2, length(which(PropCost == "HundredThou:HundredThou")), prob = c(0, 0, 0.6, 0.4), replace = TRUE)
PropCost[PropCost == "HundredThou:Million"] = sample(VALUE2, length(which(PropCost == "HundredThou:Million")), prob = c(0, 0, 0, 1), replace = TRUE)
PropCost[PropCost == "Million:Thousand"] = sample(VALUE2, length(which(PropCost == "Million:Thousand")), prob = c(0, 0, 0, 1), replace = TRUE)
PropCost[PropCost == "Million:TenThou"] = sample(VALUE2, length(which(PropCost == "Million:TenThou")), prob = c(0, 0, 0, 1), replace = TRUE)
PropCost[PropCost == "Million:HundredThou"] = sample(VALUE2, length(which(PropCost == "Million:HundredThou")), prob = c(0, 0, 0, 1), replace = TRUE)
PropCost[PropCost == "Million:Million"] = sample(VALUE2, length(which(PropCost == "Million:Million")), prob = c(0, 0, 0, 1), replace = TRUE)
# Assemble the final simulated dataset: every generated character vector is
# converted to a factor with its full level set (BOOL, AGE, VALUE2, ...),
# so levels with zero observations are still represented in the factor.
# NOTE(review): the level vectors (BOOL, AGE, SEC, ...) are defined earlier
# in the file — factor() silently produces NA for any value not in `levels`,
# so a mismatch there would surface as NAs here.
insurance = data.frame(
  GoodStudent = factor(GoodStudent, levels = BOOL),
  Age = factor(Age, levels = AGE),
  SocioEcon = factor(SocioEcon, levels = SEC),
  RiskAversion = factor(RiskAversion, levels = RISK),
  VehicleYear = factor(VehicleYear, levels = YEAR),
  ThisCarDam = factor(ThisCarDam, levels = ACC),
  RuggedAuto = factor(RuggedAuto, levels = RUGGED),
  Accident = factor(Accident, levels = ACC),
  MakeModel = factor(MakeModel, levels = MODEL),
  DrivQuality = factor(DrivQuality, levels = SKILL2),
  Mileage = factor(Mileage, levels = MIL),
  Antilock = factor(Antilock, levels = BOOL),
  DrivingSkill = factor(DrivingSkill, levels = SKILL),
  SeniorTrain = factor(SeniorTrain, levels = BOOL),
  ThisCarCost = factor(ThisCarCost, levels = VALUE2),
  Theft = factor(Theft, levels = BOOL),
  CarValue = factor(CarValue, levels = VALUE),
  HomeBase = factor(HomeBase, levels = HOME),
  AntiTheft = factor(AntiTheft, levels = BOOL),
  PropCost = factor(PropCost, levels = VALUE2),
  OtherCarCost = factor(OtherCarCost, levels = VALUE2),
  OtherCar = factor(OtherCar, levels = BOOL),
  MedCost = factor(MedCost, levels = VALUE2),
  Cushioning = factor(Cushioning, levels = SKILL3),
  Airbag = factor(Airbag, levels = BOOL),
  ILiCost = factor(ILiCost, levels = VALUE2),
  DrivHist = factor(DrivHist, levels = HIST)
)
|
031d02e9ca70cb5c60b3ef3fb0f31e694a0c4e7e | 0d88818bc32ed3d26629b760dbcb88b2f58d437d | /rankhospital.R | 93a5e3cb049f24e32a3f36614819ee3abe03e8a5 | [] | no_license | ratiubogdan/ProgrammingAssignment3 | cd29ca351ff2f486494736e0ed0ed205a00f4c4d | 2954fd7d8f1b421f29f637c7d509467036d888f5 | refs/heads/master | 2016-09-06T04:37:58.543458 | 2015-05-27T18:23:39 | 2015-05-27T18:23:39 | 36,307,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,045 | r | rankhospital.R | rankhospital <- function(state, cond, num = "best") {
## Read outcome data
## (Coursera ProgrammingAssignment3 layout; `F` is risky shorthand for FALSE)
outcome <- read.csv("outcome-of-care-measures.csv", stringsAsFactors = F, na.strings = "Not Available")
## Check that state and outcome are valid
## NOTE(review): column 7 presumably holds the state abbreviation — confirm
## against the CSV header before relying on the positional index.
if (state %in% outcome[,7]) {
#nothing
} else {
stop("invalid state")
}
conditions <- c("heart attack", "heart failure", "pneumonia")
if (cond %in% conditions) {
#nothing
} else {
stop("invalid outcome")
}
## Return hospital name in that state with num 30-day death
## rate
## Map the condition to its 30-day mortality column index (11/17/23).
if (cond == "heart attack") {
poz <- 11
} else if (cond == "heart failure") {
poz <- 17
} else {
poz <- 23
}
stateoutcome <- subset(outcome, outcome$State == state)
## Order by mortality rate, then hospital name (column 2) as tiebreaker;
## na.last = NA drops hospitals with missing rates from the ranking.
## NOTE(review): `list` shadows base::list within this function — rename candidate.
list <- order(stateoutcome[,poz], stateoutcome[,2], na.last = NA)
## NOTE(review): results are print()ed rather than return()ed, and the
## out-of-range branch prints the *string* "NA" instead of the NA value —
## callers comparing against real NA will not match. Left as-is for grading
## compatibility; confirm intended contract before changing.
if (num == "best") {
print(stateoutcome[list[1],2])
} else if (num == "worst") {
print(stateoutcome[list[length(list)],2])
} else if (num > length(list)) {
print("NA")
} else {
print(stateoutcome[list[num],2])
}
} |
d0542a3733546a473611f998051682aea054a935 | bca565aa1ece61dbcd89f064c32739aa62ab6c8d | /validation.R | bc944faa60ab1639d70fb42f093a3f435eb3bc27 | [] | no_license | drewdahlquist/Intro-to-Statistical-Learning | b98fb40bfa0f86dd021930530a9c04e7234589bd | 0813103259e2ddac69a41b75d792a2444234f81f | refs/heads/master | 2022-11-24T06:06:55.292995 | 2020-07-31T04:26:17 | 2020-07-31T04:26:17 | 283,941,269 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,444 | r | validation.R | require(ISLR)
require(boot)
# Open the help page for boot::cv.glm (interactive use only).
?cv.glm
# Quick look at the mpg ~ horsepower relationship in the ISLR Auto data.
plot(mpg~horsepower,data=Auto)
### Leave-one-out CV
# A glm() with default family = gaussian is an ordinary linear regression,
# but keeps compatibility with cv.glm().
glm.fit=glm(mpg~horsepower,data=Auto) #fit linear model
# $delta: raw CV estimate and its bias-corrected version.
cv.glm(Auto,glm.fit)$delta #cross validation, no args for "K" so cv.glm does LOOCV
# Write optimized function for LOOCV, from ISL (5.2)
# Leave-one-out CV error for a linear model via the hat-matrix shortcut
# (ISL eq. 5.2): LOOCV MSE = mean((e_i / (1 - h_i))^2), where h_i is the
# i-th leverage. Avoids refitting the model n times.
#
# Args:
#   fit: a fitted lm/glm object.
# Returns: the LOOCV mean squared error (a single number).
loocv <- function(fit) {
  leverage <- lm.influence(fit)$h       # diagonal of the hat matrix
  loo_resid <- residuals(fit) / (1 - leverage)
  mean(loo_resid^2)
}
# Sanity check: shortcut LOOCV should match cv.glm's estimate above.
loocv(glm.fit) #test function
# Compare polynomial degrees 1..5 for mpg ~ poly(horsepower, d) under LOOCV.
cv.error=rep(0,5) #vector for collecting errors
degree=1:5 #iterator for degree
for(d in degree){
glm.fit=glm(mpg~poly(horsepower,d),data=Auto) #fit model for given polynomial degree
cv.error[d]=loocv(glm.fit) #record error
}
plot(degree,cv.error,type="b") #plot errors
### 10-fold CV
# Same comparison with K = 10 folds; delta[1] is the raw CV estimate.
cv.error10=rep(0,5)
for(d in degree){
glm.fit=glm(mpg~poly(horsepower,d),data=Auto)
cv.error10[d]=cv.glm(Auto,glm.fit,K=10)$delta[1] #record error
}
# Overlay the 10-fold curve on the LOOCV plot for comparison.
lines(degree,cv.error10,type="b",col="red")
### Bootstrap
# Minimum-variance portfolio weight for two assets (ISL section 5.2):
#   alpha = (var(Y) - cov(X, Y)) / (var(X) + var(Y) - 2 * cov(X, Y))
#
# Args:
#   x, y: numeric return vectors of equal length.
# Returns: the fraction of wealth to invest in asset X.
alpha <- function(x, y) {
  cov_xy <- cov(x, y)
  (var(y) - cov_xy) / (var(x) + var(y) - 2 * cov_xy)
}
# Point estimate of alpha on the full ISLR Portfolio data (100 observations).
alpha(Portfolio$X,Portfolio$Y)
# What is the standard error of alpha?
# Bootstrap statistic wrapper with the (data, index) signature expected by
# boot::boot(): computes alpha on the rows of `data` selected by `index`.
alpha.fn <- function(data, index) {
  resampled <- data[index, ]
  alpha(resampled$X, resampled$Y)
}
# Identity resample (rows 1..100) should reproduce the full-sample alpha.
alpha.fn(Portfolio,1:100)
set.seed(1) #bootstrap involves randomness
# One manual bootstrap replicate: sample 100 row indices with replacement.
alpha.fn(Portfolio,sample(1:100,100,replace=TRUE)) #like a one-time bootstrap
# boot() repeats that R = 1000 times and reports the bootstrap SE of alpha.
boot.out=boot(Portfolio,alpha.fn,R=1000) #1,000 bootstraps
boot.out #summary
# Histogram and Q-Q plot of the bootstrap distribution.
plot(boot.out)
479215fd994aa153aef7973febf45cbb906d9266 | 9c22eb39be49a8107fabd4f3ff94715ee12e5862 | /man/harmonise_ids.Rd | 22858b70ea5560a97dfe075db63ea06e0bff11ad | [] | no_license | cran/Spectrum | 5026b50551668a38f847a8a508b46f6002b19158 | a4c7bb0de877d49224977d64314404a2a7cbd1b5 | refs/heads/master | 2020-04-21T18:13:47.086980 | 2020-02-10T07:40:02 | 2020-02-10T07:40:02 | 169,761,606 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 643 | rd | harmonise_ids.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core_functions.R
\name{harmonise_ids}
\alias{harmonise_ids}
\title{harmonise_ids: works on a list of similarity matrices to add entries of NA where
there are missing observations between views}
\usage{
harmonise_ids(l)
}
\arguments{
\item{l}{A list of similarity matrices: those to be harmonised.}
}
\value{
A list of harmonised similarity matrices.
}
\description{
Simply adds a column and row of NA with the missing ID for data imputation. The
similarity matrix requires row and column IDs present for this to work.
}
\examples{
h_test <- harmonise_ids(missl)
}
|
e80aa57cda326c562d0dcf7fe8269af60bc50b95 | f0b8e873e84e9216bacaf3587c3b8e585e1ddf9d | /Documents/ggplot2_p/stackbar.r | 165d3fca84965f03017cc48e76d5008b74a0951b | [] | no_license | Waylan49/Self_Learning_ggplot2 | f7ed55b34eb905852fae10bc28525da33a0c8d52 | daf2a0dd956a523a2a7020c6186ee0502bf63199 | refs/heads/master | 2023-02-27T17:39:06.777812 | 2021-02-07T23:45:34 | 2021-02-07T23:45:34 | 336,914,973 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 677 | r | stackbar.r | library(scales)
# Stacked bar chart of export composition by year, with in-bar percentage labels.
# pos1 = vertical midpoint of each segment within its year's stack
# (cumulative sum up to the segment minus half the segment height).
# NOTE(review): cumsum order depends on row order within each year — it must
# match position_stack(reverse = TRUE) below for labels to land on segments.
data<-data %>% group_by(year) %>% mutate(pos1=cumsum(percentage)-0.5*percentage)
ggplot(data)+theme_bw()+
geom_bar(aes(x=year, y=percentage, fill=product),
stat="identity",
position=position_stack(reverse=TRUE))+
theme(legend.position="bottom", legend.title = element_blank())+
geom_text(aes(x=year, y=pos1, label=paste(
percentage, "%", sep="")), size=3.2)+
ggtitle("Composition of Exports to China (%)")+
labs(x="Year", y="USD Million")+
scale_y_continuous(labels = dollar_format(prefix="", suffix="%"))+
theme(panel.border = element_blank(),axis.line=element_line(
color="black", size=0.8))
b75a4da8993275c85564aae368b97c4acc95eab4 | 3df087ccb93de55ddde9fc02a1805432a9a21343 | /man/MomSignal.Rd | d8b1c5cc67e25ad0afc974351ee2f70a13ff815d | [] | no_license | thismely/ExpectedReturns | efc587e92f60cc2bf966a6e477b68025ed5c7806 | ba2507aa1572b2a27d2f6639d45f98e5b2533ece | refs/heads/master | 2023-07-06T20:13:46.863687 | 2021-07-11T17:33:30 | 2021-07-11T17:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,618 | rd | MomSignal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MomSignal.R
\name{MomSignal}
\alias{MomSignal}
\title{Momentum Trading Signal}
\usage{
MomSignal(X, lookback, signal, cutoffs, speed = FALSE, ...)
}
\arguments{
\item{X}{A list of \code{xts} objects, storing assets data. See 'Details'.}
\item{lookback}{A numeric, indicating the lookback period in the same frequency of \code{X} series.}
\item{signal}{A character, specifying the momentum signal. One of \code{SIGN}, \code{MA}, \code{EEMD}, \code{TREND}, or \code{SMT}.}
\item{cutoffs}{A numeric vector, with positional cutoffs for \emph{Newey-West t-statitics} and \eqn{R^2}, see 'Details'.}
\item{speed}{A boolean, whether or not to compute the chosen momentum signal \emph{speed}.}
\item{...}{Any other pass through parameter.}
}
\value{
A list of \code{xts} objects, consisting of the chosen momentum \code{signal} for the
corresponding assets data \code{X} provided. Signals are \eqn{{-1, 0, 1}} for short,
inactive, and long positions, respectively. \code{TREND} and \code{SMT} are the only
signals that can result in inactive positions.
With \code{speed}, additionally the chosen \emph{momentum speed} for the given assets.
}
\description{
Function to generate several momentum trading signals. Signals currently implemented are:
\itemize{
\item \strong{Return Sign} (SIGN) of Moskowitz-Ooi-Pedersen (2012)
\item \strong{Moving Average} (MA)
\item \strong{Time-Trend t-statistic} (TREND)
\item \strong{Statistically Meaningful Trend} (SMT) of Bryhn-Dimberg (2011)
\item \strong{Ensemble Empirical Mode Decomposition} (EEMD) of Wu-Huang (2009)
}
All the signals are as defined in Baltas-Kosowski (2012).
Also, to each signal can be associated a so called \emph{momentum speed}, which is
an activity to turnover-ratio used to assess signals trading intensity.
Letting \eqn{X} the signal, its speed is defined as
\deqn{SPEED_{X} = \sqrt{\frac{E[X^2]}{E[(\Delta X)^2]}}}
The higher the speed, the larger the signal activity and thus the portfolio turnover.
}
\details{
Data strictly needed in \code{X} depends on the \code{signal} chosen. \code{SIGN} is based on
assets returns. \code{MA}, \code{EEMD}, \code{TREND}, and \code{SMT} are price-based momentum signals.
For the \code{TREND}, Newey-West t-statistics lower and upper \code{cutoffs} can be provided.
With \code{SMT}, \code{cutoffs} can additionally provide the lower \eqn{R^2} cut-off.
Defaults are set at \eqn{-2}, \eqn{2} for Newey-West t-statistics and a minimum
\eqn{R^2 = 0.65}.
\code{SMT} over sub-periods is not currently supported.
}
\references{
Baltas, A. N. and Kosowski, R. (2012). \emph{Improving time-series momentum strategies: The role of trading signals and volatility estimators}.
\href{https://risk.edhec.edu/publications/improving-time-series-momentum-strategies-role-trading-signals-and-volatility}{EDHEC-Risk Institute}.
Bryhn, A. C and Dimberg, P. H. (2011). \emph{An operational definition of a statistically meaningful trend}. PLoS One.
Luukko, P. JJ. and Helske, J. and Rasanen, E. (2016). \emph{Introducing libeemd: A program package for performing the ensemble empirical mode decomposition}. Computational Statistics.
Moskowitz, T. J. and Ooi, Y. H. and Pedersen, L. H. (2012). \emph{Time series momentum}. Journal of Financial Economics.
Wu, Z. and Huang, N. E. (2009). \emph{Ensemble empirical mode decomposition: a noise-assisted data analysis method}. Advances in Adaptive Data Analysis.
}
\seealso{
\code{\link[sandwich:NeweyWest]{sandwich::NeweyWest()}}, \code{\link[Rlibeemd:eemd]{Rlibeemd::eemd()}}
}
\author{
Vito Lestingi
}
|
6ee8d8984db762a9cf6f774ee6184f6fdd0e4e00 | 4b662cb87656d07ddfd372cbe4b97f6a60a65cf2 | /Project_2.R | f68bbaf959a8a10b2dc14197cfcdf7a06f722b82 | [] | no_license | kami2020/DARET_Project_2 | 5360639f0ce011931ec51136e8fcd0974d49de08 | 652200ea4d5786b5f38742f01af821c234845945 | refs/heads/master | 2020-04-13T20:47:11.970734 | 2018-12-28T18:40:08 | 2018-12-28T18:40:08 | 163,439,130 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,760 | r | Project_2.R | library(readxl)
# Load the churn workbook and prepare a working copy `pchurn`:
# coerce categorical columns to factors and drop the unused 21st column.
Churn_1_ <- read_excel("D:/Abhyayanam/DARET/Projects/Project 2 - Churn/Churn (1).xls")
View(Churn_1_)
save.image("D:/Abhyayanam/DARET/Projects/Project 2 - Churn/Project_2.Rdata.RData")
# FIX: was `Chuurn_1_` (misspelled), which aborts with "object not found".
pchurn <- Churn_1_
head(pchurn,6)
sum(is.na(pchurn))                     # count missing values
str(pchurn)
table(pchurn$Churn)                    # class balance of the target
# Convert identifier/categorical columns to factors for glm().
pchurn$Churn <- as.factor(pchurn$Churn)
pchurn$State <- as.factor(pchurn$State)
pchurn$Phone <- as.factor(pchurn$Phone)
pchurn$`Area Code`<- as.factor(pchurn$`Area Code`)
pchurn$`VMail Plan`<- as.factor(pchurn$`VMail Plan`)
pchurn$`Intl Plan`<- as.factor(pchurn$`Intl Plan`)
table(pchurn$State)
table(pchurn$`Area Code`)
# Drop column 21 (not used in modelling below).
pchurn <- pchurn[,-c(21)]
colnames(pchurn)
str(pchurn)
# FIX: removed `library(CARET)` — R package names are case-sensitive and no
# package "CARET" exists, so that call halted the script; `caret` is loaded here.
library(caret)
library(MASS)
# Reproducible 80/20 train/test split by row index.
set.seed(246)
smp_size <- floor(0.8 * nrow(pchurn))
train_ind <- sample(seq_len(nrow(pchurn)), size = smp_size)
churntrain <- pchurn[train_ind, ]
churntest <- pchurn[-train_ind, ]
# Sanity checks: column names, split sizes, and class balance per split.
colnames(churntrain)
colnames(churntest)
nrow(churntrain)
nrow(churntest)
nrow(pchurn)
table(pchurn$Churn)
table(churntrain$Churn)
table(churntest$Churn)
# Full logistic regression on all predictors; maxit raised for convergence.
fit1 <- glm(Churn~.,data = churntrain, family = binomial(link = 'logit'),control = list (maxit=50))
summary(fit1)
# Stepwise AIC model selection starting from the full model.
# NOTE(review): stepAIC's argument is `direction`, not `method` — as written
# `method='backward'` is absorbed by `...` and the default (both) is used; confirm.
step_fit1 <- stepAIC(fit1,method='backward')
summary(step_fit1)
anova(fit1,test='Chisq')
anova(step_fit1, test = 'Chisq')
# Likelihood-ratio test p-value of each model vs. the null model.
with(fit1, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail = FALSE))
with(step_fit1, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail = FALSE))
plot(fit1$fitted.values)
# Multicollinearity check via variance inflation factors.
library(car)
vif(fit1)
vif(step_fit1)
# Predicted churn probabilities on train and test sets for both models.
trainpred1 <- predict(fit1, newdata=churntrain, type = 'response')
trainpred_step1 <- predict(step_fit1, newdata=churntrain, type = 'response')
pred1 <- predict(fit1, newdata=churntest, type = 'response')
pred_step1 <- predict(step_fit1, newdata=churntest, type = 'response')
# ROC curves / AUC for each model on each split.
library(pROC)
tg1 <- roc(churntrain$Churn~trainpred1, data=churntrain)
tg2 <- roc(churntrain$Churn~trainpred_step1, data=churntrain)
g1 <- roc(churntest$Churn~pred1, data = churntest)
g2 <- roc(churntest$Churn~pred_step1, data = churntest)
plot(tg1)
plot(tg2)
plot(g1)
plot(g2)
tg1
tg2
g1
g2
# Threshold tuning: classify at successively lower probability cutoffs and
# inspect the confusion matrix each time (exploratory, console-driven).
# NOTE(review): both caret and InformationValue export confusionMatrix();
# the later-attached package masks the earlier one — confirm which is intended.
library(caret)
library(InformationValue)
churntrain$Pred_Churn1 <- ifelse(trainpred_step1 <0.5,0,1)
# First call fails/warns with a numeric vector; repeated below with as.factor.
confusionMatrix(churntrain$Churn, churntrain$Pred_Churn1)
confusionMatrix(churntrain$Churn, as.factor(churntrain$Pred_Churn1))
table(churntrain$Churn)
table(churntrain$Pred_Churn1)
churntrain$Pred_Churn1 <- ifelse(trainpred_step1 <0.45,0,1)
confusionMatrix(churntrain$Churn, as.factor(churntrain$Pred_Churn1))
table(churntrain$Pred_Churn1)
churntrain$Pred_Churn1 <- ifelse(trainpred_step1 <0.40,0,1)
confusionMatrix(churntrain$Churn, as.factor(churntrain$Pred_Churn1))
table(churntrain$Pred_Churn1)
churntrain$Pred_Churn1 <- ifelse(trainpred_step1 <0.35,0,1)
confusionMatrix(churntrain$Churn, as.factor(churntrain$Pred_Churn1))
table(churntrain$Pred_Churn1)
churntrain$Pred_Churn1 <- ifelse(trainpred_step1 <0.30,0,1)
confusionMatrix(churntrain$Churn, as.factor(churntrain$Pred_Churn1))
table(churntrain$Pred_Churn1)
# Repeat the cutoff sweep on the held-out test set.
churntest$Pred_Churn1 <- ifelse(pred_step1 <0.50,0,1)
confusionMatrix(churntest$Churn, as.factor(churntest$Pred_Churn1))
table(churntrain$Pred_Churn1)
table(churntest$Pred_Churn1)
table(churntest$Churn)
churntest$Pred_Churn1 <- ifelse(pred_step1 < 0.30,0,1)
table(churntest$Pred_Churn1)
confusionMatrix(churntest$Churn, as.factor(churntest$Pred_Churn1))
churntest$Pred_Churn1 <- ifelse(pred_step1 < 0.35,0,1)
confusionMatrix(churntest$Churn, as.factor(churntest$Pred_Churn1))
table(churntest$Pred_Churn1)
churntest$Pred_Churn1 <- ifelse(pred_step1 < 0.28,0,1)
table(churntest$Pred_Churn1)
confusionMatrix(churntest$Churn, as.factor(churntest$Pred_Churn1))
|
bb857ec4dcbd5f4aaad1789bfb289bf5ca2706e4 | 8d60d8ee03e9158ba660e2419f6ed45d1c053e8e | /exposure_result.R | 50ec08d6c57b9079f41496c4cd20df3748b07a54 | [] | no_license | kyungtakDS/KRM_inha | e73ae4f15fdbedc6445f6afab12a206ac78d3f7a | 92488e67b944321297dd3c2b749e6938cab4696e | refs/heads/master | 2021-04-24T13:43:57.263846 | 2020-04-11T12:59:06 | 2020-04-11T12:59:06 | 250,124,957 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 19,649 | r | exposure_result.R | #'---
#'title: "Exposure - 총주택,총인구,평균공시지가"
#'author: "Kyungtak Kim"
#'date: '2020 3 26 '
#'output: github_document
#'
#'
#'---
#+ library, warning=FALSE, message=FALSE
library(tidyverse)
library(sf)
library(tmap)
Sys.setenv(Language="En")
library(caret)
library(knitr)
library(leaflet)
library(rgdal)
library(htmltools)
#' # 원본 데이터 읽기 / 특성 분석
#'
DB <- read.csv('input/exposure_db.csv')
head(DB, 3)
#' ## 총주택수 자료 특성(_ex_str)
#' 연도별 확률밀도함수
#' 침수구역내의 주택수에 대한 분포를 보면 0-500채 사이가 가장 높다
#'
DB_s<- DB %>%
select(NameK, SGG, contains("str"))
DB_s_p <- DB_s %>% # pivoting
pivot_longer(c("X16_ex_str", "X17_ex_str"),
names_to = "year",
values_to = "house")
DB_s_p %>%
ggplot()+
geom_density(aes(x=house, y=..density.., color=year))
DB_s %>%
ggplot(aes(X16_ex_str))+
geom_histogram(bins=200)
#' outlier를 찾기 boxplot을 년도 별로 그려본다.
#' 최대값은 서울의 값이며, 큰 값들의 영향을 조금 줄이는 효과를 보기 위해
#' z-score 보다는 min-max scaling(보통 normalizaiton이라고 하고,
#' 경우에 따라 standardization이라고도 함)를 사용
#'
DB_s_p %>%
ggplot(aes(year, house))+
geom_boxplot()
#' 침수구역내 총건축물수
#'
#+ fig.width=12, fig.height=25
DB_s_p %>%
group_by(NameK) %>%
mutate(mean=mean(house))%>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=house))+
geom_boxplot()+
coord_flip()
#' 총건축물수가 적은 지역에 대한 분포 비교
#'
#+ fig.width=6, fig.height=6
DB_s_p %>%
group_by(NameK) %>%
mutate(mean=mean(house))%>%
filter(mean < 300) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=house))+
geom_boxplot()+
coord_flip()
#' 총건축물수가 많은 지역에 대한 분포 비교
#' 인천광역시의 경우 침수구역내 총주택수가 적다.
#' 제주특별자치도의 경우 침수구역내 총주택수가 많은 편에 속한다.(소하천때문??)
#' 인천광역시의 경우 총주택수 (16년 2065 , 17년 2373채, 차이 308채 )
#'
DB_s_p %>%
group_by(NameK) %>%
mutate(mean=mean(house))%>%
filter(mean > 10000) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=house))+
geom_boxplot()+
coord_flip()
#' 년도별 침수구역내 총주택수의 변화
#'
#' **총주택수가 2016년에 비해 2017년에 줄어든 것은 이지역의 재개발**
#' **로 이해 단독주택이 아파트로 바뀌어서 여러 객체가 하나의 객체로**
#' **인식된 것이 아닌지? check해볼 필요가 있다** #'
#'
DB_s %>%
mutate(dif=(X17_ex_str - X16_ex_str)) %>%
filter(NameK == "인천광역시")
DB_s_dif <- DB_s%>%
mutate(dif=(X17_ex_str - X16_ex_str)) %>%
arrange(-dif)
knitr::kable(DB_s_dif[1:10, ]) # 침수구역내 총주택수가 늘어난 시군
knitr::kable(DB_s_dif[152:161, ]) # 침수구역내 총주택수가 줄어든 시군
DB_s_p %>%
group_by(year) %>%
ggplot(aes(house, SGG))+
geom_point(aes(color=factor(SGG)))+
facet_grid(. ~year)+
theme(legend.position = "none")
#' ## 총인구수 자료 특성(_ex_pop)
#' 연도별 확률밀도함수
#' 침수구역내의 인구수에 대한 분포
#'
DB_p <- DB %>%
select(NameK, SGG, contains("pop"))
DB_p_p <- DB_p %>% # pivoting
pivot_longer(c("X16_ex_pop", "X17_ex_pop"),
names_to = "year",
values_to = "people")
DB_p_p %>%
ggplot()+
geom_density(aes(x=people, y=..density.., color=year))
DB_p %>%
ggplot(aes(X17_ex_pop))+
geom_histogram(bins=200)
#' 침수구역내 총인구수
#'
#+ fig.width=12, fig.height=25
DB_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(people))%>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=people))+
geom_boxplot()+
coord_flip()
#' 총인구수가 적은 지역에 대한 분포 비교
#'
#+ fig.width=6, fig.height=6
DB_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(people))%>%
filter(mean < 300) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=people))+
geom_boxplot()+
coord_flip()
#' 총인구수가 많은 지역에 대한 분포 비교
#'
DB_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(people))%>%
filter(mean > 100000) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=people))+
geom_boxplot()+
coord_flip()
#' 침수구역내 총인구수의 변화
#' 년도별 침수구역내 총인구수의 변화
#'
DB_p %>%
mutate(dif=(X17_ex_pop - X16_ex_pop)) %>%
filter(NameK == "서울특별시")
DB_p_dif <- DB_p%>%
mutate(dif=(X17_ex_pop - X16_ex_pop)) %>%
arrange(-dif)
knitr::kable(DB_p_dif[1:10, ]) # 침수구역내 총인구가 늘어난 시군
knitr::kable(DB_p_dif[152:161, ]) # 침수구역내 총인구가 줄어든 시군
DB_p_p %>%
group_by(year) %>%
ggplot(aes(people, SGG))+
geom_point(aes(color=factor(SGG)))+
facet_grid(. ~year)+
theme(legend.position = "none")
#' ## 평균공시지가 자료 특성(_ex_eco)
#' 연도별 확률밀도함수
#' 침수구역내의 평균공시지가에 대한 분포
#'
DB_e <- DB %>%
select(NameK, SGG, contains("eco"))
DB_e_p <- DB_e %>% # pivoting
pivot_longer(c("X16_ex_eco", "X17_ex_eco"),
names_to = "year",
values_to = "price")
DB_e_p %>%
ggplot()+
geom_density(aes(x=price, y=..density.., color=year))
DB_e %>%
ggplot(aes(X16_ex_eco))+
geom_histogram(bins=200)
#' 침수구역내 평균공시지가
#'
#+ fig.width=12, fig.height=25
DB_e_p %>%
group_by(NameK) %>%
mutate(mean=mean(price))%>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=price))+
geom_boxplot()+
coord_flip()
#' 평균공시지가 작은 지역에 대한 분포 비교
#'
#+ fig.width=6, fig.height=6
DB_e_p %>%
group_by(NameK) %>%
mutate(mean=mean(price))%>%
filter(mean < 10000) %>% # 만원
ggplot(aes(x=fct_reorder(NameK, mean),
y=price))+
geom_boxplot()+
coord_flip()
#' 평균공시지가가 큰 지역에 대한 분포 비교
#'
DB_e_p %>%
group_by(NameK) %>%
mutate(mean=mean(price))%>%
filter(mean > 500000) %>% # 50만원
ggplot(aes(x=fct_reorder(NameK, mean),
y=price))+
geom_boxplot()+
coord_flip()
#' 침수구역내 평균공시지가의 변화
#' 년도별 침수구역내 평균공시지가의 변화
#'
#' **check할것-서울, 광명이 타 지역에 비해 너무 크다.?**
#' **check할것-인천광역시의 공시지가가 떨어졌는지???**
#'
DB_e %>%
mutate(dif=(X17_ex_eco - X16_ex_eco)) %>%
filter(NameK == "서울특별시")
DB_e_dif <- DB_p%>%
mutate(dif=(X17_ex_pop - X16_ex_pop)) %>%
arrange(-dif)
knitr::kable(DB_e_dif[1:10, ]) # 침수구역내 평균공시지가가 늘어난 시군
knitr::kable(DB_e_dif[152:161, ]) # 침수구역내 평균공시지가가 줄어든 시군
DB_e_p %>%
group_by(year) %>%
ggplot(aes(price, SGG))+
geom_point(aes(color=factor(SGG)))+
facet_grid(. ~year)+
theme(legend.position = "none")
#' # Exposure 정규화(Normalization Function)함수 - log 정규화
#'
# Min-max normalise a numeric vector on a log10 scale:
#   (log10(x) - min) / (max - min), mapping the range of log10(x) onto [0, 1].
#
# Args:
#   x: a numeric vector of strictly positive values.
# Returns: a numeric vector the same length as x, in [0, 1].
#
# Improvement over the original: log(x, base = 10) was computed four times
# (once per min/max/term); it is now computed once. Results are identical.
# NOTE(review): x <= 0 yields -Inf/NaN and a constant x divides by zero —
# acceptable here only because the exposure columns are positive counts/prices.
standard_log <- function(x) {
  lx <- log(x, base = 10)
  rng <- range(lx)
  (lx - rng[1]) / (rng[2] - rng[1])
}
#' # Mapping changes across the 161 si/gun (city/county) districts
#'
# Apply log-scale min-max normalisation to the six yearly indicator columns
# (columns 4:9 of DB: 2016/2017 structures, population, land price).
exposure <- as.data.frame(lapply(DB[,4:9],
                                 standard_log))
exposure <- cbind(DB[,1:3],
                  exposure)
colnames(exposure)[4:9] <- c("X16_ex_str_log", "X16_ex_pop_log", "X16_ex_eco_log",
                             "X17_ex_str_log", "X17_ex_pop_log", "X17_ex_eco_log")
# Compute the 2016 and 2017 Exposure indices as the unweighted mean of the
# three normalised indicators for each year.
ex_index_16 <- as.data.frame((rowSums(exposure[,4:6]))/3)
colnames(ex_index_16) <- c("X16_ex_index")
ex_index_17 <- as.data.frame((rowSums(exposure[,7:9]))/3)
colnames(ex_index_17) <- c("X17_ex_index")
exposure <- cbind(exposure, c(ex_index_16,ex_index_17))
#' # Final index - without a second min-max rescaling ----------------------------------------------------------
# Collect the yearly Exposure indices (columns 10:11) next to the id columns.
result <- exposure[,10:11]
colnames(result) <- c("X16_exposure", "X17_exposure")
result <- cbind(DB[,1:3], result)
head(result,3)
summary(result)
#'
result_p_p <- result %>% # pivoting
pivot_longer(c("X16_exposure", "X17_exposure"),
names_to = "year",
values_to = "exposure")
result_p_p %>%
ggplot()+
geom_density(aes(x=exposure, y=..density.., color=year))
result %>%
ggplot(aes(X17_exposure))+
geom_histogram(bins=100)
#'
#+ fig.width=12, fig.height=25
result_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(exposure))%>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=exposure))+
geom_boxplot()+
coord_flip()
#'
#+ fig.width=6, fig.height=6
result_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(exposure))%>%
filter(mean < 0.25) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=exposure))+
geom_boxplot()+
coord_flip()
#'
result_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(exposure))%>%
filter(mean > 0.75) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=exposure))+
geom_boxplot()+
coord_flip()
#'
result %>%
mutate(dif=(X17_exposure - X16_exposure)) %>%
filter(NameK == "서울특별시")
result_p_dif <- result%>%
mutate(dif=(X17_exposure - X16_exposure)) %>%
arrange(-dif)
knitr::kable(result_p_dif[1:10, ])
knitr::kable(result_p_dif[152:161, ])
result_p_p %>%
group_by(year) %>%
ggplot(aes(exposure, SGG))+
geom_point(aes(color=factor(SGG)))+
facet_grid(. ~year)+
theme(legend.position = "none")
#' # Mapping
#'
# Load the si/gun district shapefile.
library(sf)
analysis <- st_read("input/analysis.shp")
# Polygon validity check (the base file was already replaced with an
# error-corrected version, so the repair steps below stay commented out).
#st_is_valid(analysis)
#library(lwgeom)
#analysis <- st_make_valid(analysis)
st_is_valid(analysis)
# Join the yearly (normalised) Exposure indices onto the shapefile attributes.
# NOTE(review): right_join with no `by` matches on all shared column names —
# confirm the join key is the district identifier only.
library(dplyr)
analysis <- right_join(analysis, result[,3:5])
# Simplify polygons (50 m tolerance) to speed up plotting.
library(tmap)
analysis_simp <- st_simplify(analysis, dTolerance = 50)
#+ fig.width=12, fig.height=6
# 결과 확인
tmap_mode("plot")
breaks = c(0, 0.2, 0.4, 0.6, 0.8, 1)
facets=c("X16_exposure", "X17_exposure")
tm_shape(analysis_simp)+
tm_polygons(facets,
breaks=breaks,
palette = c("green", "greenyellow", "yellow", "orange", "red"),
legend.reverse = TRUE)+
tm_facets(ncol = 2)+
tm_layout(legend.position = c("right", "bottom"))+
tm_compass(type = "rose",
position = c("right", "top"),
size = 2.5)+
tm_scale_bar(breaks = c(0, 25, 50, 100, 150, 200),
position = c("left", "bottom"))
######################
#library(leaflet)
#library(rgdal)
#library(htmltools)
#+ fig.width=8, fig.height=6
a <- st_transform(analysis_simp, 4326)
pal <- colorBin(
palette=c("green", "greenyellow", "yellow", "orange", "red"),
domain=NULL,
bins = c(0, .2, .4, .6, 0.8, 1),
pretty = FALSE)
leaflet(a) %>%
setView(lng = 128, lat = 35.9, zoom = 7) %>%
# base groups
addPolygons(color = ~pal(X16_exposure),
weight = 1,
smoothFactor = 0.5,
opacity = 1.0,
fillOpacity = 0.5,
label = ~htmlEscape(NameK),
popup = ~htmlEscape(X16_exposure),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
group="Exposure 2016") %>%
addPolygons(color = ~pal(X17_exposure),
weight = 1,
smoothFactor = 0.5,
opacity = 1.0,
fillOpacity = 0.5,
label = ~htmlEscape(NameK),
popup = ~htmlEscape(X17_exposure),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
group="Exposure 2017") %>%
#overlay groups
addProviderTiles(providers$Esri.WorldStreetMap,
group="Esri") %>%
addProviderTiles(providers$CartoDB.Positron,
group="CartoDB") %>%
addLegend("bottomright",
pal = pal,
values = ~X17_exposure,
title = "Exposure Index",
labFormat = labelFormat(digits=10),
opacity = 1) %>%
hideGroup("CartoDB") %>%
#Layer controls
addLayersControl(baseGroups = c("Exposure 2016", "Exposure 2017"),
overlayGroups = c("Esri", "CartoDB"),
options=layersControlOptions(collapsed=FALSE))
#' # 결과값 저장
#'
write.csv(result, 'output/exposure_result1.csv', row.names = F)
#' # 최종 min-max 포함 -----------------------------------------------------------
#' ## 년도별 Expsoure 지수를 다시 min-max scaling 적용
#' Exposure 지수 표준화 함수 설정
#'
standard <- function(x){
return((x-min(x))/(max(x)-min(x)))
}
# 연도별 Exposure 지수 표준화 산정
result <- as.data.frame(lapply(exposure[,10:11],
standard))
colnames(result) <- c("X16_exposure", "X17_exposure")
result <- cbind(DB[,1:3],
result)
head(result,3)
#' ## 표준화된 Exposure 지수의 특성 분석
#' 연도별 확률밀도함수
#' ** 표준화후에 정규분포에 가깝게 변동을 확인함**
#'
result_p_p <- result %>% # pivoting
pivot_longer(c("X16_exposure", "X17_exposure"),
names_to = "year",
values_to = "exposure")
result_p_p %>%
ggplot()+
geom_density(aes(x=exposure, y=..density.., color=year))
result %>%
ggplot(aes(X17_exposure))+
geom_histogram(bins=100)
#'
#+ fig.width=12, fig.height=25
result_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(exposure))%>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=exposure))+
geom_boxplot()+
coord_flip()
#' **연천군의 exposure값이 0 인것은?**
#'
#+ fig.width=6, fig.height=6
result_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(exposure))%>%
filter(mean < 0.25) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=exposure))+
geom_boxplot()+
coord_flip()
#'
result_p_p %>%
group_by(NameK) %>%
mutate(mean=mean(exposure))%>%
filter(mean > 0.75) %>%
ggplot(aes(x=fct_reorder(NameK, mean),
y=exposure))+
geom_boxplot()+
coord_flip()
#'
result %>%
mutate(dif=(X17_exposure - X16_exposure)) %>%
filter(NameK == "서울특별시")
result_p_dif <- result%>%
mutate(dif=(X17_exposure - X16_exposure)) %>%
arrange(-dif)
knitr::kable(result_p_dif[1:10, ]) # 침수구역내 총인구가 늘어난 시군
knitr::kable(result_p_dif[152:161, ]) # 침수구역내 총인구가 줄어든 시군
result_p_p %>%
group_by(year) %>%
ggplot(aes(exposure, SGG))+
geom_point(aes(color=factor(SGG)))+
facet_grid(. ~year)+
theme(legend.position = "none")
#' # Mapping
#'
# 시군 shp 파일 불러오기
library(sf)
analysis <- st_read("input/analysis.shp")
# 폴리곤 에러 체크(기본 파일을 에러 수정한 파일로 변경하였음)
#st_is_valid(analysis)
#library(lwgeom)
#analysis <- st_make_valid(analysis)
st_is_valid(analysis)
# shp파일에 연도별 Exposure 지수(표준화 적용) 추가
library(dplyr)
analysis <- right_join(analysis, result[,3:5])
# 폴리곤 단순화
library(tmap)
analysis_simp <- st_simplify(analysis, dTolerance = 50)
#+ fig.width=12, fig.height=6
# 결과 확인
tmap_mode("plot")
breaks = c(0, 0.2, 0.4, 0.6, 0.8, 1)
facets=c("X16_exposure", "X17_exposure")
tm_shape(analysis_simp)+
tm_polygons(facets,
breaks=breaks,
palette = c("green", "greenyellow", "yellow", "orange", "red"),
legend.reverse = TRUE)+
tm_facets(ncol = 2)+
tm_layout(legend.position = c("right", "bottom"))+
tm_compass(type = "rose",
position = c("right", "top"),
size = 2.5)+
tm_scale_bar(breaks = c(0, 25, 50, 100, 150, 200),
position = c("left", "bottom"))
######################
#library(leaflet)
#library(rgdal)
#library(htmltools)
#+ fig.width=8, fig.height=6
a <- st_transform(analysis_simp, 4326)
pal <- colorBin(
palette=c("green", "greenyellow", "yellow", "orange", "red"),
domain=NULL,
bins = c(0, .2, .4, .6, 0.8, 1),
pretty = FALSE)
leaflet(a) %>%
setView(lng = 128, lat = 35.9, zoom = 7) %>%
# base groups
addPolygons(color = ~pal(X16_exposure),
weight = 1,
smoothFactor = 0.5,
opacity = 1.0,
fillOpacity = 0.5,
label = ~htmlEscape(NameK),
popup = ~htmlEscape(X16_exposure),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
group="Exposure 2016") %>%
addPolygons(color = ~pal(X17_exposure),
weight = 1,
smoothFactor = 0.5,
opacity = 1.0,
fillOpacity = 0.5,
label = ~htmlEscape(NameK),
popup = ~htmlEscape(X17_exposure),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
group="Exposure 2017") %>%
#overlay groups
addProviderTiles(providers$Esri.WorldStreetMap,
group="Esri") %>%
addProviderTiles(providers$CartoDB.Positron,
group="CartoDB") %>%
addLegend("bottomright",
pal = pal,
values = ~X17_exposure,
title = "Exposure Index",
labFormat = labelFormat(digits=10),
opacity = 1) %>%
hideGroup("CartoDB") %>%
#Layer controls
addLayersControl(baseGroups = c("Exposure 2016", "Exposure 2017"),
overlayGroups = c("Esri", "CartoDB"),
options=layersControlOptions(collapsed=FALSE))
#############################
#' # 결과값 저장
#'
write.csv(result, 'output/exposure_result.csv', row.names = F)
# 열 명칭별 의미
# Name : 161개 시군별 영문명
# NameK : 161개 시군별 한글명
# SGG : 시군구 코드
# X16_ex_str : 16년도 총 건축물수(개)
# X17_ex_str : 17년도 총 건축물수(개)
# X16_ex_pop : 16년도 총 인구수(명)
# X17_ex_pop : 17년도 총 인구수(명)
# X16_ex_eco : 16년도 평균공시지가(원/m2)
# X17_ex_eco : 17년도 평균공시지가(원/m2)
# X16_ex_str_log : 16년도 총 건축물수(log 표준화 적용)
# X17_ex_str_log : 17년도 총 건축물수(log 표준화 적용)
# X16_ex_pop_log : 16년도 총 인구수(log 표준화 적용)
# X17_ex_pop_log : 17년도 총 인구수(log 표준화 적용)
# X16_ex_eco_log : 16년도 평균공시지가(log 표준화 적용)
# X17_ex_eco_log : 17년도 평균공시지가(log 표준화 적용)
# X16_ex_index : 16년도 Exposure 지수
# X17_ex_index : 17년도 Exposure 지수
# X16_exposure : 16년도 Exposure 지수(표준화 적용)
# X17_exposure : 17년도 Exposure 지수(표준화 적용) |
3d5492c7a8aa80743754677941b7b424cad2ab6f | 7b64c0207a7ecdadbd2a85d213f7277945f6233c | /man/stf_download_information.Rd | 4b77025b92d25824e68372af492e2f5ed51c61b7 | [
"MIT"
] | permissive | jjesusfilho/stf | eca71363b8be224fe9b016dd277a852aab31b8ca | 832f6c2369a95e91defd49f159704a3e7b3628f9 | refs/heads/master | 2023-06-07T07:27:58.415425 | 2023-05-29T14:45:02 | 2023-05-29T14:45:02 | 162,435,941 | 12 | 7 | null | null | null | null | UTF-8 | R | false | true | 467 | rd | stf_download_information.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stf_download_information.R
\name{stf_download_information}
\alias{stf_download_information}
\title{Download html with information based on incidente}
\usage{
stf_download_information(incidente, dir = ".")
}
\arguments{
\item{incidente}{Incidente number}
\item{dir}{Directory where to download the htmls}
}
\value{
htmls
}
\description{
Download html with information based on incidente
}
|
7cde83c1fd03a248a922efc07941b4c1f7c750de | b374514b2333c49c2b1a6b6f86360f6c12c52fc4 | /R_Coursera_Programming/week_2/pollutantmean.R | 79c047ee101f748040d09e7b0738019519731a9a | [] | no_license | jfnavarro21/datasciencecoursera | 13783db94beff413598d6b0c4aebd3685a48f94e | e57ce99e3b09eb6eee289875fef32ff18aa982d3 | refs/heads/master | 2021-01-10T09:08:57.647207 | 2016-03-23T02:46:50 | 2016-03-23T02:46:50 | 49,966,751 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 455 | r | pollutantmean.R | pollutantmean <- function(directory, pollutant, id = 1:332) {
files <- list.files(directory, pattern=".csv", full.names=TRUE)
values <- numeric()
#Read pollutant data values and append
for (i in id) {
data <- read.csv(files[i], header=TRUE)
values <- c(values, data[[pollutant]])
}
#Compute mean for all available data and format print
mean <- mean(values, na.rm=TRUE)
print(formatC(mean, digits=3, format="f"))
print(mean)
} |
89f564e6f6dbebfd347503a6a427fba1945b0007 | 8a736317e9732b939803d041f2448c125ff49e5f | /tests/testthat/test-examples.R | 432e3cedd1f3261b766a97e468c3fac8b97e6049 | [] | no_license | mbojan/isnar | f753c9d6a6c2623e7725c2f03035c1f9cc89ba85 | 56177701d509b267eff845e15514e4cf48e69675 | refs/heads/master | 2021-06-10T15:32:48.345605 | 2021-02-17T20:00:40 | 2021-02-17T20:00:40 | 19,937,772 | 8 | 3 | null | 2015-04-03T12:41:31 | 2014-05-19T10:28:23 | R | UTF-8 | R | false | false | 64 | r | test-examples.R | context("Testing examples")
pdf(NULL)
test_examples()
dev.off() |
898dac7c0b6be81456959ab2a9a9d2a13efe6626 | c353229e39ed2709bcc73fc918269ed88a2588ce | /man/af.Rd | 405d9d063b79bcd46cc33a8a81c5992ed15bb131 | [] | no_license | amytildazhang/annoplots | 282236f21fd848fce2e643a147c52201de970f55 | 865301c0d8a35e85dac6a33f347c0f29f0bce2ba | refs/heads/main | 2023-08-14T20:22:35.773416 | 2021-09-13T23:27:09 | 2021-09-13T23:27:09 | 406,158,736 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 305 | rd | af.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annofuns.R
\name{af}
\alias{af}
\title{Internal constructor for annotation functions}
\usage{
af(call, env = parent.frame())
}
\arguments{
\item{env}{}
}
\value{
}
\description{
Internal constructor for annotation functions
}
|
4728f16f90d09997a3b9f185e688d52a14ad7534 | e0296e1c892508c0a116300b2c0d8bb0a01c7677 | /MedMast/R/my.forest.R | b2ee1d9a58107ce8edbf481dac0c39761dfc998b | [] | no_license | gullo-p/Kaggle_Competition | b172c77cd3cde86469fd55bf4ab089ef092d07a5 | 87dabf51efec17319c0e226fd6291a29656d1dff | refs/heads/master | 2021-01-10T11:03:00.167714 | 2016-03-18T01:15:41 | 2016-03-18T01:15:41 | 50,145,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,166 | r | my.forest.R | # ----------------------------------------------------------------------------------------------------
# Random Forest
# ----------------------------------------------------------------------------------------------------
#' @title Random forest with ranger
#'
#' @description This function uses 'ranger' package to perform a random forest
#' (needed for the rolling windows function).
#'
#' @param x A dataframe containing the features for training.
#' @param y The labels from the training dataset.
#' @param xtest The test dataset for which we want to predict the labels.
#' @param ntree An integer, the number of trees to use.
#' @param mtry An integer, the depth of each tree.
#' @param seed The initial seed, by default is 1234.
#' @return The predicted labels for the test set.
#' @export
#' @import ranger
my.forest <- function(x , y , xtest, ntree = 300, mtry = 4, seed = 1234){
y <- factor(y)
data <- cbind(y = y,x)
rf <- ranger(formula = y ~ . , data = data, num.trees = ntree,
mtry = mtry ,seed = seed, write.forest = TRUE)
pred <- predict(rf , xtest, seed = seed, verbose = FALSE)$predictions
return(pred)
} |
ee5fd5ab4acea9cef267d04fd796e5e505f03c8a | 8c986ec5ac5812df5e3b132cd4519d1b6b13e102 | /labs/Lab11/code/lab11.R | b08135622c0d2925231863f4ecb9cec66cd26cd5 | [] | no_license | Sudarshan-UCB/stat-133-fall2017 | 0c7f343c6f91e8f2205efad17f14d369cf1d66f4 | 393caa3eecb8d02beee1598c4f24595dc23cb48e | refs/heads/master | 2021-01-20T05:20:06.498404 | 2017-11-19T02:41:52 | 2017-11-19T02:41:52 | 101,432,831 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,731 | r | lab11.R | #' ---
#' title: "Lab11"
#' author: Sudarshan Srirangapatanam
#' ---
#'
#' ## Setup
#+ setup
library(knitr)
library(dplyr)
library(ggplot2)
library(readr)
library(shiny)
library(stringr)
library(RgoogleMaps)
library(ggmap)
knitr::opts_chunk$set(fig.path = "../images/")
#' + `knitr` is used for knitting the document as well as other fucntions to cleanup the output
#' + `readr` is used for reading data into R
#' + `dplyr` is used for data wrangling
#' + `ggplot2` is used to generate any necessary plots
#' + `shiny` is used for shiny apps
dat <- read.csv('../data/mobile-food-sf.csv', stringsAsFactors = FALSE)
#' ## Changing Times
#' ### str_sub()
time1 <- '10AM'
str_sub(time1, start = 1, end = 2)
str_sub(time1, start = 3, end = 4)
times <- c('12PM', '10AM', '9AM', '8AM')
# subset time
str_sub(times, 1, nchar(times) - 2)
# subset period
str_sub(times, nchar(times) - 1, nchar(times))
#' ### str_replace()
hours <- as.numeric(str_replace(times, pattern = 'AM|PM', replacement = ''))
periods <- str_sub(times, start = -2)
to24 <- function(x) {
period <- str_sub(x, start = -2)
hour <- as.numeric(str_replace(x, pattern = 'AM|PM', replacement = ''))
result <- NULL
for (i in 1:length(x)) {
if (period[i] == "PM" & hour[i] != 12) {
result[i] <- hour[i] + 12
} else if ((period[i] == "AM" & hour[i] != 12) | (period[i] == "PM" & hour[i] == 12)) {
result[i] <- hour[i]
} else {
result[i] <- 0
}
}
return(result)
}
start24 <- to24(times)
start24
str(dat)
dat <- dat %>% mutate(start = to24(starttime), end = to24(endtime), duration = end - start)
str(dat)
#' ## Latitude and Longitude Coordinates
loc1 <- "(37.7651967350509,-122.416451692902)"
lat_lon <- str_replace_all(loc1, pattern = '\\(|\\)', replacement = '')
str_split(lat_lon, pattern = ',')
#' ### Manipulating more location values
locs <- c(
"(37.7651967350509,-122.416451692902)",
"(37.7907890558203,-122.402273431333)",
"(37.7111991003088,-122.394693339395)",
"(37.7773000262759,-122.394812784799)",
NA
)
lat_lon <- {
x <- str_replace_all(locs, pattern = '\\(|\\)', replacement = '')
str_split(x, pattern = ',', simplify = TRUE)
}
lat <- lat_lon[,1]
lon <- lat_lon[,2]
latitude <- as.numeric(lat)
longitude <- as.numeric(lon)
get_lat <- function(x) {
y <- str_replace_all(x, pattern = '\\(|\\)', replacement = '')
z <- str_split(y, pattern = ',', simplify = TRUE)
return(as.numeric(z[,1]))
}
get_long <- function(x) {
y <- str_replace_all(x, pattern = '\\(|\\)', replacement = '')
z <- str_split(y, pattern = ',', simplify = TRUE)
return(as.numeric(z[,2]))
}
str(dat)
dat <- dat %>% mutate(lat = get_lat(Location), lon = get_long(Location))
str(dat)
#' ## Plotting locations on a map
plot(dat$lon, dat$lat, pch = 19, col = "#77777744")
center <- c(mean(dat$lat, na.rm = TRUE), mean(dat$lon, na.rm = TRUE))
zoom <- min(MaxZoom(range(dat$lat, na.rm = TRUE),
range(dat$lon, na.rm = TRUE)))
map1 <- GetMap(center=center, zoom=zoom, destfile = "../images/san-francisco.png")
PlotOnStaticMap(map1, dat$lat, dat$lon, col = "#ed4964", pch=20)
#' ## ggmap
dat <- na.omit(dat)
sbbox <- make_bbox(lon = dat$lon, lat = dat$lat, f = .1)
sbbox
sf_map <- get_map(location = sbbox, maptype = "terrain", source = "google")
ggmap(sf_map) +
geom_point(data = dat,
mapping = aes(x = lon, y = lat),
color = "red", alpha = 0.2, size = 1)
#' ## Let's look for specific types of food
dat$optionaltext[1:3]
foods <- dat$optionaltext[1:10]
burros <- str_detect(foods, "B|burritos")
burros
foods <- dat$optionaltext
burros <- str_detect(foods, "B|burritos")
tacos <- str_detect(foods, "T|tacos")
quesadillas <- str_detect(foods, "Q|quesadillas")
burritos <- dat[burros,]
lon <- burritos$lon
lat <- burritos$lat
ggmap(sf_map) +
geom_point(burritos, mapping = aes(x = lon, y = lat), col = "blue", alpha = 0.2, size = 1)
#' ## Practice more Regex patterns
animals <- c('dog', 'cat', 'bird', 'dolphin', 'lion',
'zebra', 'tiger', 'wolf', 'whale', 'eagle',
'pig', 'osprey', 'kangaroo', 'koala')
grep('dog', animals)
grep('dog', animals, value = TRUE)
str_detect(animals, 'dog')
str_extract(animals, 'dog')
animals[str_detect(animals, 'dog')]
#' **Your Turn**
display <- function(x) {
animals[str_detect(animals, x)]
}
display("o*")
display("o{0,1}")
display("o{1,}")
display("o{2}")
display("o{1}(?!o|\\b)")
display("[aeiou][aeiou]")
display("[^aeiou]{2}")
display("[^aeiou]{3}")
display("^[a-z]{3}$")
display("^[a-z]{4}$")
#' **File Names**
files <- c('sales1.csv', 'orders.csv', 'sales2.csv',
'sales3.csv', 'europe.csv', 'usa.csv', 'mex.csv',
'CA.csv', 'FL.csv', 'NY.csv', 'TX.csv',
'sales-europe.csv', 'sales-usa.csv', 'sales-mex.csv')
display <- function(x, invert = FALSE) {
if (invert) {
result <- files[!str_detect(files, x)]
} else {
result <- files[str_detect(files, x)]
}
return(result)
}
display("\\d")
display("\\D")
display("^(?![A-Z])")
display("[A-Z]")
display("-")
display("[\\-]", TRUE)
str_replace_all(files, "\\.csv", "\\.txt")
str_split(files, "\\.", simplify = TRUE)[,1]
#' String handling functions
split_chars <- function(x) {
str_split(x, "")[[1]]
}
split_chars('Go Bears!')
split_chars('Expecto Patronum')
reverse_chars <- function(x) {
split <- split_chars(x)
rev <- NULL
for (i in 1:length(split)) {
rev[i] <- split[length(split) - i + 1]
}
return(paste0(rev, collapse = ""))
}
reverse_chars("gattaca")
reverse_chars("Lumox Maxima")
#/*
rmarkdown::render("./lab11.R", output_format = "github_document", output_options = list(toc = TRUE))
file.rename("./lab11.Rmd", "../report/lab11.Rmd")
file.rename("./lab11.md", "../report/lab11.md")
file.rename("./lab11.html", "../html/lab11.html")
#*/
|
8aec7c6e3bed3c0739c848d613b355ff69aabc32 | a9d6df331c57214171279e1d6c23a22b1ab21414 | /R/rtPCRlist.R | af25bc3d19d6cac7933b83b1a71e3f670fa46aad | [] | no_license | pauloref/rtPCR | 53cfb5b5a79fe9e5fe623c602dfb265f88ba0bbe | 73d4623487f65e047e7c35c251840ca93f0677e7 | refs/heads/master | 2020-04-26T04:14:53.217600 | 2019-03-06T11:20:47 | 2019-03-06T11:20:47 | 173,295,364 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,903 | r | rtPCRlist.R |
#The current file describes the rtPCRlist class type
#It is a class that contains a list of rtPCR that can be groupped togethere
#The file contains: The class deffinition, the initializer, a constructor to be used by the user, and
#a function to access the elements inside
#We declare the class rtPCRlist
#' @import methods
#' @import ggplot2 reshape2 functional plyr tools
#' @export
setClass("rtPCRlist",representation(
List="list",
groups="data.frame",
N="numeric"
))
#A simple initializer that will be used by other functions but should never be called upon
#by users
setMethod("initialize",signature="rtPCRlist",definition=function(.Object,...,List=list(),groups=data.frame()){
.Object@List=List
.Object@N=length(List)
.Object@groups=groups
return(.Object)
})
#A simple validity check
setValidity("rtPCRlist",function(object){
if(length(object@groups)!=length(object@List)){stop("The group list is of different size as the list of rtPCR")
}else if(!all(lapply(object@List,FUN=function(x)(class(x)[1])) =="rtPCR" )){
stop("All the elements of the List are not of type rtPCR")
}else{
return(TRUE)
}
})
#The user-friendly constructor to be used. It takes as input a data-frame containing a series of rtPCR
#Signal. The signal are arranged on one of the dimmentions. If they are the rows, then dimm=1. If they are
#on the columns then dimm=2. The default is the collumn
#'@export
rtPCRlist<-function(Data=data.frame(),Groups=data.frame(),dimm=2){
if(is.data.frame(Data)){
names<-colnames(Data)
if(length(Groups)==0){groups<-data.frame(well=names,group=1:length(names),row.names=names)
}else{
groups<-Groups
colnames(groups)<-c("well","group")
rownames(groups)<-names
}
List<-apply(Data,MARGIN=dimm,function(x){rtPCR(x)})
}else if(is.list(Data)){
List<-Data
names<-names(Data)
if(length(Groups)==0){groups<-data.frame(well=names,group=as.factor(1:length(names)),row.names=names)
}else{
groups<-Groups
colnames(groups)<-c("well","group")
rownames(groups)<-names
groups$group<-as.factor(groups$group)
}
}
return(new(Class="rtPCRlist",List=List, groups=groups))
}
#we now define a class action acessor, that can access the different elements of the list
#'@export
setMethod(f="[",signature="rtPCRlist",definition=function(x,i,j = 0,...,drop){
#if the fisrt argument has length 1, then we do one of the following:
if(length(i) ==0 ){return(NULL)}
if(missing(j)){j<-0}
if(length(i)==1){
if(i=="Names"| i=="Wells"){
return(names(x@List))
}else if(i=="Groups" | i=="groups"){
return(x@groups)
}else if(i=="GroupList" | i=="GL"){
return(as.character(unique(x@groups$group)))
}else if(i %in% x@groups$group){
selected<-x@groups$well[which(x@groups$group == i)]
return(new("rtPCRlist",List=x@List[selected],
groups=data.frame(well=selected,group=rep(i,length(selected)),row.names=selected)))
}else if( all(i %in% x@groups$well) ){
#return( new("rtPCRlist",Data=x@List[i],groups=x@groups[i,] ) )
return(x@List[[i]])
}else{
if(j==0|is.null(j)){
return((sapply(x@List,simplify=TRUE,function(a){a[i]})))
}else if(j %in% x@groups$group){
return(as.numeric(sapply(x@List,simplify=TRUE,function(a){a[i]}))[which(x@groups$group==j)])
}else{
return(as.numeric(sapply(x@List,simplify=TRUE,function(a){a[i]})[j]))
}
}
}else if(length(i)>1){
return(new("rtPCRlist",
List=sapply(i,FUN=function(a){x[a]},simplify=FALSE, USE.NAMES=TRUE),
#groups=x@groups[which(x@groups$well %in% i),]))
groups=x@groups[ (which(x@groups$well %in% i | x@groups$group %in% i )),]))
}else{stop("Can't recognise the type of input")}
})
#source('~/Applications/R_scripts/rtPCR/load_functions.R', echo=TRUE)
|
a7c6b9c1771225da8860b6adf530c05c023e3d1a | c5c6e96ffdc9a52cdf7eba542f64fc149a9d7f4b | /R/explore_diagnostics.R | a96b9b99153fc3961ac3bd5565823fedfd6fd4a1 | [
"MIT"
] | permissive | kravitz-eli/prssMixture | e09ac2dc6d3174a445319f5ca8930b9f56039fad | 5b50cc7fb74d9d78a0fbe443a603fa96273e53d7 | refs/heads/master | 2020-12-05T16:16:14.879353 | 2020-02-07T15:05:46 | 2020-02-07T15:05:46 | 232,169,920 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,009 | r | explore_diagnostics.R | # See posterior of mixing proportion
jags_results_prior_p_mix %>%
spread_draws(ind) %>%
summarise(mean(ind), median(ind), min(ind), max(ind))
post_weight = jags_results_prior_p_mix %>%
spread_draws(weight, ind) %>%
pull(weight)
ggplot(data.frame(post_weight), aes(x = post_weight)) +
geom_histogram(aes(y =..density..), colour="black", fill="white")+
geom_density(alpha = 0.40, fill="red") +
theme_minimal()
prior_var = c(1e-1, 1, 1e2, 1e3, 1e4, 1e5, 1e6)
jags_results = vector("list", length(prior_var))
data[["p_mix"]] = NULL
for (i in seq_along(prior_var)) {
data[c("P2.sigma_1", "P2.sigma_2")] = prior_var[[i]]
jags_results[[i]] <- run_jags(
model_file = "normal_mixture_prior_p",
data = data,
track_variable_names = c("theta", "ind", "weight"),
iter = 1e4,
burn = 1e3,
chains = 2,
progress.bar = "text"
)
}
# Get individual posterior distributions ------------------------------------
weight_post = jags_results %>%
map(spread_draws, weight, ind) %>%
map(pull, weight)
ind_post = jags_results %>%
map(spread_draws, weight, ind) %>%
map(pull, ind)
theta_post = jags_results %>%
map(as.matrix) %>%
map(~.x[, c("theta[1]", "theta[2]")])
theta_1_post = theta_post %>%
map(~.x[, "theta[1]"])
theta_2_post = theta_post %>%
map(~.x[, "theta[2]"])
# Get posterior means and medians --------------------------------
weights_post_means = map_dbl(weight_post, mean)
ind_post_mean = map_dbl(ind_post, mean)
theta1_post_mean = map_dbl(theta_1_post, mean)
theta2_post_mean = map_dbl(theta_2_post, mean)
par(mfrow = c(1,2))
plot(
x = log(prior_var, 10),
y = weights_post_means,
type = "b",
ylab = "Posterior Mean",
xlab = "log10 Prior Variance",
main = "Posterior Mixing Success Prop",
lwd = 3
)
plot(
x = log(prior_var, 10),
y = ind_post_mean,
type = "b",
ylab = "Posterior Mean",
xlab = "log10 Prior Variance",
main = "Posterior Mixing Success Prop",
lwd = 3
)
library(coda)
traceplot(jags_results[[5]])
|
cd056507e19ffa7fbf5915f717b08d075bb1c99c | 64d76673a801f3ae5eb7ccc2b666ff3c220f990d | /Assignment_11.2.R | aafe229c9be524ac91576a2510256aefc14eaf75 | [] | no_license | Tejassingh1010/Assignment-11.2 | b07b7971bee4bed0e2cc04ebcae9600b5e05aeb9 | 2c155f85028e0a77cda397dad7dbbe58fd38ab2b | refs/heads/master | 2020-05-24T16:06:26.215199 | 2019-05-19T15:15:30 | 2019-05-19T15:15:30 | 187,348,917 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,331 | r | Assignment_11.2.R |
str(bank.additional.full)
bank<-bank.additional.full
#Ques.1.a. Is there any association between Job and default?
bank$default<-as.numeric(bank$default)
bank$job<-as.numeric(bank$job)
cor.test(bank$job,bank$default)
cor(bank$job,bank$default)
#Ques.1.b.Is there any significant difference in duration of last call between people having housing loan
#or not?
avona<-aov(bank$duration~bank$loan)
summary(avona)
TukeyHSD(avona)
#(all the housing loan is significantly different from the duratiom of call)
#Ques.1.c. Is there any association between consumer price index and job?
cor.test(bank$cons.price.idx,bank$job)
#(there is no association between the consumer price index and job.)
#Ques.1.d.Is the employment variation rate consistent across job types?
chisq.test(bank$job ,bank$emp.var.rate)
bank$emp.var.rate<-as.factor(bank$emp.var.rate)
bank$job<-as.factor(bank$job)
plot(bank$emp.var.rate~bank$job)
#Ques.1.e. Is the employment variation rate same across education?
library(car)
bank$education<-as.numeric(bank$education)
bank$emp.var.rate<-as.numeric(bank$emp.var.rate)
scatterplot(bank$education,bank$emp.var.rate)
#the employment variation rate is not same across education
#Ques.1.f. Which group is more confident?
#did not get the question.sorry sir |
803aa13b47bc0b61175b4bdc4b4f3cbe25aa6f02 | 8c88c662f1999f86ecacc1688f458074eefc7e1f | /R/Regressions3.R | e99b97582b36575a7d19754ec30cdf250af55335 | [] | no_license | cdanko42/Simulations | 40b122bd1e6ffa015528c23d9f560c5c0ac181bf | ace6ae16dcc405e839a4398272aa2b4ef3ad882e | refs/heads/master | 2023-02-03T15:12:42.991972 | 2020-12-26T19:47:26 | 2020-12-26T19:47:26 | 298,607,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,493 | r | Regressions3.R | rm(list = ls())
source("DataGen3.R")
library(lmtest)
library(ivpack)
regressions <- function(datmat, exo=1, instrument=1){
r1 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r2 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r3 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r4 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
results <- matrix(0, nrow=ncol(datmat), ncol= 5)
c1 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c2 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c3 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c4 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
coverage <- matrix(0, nrow=ncol(datmat), ncol= 5)
e5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
endo <- matrix(0, nrow=ncol(datmat), ncol = 1)
test <- function(s){
if (s < .05){
return(1)
}
else {
return(0)
}
}
for (j in 1:ncol(datmat)){
for (i in 1:nrow(datmat)){
dat= unlist(datmat[i,j])
dat = matrix(dat, ncol=5 ,nrow = 1000)
y_values = dat[,1]
ypre = dat[,2]
x <- dat[, 3]
##Obtain IV z (excluded exogenous regressor)
z <- dat[, (4):(3+instrument)]
##Obtain included exogenous regressor
xo <- dat[, (4+instrument):(3+ instrument+exo)]
olspyre <- lm(ypre ~ x + xo)
r1[i, j] <- olspyre$coefficients[2]
cols <- coeftest(olspyre)[2, 2]
cover <- function(estimate, se){
upper <- estimate + 1.96*se
lower <- estimate - 1.96*se
if (.5 > lower & .5 < upper){
return(1)}
else{
return(0)}
}
c1[i, j] <- cover(estimate= r1[i,j], se = cols)
ivpre <- ivreg(ypre~x+ xo, ~z + xo)
r2[i,j] <- ivpre$coefficients[2]
invisible(ivse <- robust.se(ivpre)[2,2])
c2[i, j] <- cover(estimate = r2[i,j], se=ivse)
yvaldata = as.data.frame(cbind(y_values, x, xo))
olsyval <- lm(y_values ~., data=yvaldata)
r3[i, j] <- olsyval$coefficients[2]
invisible(cols3 <- coeftest(olsyval)[2, 2])
c3[i, j] <- cover(estimate = r3[i,j], se=cols3)
dat = as.data.frame(cbind(y_values, x,z,xo))
probyval <- glm(y_values ~., family = binomial(link = "probit"), data = yvaldata)
r4[i, j] <- probyval$coefficients[2]
invisible(seprobit <- coeftest(probyval)[2,2])
c4[i, j] <- cover(estimate = r4[i,j], se=seprobit)
ivyval <- ivreg(y_values~x+ xo, ~z + xo)
r5[i, j] <- ivyval$coefficients[2]
invisible(iv2se <- robust.se(ivyval)[2,2])
c5[i,j] <- cover(estimate = r5[i,j], se=iv2se)
##Endogeneity
firststage <- (lm(x~z+xo))$residuals
secondstep <- lm(y_values~x+xo +firststage)
s <- summary(secondstep)$coefficients[4,4]
e5[i,j] <- test(s=s)
}
results[j, 1] <- mean(abs(r1[, j]-0.5))
results[j, 2] <- mean(abs(r2[, j]-0.5))
results[j, 3] <- mean(abs(r3[, j]-0.5))
results[j, 4] <- mean(abs(r4[, j]-0.5))
results[j, 5] <- mean(abs(r5[, j]-0.5))
coverage[j, 1] <- sum(c1[,j])
coverage[j, 2] <- sum(c2[,j])
coverage[j, 3] <- sum(c3[,j])
coverage[j, 4] <- sum(c4[,j])
coverage[j, 5] <- sum(c5[,j])
endo[j,] = sum(e5[,j])
}
return(list(results =results, coverage=coverage, endo=endo ))
}
sink("NULL")
mad1 <- regressions(datmat=data1)
sink()
mad1$results
mad1$coverage
mad1$endo
setwd("..")
bias <- mad1$results[, 5]
coverage <- mad1$coverage[,5]
endogeneity <- mad1$endo
write.csv(bias, "Data/bias3.csv")
write.csv(coverage, "Data/coverage3.csv")
write.csv(endogeneity, "Data/endo3.csv")
auxbias <- mad1$results[, 1:4]
auxcoverage <- mad1$coverage[,1:4]
write.csv(auxcoverage, "Data/auxbias3.csv")
write.csv(auxbias, "Data/auxcoverage3.csv") |
1f10fe4d58d08172615cf94e84b1c1881bb1ea40 | ec07e170b7d4e575ab28ca77257b0005c4937f0c | /scripts/calculate_equilibrium_state.R | e77c90e67bb2b54c9f26071a3601a3e53d8f826e | [
"CC0-1.0"
] | permissive | KamilSJaron/reproductive_mode_TE_dynamics | 324e359ca932d09eddde38fc3ed3682b2c93a509 | 6187ab8e9aed1f5729efb87225cfb8d0e06e3c08 | refs/heads/master | 2022-03-13T06:17:44.538533 | 2019-10-02T08:08:55 | 2019-10-02T08:08:55 | 112,102,211 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 325 | r | calculate_equilibrium_state.R | getNeq <- function(a, b, u, v){
(sqrt(2 * (a^2 + (u - v))^2) - 2 * a) / (2 * b)
}
get_b <- function(a, u, v, Neq){
(sqrt(2 * (a^2 + (u - v))^2) - 2 * a) / (2 * Neq)
}
get_v <- function(a, b, u, Neq){
u - sqrt((Neq^2 * 32 * b^2 / a^2) - a^2)
}
get_u <- function(a, b, v, Neq){
sqrt((Neq^2 * 32 * b^2 / a^2) - a^2) + v
} |
cdecb2eee7d9ec95de388d77f837ac1c5f044c26 | a5af03d50a368612fe04ad742f975f6d5558bfa9 | /TO_watermain.R | 38621320b756f35534a99d1852711f053c396452 | [] | no_license | gridl/TO_Watermain | 8b8b611e326ba073ce31a87c35eea1db6be7120d | add101933969ee283ff195985089dab3fd719347 | refs/heads/master | 2021-08-18T22:13:21.168445 | 2017-11-24T03:50:28 | 2017-11-24T03:50:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,675 | r | TO_watermain.R | # Mapping Watermain Breaks in City of Toronto ####
# Initialize Session ####
cat("\014")
rm(list=ls())
cat("\014")
Sys.Date()
sessionInfo()
list.of.packages <- c("readr","readxl","ggplot2","dplyr","magrittr",
"viridis","lubridate","grid","gridExtra",
"maps","ggmap","cluster","knitr","dygraphs","xts")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)
getwd()
watermain.files <- list.files("data", pattern = "\\.xlsx$")
# Data ####
wm.df <- read_excel(paste0("data/",watermain.files))
head(wm.df)
names(wm.df) <- c("Date","year","X_coord","Y_coord")
dim(wm.df)
str(wm.df)
wm.df$year_f <- as.factor(wm.df$year)
wm.df$year <- as.integer(wm.df$year)
# Add extra resolution with dates
wm.df$week <- floor_date(wm.df$Date, unit = "week")
wm.df$month <- floor_date(wm.df$Date, unit = "month")
# Save modified data frame ####
#write.table(wm.df,file = "clean_data/TO_watermain_breaks.csv",sep = ",", row.names = FALSE)
# Exploratory Analysis + Cleaning ####
# A handful of records carry implausible coordinates; they are inspected and
# removed below as presumed data-entry errors.
summary(wm.df) #some values for X and Y coords are very high or low? likely errors
wm.df %>% arrange(desc(Y_coord))
wm.df %>% arrange(desc(X_coord)) #error is X_coord = 4845681.6, not a Y_coord either
summary(wm.df$X_coord)
head(sort(wm.df$X_coord,decreasing = T))
wm.df[which(wm.df$X_coord == max(wm.df$X_coord)),]
wm.df <- wm.df[-which(wm.df$X_coord == max(wm.df$X_coord)),] #remove error 2000-01-22
summary(wm.df$Y_coord)
head(sort(wm.df$Y_coord,decreasing = F)) #first three Y_coord are very low, likely errors
sort(wm.df$Y_coord,decreasing = F)[1:3] # Y_coord errors, three
wm.df[which(wm.df$Y_coord %in% sort(wm.df$Y_coord,decreasing = F)[1:3]),]
wm.df <- wm.df[-which(wm.df$Y_coord %in% sort(wm.df$Y_coord,decreasing = F)[1:3]),] #remove these errors
# Rename for left join with weather data
wm.df <- wm.df %>%
  mutate(date = as.Date(Date))
# Visualize Time Data ####
# Number of counts in each month and week ####
month.wm <- wm.df %>% count(month)
week.wm <- wm.df %>% count(week)
year.wm <- wm.df %>% group_by(year) %>% count(year)
# counts by week, keeping the corresponding month and year for grouping/colour
mthwk.wm <- wm.df %>% group_by(year,week,month) %>%
  count(week,month,year) %>% mutate(month_n = month(month, label = T)) %>% mutate(yweek = week(week))
mthwk.wm #counts by week, but corresponding month and year used
# line plot
ggplot(data = week.wm, aes(x=week,y=n)) +
  geom_line()
# line plot with smoother and mean
ggplot(data = mthwk.wm, aes(x=month,y=n)) +
  geom_boxplot(aes(group = month),alpha=0.9) +
  stat_summary(geom="line",fun.y = "mean",color = "red",size=1.5,alpha=0.8) +
  geom_smooth()
# seasonality assessment
ggplot(mthwk.wm, aes(x=month_n,y=n)) +
  geom_boxplot(aes(group=month_n))
# colour by year
ggplot(mthwk.wm, aes(x=yweek,y=n)) +
  geom_line(aes(colour = as.factor(year)), alpha = 0.6, size = 1.5)
#jitter
ggplot(data = mthwk.wm, aes(x = week, y = n)) +
  geom_jitter(width = 0.4, size = 2, alpha = 0.5)
# yearly trend line plot (loess plus straight linear fit)
ggplot(data = year.wm, aes(x = year, y = n)) +
  geom_line() + geom_smooth(method = "loess", se = TRUE) +
  geom_smooth(method = "lm", se = FALSE, colour = "red")
summary(lm(n ~ year, year.wm))
lm(n ~ year, year.wm)$coef[2]
# Interactive htmlwidgets use ####
# build xts time series (weekly and monthly break counts) for dygraphs
week.wm <- as.data.frame(week.wm)
wm.ts <- xts(week.wm$n, order.by=week.wm$week, tz="UTC")
# wm.ts <- cbind(wm.ts, m.mean = rollmeanr(wm.ts,k=12))
names(wm.ts) <- c("Breaks")
month.wm <- as.data.frame(month.wm)
wm.tsm <- xts(month.wm$n, order.by=month.wm$month, tz="UTC")
names(wm.tsm) <- c("Breaks")
str(index(wm.ts))
# add shading? https://stackoverflow.com/questions/30805017/dyshading-r-dygraph
# alternating-year shading bands applied to the dygraphs below
ok_periods <- list(
  list(from = "1990-01-01", to = "1991-01-01"),
  list(from = "1992-01-01", to = "1993-01-01"),
  list(from = "1994-01-01", to = "1995-01-01"),
  list(from = "1996-01-01", to = "1997-01-01"),
  list(from = "1998-01-01", to = "1999-01-01"),
  list(from = "2000-01-01", to = "2001-01-01"),
  list(from = "2002-01-01", to = "2003-01-01"),
  list(from = "2004-01-01", to = "2005-01-01"),
  list(from = "2006-01-01", to = "2007-01-01"),
  list(from = "2008-01-01", to = "2009-01-01"),
  list(from = "2010-01-01", to = "2011-01-01"),
  list(from = "2012-01-01", to = "2013-01-01"),
  list(from = "2014-01-01", to = "2015-01-01"),
  list(from = "2016-01-01", to = "2017-01-01")
)
add_shades <- function(x, periods, ...) {
  # Apply dyShading() to dygraph `x` once per entry in `periods` (a list of
  # lists with $from and $to date strings); extra arguments are forwarded to
  # dyShading(). Returns the shaded dygraph.
  Reduce(
    function(graph, period) dyShading(graph, from = period$from, to = period$to, ...),
    periods,
    x
  )
}
# by week
dygraph(wm.ts, main = "City of Toronto Watermain Breaks by Week") %>%
  dyAxis("y", label = "Watermain Breaks per Week") %>%
  dySeries("Breaks", strokeWidth = 1.75, fillGraph = TRUE, color = "#1B5EA2") %>%
  dyLegend(show = "always", hideOnMouseOut = FALSE) %>%
  dyOptions(includeZero = TRUE, fillAlpha = 0.25) %>%
  dyRangeSelector(dateWindow = c("2012-01-01", "2017-01-01")) %>%
  add_shades(ok_periods, color = "#E3E2E2")
# by month
dygraph(wm.tsm, main = "City of Toronto Watermain Breaks by Month") %>%
  dyAxis("y", label = "Watermain Breaks per Month") %>%
  dySeries("Breaks", strokeWidth = 1.75, fillGraph = TRUE, color = "#1B5EA2") %>%
  dyLegend(show = "always", hideOnMouseOut = FALSE) %>%
  dyOptions(includeZero = TRUE, fillAlpha = 0.25) %>%
  dyRangeSelector(dateWindow = c("2007-01-01", "2017-01-01")) %>%
  add_shades(ok_periods, color = "#E3E2E2")
# Visualize Spatial Data ####
# Plot the spatial data (plot.TO.wm defined in an external helper file)
source('~/GitHub/TO_Watermain/fnc/plot.TO.wm.R')
# plot of only 2016
plot.TO.wm(2016,band=250)
# plot of only 1991
plot.TO.wm(y1=1991,band = 200, file.out = F,
           h = 8.5, w = 11)
# cumulative plot of 2000-2005
plot.TO.wm(2000,2005, face = F, band = 400,
           h = 8.5, w = 11)
# facet plot from 1995 to 1997
plot.TO.wm(1993,1998,ncol=2,face = T, band = 200)
# plot all years with 4 columns, 8*15 inch output
plot.TO.wm(1990,2016,face = T,ncol = 4,band = 200,
           file.out = TRUE, h = 14, w = 9)
# Overlay on Map #
# project UTM coordinates to lon/lat and overlay on a Google roadmap
names(wm.df)
wm.coord <- wm.df %>% select(X_coord, Y_coord) %>% as.data.frame()
library(rgdal)
coordinates(wm.coord) <- ~ X_coord + Y_coord
str(wm.coord)
#http://leware.net/geo/utmgoogleapp.htm
# NOTE(review): the proj4 string below looks malformed ("+proj=utm + +zone=17T"
# has a stray "+" and proj4 zones do not take the letter suffix) -- verify the
# intended CRS is "+proj=utm +zone=17 +datum=NAD27".
wm.coord <- SpatialPoints(wm.coord, proj4string=CRS("+proj=utm + +zone=17T +datum=NAD27"))
wm.latlon <- spTransform(wm.coord, CRS("+proj=longlat +datum=WGS84"))
df.latlon <- as.data.frame(wm.latlon)
sbbox <- make_bbox(lon = df.latlon[,1], lat = df.latlon[,2], f = 0.01)
my_map <- get_map(location = sbbox, maptype = "roadmap",
                  scale = 2, color="bw", zoom = 10)
ggmap(my_map)
|
c6f1a481e257fd489f85044644b0bb3fdd06da29 | 9622cac6de59b903331fdf2a0b0a2d41fed2ade7 | /Bajar_y_subir.R | 679b624bcb321380ebc2b27f4dac2fe433dcb1df | [] | no_license | repositoriosdesefa/SEFA_SEFA_Reporta_Residuos | 24ebd5fb7ace8522c9ae3d804722b7c75fc3bbd5 | 59512c6d52f13f248e6140c3546b4719abeb21d2 | refs/heads/main | 2023-08-17T11:06:22.356585 | 2021-10-06T20:37:47 | 2021-10-06T20:37:47 | 413,791,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,433 | r | Bajar_y_subir.R | ####################################################################-
########## Consultas SQL a bases de Oracle a través de R #########-
############################# By LE ################################-
################ I. Librerías, drivers y directorio ################
# I.1 Librerías
# i) RJDBC
#install.packages("DBI")
library(DBI)
#install.packages("rJava")
library(rJava)
#install.packages("RJDBC")
library(RJDBC)
# ii) Librerias complementarias
#install.packages("googledrive")
library(googledrive)
#install.packages("googlesheets4")
library(googlesheets4)
#install.packages("httpuv")
library(httpuv)
#install.packages("purrr")
library(purrr)
#install.packages("blastula")
library(blastula)
#install.packages("lubridate")
library(lubridate)
#install.packages("stringr")
library(stringr)
# I.2 Drivers
# i) Oracle
# Driver OJDBC
rutaDriver <- ""
oracleDriver <- JDBC("",
classPath=rutaDriver)
#*El driver debe estar descargado y en una ubicación fija
# ii) Google
correo_usuario <- ""
drive_auth(email = correo_usuario)
gs4_auth(token = drive_auth(email = correo_usuario),
email = correo_usuario)
#*El token debe estar almacenado y con los permisos de Google
# I.3 Directorio
# i) Local
directorio <- ""
consulta_dir <- file.path(directorio, "Consultas")
#*Establecer el directorio donde se encuentran las consultas
# ii) Parámetros
base_rr_gs <- ""
hoja_base_rr_gs <- "RR"
consulta_rr <- ""
#-----------------------------------------------------------------
################ II. Establecimiento de conexión ################
# II.1 Credenciales
usuario <- ""
clave <- ""
hostSEFA <- ""
#*Información sensible y privada
# II.2 Conexión
conexionSEFA <- dbConnect(oracleDriver, hostSEFA,
usuario, clave)
#*Se debe contar con credenciales para establecer la conexión
#-----------------------------------------------------------------
############## III. Descarga y carga de información ##############
# III.1 Funciones
# i) Lectura de SQL
getSQL <- function(filepath){
  # Read an SQL file and flatten it into a single query string.
  #
  # Tabs are replaced by spaces, and the first '--' on any line containing a
  # line comment is rewritten as '/*' with ' */' appended, so the comment
  # survives being collapsed onto one line. Lines are joined with single
  # spaces; the result keeps a leading space (matching the original
  # implementation, which pasted onto an empty string).
  #
  # filepath: path to a .sql file. Returns one character string.
  con <- file(filepath, "r")
  on.exit(close(con), add = TRUE)   # close the connection even on error
  # read the whole file at once instead of the old one-line-at-a-time loop
  all.lines <- readLines(con)
  all.lines <- gsub("\\t", " ", all.lines)
  commented <- grepl("--", all.lines)
  all.lines[commented] <- paste(sub("--", "/*", all.lines[commented]), "*/")
  sql.string <- paste(c("", all.lines), collapse = " ")
  return(sql.string)
}
# ii) Download-and-upload function
baja_y_sube <- function(consulta, ID, hoja){
  # Run the SQL query stored in file `consulta` (under the global
  # consulta_dir) against the global Oracle connection `conexionSEFA`,
  # and upload the result to Google Sheet `ID`, tab `hoja`.
  sql.text <- getSQL(file.path(consulta_dir, consulta))
  resultado <- dbGetQuery(conexionSEFA, sql.text)
  write_sheet(resultado, ID, hoja)
  # Alternative: write into a fixed range instead of replacing the whole tab:
  # range_write(ID, data = resultado,
  #             range = paste0("'", hoja, "'!A2"),
  #             col_names = F)
}
# iii) Robust (retrying) download-and-upload function
R_baja_y_sube <- function(consulta, ID, hoja){
  # Robust wrapper around baja_y_sube(): if the first attempt errors
  # (e.g. a transient network/API failure), retry exactly once and
  # return whatever that attempt produces.
  tryCatch(
    baja_y_sube(consulta, ID, hoja),
    error = function(e) baja_y_sube(consulta, ID, hoja)
  )
}
# III.2 Download and upload the data
R_baja_y_sube(consulta_rr, base_rr_gs, hoja_base_rr_gs)
# III.3 Close the connection
dbDisconnect(conexionSEFA)
|
76313862a77de34186bc7cba0e10d73b8fad8c5b | f511fd964b9478dd184c50124479f4c5592defe6 | /localization.R | 2e8669d52e6c15ad8172fe75e5877c40a29c1270 | [] | no_license | thartbm/exposureconsequences | 08518aa169eaa59259e61a2a7d34cc5f762cb876 | 4f8667f069aacc750fb54a0491db7817f016c12c | refs/heads/master | 2022-02-11T23:23:37.006815 | 2019-07-14T02:59:38 | 2019-07-14T02:59:38 | 115,872,108 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 33,296 | r | localization.R |
source('shared.R')
# PLOT / FIGURE ------
plotLocalization <- function(classicOnline=FALSE, generateSVG=FALSE, selectPerformance=TRUE, remove15=FALSE, thirdPanel='2x2', points=c(15,25,35,45,55,65,75)) {
  
  # Plots localization shifts (rotated minus aligned hand localization) for
  # the exposure and classic groups, split by active vs. passive movements,
  # plus a third panel selected with `thirdPanel`:
  # thirdPanel=
  # 'peakCIs'
  # 'classicOnline'
  # '2x2'
  # Helpers (getPointLocalization, t.interval, colorset, getGaussianFit,
  # parGaussian) come from shared.R.
  # NOTE(review): `remove15` is only forwarded in the 'peakCIs' branch, and
  # that branch re-hardcodes its own `points` vector while still using
  # averages computed with the `points` argument -- passing a non-default
  # `points` may cause length mismatches there; confirm before changing it.
  
  # get the data to plot:
  exp <- getPointLocalization('exposure', difference=TRUE, verbose=FALSE, selectPerformance=selectPerformance, points=points)
  cla <- getPointLocalization('classic', difference=TRUE, verbose=FALSE, selectPerformance=FALSE, points=points)
  
  # get the averages for the line plots:
  exp.avg <- aggregate(taperror_deg ~ passive_b + handangle_deg, data=exp, FUN=mean)
  exp.avg.act <- exp.avg[which(exp.avg$passive_b == 0),]
  exp.avg.pas <- exp.avg[which(exp.avg$passive_b == 1),]
  
  cla.avg <- aggregate(taperror_deg ~ passive_b + handangle_deg, data=cla, FUN=mean)
  cla.avg.act <- cla.avg[which(cla.avg$passive_b == 0),]
  cla.avg.pas <- cla.avg[which(cla.avg$passive_b == 1),]
  
  # get the confidence intervals for polygon areas:
  # (t.interval() from shared.R; presumably 95% CIs -- confirm there)
  exp.act <- exp[which(exp$passive_b == 0),]
  exp.CI.act <- matrix(unlist(by(exp.act$taperror_deg, INDICES=c(exp.act$handangle_deg), FUN=t.interval)),nrow=2)
  exp.pas <- exp[which(exp$passive_b == 1),]
  exp.CI.pas <- matrix(unlist(by(exp.pas$taperror_deg, INDICES=c(exp.pas$handangle_deg), FUN=t.interval)),nrow=2)
  
  cla.act <- cla[which(cla$passive_b == 0),]
  cla.CI.act <- matrix(unlist(by(cla.act$taperror_deg, INDICES=c(cla.act$handangle_deg), FUN=t.interval)),nrow=2)
  cla.pas <- cla[which(cla$passive_b == 1),]
  cla.CI.pas <- matrix(unlist(by(cla.pas$taperror_deg, INDICES=c(cla.pas$handangle_deg), FUN=t.interval)),nrow=2)
  
  if (thirdPanel == 'classicOnline') {
    # same pre-processing for the online classic group
    onl <- getPointLocalization('online', difference=TRUE, verbose=FALSE, selectPerformance=FALSE)
    onl.avg <- aggregate(taperror_deg ~ passive_b + handangle_deg, data=onl, FUN=mean)
    onl.avg.act <- onl.avg[which(onl.avg$passive_b == 0),]
    onl.avg.pas <- onl.avg[which(onl.avg$passive_b == 1),]
    onl.act <- onl[which(onl$passive_b == 0),]
    onl.CI.act <- matrix(unlist(by(onl.act$taperror_deg, INDICES=c(onl.act$handangle_deg), FUN=t.interval)),nrow=2)
    onl.pas <- onl[which(onl$passive_b == 1),]
    onl.CI.pas <- matrix(unlist(by(onl.pas$taperror_deg, INDICES=c(onl.pas$handangle_deg), FUN=t.interval)),nrow=2)
  }
  
  if (generateSVG) {
    # only write an SVG when svglite is actually installed
    installed.list <- rownames(installed.packages())
    if ('svglite' %in% installed.list) {
      library('svglite')
      svglite(file='Fig3.svg', width=7.5, height=3, system_fonts=list(sans='Arial', mono='Times New Roman'))
    } else {
      generateSVG=FALSE
    }
  }
  
  par(mfrow=c(1,3))
  
  #points <- c(15,25,35,45,55,65,75,85)
  
  # panel A: exposure localization (active vs. passive)
  plot(-1000,-1000, main='exposure', xlab='hand angle [°]', ylab='localization shift [°]', xlim=c(min(points)-5,max(points)+5), ylim=c(0,-15), axes=F)
  mtext('A', side=3, outer=TRUE, at=c(0,1), line=-1, adj=0, padj=1)
  
  # CI shading: x runs out and back so the polygon traces upper then lower bound
  X <- c(points, rev(points))
  exp.act.Y <- c(exp.CI.act[1,],rev(exp.CI.act[2,]))
  exp.pas.Y <- c(exp.CI.pas[1,],rev(exp.CI.pas[2,]))
  polygon(X,exp.act.Y,border=NA,col=colorset[['expActT']])
  polygon(X,exp.pas.Y,border=NA,col=colorset[['expPasT']])
  
  # group means; the first segment (up to the 2nd point) is drawn dashed
  lines(points[1:2],exp.avg.act$taperror_deg[1:2],col=colorset[['expActS']],lty=2,lwd=1.5)
  lines(points[2:length(points)],exp.avg.act$taperror_deg[2:length(points)],col=colorset[['expActS']],lty=1,lwd=1.5)
  lines(points[1:2],exp.avg.pas$taperror_deg[1:2],col=colorset[['expPasS']],lty=2,lwd=1.5)
  lines(points[2:length(points)],exp.avg.pas$taperror_deg[2:length(points)],col=colorset[['expPasS']],lty=1,lwd=1.5)
  
  axis(1,at=points)
  axis(2,at=c(0,-5,-10,-15))
  
  legend(10,-15,c('passive','active'),col=c(colorset[['expPasS']],colorset[['expActS']]),lty=c(1,1),lwd=c(1.5,1.5),bty='n')
  
  # panel B: classic localization (active vs. passive)
  plot(-1000,-1000, main='classic', xlab='hand angle [°]', ylab='localization shift [°]', xlim=c(min(points)-5,max(points+5)), ylim=c(0,-15), axes=F)
  mtext('B', side=3, outer=TRUE, at=c(1/3,1), line=-1, adj=0, padj=1)
  
  X <- c(points, rev(points))
  cla.act.Y <- c(cla.CI.act[1,],rev(cla.CI.act[2,]))
  cla.pas.Y <- c(cla.CI.pas[1,],rev(cla.CI.pas[2,]))
  polygon(X,cla.act.Y,border=NA,col=colorset[['claActT']])
  polygon(X,cla.pas.Y,border=NA,col=colorset[['claPasT']])
  
  lines(points[1:2],cla.avg.act$taperror_deg[1:2],col=colorset[['claActS']],lty=2,lwd=1.5)
  lines(points[2:length(points)],cla.avg.act$taperror_deg[2:length(points)],col=colorset[['claActS']],lty=1,lwd=1.5)
  lines(points[1:2],cla.avg.pas$taperror_deg[1:2],col=colorset[['claPasS']],lty=2,lwd=1.5)
  lines(points[2:length(points)],cla.avg.pas$taperror_deg[2:length(points)],col=colorset[['claPasS']],lty=1,lwd=1.5)
  
  axis(1,at=points)
  axis(2,at=c(0,-5,-10,-15))
  
  legend(10,-15,c('passive','active'),col=c(colorset[['claPasS']],colorset[['claActS']]),lty=c(1,1),lwd=c(1.5,1.5),bty='n')
  
  if (thirdPanel == 'classicOnline') {
    
    # panel C (variant): online group, active vs. passive
    plot(-1000,-1000, main='online', xlab='hand angle [°]', ylab='localization shift [°]', xlim=c(10,80), ylim=c(0,-15), axes=F)
    mtext('C', side=3, outer=TRUE, at=c(2/3,1), line=-1, adj=0, padj=1)
    
    X <- c(points, rev(points))
    onl.act.Y <- c(onl.CI.act[1,],rev(onl.CI.act[2,]))
    onl.pas.Y <- c(onl.CI.pas[1,],rev(onl.CI.pas[2,]))
    polygon(X,onl.act.Y,border=NA,col=colorset[['onlActT']])
    polygon(X,onl.pas.Y,border=NA,col=colorset[['onlPasT']])
    
    # NOTE(review): the 2:7 indices assume the default 7-point vector
    lines(points[1:2],onl.avg.act$taperror_deg[1:2],col=colorset[['onlActS']],lty=2,lwd=1.5)
    lines(points[2:7],onl.avg.act$taperror_deg[2:7],col=colorset[['onlActS']],lty=1,lwd=1.5)
    lines(points[1:2],onl.avg.pas$taperror_deg[1:2],col=colorset[['onlPasS']],lty=2,lwd=1.5)
    lines(points[2:7],onl.avg.pas$taperror_deg[2:7],col=colorset[['onlPasS']],lty=1,lwd=1.5)
    
    axis(1,at=points)
    axis(2,at=c(0,-5,-10,-15))
    
    legend(10,-15,c('passive','active'),col=c(colorset[['onlPasS']],colorset[['onlActS']]),lty=c(1,1),lwd=c(1.5,1.5),bty='n')
    
  } else if (thirdPanel == 'peakCIs') {
    
    # panel C (variant): Gaussian generalization-curve fits with bootstrapped
    # confidence intervals for the peak location
    points=c(15,25,35,45,55,65,75)
    
    exp <- getPointLocalization(group='exposure', difference=TRUE, points=points, movementtype='both', LRpart='all', verbose=FALSE, selectPerformance=selectPerformance)
    cla <- getPointLocalization(group='classic', difference=TRUE, points=points, movementtype='both', LRpart='all', verbose=FALSE, selectPerformance=selectPerformance)
    
    exp.act <- exp[which(exp$passive_b == 0 & is.finite(exp$taperror_deg)),]
    cla.act <- cla[which(cla$passive_b == 0 & is.finite(cla$taperror_deg)),]
    
    # cla.avg.act <- aggregate(taperror_deg ~ handangle_deg, data=cla.act, FUN=mean)
    # exp.avg.act <- aggregate(taperror_deg ~ handangle_deg, data=exp.act, FUN=mean)
    
    # if (remove15) {
    #   exp.act <- exp.act[which(exp.act$handangle_deg > 15),]
    #   cla.act <- cla.act[which(cla.act$handangle_deg > 15),]
    # }
    
    # fitting on all data:
    exp.fit <- getGaussianFit(x=exp.act$handangle_deg,exp.act$taperror_deg,mu=50,sigma=10,scale=-75,offset=-4)
    cla.fit <- getGaussianFit(x=cla.act$handangle_deg,cla.act$taperror_deg,mu=50,sigma=10,scale=-75,offset=-4)
    
    # get confidence intervals for the peak of the generalization curve for localization shifts:
    cla.locshift <- getPeakLocConfidenceInterval(group='classic',
                                                 CIs=c(.95),
                                                 movementtype='active',
                                                 LRpart='all',
                                                 selectPerformance=FALSE,
                                                 remove15=remove15)
    exp.locshift <- getPeakLocConfidenceInterval(group='exposure',
                                                 CIs=c(.95),
                                                 movementtype='active',
                                                 LRpart='all',
                                                 selectPerformance=selectPerformance,
                                                 remove15=remove15)
    
    plot(-1000,-1000, main='generalization curves', xlab='hand angle [°]', ylab='localization shift [°]', xlim=c(10,80), ylim=c(0,-15), axes=F)
    mtext('C', side=3, outer=TRUE, at=c(2/3,1), line=-1, adj=0, padj=1)
    
    # plot the data, faintly
    # lines(points[1:2],cla.avg.act$taperror_deg[1:2],col=colorset[['claActT']],lty=2,lwd=1.5)
    # lines(points[2:7],cla.avg.act$taperror_deg[2:7],col=colorset[['claActT']],lty=1,lwd=1.5)
    # lines(points[1:2],exp.avg.act$taperror_deg[1:2],col=colorset[['expActT']],lty=2,lwd=1.5)
    # lines(points[2:7],exp.avg.act$taperror_deg[2:7],col=colorset[['expActT']],lty=1,lwd=1.5)
    lines(points,cla.avg.act$taperror_deg,col=colorset[['claActT']],lty=1,lwd=1.5)
    lines(points,exp.avg.act$taperror_deg,col=colorset[['expActT']],lty=1,lwd=1.5)
    
    # plot fitted Gaussian functions to all data:
    X <- seq(15,75)
    cla.Y.fit <- cla.fit$par['scale']*parGaussian(cla.fit$par,X)
    cla.Y.fit <- cla.Y.fit + cla.fit$par['offset']
    exp.Y.fit <- exp.fit$par['scale']*parGaussian(exp.fit$par,X)
    exp.Y.fit <- exp.Y.fit + exp.fit$par['offset']
    lines(X,cla.Y.fit,col=colorset[['claActS']],lty=1,lw=1.5)
    lines(X,exp.Y.fit,col=colorset[['expActS']],lty=1,lw=1.5)
    
    # shifts are negative, so the curve's peak is its minimum
    cla.idx <- which.min(cla.Y.fit)
    exp.idx <- which.min(exp.Y.fit)
    
    # connect peaks of group fits to CIs:
    arrows(X[cla.idx],cla.Y.fit[cla.idx],X[cla.idx],-2.5,col=colorset[['claActS']],lwd=1.5,length=.05)
    arrows(X[exp.idx],exp.Y.fit[exp.idx],X[exp.idx],-2.5,col=colorset[['expActS']],lwd=1.5,length=.05)
    
    # indicate feedback and hand position during training:
    arrows(45,-2.5,45,-1,col='black',lw=1.5,length=0.05)
    arrows(75,0,75,-1.5,col='black',lw=1.5,length=0.05)
    
    # plot the bootstrap peaks of the generalization functions
    polygon(cla.locshift$value[c(1,3,3,1)],c(0,0,-1,-1),border=NA,col=colorset[['claActT']])
    polygon(exp.locshift$value[c(1,3,3,1)],c(-1.5,-1.5,-2.5,-2.5),border=NA,col=colorset[['expActT']])
    lines(cla.locshift$value[c(2,2)],c(0,-2.5),col=colorset[['claActS']],lty=1,lw=1.5)
    lines(exp.locshift$value[c(2,2)],c(0,-2.5),col=colorset[['expActS']],lty=1,lw=1.5)
    
    # add tick marks:
    axis(1,at=points)
    axis(2,at=c(0,-5,-10,-15))
    
  } else if (thirdPanel == '2x2') {
    
    # panel C (variant): mean shift per participant for the 2x2 design
    # (group X movement type), collapsed over hand angles above 15 degrees
    # get the data again, because we remove the 15 degree point?
    #exp <- getPointLocalization('exposure', difference=TRUE, verbose=FALSE, selectPerformance=selectPerformance)
    #cla <- getPointLocalization('classic', difference=TRUE, verbose=FALSE, selectPerformance=FALSE)
    
    # get the averages for the line plots:
    # NOTE(review): the cla.* subsets below filter on exp$handangle_deg, not
    # cla$handangle_deg -- this looks like a copy-paste bug; confirm both
    # data frames are row-aligned before relying on these panels.
    exp.act <- aggregate(taperror_deg ~ participant, data=exp[which(exp$passive_b == 0 & exp$handangle_deg > 15),], FUN=mean)
    exp.pas <- aggregate(taperror_deg ~ participant, data=exp[which(exp$passive_b == 1 & exp$handangle_deg > 15),], FUN=mean)
    #
    cla.act <- aggregate(taperror_deg ~ participant, data=cla[which(cla$passive_b == 0 & exp$handangle_deg > 15),], FUN=mean)
    cla.pas <- aggregate(taperror_deg ~ participant, data=cla[which(cla$passive_b == 1 & exp$handangle_deg > 15),], FUN=mean)
    
    # get the confidence intervals for polygon areas:
    exp.act.CI <- t.interval(exp.act$taperror_deg)
    exp.pas.CI <- t.interval(exp.pas$taperror_deg)
    cla.act.CI <- t.interval(cla.act$taperror_deg)
    cla.pas.CI <- t.interval(cla.pas$taperror_deg)
    
    plot(-1000,-1000, main='localization shifts', xlab='localization task', ylab='mean localization shift [°]', xlim=c(10,80), ylim=c(0,-15), axes=F)
    mtext('C', side=3, outer=TRUE, at=c(2/3,1), line=-1, adj=0, padj=1)
    
    X <- c(20,70,70,20)
    Yexp <- c(exp.act.CI[1],exp.pas.CI[1],exp.pas.CI[2],exp.act.CI[2])
    Ycla <- c(cla.act.CI[1],cla.pas.CI[1],cla.pas.CI[2],cla.act.CI[2])
    polygon(X,Yexp,border=NA,col=colorset[['expActT']])
    polygon(X,Ycla,border=NA,col=colorset[['claActT']])
    
    lines(c(20,70),c(mean(exp.act$taperror_deg),mean(exp.pas$taperror_deg)),col=colorset[['expActS']],lty=1,lw=1.5)
    lines(c(20,70),c(mean(cla.act$taperror_deg),mean(cla.pas$taperror_deg)),col=colorset[['claActS']],lty=1,lw=1.5)
    
    legend(10,-15,c('exposure','classic'),col=c(colorset[['expActS']],colorset[['claActS']]),lty=c(1,1),lwd=c(1.5,1.5),bty='n')
    
    # add tick marks:
    axis(1,at=c(20,70),labels=c('active','passive'))
    axis(2,at=c(0,-5,-10,-15))
    
  }
  
  if (generateSVG) {
    dev.off()
  }
  
}
plotALignedRotatedLocalization <- function(classicOnline=FALSE, generateSVG=FALSE, selectPerformance=TRUE, remove15=FALSE) {
  # Plots per-participant raw localization errors (aligned and rotated
  # sessions overlaid) for each group and movement type, in 7x3 panel grids.
  #
  # classicOnline:     also plot the 'online' group
  # generateSVG:       write one SVG per group X movement type (needs svglite)
  # selectPerformance: in the exposure group, keep only participants who
  #                    tracked the stimulus adequately (performance > 0.65)
  # remove15:          unused; kept for interface compatibility
  #
  # Fixes vs. the original: dev.off() is now inside the passive loop so each
  # svglite() device is closed before the next one opens (the original closed
  # only once per group, corrupting the 'active' SVGs); the dead `SP` and
  # `lty` assignments were removed; scalar conditions use `&&`.
  
  groups <- c('exposure','classic')
  if (classicOnline) {
    groups <- c(groups,'online')
  }
  
  if (generateSVG) {
    # only generate SVGs when svglite is available
    if ('svglite' %in% rownames(installed.packages())) {
      library('svglite')
    } else {
      generateSVG <- FALSE
    }
  }
  
  par(mfrow=c(7,3),mai=c(.6,.5,.01,.01))
  
  points <- c(15,25,35,45,55,65,75)
  
  for (group in groups) {
    
    df <- load.DownloadDataframe(url=localizationURLs[group],filename=sprintf('localization_%s.csv',group))
    
    # performance-based participant selection only applies to the exposure group
    if (selectPerformance && group == 'exposure') {
      blinks <- load.DownloadDataframe(informationURLs['blinkdetect'],'blinkdetect_exposure.csv')
      OKparticipants <- blinks$participant[which(blinks$rotated_b == 1 & blinks$performance > 0.65)]
      df <- df[which(df$participant %in% OKparticipants),]
    }
    
    participants <- unique(df$participant)
    
    for (passive in c(0,1)) {
      
      if (generateSVG) {
        svglite(file=sprintf('Fig7_%s_%s.svg',group,c('active','passive')[passive+1]), width=8.5, height=11, system_fonts=list(sans='Arial', mono='Times New Roman'))
        par(mfrow=c(7,3),mai=c(.6,.5,.01,.01))
      }
      
      for (participant in participants) {
        
        # one panel per participant; aligned and rotated data overlaid
        plot(-1000,-1000, main='', xlab='hand angle [°]', ylab='localization error [°]', xlim=c(0,90), ylim=c(20,-40), axes=F)
        lines(c(10,80),c(0,0),col='#999999')
        
        for (rotated in c(0,1)) {
          
          subdf <- df[which(df$participant == participant & df$rotated_b == rotated & df$passive_b == passive),]
          color <- colorset[[sprintf('%s%s%s',substr(group,1,3),c('Act','Pas')[passive+1],c('T','S')[rotated+1])]]
          points(subdf$handangle_deg, subdf$taperror_deg,col=color)
          
          # participant has no data in this sub-condition? skip the fit line
          if (nrow(subdf) == 0) {
            next()
          }
          
          locdf <- getLocalizationPoints(subdf, points=points, removeOutliers=TRUE)
          lines(locdf$handangle_deg,locdf$taperror_deg,lty=1,lw=2,col=color)
          
        }
        
        axis(1,at=points)
        axis(2,at=c(10,-10,-30))
        
      }
      
      # close the SVG device for this group X movement type before the next
      # one is opened (this used to sit outside the passive loop)
      if (generateSVG) {
        dev.off()
      }
      
    }
    
  }
  
}
# ANALYSES ------
exposureLocalization <- function(remove15=TRUE, LMEmethod='chi-squared', selectPerformance=TRUE) {
  # LME on absolute localization errors in the exposure group, with session
  # (aligned/rotated), movement type (active/passive) and hand angle as fixed
  # effects and participant as a random effect. Prints the ANOVA table.
  #
  # remove15:          drop the 15-degree hand angle
  # LMEmethod:         'chi-squared' (car::Anova on nlme::lme) or
  #                    'Satterthwaite' (lmerTest anova on lme4::lmer)
  # selectPerformance: forwarded to getPointLocalization()
  
  # sum-to-zero contrasts for type-III tests; restore the saved options even
  # on error (the original `options('contrasts' <- default.contrasts)` only
  # worked by accident, via a stray local assignment)
  default.contrasts <- options('contrasts')
  on.exit(options(default.contrasts), add=TRUE)
  options(contrasts=c('contr.sum','contr.poly'))
  
  exp <- getPointLocalization('exposure', difference=FALSE, verbose=FALSE, selectPerformance=selectPerformance)
  if (remove15) {
    # %in% avoids the empty-which footgun of `exp[-which(...),]`, which would
    # drop ALL rows when no 15-degree rows are present
    exp <- exp[!(exp$handangle_deg %in% 15),]
  }
  exp$participant <- factor(exp$participant)
  exp$rotated_b <- factor(exp$rotated_b)
  exp$passive_b <- factor(exp$passive_b)
  exp$handangle_deg <- factor(exp$handangle_deg)
  
  # pass the data frame explicitly instead of attach()/detach()
  cat('\nLME with session, target and movement type as fixed effects, and participant as random effect:\n\n')
  if (LMEmethod=='chi-squared') {
    print(Anova(lme(taperror_deg ~ rotated_b * passive_b * handangle_deg, random = ~1|participant, data=exp, na.action=na.exclude), type=3))
  }
  if (LMEmethod=='Satterthwaite') {
    # random intercept written with `+` (the original's `- (1|participant)`
    # is treated identically by lme4, but reads as removing the term)
    exp_model_lmer <- lmer(taperror_deg ~ rotated_b * passive_b * handangle_deg + (1|participant), data=exp, na.action=na.exclude)
    print(anova(exp_model_lmer,ddf='Satterthwaite',type=3))
  }
}
exposureLocalizationShift <- function(noHandAngle=FALSE, remove15=TRUE, LMEmethod='chi-squared', selectPerformance=TRUE) {
  # LME on localization SHIFTS (rotated minus aligned) in the exposure group,
  # with movement type (and optionally hand angle) as fixed effects and
  # participant as a random effect. Prints the ANOVA table.
  #
  # noHandAngle:       if TRUE, model movement type only
  # remove15:          drop the 15-degree hand angle
  # LMEmethod:         'chi-squared' (car::Anova on nlme::lme) or
  #                    'Satterthwaite' (lmerTest anova on lme4::lmer)
  # selectPerformance: forwarded to getPointLocalization()
  
  # sum-to-zero contrasts for type-III tests; restore options even on error
  default.contrasts <- options('contrasts')
  on.exit(options(default.contrasts), add=TRUE)
  options(contrasts=c('contr.sum','contr.poly'))
  
  exp <- getPointLocalization('exposure', difference=TRUE, verbose=FALSE, selectPerformance=selectPerformance)
  if (remove15) {
    # %in% avoids the empty-which footgun of `exp[-which(...),]`
    exp <- exp[!(exp$handangle_deg %in% 15),]
  }
  exp$participant <- factor(exp$participant)
  exp$passive_b <- factor(exp$passive_b)
  exp$handangle_deg <- factor(exp$handangle_deg)
  
  # pass the data frame explicitly instead of attach()/detach()
  if (noHandAngle) {
    
    cat('\nLME with movement type as fixed effects - ignoring hand angle, and participant as random effect:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ passive_b, random = ~1|participant, data=exp, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      exp_model_lmer <- lmer(taperror_deg ~ passive_b + (1|participant), data=exp, na.action=na.exclude)
      print(anova(exp_model_lmer,ddf='Satterthwaite',type=3))
    }
    
  } else {
    
    cat('\nLME with hand angle and movement type as fixed effects, and participant as random effect:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ passive_b * handangle_deg, random = ~1|participant, data=exp, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      exp_model_lmer <- lmer(taperror_deg ~ passive_b * handangle_deg + (1|participant), data=exp, na.action=na.exclude)
      print(anova(exp_model_lmer,ddf='Satterthwaite',type=3))
    }
    
  }
}
groupLocalization <- function(model='full', remove15=TRUE, LMEmethod='chi-squared', selectPerformance=TRUE) {
  # LME comparing localization shifts between the exposure and classic
  # groups. Prints the ANOVA table for one of four model variants.
  #
  # model:  'full'         - group X movement type X hand angle
  #         'restricted'   - group main effect + group:passive_b +
  #                          group:handangle_deg, extra random intercepts
  #         'handangle'    - group X hand angle
  #         'movementtype' - group X movement type
  # remove15, LMEmethod, selectPerformance: as in exposureLocalization()
  
  # sum-to-zero contrasts for type-III tests; restore options even on error
  default.contrasts <- options('contrasts')
  on.exit(options(default.contrasts), add=TRUE)
  options(contrasts=c('contr.sum','contr.poly'))
  
  exp <- getPointLocalization('exposure', difference=TRUE, verbose=FALSE, selectPerformance=selectPerformance)
  cla <- getPointLocalization('classic', difference=TRUE, verbose=FALSE, selectPerformance=selectPerformance)
  loc <- rbind(exp, cla)
  if (remove15) {
    # %in% avoids the empty-which footgun of `loc[-which(...),]`
    loc <- loc[!(loc$handangle_deg %in% 15),]
  }
  loc$group <- factor(loc$group)
  loc$participant <- factor(loc$participant)
  loc$passive_b <- factor(loc$passive_b)
  loc$handangle_deg <- factor(loc$handangle_deg)
  
  # pass the data frame explicitly instead of attach()/detach()
  if (model == 'full') {
    cat('\nLME with group, hand angle and movement type as fixed effects, and participant as random effect:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ group * passive_b * handangle_deg, random = ~1|participant, data=loc, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      loc_model_lmer <- lmer(taperror_deg ~ group * passive_b * handangle_deg + (1|participant), data=loc, na.action=na.exclude)
      print(anova(loc_model_lmer,ddf='Satterthwaite',type=3))
    }
  }
  if (model == 'restricted') {
    cat('\nLME with three terms only, removing some main effects and interactions:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ group + group:passive_b + group:handangle_deg, random = c(~1|passive_b, ~1|handangle_deg, ~1|participant), data=loc, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      loc_model_lmer <- lmer(taperror_deg ~ group + group:passive_b + group:handangle_deg + (1|participant), data=loc, na.action=na.exclude)
      print(anova(loc_model_lmer,ddf='Satterthwaite',type=3))
    }
  }
  if (model == 'handangle') {
    cat('\nLME with group and hand angle as fixed effects, and participant and movement type as random effects:\n\n')
    if (LMEmethod=='chi-squared') {
      # NOTE(review): nesting on taperror_deg (the dependent variable) looks
      # like a typo for passive_b, given the message above -- kept as in the
      # original to avoid changing published results; verify.
      print(Anova(lme(taperror_deg ~ group * handangle_deg, random = ~1|participant/taperror_deg, data=loc, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      loc_model_lmer <- lmer(taperror_deg ~ group * handangle_deg + (1|participant), data=loc, na.action=na.exclude)
      print(anova(loc_model_lmer,ddf='Satterthwaite',type=3))
    }
  }
  if (model == 'movementtype') {
    cat('\nLME with group and movement type as fixed effects and participant and hand angle as random effects:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ group * passive_b, random = ~1|participant/handangle_deg, data=loc, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      loc_model_lmer <- lmer(taperror_deg ~ group * passive_b + (1|participant), data=loc, na.action=na.exclude)
      print(anova(loc_model_lmer,ddf='Satterthwaite',type=3))
    }
  }
}
classicLocalizationShift <- function(factors='both',remove15=TRUE, LMEmethod='chi-squared') {
  # LME on localization shifts in the classic group. Prints the ANOVA table.
  #
  # factors: 'both'         - movement type X hand angle
  #          'movementtype' - movement type only
  #          'handangle'    - hand angle only
  # remove15, LMEmethod: as in exposureLocalization()
  
  # sum-to-zero contrasts for type-III tests; restore options even on error
  default.contrasts <- options('contrasts')
  on.exit(options(default.contrasts), add=TRUE)
  options(contrasts=c('contr.sum','contr.poly'))
  
  cla <- getPointLocalization('classic', difference=TRUE, verbose=FALSE)
  if (remove15) {
    # %in% avoids the empty-which footgun of `cla[-which(...),]`
    cla <- cla[!(cla$handangle_deg %in% 15),]
  }
  cla$participant <- factor(cla$participant)
  cla$passive_b <- factor(cla$passive_b)
  cla$handangle_deg <- factor(cla$handangle_deg)
  
  # pass the data frame explicitly instead of attach()/detach()
  if (factors == 'both') {
    cat('\nLME with hand angle and movement type as fixed effects, and participant as random effect:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ passive_b * handangle_deg, random = ~1|participant, data=cla, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      cla_model_lmer <- lmer(taperror_deg ~ passive_b * handangle_deg + (1|participant), data=cla, na.action=na.exclude)
      print(anova(cla_model_lmer,ddf='Satterthwaite',type=3))
    }
  }
  if (factors == 'movementtype') {
    cat('\nLME with movement type as fixed effects - ignoring hand angle, and participant as random effect:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ passive_b, random = ~1|participant, data=cla, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      cla_model_lmer <- lmer(taperror_deg ~ passive_b + (1|participant), data=cla, na.action=na.exclude)
      print(anova(cla_model_lmer,ddf='Satterthwaite',type=3))
    }
  }
  if (factors == 'handangle') {
    # message fixed: the original printed the 'movementtype' text here even
    # though the model uses hand angle only
    cat('\nLME with hand angle as fixed effect - ignoring movement type, and participant as random effect:\n\n')
    if (LMEmethod=='chi-squared') {
      print(Anova(lme(taperror_deg ~ handangle_deg, random = ~1|participant, data=cla, na.action=na.exclude), type=3))
    }
    if (LMEmethod=='Satterthwaite') {
      cla_model_lmer <- lmer(taperror_deg ~ handangle_deg + (1|participant), data=cla, na.action=na.exclude)
      print(anova(cla_model_lmer,ddf='Satterthwaite',type=3))
    }
  }
}
# boxPlotLocalization <- function() {
#
# # par(mfrow=c(4,1),mar=c(2,3,1,1))
#
# for (group in c('exposure','classic','online')) {
#
# df <- getPointLocalization(group, difference=FALSE, verbose=FALSE)
#
# # print(str(df))
#
# for (rotated in c(0,1)) {
#
# for (passive in c(0,1)) {
#
# # subdf <- df[which(df$rotated_b == rotated & df$passive_b == passive),]
#
# # boxplot(taperror_deg ~ handangle_deg, data=df, axes=F, bty='n')
#
# for (target in c(15,25,35,45,55,65,75)) {
#
# subdf <- df[which(df$rotated_b == rotated & df$passive_b == passive & df$handangle_deg == target),]
#
# ppno <- subdf$participant
# taperror <- subdf$taperror_deg
#
# idx <- which(abs(taperror - mean(taperror)) > (3 * sd(taperror)))
#
# if (length(idx) > 0) {
#
# cat(sprintf('%s %s %s %d deg\n', group, c('aligned','rotated')[rotated+1], c('active','passive')[passive+1], target))
# print(ppno[idx])
#
# }
#
# }
#
# }
#
# }
#
# }
#
# }
getPeakLocConfidenceInterval <- function(group, CIs=c(.95), movementtype='active', LRpart='all', selectPerformance=TRUE, iterations=1000, remove15=FALSE) {
  # Bootstrapped confidence interval(s) for the peak of the localization-shift
  # generalization curve of `group`. Results are cached to a CSV in the
  # working directory; when the cache file exists it is returned as-is and
  # the other arguments are ignored.
  # Returns a data frame with columns 'level' and 'value'.
  
  cachefile <- sprintf('maxima_LOC_%s.csv', group)
  
  # cache hit: reuse the previously bootstrapped values
  if (file.exists(cachefile)) {
    return(read.csv(cachefile, stringsAsFactors=FALSE))
  }
  
  cat(sprintf('\nbootstrapping peak LOC generalization for: %s\n',toupper(group)))
  
  loc <- getPointLocalization(group, difference=TRUE, points=c(15,25,35,45,55,65,75), movementtype=movementtype, LRpart=LRpart, verbose=FALSE, selectPerformance=selectPerformance)
  if (remove15) {
    loc <- loc[which(loc$handangle_deg > 15),]
  }
  
  # participants X hand angles matrix of (sign-flipped) shifts
  shifts <- -1 * xtabs(taperror_deg ~ participant + handangle_deg, loc)
  peaks <- bootstrapGaussianPeak(data=shifts, bootstraps=iterations, mu=47.5, sigma=15, scale=10, offset=4, CIs=CIs)
  
  df <- data.frame('level'=names(peaks), 'value'=peaks)
  write.csv(df, cachefile, row.names=FALSE, quote=FALSE)
  
  return(df)
}
# DATA DESCRIPTIVES ------
countLocNAs <- function(group='exposure', output='count', selectPerformance=TRUE) {
  # Quantify, per hand angle, how complete the localization estimates are:
  #   output='count'      -> number of participants with an estimate in ALL 4 tasks
  #   output='percentage' -> mean fraction of the 4 tasks that have an estimate
  # BUGFIX: the previous default `selectPerformance=selectPerformance` was
  # self-referential and raised a "promise already under evaluation" error
  # whenever the argument was omitted; TRUE matches the call sites elsewhere.
  loc <- getPointLocalization(group, difference=FALSE, verbose=FALSE, selectPerformance=selectPerformance)
  # Keep only finite (usable) estimates.
  loc <- loc[is.finite(loc$taperror_deg),]
  # One row per participant x hand angle combination.
  df <- expand.grid(unique(loc$participant), unique(loc$handangle_deg))
  names(df) <- c('participant', 'handangle_deg')
  df$count <- 0
  # Count how many of the 4 tasks have a finite estimate per combination.
  for (rown in seq_len(nrow(df))) {
    pp <- df[rown, 'participant']
    angle <- df[rown, 'handangle_deg']
    subexp <- loc[which(loc$participant == pp & loc$handangle_deg == angle),]
    df$count[rown] <- nrow(subexp)
  }
  if (output == 'count') {
    # Count of participants with estimates in ALL 4 tasks.
    df$count[which(df$count < 4)] <- 0
    df$count[which(df$count > 0)] <- 1
    df <- aggregate(count ~ handangle_deg, data=df, FUN=sum)
  }
  if (output == 'percentage') {
    # Percentage of existing estimates across the 4 tasks.
    df$count <- df$count/4
    df <- aggregate(count ~ handangle_deg, data=df, FUN=mean)
  }
  return(df)
}
getLocCountTable <- function(output='count', selectPerformance=TRUE) {
  # Combine the per-group localization completeness tables (see countLocNAs)
  # into a single data frame: one handangle_deg column plus one column per group.
  # BUGFIX: the previous default `selectPerformance=selectPerformance` was
  # self-referential and errored when the argument was omitted; TRUE matches
  # the defaults used by the other descriptives functions.
  groups <- c('exposure','classic','online')
  for (group in groups) {
    counts <- countLocNAs(group=group, output=output, selectPerformance=selectPerformance)
    if (group == groups[1]) {
      # The first group initializes the table (keeps the handangle_deg column).
      df <- counts
      names(df)[2] <- group
    } else {
      df[,group] <- counts$count
    }
  }
  return(df)
}
countSelectedLocalizations <- function(group, ignoreRepetitions=FALSE, selectPerformance=TRUE) {
  # Count, per participant / session / movement type / repetition, how many
  # localization trials survived trial selection for the given group.
  # NOTE(review): `ignoreRepetitions` is accepted but never used in this body.
  df <- load.DownloadDataframe(url=localizationURLs[group],filename=sprintf('localization_%s.csv',group))
  if (selectPerformance & group=='exposure') {
    # Keep only exposure participants whose eye-tracking performance in the
    # rotated session exceeded 0.65.
    blinks <- load.DownloadDataframe(informationURLs['blinkdetect'],'blinkdetect_exposure.csv')
    OKparticipants <- blinks$participant[which(blinks$rotated_b == 1 & blinks$performance > 0.65)]
    df <- df[which(df$participant %in% OKparticipants),]
  }
  # Accumulators: one entry per (participant, session, movement, repetition).
  participant <- c()
  rotated <- c()
  passive <- c()
  repetition <- c()
  trials <- c()
  mintrials <- 25
  participants <- unique(df$participant)
  for (ppid in participants) {
    ppdf <- df[which(df$participant == ppid),]
    for (session in c(0,1)) {            # 0 = aligned, 1 = rotated
      for (movtype in c(0,1)) {          # 0 = active, 1 = passive
        subdf <- ppdf[which(ppdf$rotated_b == session & ppdf$passive_b == movtype),]
        iters <- unique(subdf$iteration)
        for (iterno in c(1:length(iters))) {
          iter <- iters[iterno]
          iterdf <- subdf[which(subdf$iteration == iter),]
          Ntrials <- dim(iterdf)[1]
          # Track the smallest number of selected trials seen in any cell.
          if (Ntrials < mintrials) {
            mintrials <- Ntrials
          }
          participant <- c(participant, ppid)
          rotated <- c(rotated, session)
          passive <- c(passive, movtype)
          repetition <- c(repetition, iter)
          # NOTE(review): /.25 multiplies the count by 4 — presumably converts
          # the count into a percentage of 25 expected trials; confirm.
          trials <- c(trials, (Ntrials/.25))
        }
      }
    }
  }
  #cat(sprintf('\nminimum trials selected: %d\n\n',mintrials))
  return(data.frame(participant, rotated, passive, repetition, trials))
}
countBinnedLocalizations <- function(group, ignoreRepetitions=FALSE, selectPerformance=TRUE) {
  # Count, per participant / session / movement type / repetition, how many
  # localization trials fall inside each 10-degree hand-angle bin
  # (centres 15..75, half-width 5, exclusive bounds).
  # NOTE(review): `ignoreRepetitions` is accepted but never used, and
  # `mintrials` is initialized but its update is commented out below.
  df <- load.DownloadDataframe(url=localizationURLs[group],filename=sprintf('localization_%s.csv',group))
  if (selectPerformance & group=='exposure') {
    # Keep only exposure participants whose eye-tracking performance in the
    # rotated session exceeded 0.65.
    blinks <- load.DownloadDataframe(informationURLs['blinkdetect'],'blinkdetect_exposure.csv')
    OKparticipants <- blinks$participant[which(blinks$rotated_b == 1 & blinks$performance > 0.65)]
    df <- df[which(df$participant %in% OKparticipants),]
  }
  # Accumulators: one entry per (participant, session, movement, repetition, bin).
  participant <- c()
  rotated <- c()
  passive <- c()
  repetition <- c()
  bin <- c()
  trials <- c()
  bincentres <- c(15,25,35,45,55,65,75)
  binspan <- 5
  mintrials <- 25
  participants <- unique(df$participant)
  for (ppid in participants) {
    ppdf <- df[which(df$participant == ppid),]
    for (session in c(0,1)) {            # 0 = aligned, 1 = rotated
      for (movtype in c(0,1)) {          # 0 = active, 1 = passive
        subdf <- ppdf[which(ppdf$rotated_b == session & ppdf$passive_b == movtype),]
        iters <- unique(subdf$iteration)
        for (iterno in c(1:length(iters))) {
          iter <- iters[iterno]
          iterdf <- subdf[which(subdf$iteration == iter),]
          for (bincentre in bincentres) {
            # Trials strictly inside (centre - 5, centre + 5).
            bindf <- iterdf[which(iterdf$handangle_deg > (bincentre-binspan) & iterdf$handangle_deg < (bincentre+binspan)),]
            Ntrials <- dim(bindf)[1]
            # if (Ntrials < mintrials) {
            # mintrials <- Ntrials
            # }
            participant <- c(participant, ppid)
            rotated <- c(rotated, session)
            passive <- c(passive, movtype)
            repetition <- c(repetition, iter)
            bin <- c(bin, bincentre)
            trials <- c(trials, (Ntrials/.25)) # why is this divided by .25? (multiplied by 4 essentially)
          }
        }
      }
    }
  }
  #cat(sprintf('\nminimum trials selected: %d\n\n',mintrials))
  return(data.frame(participant, rotated, passive, repetition, bin, trials))
}
plotLocalizationBinCounts <- function() {
  # Plot, for the exposure and classic groups, the total number of
  # localization trials per hand-angle bin, split by session (line style)
  # and movement type (color). Produces a 1x2 panel figure.
  # NOTE(review): relies on a global `colorset` lookup defined elsewhere
  # in the project.
  par(mfrow=c(1,2))
  for (group in c('exposure','classic')) {
    df <- countBinnedLocalizations(group, ignoreRepetitions=FALSE, selectPerformance=TRUE)
    # Group-specific y range.
    ylim <- list('exposure'=c(0,700), 'classic'=c(0,350))[[group]]
    # Empty canvas; condition lines are added below.
    plot(-1000,-1000,main=group,xlim=c(5,85),ylim=ylim,ylab='number of trials',xlab='hand angle bin centre [deg]',ax=F,bty='n')
    colors <- c()
    linestyles <- c()
    labels <- c()
    for (session in c('aligned','rotated')) {
      for (movement in c('active','passive')) {
        rotated <- list('aligned'=0, 'rotated'=1)[[session]]
        passive <- list('active'=0, 'passive'=1)[[movement]]
        # Total trial count per bin for this session x movement combination.
        binCounts <- aggregate(trials ~ bin, data=df[which(df$rotated == rotated & df$passive == passive),], FUN=sum)
        # Color key, e.g. "expAct..S" / "claPas..S", built from group + movement.
        col <- colorset[[sprintf('%s%s%sS', substr(group,1,3), toupper(substr(movement,1,1)), substr(movement,2,3))]]
        lst <- list('aligned'=1,'rotated'=2)[[session]]
        lines(binCounts$bin, binCounts$trials, lty=lst, col=col)
        # Remember legend entries in plotting order.
        colors <- c(colors, col)
        linestyles <- c(linestyles, lst)
        labels <- c(labels, sprintf('%s %s',session,movement))
      }
    }
    legend(30,3*(max(ylim)/7),legend=labels,col=colors,lty=linestyles,bty='n')
    axis(1,c(15,25,35,45,55,65,75))
    axis(2,seq(min(ylim),max(ylim),diff(ylim)/7))
  }
}
# PRECISION ------
getLocalizationVariance <- function(group, points=c(15,25,35,45,55,65,75), selectPerformance=TRUE, session='aligned') {
  # Per-participant variance of the (bias-corrected) localization errors,
  # split by movement type (passive_b), for the requested session.
  locdata <- load.DownloadDataframe(url=localizationURLs[group], filename=sprintf('localization_%s.csv', group))

  # For the exposure group, optionally keep only participants whose
  # eye-tracking performance in the rotated session exceeded 0.65.
  if (selectPerformance && group == 'exposure') {
    blinks <- load.DownloadDataframe(informationURLs['blinkdetect'], 'blinkdetect_exposure.csv')
    keep <- blinks$participant[which(blinks$rotated_b == 1 & blinks$performance > 0.65)]
    locdata <- locdata[which(locdata$participant %in% keep),]
  }

  # Subtract the smooth spline fitted on aligned data only from all data.
  locdata <- aspligned(locdata)

  # Restrict to the requested session; any other value keeps both sessions.
  if (session == 'aligned') {
    locdata <- locdata[which(locdata$rotated_b == 0),]
  } else if (session == 'rotated') {
    locdata <- locdata[which(locdata$rotated_b == 1),]
  }

  return(aggregate(taperror_deg ~ participant + passive_b, data=locdata, FUN=var))
}
|
b79dde7eb98f34b80bf9b0878141fabcc8053a65 | 8c7bf42defee862b214d037e5f6629e06f2e9e4e | /R/electromigration.r | b648d1b5fb247bce6132740add764434a982b9d2 | [] | no_license | KDB2/amsReliability | 2c041e82a8214e34d35df43d8e700677395ee0dd | 7ad760b829c7ae14f112044c43173f99a982d4b0 | refs/heads/master | 2021-01-10T16:40:58.158846 | 2017-11-23T08:19:21 | 2017-11-23T08:19:21 | 43,775,918 | 0 | 0 | null | 2015-10-19T12:22:27 | 2015-10-06T20:15:35 | R | UTF-8 | R | false | false | 18,310 | r | electromigration.r | ################################################################################
### ###
### INFORMATIONS ###
### --------------------------------- ###
### ###
### PACKAGE NAME amsReliability ###
### MODULE NAME electromigration.r ###
### VERSION 0.10 ###
### ###
### AUTHOR Emmanuel Chery ###
### MAIL emmanuel.chery@ams.com ###
### DATE 2016/02/24 ###
### PLATFORM Windows 7 & Gnu/Linux 3.16 ###
### R VERSION R 3.1.1 ###
### REQUIRED PACKAGES ggplot2, grid, MASS, nlstools, scales ###
### LICENSE GNU GENERAL PUBLIC LICENSE ###
### Version 3, 29 June 2007 ###
### ###
### ###
### DESCRIPTION ###
### --------------------------------- ###
### ###
### This package is a collection of scripts dedicated to help ###
### the process reliability team of ams AG. It includes tools to ###
### quickly visualize data and extract model parameters in order ###
### to predict device lifetimes. ###
### ###
### This module is dedicated to electromigration experiments. ###
### Extraction of Black's parameters is performed. ###
### ###
### ###
### FUNCTIONS ###
### --------------------------------- ###
### ###
### AddArea Retrieve structure physical dimensions ###
### BlackAnalysis Main function for data analysis ###
### BlackModelization Extraction of Black's parameters ###
### ReadDataAce Read Exportfile and create data table ###
### ###
################################################################################
AddArea <- function(DataTable, DeviceID)
# Retrieve the cross-sectional area of an EM device and add it to DataTable.
# Returns the augmented DataTable on success, and a "try-error" object
# otherwise (callers test the return value with class()).
{
    # Error token returned on any failure path; callers only check its class.
    errorMsg <- try(stop("AddArea failed"), silent=TRUE)
    # Read the list of devices to retrieve the section parameters.
    ListDevice <- try(read.delim("//fsup04/fntquap/Common/Qual/Process_Reliability/Process/amsReliability_R_Package/ListDeviceName.txt"),silent=TRUE)
    # If the file is not present, an error is returned.
    if (class(ListDevice) == "try-error"){
        print("File //fsup04/fntquap/Common/Qual/Process_Reliability/Process/amsReliability_R_Package/ListDeviceName.txt not found.")
        return(errorMsg)
    }
    W <- ListDevice$Width[ListDevice$Device==DeviceID]   # micrometers
    H <- ListDevice$Height[ListDevice$Device==DeviceID]  # micrometers
    Area <- W*H*1E-12 # m^2
    # BUGFIX: test the length FIRST. For an unknown device, Area is
    # numeric(0) and the previous order `is.na(Area) || Area <= 0 || ...`
    # raised a zero-length-condition error before the length check ran.
    if (length(Area) == 0 || is.na(Area) || Area <= 0) {
        print(paste("Structure",DeviceID, "is not present in the list. Please fill the list!"))
        # Force an error in the return for BlackAnalysis.
        return(errorMsg)
    }
    DataTable$Area <- Area
    return(DataTable)
}
ReadDataAce <- function(ListFileName, StructureList=c())
# Read the exportfiles listed in ListFileName and store them in a dataframe.
# All files are read first, and only then is the failure probability computed
# per condition; this allows a condition to be split over different files.
# Data are cleaned to remove bad units.
# Exportfiles from Ace and Mira have different headers, therefore column
# numbers (not names) are used for extraction.
# StructureList: optional vector of structure lengths; when non-empty, only
# units whose Dimension matches one of the listed values are kept.
{
  # ResTable initialisation
  ResTable <- data.frame()
  for (file in ListFileName){
    # Read the file and store it in a temporary dataframe
    TempTable <- read.delim(file)
    # Creation of the new dataframe; columns are addressed by number because
    # Ace and Mira exportfiles use different column names.
    TTF <- TempTable[,3]
    Status <- TempTable[,2]
    Stress <- TempTable[,5]
    Temperature <- TempTable[,8]
    Condition <- paste(TempTable[,5],"mA/",TempTable[,8],"°C",sep="") #paste(TempTable[,"Istress"],"mA/",TempTable[,"Temp"],"°C",sep="")
    Dimension <- TempTable[,6]
    # Creation of a dataframe to store the data
    TempDataFrame <- data.frame(TTF,Status,Condition,Stress,Temperature,Dimension)
    # Force the column names
    names(TempDataFrame) <- c("TTF", "Status", "Conditions", "Stress", "Temperature","Dimension")
    # Store the data in the final table
    ResTable <- rbind(ResTable,TempDataFrame)
  }
  # Security check: we force again the names on ResTable
  names(ResTable) <- c("TTF", "Status", "Conditions", "Stress", "Temperature", "Dimension")
  # Cleaning to remove units where status is not 1 or 0.
  ResTable <- Clean(ResTable)
  # If StructureList is not empty, we select only the structures listed.
  if (length(StructureList) != 0){
    NewResTable <- data.frame()
    for (strucLength in StructureList){
      NewResTable <- rbind(NewResTable,ResTable[ResTable$Dimension==strucLength,])
    }
    ResTable <- NewResTable
  }
  # Handle case where some unfailed samples have a lower TTF than finished ones.
  ResTable <- AdjustCensor(ResTable)
  # List the conditions present in ResTable
  CondList <- levels(factor(ResTable$Conditions))
  # Probability is missing. Let's add it; the final dataframe is ExpDataTable.
  # Initialisation
  ExpDataTable <- data.frame()
  # For each condition found, we calculate the probability of failure.
  # Data are stacked in ExpDataTable. Lognormal scale is used.
  # NOTE(review): `ResTable$Condition` below relies on partial matching of
  # the "Conditions" column name.
  for (cond in CondList){
    TempDataTable <- CreateDataFrame(ResTable$TTF[ResTable$Conditions==cond], ResTable$Status[ResTable$Conditions==cond],
    ResTable$Condition[ResTable$Conditions==cond], ResTable$Stress[ResTable$Conditions==cond], ResTable$Temperature[ResTable$Conditions==cond], Scale="Lognormal",ResTable$Dimension[ResTable$Conditions==cond])
    ExpDataTable <- rbind(ExpDataTable,TempDataTable)
  }
  # We force the new names here as a security check.
  names(ExpDataTable) <- c("TTF", "Status", "Probability", "Conditions", "Stress", "Temperature","Dimension")
  # Order the conditions in numerical/alphabetical order
  ExpDataTable <- ExpDataTable[OrderConditions(ExpDataTable),]
  # Do the same for condition levels
  ExpDataTable$Conditions <- factor(ExpDataTable$Conditions, SortConditions(levels(ExpDataTable$Conditions)))
  return(ExpDataTable)
}
BlackModelization <- function(DataTable, DeviceID)
# Fit Black's equation to the experimental data and build the matching
# theoretical distributions:
#   TTF = A j^(-n) exp(Ea/kT + Scale * Proba)   (Proba in standard deviations)
# Extracts A, n, Ea and the lognormal slope via ModelFit.
# DataTable columns: TTF, Status, Probability, Conditions, Stress,
# Temperature, Dimension, Area. Data have to be cleaned upfront: only
# valid samples (Status == 1) should be given.
{
  # Fit Black's law to the cleaned experimental data.
  blackModel <- ModelFit(DataTable, Law="BlackLaw")

  # Build one theoretical lognormal distribution per experimental condition,
  # using the fitted parameters and the device area.
  conditions <- levels(DataTable$Conditions)
  area <- DataTable$Area[1]
  theoretical <- CreateModelDataTable(blackModel, conditions, area, Law="BlackLaw", Scale="Lognormal")

  # Report parameters, goodness of fit, etc. to the console / fit file.
  FitResultsDisplay(blackModel, DataTable, DeviceID)

  return(theoretical)
}
#' Electromigration data analysis
#'
#' Extract Black's parameters from a set of electromigration experiments.
#' The experimental data as well as the resulting model are displayed and
#' can be saved. Extracted parameters are saved in a fit.txt file.
#'
#' @param ErrorBand displays the confidence intervals if set to TRUE.
#' @param ConfidenceValue percentage used in the confidence interval calculation
#' @param Save saves the chart as .png if set to TRUE.
#'
#' @return None
#'
#' @examples
#' BlackAnalysis()
#' BlackAnalysis(ErrorBand=FALSE)
#' @author Emmanuel Chery, \email{emmanuel.chery@@ams.com}
#' @import ggplot2 MASS scales nlstools tcltk
#' @export
BlackAnalysis <- function(ErrorBand=FALSE, ConfidenceValue=0.95, Save=TRUE)
{
  # Disable warnings for this function; restored at the end.
  oldw <- getOption("warn")
  options(warn = -1)
  #rm(list=ls())
  # ListFiles <- list.files(pattern="*exportfile.txt")
  # Filters for the interactive file selection dialog.
  Filters <- matrix(c("All files", "*", "Text", ".txt", "Export Files", "*exportfile.txt"),3, 2, byrow = TRUE)
  ListFiles <- SelectFilesAdvanced(Filters)
  # Case 1: there are one or several files available.
  if (length(ListFiles) != 0){
    # List of DeviceIDs available in the selected exportfiles; the DeviceID is
    # the second "_"-separated token of each file name.
    DeviceIDList <- levels(sapply(ListFiles,function(x){factor(strsplit(x,split="_")[[1]][2])}))
    for (DeviceID in DeviceIDList){
      # All selected files belonging to this device.
      SubListFiles <- ListFiles[grep(DeviceID,ListFiles)]
      # Import the file(s) and create the dataframes + display data.
      DataTable <- try(ReadDataAce(SubListFiles), silent=TRUE)
      if (class(DataTable) != "try-error"){
        # Reading the file was ok.
        # Try to import the device area; on failure DataTable stays without it.
        if (class(AddArea(DataTable, DeviceID)) != "try-error" ){
          DataTable <- AddArea(DataTable, DeviceID)
        }
        # Modelization, error-band calculation and graph use a clean table where
        # only failed samples are kept. DataTable is kept to be saved in fit.txt.
        CleanExpDataTable <- KeepOnlyFailed(DataTable)
        # Attempt to modelize. On success we plot the model; otherwise we only plot the data.
        ModelDataTable <- try(BlackModelization(CleanExpDataTable, DeviceID),silent=TRUE)
        if (class(ModelDataTable) != "try-error"){
          if (ErrorBand){
            # Confidence bands around the fitted distributions.
            ErrorDataTable <- ErrorEstimation(CleanExpDataTable, ModelDataTable, ConfidenceValue)
          } else {
            ErrorDataTable <- NULL
          }
          CreateGraph(CleanExpDataTable, ModelDataTable, ErrorDataTable, aesVec = c("TTF", "Probability", "Conditions"), title = DeviceID,
          axisTitles = c("Time to Failure (s)","Probability (%)"), scale.x = "Log", scale.y = "Lognormal", save = Save )
          # ExpData are added to the fit.txt file created during modelization.
          SaveData2File(DataTable, "fit.txt")
          # There was an error either with Area or with modelization: fallback mode.
        } else { # If modelization is not a success, we display the data and return the
          # distribution parameters (scale and loc) in the console in case the user needs them.
          ModelDataTable <- FitDistribution(CleanExpDataTable,Scale="Lognormal")
          CreateGraph(CleanExpDataTable, ModelDataTable, aesVec = c("TTF", "Probability", "Conditions"), title = DeviceID,
          axisTitles = c("Time to Failure (s)","Probability (%)"), scale.x = "Log", scale.y = "Lognormal", save = FALSE)
        }
      } else { # Reading the files returned an error.
        print("Error detected in the file(s) you selected. Please check your selection.")
      }
    }
  } else { # Case 2: there are no files available.
    print("You need to create the export files first!")
  }
  # return(DataTable)
  # Warnings are enabled again.
  options(warn = oldw)
}
BlackModelization.me <- function(DataTable, DeviceID)
# Modelize the data using Black equation with a nonlinear MIXED-effects fit
# (lme4::nlmer), conditions as grouping factor.
# Extract the parameters A, n, Ea as well as the lognormal slope:
#   TTF = A j^(-n) exp(Ea/kT + Scale * Proba)   (Proba in standard deviations)
# Data(TTF,Status,Probability,Conditions,Stress,Temperature, Dimension)
#
# NOTE(review): this function appears to be UNFINISHED development code:
#  - `y` is already log10(TTF) but the nlmer formula applies log10 again
#    (log10(log10(TTF))) — almost certainly a bug; confirm intent.
#  - everything after the nlmer fit references `Model`, which is never
#    defined here (the fit is stored in `fit.nlmer`), so execution would
#    stop at `coef(Model)`.
{
  # Read the list of devices to retrieve the section parameters.
  ListDevice <- try(read.delim("//fsup04/fntquap/Common/Qual/Process_Reliability/Process/amsReliability_R_Package/ListDeviceName.txt"),silent=TRUE)
  # If the file is not present, an error is returned.
  if (class(ListDevice) == "try-error"){
    print("File //fsup04/fntquap/Common/Qual/Process_Reliability/Process/amsReliability_R_Package/ListDeviceName.txt not found.")
    return(ListDevice)
  } else {
    W <- ListDevice$Width[ListDevice$Device==DeviceID] # micrometers
    H <- ListDevice$Height[ListDevice$Device==DeviceID] # micrometers
    S <- W*H*1E-12 # m^2
    # If S is a positive number different from 0, we can proceed:
    if (is.na(S) || S<=0 ) {
      print(paste("Structure",DeviceID, "is not present in the list. Please fill the list!"))
      # Force an error in the return for BlackAnalysis.
      ModelDataTable <- data.frame()
      as(ModelDataTable,"try-error")
      return(ModelDataTable)
    } else { # we proceed
      # Physical constants
      k <- 1.38E-23 # Boltzmann
      e <- 1.6E-19 # electron charge
      # Remove the units where status is 0
      CleanDataTable <- DataTable[DataTable$Status==1,]
      # Gradient-enabled model function for nlmer (log10 of Black's law).
      B.f <- deriv(~log(exp(A)*(Stress*1E-3/1)^(-n)*exp((Ea*1.6E-19)/(1.38E-23*(Temperature+273.15))+Scale*Probability))/log(10),namevec = c('A','Ea','n','Scale'), function.arg = c('A','Ea','n','Scale','Stress', 'Temperature','Probability'))
      y = log10(DataTable$TTF)
      # NOTE(review): log10(y) double-logs the TTF — see header note.
      fit.nlmer <- nlmer(log10(y) ~ B.f(A,Ea,n,Scale,Stress, Temperature,Probability) ~ Temperature | Conditions, start=list(nlpars=c(A=30,Ea=0.7,n=2,Scale=0.3)), data=DataTable)
      summary(fit.nlmer)
      # Parameters Extraction
      # NOTE(review): `Model` is undefined from here on — see header note.
      A <- coef(Model)[1]
      n <- coef(Model)[2]
      Ea <-coef(Model)[3]
      Scale <- coef(Model)[4]
      # Residual Sum of Squares
      RSS <- sum(resid(Model)^2)
      # Total Sum of Squares: TSS <- sum((TTF - mean(TTF))^2))
      TSS <- sum(sapply(split(CleanDataTable[,1],CleanDataTable$Conditions),function(x) sum((x-mean(x))^2)))
      Rsq <- 1-RSS/TSS # R-squared measure
      #print(paste("Size on 150 rows:", format(object.size(Model), unit="Mb")))
      # Using the parameters and the conditions, theoretical distributions are created
      ListConditions <- levels(CleanDataTable$Conditions)
      # Initialisation
      ModelDataTable <- data.frame()
      # y axis points are calculated. (limits 0.01% -- 99.99%) Necessary to have nice confidence bands.
      Proba <- seq(qnorm(0.0001),qnorm(0.9999),0.05)
      for (i in seq_along(ListConditions)){
        # Experimental conditions:
        Condition <- ListConditions[i]
        I <- CleanDataTable$Stress[CleanDataTable$Conditions==Condition][1]
        Temp <- CleanDataTable$Temperature[CleanDataTable$Conditions==Condition][1] # °C
        # TTF calculation from Black's law with the fitted parameters.
        TTF <- exp(A)*(I*0.001/S)^(-n)*exp((Ea*e)/(k*(273.15+Temp))+ Proba * Scale)
        # Dataframe creation
        ModelDataTable <- rbind(ModelDataTable, data.frame('TTF'=TTF,'Status'=1,'Probability'=Proba,'Conditions'=Condition,'Stress'=I,'Temperature'=Temp))
      }
      # Drawing of the residual plots
      plot(nlsResiduals(Model))
      # Display of fit results
      print(DeviceID)
      print(summary(Model))
      print(paste("Residual squared sum: ",RSS,sep=""))
      #print(coef(Model))
      #print(sd(resid(Model)))
      # Save in a file
      capture.output(summary(Model),file="fit.txt")
      cat("Residual Squared sum:\t",file="fit.txt",append=TRUE)
      cat(RSS,file="fit.txt",append=TRUE)
      cat("\n \n",file="fit.txt",append=TRUE)
      cat("Experimental Data:",file="fit.txt",append=TRUE)
      cat("\n",file="fit.txt",append=TRUE)
      capture.output(DataTable,file="fit.txt",append=TRUE)
      return(ModelDataTable)
    }
  }
}
AdjustCensor <- function(DataTable)
# Adjust the censoring status of the samples.
# If an unfailed (Status == 0) sample has a lower TTF than some failed
# (Status == 1) samples of the same condition, those longer-lived samples
# are re-marked as censored (Status <- 0).
# DataTable(TTF, Status, Probability, Conditions, Stress, Temperature, Dimension)
# Returns the DataTable with the adjusted Status column.
{
  # List of available conditions
  listConditions <- levels(as.factor(DataTable$Conditions))
  for (cond in listConditions){
    ongoingTTF <- DataTable$TTF[DataTable$Status==0 & DataTable$Conditions == cond]
    # BUGFIX: skip conditions without ongoing samples. min() on an empty
    # vector returns Inf with a warning, for no effect on Status.
    if (length(ongoingTTF) == 0) next
    minTimeOngoingSample <- min(ongoingTTF)
    DataTable$Status[DataTable$Status==1 & DataTable$TTF > minTimeOngoingSample & DataTable$Conditions == cond] <- 0
  }
  return(DataTable)
}
|
2427d406c799132e63a1a524210442d8542cee69 | eab78e955aaee69c14d206c8e5bd76cf246a2372 | /man/jobs.Rd | 7488b74341bb3b888582c92680a0d3ba0fd930d3 | [
"MIT"
] | permissive | rstudio/connectapi | 3ad96af4e7935035658bf09aa8082cae4c68ffb5 | 427ac1fe2eb72d45f6048c376ec94b6c545faf8d | refs/heads/main | 2023-08-08T11:55:20.698746 | 2023-07-10T16:36:27 | 2023-07-10T16:36:27 | 167,249,814 | 34 | 17 | NOASSERTION | 2023-08-03T13:26:37 | 2019-01-23T20:32:57 | R | UTF-8 | R | false | true | 1,221 | rd | jobs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/content.R
\name{get_jobs}
\alias{get_jobs}
\alias{get_job}
\title{Get Jobs}
\usage{
get_jobs(content)
get_job(content, key)
}
\arguments{
\item{content}{A Content object, as returned by \code{content_item()}}
\item{key}{The key for a job}
}
\description{
\lifecycle{experimental} Retrieve details about jobs associated with a \code{content_item}.
"Jobs" in Posit Connect are content executions
}
\seealso{
Other content functions:
\code{\link{acl_add_user}()},
\code{\link{content_delete}()},
\code{\link{content_item}()},
\code{\link{content_title}()},
\code{\link{content_update}()},
\code{\link{create_random_name}()},
\code{\link{dashboard_url_chr}()},
\code{\link{dashboard_url}()},
\code{\link{delete_vanity_url}()},
\code{\link{deploy_repo}()},
\code{\link{get_acl_user}()},
\code{\link{get_bundles}()},
\code{\link{get_environment}()},
\code{\link{get_image}()},
\code{\link{get_vanity_url}()},
\code{\link{git}},
\code{\link{permissions}},
\code{\link{set_image_path}()},
\code{\link{set_run_as}()},
\code{\link{set_vanity_url}()},
\code{\link{swap_vanity_url}()},
\code{\link{verify_content_name}()}
}
\concept{content functions}
|
07e5d9df1e5aff523fcbf2f3005741b5f6156f0f | 8414e92e6f4840b9ac815f2d4386ad4747ffa722 | /R/dp.R | fe7f3fed0917c41b5a08c624a000d77a9f0018c8 | [] | no_license | krlmlr/deparse | 8a7c8c2045e0590df2cd939f0367c832af71bd53 | 86f10431542dc8a74ee1735107dd03518ed25789 | refs/heads/master | 2021-01-17T05:09:09.732938 | 2017-06-22T14:07:50 | 2017-06-22T14:07:50 | 65,830,221 | 10 | 1 | null | 2017-06-22T13:31:14 | 2016-08-16T15:10:18 | R | UTF-8 | R | false | false | 2,152 | r | dp.R | #' A nicer deparse
#' A nicer deparse
#'
#' \code{deparse} is a reimplementation of \code{\link[base]{dput}} and related
#' functions. It tries its best to produce output that is easy to read
#' (for humans), yet produces (almost) identical results to the input
#' (for machines). This function is a generic, so other packages can easily
#' provide implementations for the objects they define.
#'
#' @param x object to deparse
#' @param ... passed to other methods
#'
#' @return A character string of R code representing \code{x}.
#'
#' @import rlang
#' @export
deparse <- function(x, ...) {
  # S3 generic: dispatch on class(x); see deparse.default for the fallback.
  UseMethod("deparse")
}
#' @export
deparse.default <- function(x, ...) {
  # Lists get the dedicated list method; every other object falls back to
  # base::deparse, collapsed into a single string (width cutoff 500,
  # non-syntactic names backticked).
  if (!is.list(x)) {
    return(paste(base::deparse(x, 500L, backtick = TRUE), collapse = ""))
  }
  deparse.list(x, ...)
}
#' @export
deparse.Date <- function(x, ...) {
  # Dates deparse via their formatted text: as.Date("<format(x)>").
  deparse_call("as.Date", format(x))
}
#' @export
deparse.POSIXct <- function(x, ...) {
  # usetz = TRUE keeps the timezone in the formatted string:
  # as.POSIXct("<format(x, usetz = TRUE)>").
  deparse_call("as.POSIXct", format(x, usetz = TRUE))
}
#' @export
deparse.POSIXlt <- function(x, ...) {
  # Same as the POSIXct method, but reconstructing via as.POSIXlt().
  deparse_call("as.POSIXlt", format(x, usetz = TRUE))
}
deparse_call <- function(call, argument) {
  # Render a one-argument call as text: `call(<deparsed argument>)`,
  # e.g. deparse_call("as.Date", "2020-01-01") -> as.Date("2020-01-01").
  sprintf("%s(%s)", call, deparse(argument))
}
#' @export
deparse.function <- function(x, ...) {
  # Functions that live in a package namespace deparse as `pkg::name`;
  # anything else (closures, anonymous functions) uses the next method.
  hit <- find_function_in_namespace(x)
  if (is.null(hit)) {
    NextMethod()
  } else {
    paste0(deparse(as.name(hit$ns)), "::", deparse(as.name(hit$fun)))
  }
}
find_function_in_namespace <- function(fun) {
  # Locate `fun` inside its own namespace and return list(ns=, fun=), or
  # NULL when the function's environment is not a package namespace or no
  # object identical to `fun` is bound there.
  ns_env <- environment(fun)
  if (!isNamespace(ns_env)) {
    return(NULL)
  }
  # Sort bindings by name so that, among aliases, the alphabetically
  # first one wins.
  candidates <- as.list(ns_env)
  candidates <- candidates[order(names(candidates))]
  is_same <- vapply(candidates, function(obj) identical(obj, fun), logical(1L))
  matches <- names(which(is_same))
  if (length(matches) == 0L) {
    return(NULL)
  }
  list(ns = getNamespaceName(ns_env), fun = matches[[1L]])
}
#' @rdname deparse
#'
#' @description
#' The \code{deparsec} function leverages \code{deparse} by creating
#' a \code{call} object which can be evaluated but retains formatting
#' (in the form of a \code{\link[base]{srcref}} attribute).
#' @export
deparsec <- function(x, ...) {
  # Deparse to text, then wrap it in a srcfile so the resulting call
  # carries a srcref preserving the original formatting.
  text <- deparse(x, ...)
  as.srcref_call(srcfilecopy("<deparsec>", text))
}
|
e8cb5fd413d199eed7569babdfa0c14e0525f56d | 40de17508d3206d2bb905a7ed44068c77efc2386 | /wine.R | 2c37ba56653e5ece4de5ebdc69d9c59fd0dc2e5d | [] | no_license | priyankaankireddypalli/PCA | eb84706428c42eced5f9d810370e0f3fb351776e | 82f3f3ef6b51e8b1aafc046a58e2d5013b77fa22 | refs/heads/main | 2023-07-29T09:55:05.423950 | 2021-08-20T04:01:10 | 2021-08-20T04:01:10 | 398,149,410 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,878 | r | wine.R | # Performing PCA on Wine dataset
# PCA preprocessing for the wine dataset: load the data and run a quick EDA.
library(readr)
wine <- read.csv('C:\\Users\\WIN10\\Desktop\\LEARNING\\wine.csv')
View(wine)
# Performing EDA for the dataset
# Checking for NA values
sum(is.na(wine))
# There are no NA values in our dataset
# Plotting a histogram per variable to inspect the skewness
hist(wine$Alcohol,xlab = 'Alcohol',ylab = 'Frequency',main = 'Alcohol vs Frequency', breaks = 20,col = 'blue',border = 'black') # Histogram is approximately symmetric
hist(wine$Malic,xlab = 'Malic',ylab = 'Frequency',main = 'Malic vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is positively skewed
hist(wine$Ash,xlab = 'Ash',ylab = 'Frequency',main = 'Ash vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is approximately symmetric
hist(wine$Alcalinity,xlab = 'Alcalinity',ylab = 'Frequency',main = 'Alcalinity vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is approximately symmetric
hist(wine$Magnesium,xlab = 'Magnesium',ylab = 'Frequency',main = 'Magnesium vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is positively skewed
hist(wine$Phenols,xlab = 'Phenols',ylab = 'Frequency',main = 'Phenols vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is approximately symmetric
hist(wine$Flavanoids,xlab = 'Flavanoids',ylab = 'Frequency',main = 'Flavanoids vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is approximately symmetric
hist(wine$Nonflavanoids,xlab = 'Non Flavanoids',ylab = 'Frequency',main = 'Non Flavanoids vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is positively skewed
hist(wine$Proanthocyanins,xlab = 'Proanthocyanins',ylab = 'Frequency',main = 'Proanthocyanins vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is positively skewed
hist(wine$Color,xlab = 'Color',ylab = 'Frequency',main = 'Color vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is positively skewed
hist(wine$Hue,xlab = 'Hue',ylab = 'Frequency',main = 'Hue vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is negatively skewed
hist(wine$Dilution,xlab = 'Dilution',ylab = 'Frequency',main = 'Dilution vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is negatively skewed
hist(wine$Proline,xlab = 'Proline',ylab = 'Frequency',main = 'Proline vs Frequency',breaks = 20,col = 'blue',border = 'black') # Histogram is positively skewed
# Plotting boxplots to find outliers. For each column with outliers, values
# beyond the 1.5*IQR fences are replaced (winsorized) by the 5th / 95th
# percentile of that column.
alcohol <- boxplot(wine$Alcohol,xlab='Alcohol',ylab='Frequency',main='Alcohol vs Frequency',col = 'black',border = 'blue')
alcohol$out
# There are no outliers in the Alcohol column
Malic <- boxplot(wine$Malic,xlab='Malic',ylab='Frequency',main='Malic vs Frequency',col = 'black',border = 'blue')
Malic$out # There are outliers in the Malic column
# We will remove the outliers by the winsorization method
quant1 <- quantile(wine$Malic,probs = c(0.25,0.75))
quant1
wins1 <- quantile(wine$Malic,probs = c(0.05,0.95))
wins1
# 1.5*IQR fences around the quartiles.
a1 <- 1.5*IQR(wine$Malic)
a1
b1 <- quant1[1] - a1
b1
c1 <- quant1[2] + a1
c1
# Replacing the outliers
wine$Malic[wine$Malic<b1] <- wins1[1]
wine$Malic[wine$Malic>c1] <- wins1[2]
d1 <- boxplot(wine$Malic)
d1$out
# Outliers have been replaced
ash <- boxplot(wine$Ash,xlab='Ash',ylab='Frequency',main='Ash vs Frequency',col = 'black',border = 'blue')
ash$out
# There are outliers in the Ash column
# Therefore we will remove the outliers by winsorization
quant2 <- quantile(wine$Ash,probs = c(0.25,0.75))
quant2
wins2 <- quantile(wine$Ash,probs = c(0.05,0.95))
wins2
a2 <- 1.5*IQR(wine$Ash)
a2
b2 <- quant2[1] - a2
b2
c2 <- quant2[2] + a2
c2
# Replacing the outliers
wine$Ash[wine$Ash<b2] <- wins2[1]
wine$Ash[wine$Ash>c2] <- wins2[2]
d2 <- boxplot(wine$Ash)
d2$out
# Outliers are replaced
alcalinity <- boxplot(wine$Alcalinity,xlab='Alcalinity',ylab='Frequency',main='Alcalinity vs Frequency',col = 'black',border = 'blue')
alcalinity$out
# There are outliers in the Alcalinity column
# Therefore we will replace them by winsorization
quant3 <- quantile(wine$Alcalinity,probs = c(0.25,0.75))
quant3
wins3 <- quantile(wine$Alcalinity,probs = c(0.05,0.95))
wins3
a3 <- 1.5*IQR(wine$Alcalinity)
a3
b3 <- quant3[1] - a3
b3
c3 <- quant3[2] + a3
c3
# Replacing the outliers
wine$Alcalinity[wine$Alcalinity<b3] <- wins3[1]
wine$Alcalinity[wine$Alcalinity>c3] <- wins3[2]
d3 <- boxplot(wine$Alcalinity)
d3$out
# Therefore outliers are replaced
mag <- boxplot(wine$Magnesium,xlab='Magnesium',ylab='Frequency',main='Magnesium vs Frequency',col = 'black',border = 'blue')
mag$out
# Outliers are there in the Magnesium column
# Replacing them by winsorization
quant4 <- quantile(wine$Magnesium,probs = c(0.25,0.75))
quant4
wins4 <- quantile(wine$Magnesium,probs = c(0.05,0.95))
wins4
a4 <- 1.5*IQR(wine$Magnesium)
a4
b4 <- quant4[1] - a4
b4
c4 <- quant4[2] + a4
c4
# Replacing the outliers
wine$Magnesium[wine$Magnesium<b4] <- wins4[1]
wine$Magnesium[wine$Magnesium>c4] <- wins4[2]
d4 <- boxplot(wine$Magnesium)
d4$out
# Outliers are replaced
# Same outlier inspection / winsorization for the remaining columns.
phenols <- boxplot(wine$Phenols,xlab='phenols',ylab='Frequency',main='Phenols vs Frequency',col = 'black',border = 'blue')
phenols$out
# There are no outliers in the Phenols column
flavanoids <- boxplot(wine$Flavanoids,xlab='Flavanoids',ylab='Frequency',main='Flavanoids vs Frequency',col = 'black',border = 'blue')
flavanoids$out
# There are no outliers in the Flavanoids column
nonflava <- boxplot(wine$Nonflavanoids,xlab='Non Flavanoids',ylab='Frequency',main='Non Flavanoids vs Frequency',col = 'black',border = 'blue')
nonflava$out
# There are no outliers in the Non Flavanoids column
proantho <- boxplot(wine$Proanthocyanins,xlab='proanthocyanins',ylab='Frequency',main='Proanthocyanins vs Frequency',col = 'black',border = 'blue')
proantho$out
# There are outliers in the Proanthocyanins column
# Therefore we will do winsorization
quant5 <- quantile(wine$Proanthocyanins,probs = c(0.25,0.75))
quant5
wins5 <- quantile(wine$Proanthocyanins,probs = c(0.05,0.95))
wins5
a5 <- 1.5*IQR(wine$Proanthocyanins)
a5
b5 <- quant5[1] - a5
b5
c5 <- quant5[2] + a5
c5
# Replacing the outliers
wine$Proanthocyanins[wine$Proanthocyanins<b5] <- wins5[1]
wine$Proanthocyanins[wine$Proanthocyanins>c5] <- wins5[2]
d5 <- boxplot(wine$Proanthocyanins)
d5$out
# Outliers are replaced
color <- boxplot(wine$Color,xlab='Color',ylab='Frequency',main='Color vs Frequency',col = 'black',border = 'blue')
color$out
# There are outliers in the Color column
# Therefore we will replace them by the winsorization method
quant6 <- quantile(wine$Color,probs = c(0.25,0.75))
quant6
wins6 <- quantile(wine$Color,probs = c(0.05,0.95))
wins6
a6 <- 1.5*IQR(wine$Color)
a6
b6 <- quant6[1] - a6
b6
c6 <- quant6[2] + a6
c6
# Replacing the outliers
wine$Color[wine$Color<b6] <- wins6[1]
wine$Color[wine$Color>c6] <- wins6[2]
d6 <- boxplot(wine$Color)
d6$out
# Outliers are replaced
hue <- boxplot(wine$Hue,xlab='Hue',ylab='Frequency',main= 'hue vs Frequency',col = 'black',border = 'blue')
hue$out
# There is an outlier in the Hue column
quant7 <- quantile(wine$Hue,probs = c(0.25,0.75))
quant7
wins7 <- quantile(wine$Hue,probs = c(0.05,0.95))
wins7
a7 <- 1.5*IQR(wine$Hue)
a7
b7 <- quant7[1] - a7
b7
c7 <- quant7[2] + a7
c7
# Replacing the outliers
wine$Hue[wine$Hue<b7] <- wins7[1]
wine$Hue[wine$Hue>c7] <- wins7[2]
d7 <- boxplot(wine$Hue)
d7$out
# Outliers are replaced
dil <- boxplot(wine$Dilution,xlab='Dilution',ylab='Frequency',main='Dilution vs Frequency',col = 'black',border = 'blue')
dil$out
# There are no outliers in Dilution column
proline <- boxplot(wine$Proline,xlab='Proline',ylab='Frequency',main='Proline vs Frequency',col = 'black',border = 'blue')
proline$out
# There are no outliers in Proline column
# ---- Normality checks via normal Q-Q plots ----
# Each column is plotted against theoretical normal quantiles (qqnorm) with a
# reference line (qqline); the verdict comments below were read off the plots
# by eye.  Columns judged non-normal are re-checked under log, square-root and
# reciprocal transformations.
# Checking the Normality of the data
qqnorm(wine$Alcohol)
qqline(wine$Alcohol)
# Alcohol data is Normal
qqnorm(wine$Malic)
qqline(wine$Malic)
# Malic data is non normal therefore we apply transformation
qqnorm(log(wine$Malic))
qqline(log(wine$Malic))
qqnorm(sqrt(wine$Malic))
qqline(sqrt(wine$Malic))
qqnorm((1/wine$Malic))
qqline((1/wine$Malic))
# Even after applying the transformation the data is non normal
# (Malic is left untransformed in the data.)
qqnorm(wine$Ash)
qqline(wine$Ash)
# Ash data is normal
qqnorm(wine$Alcalinity)
qqline(wine$Alcalinity)
# The data is normal
qqnorm(wine$Magnesium)
qqline(wine$Magnesium)
# The data is normal
qqnorm(wine$Phenols)
qqline(wine$Phenols)
# The data is normal
qqnorm(wine$Flavanoids)
qqline(wine$Flavanoids)
# The data is normal
qqnorm(wine$Nonflavanoids)
qqline(wine$Nonflavanoids)
# The data is normal
qqnorm(wine$Proanthocyanins)
qqline(wine$Proanthocyanins)
# The data is normal
qqnorm(wine$Color)
qqline(wine$Color)
# The data is normal
qqnorm(wine$Hue)
qqline(wine$Hue)
# The data is normal
qqnorm(wine$Dilution)
qqline(wine$Dilution)
# The data is non normal
qqnorm(log(wine$Dilution))
qqline(log(wine$Dilution))
qqnorm(sqrt(wine$Dilution))
qqline(sqrt(wine$Dilution))
qqnorm((1/wine$Dilution))
qqline((1/wine$Dilution))
# Even after applying the transformations the data is non normal
# (Dilution is left untransformed in the data.)
qqnorm(wine$Proline)
qqline(wine$Proline)
# The data is non normal therefore we apply transformation
qqnorm(log(wine$Proline))
qqline(log(wine$Proline))
qqnorm(sqrt(wine$Proline))
qqline(sqrt(wine$Proline))
qqnorm((1/wine$Proline))
qqline((1/wine$Proline))
# After applying reciprocal the data is normal
# NOTE: wine$Proline is overwritten in place -- all downstream code
# (PCA, clustering, exports) sees 1/Proline, not the raw values.
wine$Proline <- (1/wine$Proline)
# ---- Principal components and hierarchical clustering ----
# Sanity check: PCA requires non-degenerate (non-zero variance) columns.
apply(wine, 2, var)
which(apply(wine, 2, var) == 0)
# We have variance in all columns
# Drop the first column (the class label) before computing components.
wine1 <- wine[-1]
# Principal components on the correlation matrix (columns are on very
# different scales, so cor = TRUE rescales them).
pcaobj <- princomp(wine1, cor = TRUE, scores = TRUE, covmat = NULL)
str(pcaobj)
summary(pcaobj)
loadings(pcaobj)
plot(pcaobj)  # scree plot: importance of the principal components
biplot(pcaobj)
# Cumulative share of variance explained, in percent.
plot(cumsum(pcaobj$sdev * pcaobj$sdev) * 100 / (sum(pcaobj$sdev * pcaobj$sdev)), type = 'b')
pcaobj$scores
s <- pcaobj$scores[, 1:3]  # keep the top 3 component scores
pcafinal <- as.data.frame(cbind(wine[, 1], s))
View(pcafinal)
# Scatter of the first two components
plot(pcafinal$Comp.1, pcafinal$Comp.2)
# Hierarchical clustering on the reduced data.
# NOTE(review): the distance matrix includes the first column of pcafinal
# (the original class label) alongside the scores -- confirm this is intended.
d <- dist(pcafinal, method = 'euclidean')
fit <- hclust(d, method = 'complete')
# Dendrogram with the three-cluster cut highlighted
plot(fit, hang = -1)
groups <- cutree(fit, k = 3)  # making 3 groups
rect.hclust(fit, k = 3, border = 'red')
membership <- as.matrix(groups)
finalhclust <- data.frame(membership, wine1)
# Per-cluster means of all 13 attributes
aggregate(wine1[, 1:13], by = list(finalhclust$membership), FUN = mean)
# Export the data with the clustered group value column in it.
# (Fixed: previously wrote the undefined object `final`.)
write_csv(finalhclust, "hclustwine.csv")
getwd()  # working directory the file was written to
# ---- K-means clustering ----
# Elbow curve: total within-cluster sum of squares for k = 2..8.
twss <- vapply(2:8, function(k) kmeans(pcafinal, centers = k)$tot.withinss,
               numeric(1))
twss
plot(2:8, twss, type = 'b', xlab = 'Number of clusters',
     ylab = 'within sum of squares', main = 'K Means clustering scree plot')
# Final clustering solution with k = 3 (chosen from the elbow curve)
fit <- kmeans(pcafinal, 3)
str(fit)
fit$cluster
kmeansfinal <- data.frame(fit$cluster, wine)
# Per-cluster means of all 13 attributes
aggregate(wine1[, 1:13], by = list(fit$cluster), FUN = mean)
# Export the data with the clustered group value column in it.
# (Fixed: previously wrote the undefined object `final`, and reused the
# hierarchical-clustering file name, which would have clobbered that output.)
write_csv(kmeansfinal, "kmeanswine.csv")
getwd()  # working directory the file was written to
# By both Hierarchical and K-Means clustering we can conclude that:
# The wine belonging to group 1 has high alcohol content, low Alcalinity and color,
# therefore we may consider this group as Premium Quality Wines.
# The wine belonging to group 2 has moderate alcohol content, ash content and color,
# therefore this group may be considered as the White Wine Section.
# The wine belonging to group 3 is less alcoholic with high color and ash,
# therefore this section can be categorised as Low Quality Wines.
|
cf7486fedd19c44cf26331189213080ad3ceabcd | cb66ae3bf5bd2422e70df574340e0d5f5388eb8e | /Unconditional Lorenz Effects.R | 9e919ffe9d88c055c9d7d67e917d548e7c68f393 | [] | no_license | jvoorheis/MSA_Ineq | 779f28947f243495d4c28b6841b56d2c51dc97e6 | 3dbec52e82d0ae86d6d88c6550aadba4b43cb81a | refs/heads/master | 2016-08-02T22:44:29.331869 | 2013-12-28T07:50:20 | 2013-12-28T07:50:20 | 11,228,792 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,199 | r | Unconditional Lorenz Effects.R | library(foreign)
library(sandwich)
library(lmtest)
library(reldist)
library(ineq)
library(ggplot2)
# Source data: cleaned CPS microdata with MSA-level and demographic fields
# (Stata format, read via foreign::read.dta).
CPS.work<-read.dta("/media/john/Shared Linux_Windows Files/MSA Level Inequality/Data/CPS_topcode_MSA_demo_cleaned.dta")
# Two California cross sections (2011 and 2000).
# NOTE(review): the analysis below operates on an object `CA` that is never
# created in this file -- presumably one of these subsets (or their union);
# confirm which cross section is intended before running.
CA2011<-subset(CPS.work, CPS.work$year==2011 & CPS.work$statefip=="California")
CA2000<-subset(CPS.work, CPS.work$year==2000 & CPS.work$statefip=="California")
#' Recentered influence function (RIF) of the generalized Lorenz ordinate.
#'
#' Observations below the p-quantile contribute y - (1 - p) * v_p; the rest
#' contribute p * v_p.  Vectorized with ifelse() instead of a row-wise apply()
#' over a one-column data frame: same values, no spurious row names, and NA
#' inputs propagate as NA instead of raising an error in a scalar `if`.
#'
#' @param y numeric vector (e.g. equivalized income).
#' @param p probability at which the ordinate is evaluated, in (0, 1).
#' @param formula unused by the computation; now defaults to NULL so callers
#'   may omit it (the original signature required it but never read it).
rif_glorenz <- function(y, p, formula = NULL){
  v_p <- quantile(y, probs = p)
  ifelse(y < v_p, y - (1 - p) * v_p, p * v_p)
}
#' Recentered influence function (RIF) of the (relative) Lorenz ordinate at p.
#'
#' Vectorized replacement for the original row-wise apply() over a one-column
#' data frame: same values, no spurious row names.
#'
#' @param y numeric vector (e.g. equivalized income).
#' @param p probability at which the ordinate is evaluated, in (0, 1).
rif_lorenz <- function(y, p){
  v_p <- quantile(y, probs = p)
  # Empirical Lorenz ordinate at p, read off ineq::Lc().
  # NOTE(review): Lc()$L has length(y) + 1 entries (it includes the origin) and
  # as.integer() truncates p * length(y), so this lookup may be off by one --
  # confirm the intended convention.
  T_Lp <- Lc(y)$L[as.integer(p * length(y))]
  mu_F <- mean(y)
  # NOTE(review): the sign of the T_Lp term differs between the two branches in
  # the original code and is preserved here -- verify against the derivation.
  ifelse(y < v_p,
         (y - (1 - p) * v_p) / mu_F + T_Lp * (1 - y / mu_F),
         p * v_p / mu_F - T_Lp * (1 - y / mu_F))
}
# RIF regression of the generalized Lorenz ordinate at p = 0.95 on demographics.
# NOTE(review): `CA` is never defined in this file (only CA2011 / CA2000 above);
# this script will fail as written -- confirm which cross section is intended.
CA$RIF_inc1<-rif_glorenz(CA$cellmean_equivinc, 0.95)
try1 <- lm(RIF_inc1~female+black+asian+other+latino+hs+somecollege+college+grad+union_mem+union_cov+married+divorced, data=CA)
# Heteroskedasticity-robust inference (sandwich/lmtest)
coeftest(try1, vcovHC(try1))
# Same exercise for the (relative) Lorenz ordinate at p = 0.1.
CA$RIF_inc2<-rif_lorenz(CA$cellmean_equivinc, 0.1)
try2 <- lm(RIF_inc2~female+black+asian+other+latino+hs+somecollege+college+grad+union_mem+union_cov+married+divorced, data=CA)
# NOTE(review): `try3` is computed but never used afterwards.
try3<-coeftest(try2, vcovHC(try2))
# NOTE(review): xtable() requires library(xtable), which is not loaded above.
print(xtable(try2), file="Lorenz_reg_10.tex")
# Trace the union-membership Lorenz partial effect (LPE) across the
# distribution: re-run the RIF regression at each probability on the grid.
grid <- c(seq(0.05, 0.95, by = 0.05), 0.99, 0.995)
# Preallocate instead of growing with append() inside the loop.
fitted_vals <- numeric(length(grid))
robust_se <- numeric(length(grid))
for (k in seq_along(grid)) {
  CA$RIF_inc2 <- rif_lorenz(CA$cellmean_equivinc, grid[k])
  try2 <- lm(RIF_inc2~female+black+asian+other+latino+hs+somecollege+college+grad+union_mem+union_cov+married+divorced, data=CA)
  # Compute the robust coefficient table once per iteration (the original
  # recomputed coeftest(vcovHC(...)) twice, once per extracted column).
  # Row 11 is presumably the union_mem coefficient -- confirm against the
  # model's coefficient ordering.
  ct <- coeftest(try2, vcovHC(try2))
  fitted_vals[k] <- ct[11, 1]
  robust_se[k] <- ct[11, 2]
}
data_x <- data.frame(fitted_vals, robust_se)
# Pointwise 95% confidence band
data_x$lower <- data_x$fitted_vals - 1.96*data_x$robust_se
data_x$upper <- data_x$fitted_vals + 1.96*data_x$robust_se
data_x$ord <- grid
png(file="LPE_unions.png", width=900, height=900, units="px", type="cairo-png")
print(ggplot(data_x, aes(grid))+
        geom_line(aes(y=upper, colour="upper CI"))+
        geom_line(aes(y=lower, colour= "lower CI"))+
        geom_line(aes(y=fitted_vals, colour= "LPE, Unions")))
dev.off()
49ada904568cfc7723765e94be8f63014d4d6068 | a60396342c967726b2ce5b87be365cb370d16e38 | /R/continret.R | ee96cb7c836dc34f956f2d73edd6ae5cd13b12b6 | [] | no_license | kashenfelter/dsetsR | 8db72c2921e40f5489a1de8006d55f01b4bae975 | 1daa9626d0e9f5ea224150b17cf7e1ab2a516876 | refs/heads/master | 2021-05-29T18:39:52.121320 | 2015-09-28T22:01:54 | 2015-09-28T22:01:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 686 | r | continret.R | #' Continuously compunded returns for a company in Dhaka Stock Exchange
#'
#' Computes continuously compounded (log) returns from a series of closing
#' prices for a company enlisted and traded in the Dhaka Stock Exchange
#' between 2005 and 2015.
#'
#' @param id2 Numeric vector of closing prices for any company.
#' @param summarize Unused; retained only for backward compatibility of the
#'   signature.
#' @return Numeric vector of length \code{length(id2) - 1} holding the
#'   continuously compounded returns (empty for fewer than two prices).
#' @author Syed M. Fuad
#' @details Each return is \code{log(p_t) - log(p_t-1)}; \code{diff(log(x))}
#'   computes the whole series in one vectorized call, and -- unlike the
#'   previous manual index arithmetic -- returns \code{numeric(0)} for inputs
#'   of length < 2 instead of building a malformed subscript.
#' @seealso \code{diff}, \code{log}
#' @export
continret <- function(id2, summarize = FALSE){
  diff(log(id2))
}
|
eba1c9e3db4a6d568b787f751cb1ad208a0a0905 | 561149f7ef8b323c59aa3fc7b24c287e39770362 | /man/LegoR-package.Rd | d5f8f32bffe8c1ef76b2d97fe7a4f43014f300a6 | [
"MIT"
] | permissive | dA505819/LegoR | dff6a1a03a8a2f04bc68ac4f63ede3fe67648742 | 3abc8d3ed08efeacac22f77190d0dc65f9e50d95 | refs/heads/master | 2021-02-23T18:32:11.852013 | 2019-09-30T02:40:36 | 2019-09-30T02:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 569 | rd | LegoR-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LegoR-package.R
\docType{package}
\name{LegoR-package}
\alias{LegoR}
\alias{LegoR-package}
\title{LegoR: Package to scrape data from shop.lego.com and brickset}
\description{
\if{html}{\figure{logo.png}{options: align='right'}}
Collects data from shop.lego.com and brickset for integration. The inst folder includes a script to automatically update downloaded data.
}
\author{
\strong{Maintainer}: Susan Vanderplas \email{srvanderplas@gmail.com} (0000-0002-3803-0972)
}
\keyword{internal}
|
2b83a6df25991284a8a5c4615a92d52b20676eda | 95e41d3766d0e3acefe5177f985794fcf7a6ed0d | /R/hmi_imp_roundedcont_2017-12-27.R | aceb52a761d52181e929f9004b2664b6c1b732a7 | [] | no_license | matthiasspeidel/hmi | d9717ecb92542269ae261a8a8107fc66760373b8 | 8edeb644283b36fb2e692baed819d488ff841a42 | refs/heads/master | 2021-07-11T00:14:15.489711 | 2020-08-16T18:28:05 | 2020-08-16T18:28:05 | 63,349,571 | 6 | 2 | null | 2020-07-30T18:27:22 | 2016-07-14T15:48:11 | R | UTF-8 | R | false | false | 19,554 | r | hmi_imp_roundedcont_2017-12-27.R | #' The function to impute rounded continuous variables
#'
#' For example the income in surveys is often reported rounded by the respondents.
#' See Drechsler, Kiesl and Speidel (2015) for more details.
#' @param y A vector with the variable to impute.
#' @param X A data.frame with the fixed effects variables.
#' @param rounding_degrees A numeric vector with the presumed rounding degrees.
#' @references Joerg Drechsler, Hans Kiesl, Matthias Speidel (2015):
#' "MI Double Feature: Multiple Imputation to Address Nonresponse and Rounding Errors in Income Questions".
#' Austrian Journal of Statistics Vol. 44, No. 2, http://dx.doi.org/10.17713/ajs.v44i2.77
#' @return A n x 1 data.frame with the original and imputed values.
imp_roundedcont <- function(y, X, rounding_degrees = c(1, 10, 100, 1000)){

  # ----------------------------- preparing the Y data ------------------
  if(is.factor(y)){
    y <- as.interval(y)
  }

  n <- length(y)

  # ----------------------------- preparing the X data ------------------
  # remove excessive variables
  X1 <- cleanup(X)

  # standardize X
  X1_stand <- stand(X1, rounding_degrees = rounding_degrees)

  #The imputation model of missing values is Y ~ X.
  #In order to get a full model matrix, we need two things
  #1. A place holder ph with a precise structure
  #(meaning that ph is not of class interval. Nevertheless the elements in ph
  #can be an aggregate of imprecise observations (e.g. the mean of lower and upper bound))
  #2. The place holder ph must not contain any NAs, NaNs or Infs.
  decomposed_y <- decompose_interval(interval = y)

  #short check for consistency:
  if(any(decomposed_y[, "lower_general"] > decomposed_y[, "upper_general"], na.rm = TRUE)){
    stop("in your interval covariate, some values in the lower bound exceed the upper bound.")
  }

  # classify the data into the three types of observations:
  # 1. precise data (like 3010 or 3017 - in interval notation "3010;3010", "3017;3017")
  # 2. imprecise data (like "3000;3600")
  # 3. missing data (NA - in interval notation "-Inf;Inf")
  #get the indicator of the missing values
  indicator_precise <- !is.na(decomposed_y[, "precise"])
  indicator_imprecise <- !is.na(decomposed_y[, "lower_imprecise"])
  indicator_missing <- is.infinite(decomposed_y[, "lower_general"]) &
    is.infinite(decomposed_y[, "upper_general"])

  #Preparation for standardizing all observations in y, based on the precise values of y
  mean_of_y_precise <- mean(decomposed_y[, "precise"], na.rm = TRUE)
  sd_of_y_precise <- stats::sd(decomposed_y[, "precise"], na.rm = TRUE)

  # We intentionally add + 1 because otherwise with the standardized x,
  # the intercept in the regression y ~ x can be exactly 0

  # standardise all observations
  y_stand <- (y - mean_of_y_precise)/sd_of_y_precise + 1
  # standardise the decomposed y
  decomposed_y_stand <- (decomposed_y - mean_of_y_precise)/sd_of_y_precise + 1

  # run a linear model to get the suitable model.matrix for imputation of the NAs
  # Later, another model is run. In many cases, both models are redundant.
  # But in cases with categorical covariates, X_model_matrix_1 will generate
  # additional covariates compared to X_imp_stand.
  # The names of these variables are then stored in tmp_1.
  # Then in the second model it is checked for unneeded variables
  # (e.g. unneeded categories).
  ph_for_y <- sample_imp(rowMeans(decomposed_y_stand[, 4:5]))[, 1]
  df_for_y_on_x <- data.frame(ph_for_y = ph_for_y)
  # run a linear model to get the suitable model.matrix for imputation of the NAs
  xnames_0 <- paste("X", 1:ncol(X1_stand), sep = "")
  df_for_y_on_x[xnames_0] <- X1_stand
  model_y_on_x <- stats::lm(ph_for_y ~ 0 + . , data = df_for_y_on_x)

  #model matrix
  MM_y_on_x_0 <- stats::model.matrix(model_y_on_x)
  xnames_1 <- paste("X", 1:ncol(MM_y_on_x_0), sep = "")
  df_for_y_on_x <- data.frame(ph_for_y = ph_for_y)
  df_for_y_on_x[, xnames_1] <- MM_y_on_x_0

  # Iteratively drop covariates whose coefficient cannot be estimated
  # (NA coefficient, e.g. collinear or empty categories).  The safetycounter
  # bounds the loop by the number of columns so it cannot run forever.
  safetycounter <- 0
  unneeded <- TRUE
  while(any(unneeded) && safetycounter <= ncol(MM_y_on_x_0)){
    safetycounter <- safetycounter + 1

    # Run another model and...
    reg_1 <- stats::lm(ph_for_y ~ 0 +., data = df_for_y_on_x)
    MM_y_on_x_1 <- stats::model.matrix(reg_1)
    #... remove unneeded variables with an NA coefficient
    unneeded <- is.na(stats::coefficients(reg_1))
    xnames_1 <- colnames(MM_y_on_x_1)[!unneeded]
    df_for_y_on_x <- data.frame(ph_for_y = ph_for_y)
    df_for_y_on_x[, xnames_1] <- MM_y_on_x_1[, !unneeded, drop = FALSE]
  }

  reg_2 <- stats::lm(ph_for_y ~ 0 + ., data = df_for_y_on_x)
  MM_y_on_x_2 <- stats::model.matrix(reg_2)

  # Now check for variables with too much variance
  max.se <- abs(stats::coef(reg_2) * 3)
  coef.std <- sqrt(diag(stats::vcov(reg_2)))

  includes_unimportants <- any(coef.std > max.se)
  safetycounter <- 0
  while(includes_unimportants && safetycounter <= ncol(MM_y_on_x_0)){
    safetycounter <- safetycounter + 1

    xnames_1 <- colnames(MM_y_on_x_2)[coef.std <= max.se]
    df_for_y_on_x <- data.frame(ph_for_y = ph_for_y)
    df_for_y_on_x[, xnames_1] <- MM_y_on_x_2[, xnames_1, drop = FALSE]

    reg_2 <- stats::lm(ph_for_y ~ 0 +., data = df_for_y_on_x)
    # FIX: the model matrix was previously taken from the undefined object
    # `reg_3`, which made this branch error out whenever it was entered.
    MM_y_on_x_2 <- stats::model.matrix(reg_2)
    #check the regression parameters on very high standard errors
    max.se <- abs(stats::coef(reg_2) * 3)
    coef.std <- sqrt(diag(stats::vcov(reg_2)))
    includes_unimportants <- any(coef.std > max.se)
  }

  # --preparing the ml estimation
  # -define rounding intervals
  half_interval_length <- rounding_degrees/2

  # Determine the rounding degrees of the precise observations
  rounding_categories_indicator <- array(dim = c(sum(indicator_precise),
                                                 length(rounding_degrees)))
  for(i in 1:ncol(rounding_categories_indicator)){
    rounding_categories_indicator[, i] <- decomposed_y[indicator_precise, "precise"] %% rounding_degrees[i] == 0
  }
  p <- factor(rowSums(rounding_categories_indicator))

  # Define a matrix for the model p ~ Y + X
  df_for_p_on_y_and_x <- data.frame(ph_for_p = p, df_for_y_on_x[indicator_precise, , drop = FALSE])

  #####maximum likelihood estimation using starting values
  ####estimation of the parameters

  # estimation of the starting values for eta and the thresholds on the x-axis:
  # ordered probit maximum possible rounding on the rounded in income data
  tryCatch(
    {
      #polr throws a warning, if no intercept is included in the model formula
      #(See ?polr)
      #so we add one in the formula and exclude the constant variable in the data.frame
      #beforehand.
      constant_variables <- apply(df_for_p_on_y_and_x, 2, function(x) length(unique(x)) == 1)
      df_for_p_on_y_and_x_2 <- df_for_p_on_y_and_x[, !constant_variables, drop = FALSE]

      if(ncol(df_for_p_on_y_and_x_2) == 1){
        # FIX: the formula previously referenced a nonexistent column "target";
        # the response column is named ph_for_p.
        # NOTE(review): this branch fits with method = "logistic" while the
        # other uses "probit" -- confirm the asymmetry is intended.
        probitstart <- MASS::polr("ph_for_p ~ 0 + .",
                                  data = df_for_p_on_y_and_x,
                                  contrasts = NULL, Hess = TRUE, model = TRUE,
                                  method = "logistic")
      }else{
        probitstart <- MASS::polr("ph_for_p ~ 1 + .",
                                  data = df_for_p_on_y_and_x_2,
                                  contrasts = NULL, Hess = TRUE, model = TRUE,
                                  method = "probit")
      }
    },
    error = function(cond) {
      cat("We assume that perfect separation occured in your rounded continuous variable, because of too few observations.\n
          Consider specifying the variable to be continuous via list_of_types (see ?hmi).\n")
      cat("Here is the original error message:\n")
      cat(as.character(cond))
      return(NULL)
    },
    warning = function(cond) {
      cat("We assume that perfect separation occured in your rounded continuous variable, because of too few observations.\n
          Consider specifying the variable to be continuous via list_of_types (see ?hmi).\n")
      cat("Here is the original warning message:\n")
      cat(as.character(cond))
      return(NULL)
    },
    finally = {
    }
  )

  gamma1start <- probitstart$coefficients[names(probitstart$coefficients) == "ph_for_y"]
  kstart <- as.vector(probitstart$zeta) # the thresholds (in the summary labeled "Intercepts")
  #explaining the thresholds for the example of rounding degrees 1, 10, 100 and 1000:
  #0 (rounding degree 1), 0|1 (rounding degree 10), 1|2 (100), 2|3 (1000)

  # it might be more practical to run the model
  #only based on the observed data, but this could cause some covariates in betastart2 to be dropped
  betastart <- as.vector(model_y_on_x$coef)
  sigmastart <- sigma(model_y_on_x)

  #####maximum likelihood estimation using the starting values
  #The intercept of the model for y has not to be maximized as due to the standardizations
  #of y and x, its value is exactly 1.
  starting_values <- c(kstart, betastart, gamma1start, sigmastart)
  names(starting_values)[1:length(kstart)] <- paste("threshold", 1:length(kstart), sep = "")
  names(starting_values)[length(kstart) + 1:length(betastart)] <-
    paste("coef_y_on_x", 1:length(betastart), sep = "")
  names(starting_values)[length(kstart) + length(betastart) + 1:length(gamma1start)] <-
    paste("coef_p_on_y_and_x", 1:length(gamma1start), sep = "")
  names(starting_values)[length(starting_values)] <- "sigma"

  ###exclude obs below (above) the 0.5% (99.5%) income quantile before maximizing
  ###the likelihood. Reason: Some extreme outliers cause problems during the
  ###maximization
  quants <- stats::quantile(decomposed_y_stand[indicator_precise, "precise"],
                            c(0.005, 0.995), na.rm = TRUE)

  indicator_outliers <- (decomposed_y_stand[indicator_precise, "precise"] < quants[1] |
                           decomposed_y_stand[indicator_precise, "precise"] > quants[2])

  # The intercept (position length(kstart) + 1) is dropped from the optimized
  # parameters; by construction of the standardization it equals 1.
  m2 <- stats::optim(par = starting_values[-(length(kstart) + 1)], negloglik,
                     X_in_negloglik = MM_y_on_x_0,
                     y_precise_stand = decomposed_y_stand[indicator_precise, "precise"],
                     lower_bounds = decomposed_y_stand[indicator_imprecise, 2],
                     upper_bounds = decomposed_y_stand[indicator_imprecise, 3],
                     my_p = as.numeric(as.character(p)),
                     sd_of_y_precise = sd_of_y_precise,
                     rounding_degrees = rounding_degrees,
                     indicator_precise = indicator_precise,
                     indicator_imprecise = indicator_imprecise,
                     indicator_outliers = indicator_outliers,
                     method = "Nelder-Mead",#"BFGS",
                     control = list(maxit = 10000), hessian = TRUE)

  par_ml2 <- m2$par
  hess <- m2$hessian

  # link about nearest covariance matrix:
  # http://quant.stackexchange.com/questions/2074/what-is-the-best-way-to-fix-a-covariance-matrix-that-is-not-positive-semi-defi
  # nearPD(hess)$mat
  # isSymmetric(Sigma_ml2)

  Sigma_ml2 <- tryCatch(
    {
      Sigma_ml2 <- solve(hess)
    },
    error = function(cond) {
      cat("Hessian matrix couldn't be inverted (in the imputation function of the rounded continuous variable).
          Still, you should get a result, but which needs special attention.\n")
      cat("Here is the original error message:\n")
      cat(as.character(cond))

      # Fallback: a diagonal matrix scaled from the ML point estimates.
      # FIX: previously scaled by the undefined object `pars` (only defined
      # further below) and implicitly returned the assigned vector instead of
      # the matrix.
      Sigma_ml2 <- diag(ncol(hess))
      diag(Sigma_ml2) <- abs(par_ml2)/100
      Sigma_ml2
    },
    warning = function(cond) {
      cat("There seems to be a problem with the Hessian matrix in the imputation of the rounded continuous variable\n")
      cat("Here is the original warning message:\n")
      cat(as.character(cond))
      Sigma_ml2 <- solve(hess)
    },
    finally = {
    }
  )

  ####draw new parameters (because it is a Bayesian imputation)
  # Boolean value indicating whether the parameters are valid or not
  invalid <- TRUE

  #numerical problems can result in a not positive definite Matrix.
  Sigma_ml3 <- as.matrix(Matrix::nearPD(Sigma_ml2)$mat)
  counter <- 0
  while(invalid && counter < 1000){
    counter <- counter + 1
    pars <- mvtnorm::rmvnorm(1, mean = par_ml2, sigma = Sigma_ml3)
    #first eq on page 63 in Drechsler, Kiesl, Speidel (2015)

    ####test if drawn parameters for the thresholds are in increasing order
    ####and if the standard deviation of the residuals is <= 0
    ####if yes, draw again
    # pars takes the starting values c(kstart, betastart2, gamma1start, sigmastart2)
    invalid <- is.unsorted(pars[1:(length(rounding_degrees) - 1)]) || pars[length(pars)] <= 0
  }

  # derive imputation model parameters from previously drawn parameters
  if(ncol(MM_y_on_x_0) == 1){
    beta_hat <- matrix(1, ncol = 1)
  }else{
    beta_hat <- as.matrix(c(1, pars[length(rounding_degrees):(length(pars) - 2)]), ncol = 1)
  }

  gamma1_hat <- pars[length(pars) - 1]
  sigma_hat <- pars[length(pars)]
  mu_g <- gamma1_hat * (as.matrix(MM_y_on_x_0) %*% beta_hat)
  mu_y <- as.matrix(MM_y_on_x_0) %*% beta_hat
  #The covariance matrix from equation (3)
  Sigma <- matrix(c(1 + gamma1_hat^2 * sigma_hat^2,
                    gamma1_hat * sigma_hat^2, gamma1_hat * sigma_hat^2,
                    sigma_hat^2), nrow = 2)

  ###########################################################
  #BEGIN IMPUTING INTERVAL-DATA AND COMPLETELY MISSING DATA#
  # The imputation for precise but rounded data follows in the next section.
  # precise and not rounded data need no imputation at all.
  lower_general_stand <- decomposed_y_stand[, "lower_general"][indicator_imprecise | indicator_missing]
  upper_general_stand <- decomposed_y_stand[, "upper_general"][indicator_imprecise | indicator_missing]

  #draw values from the truncated normal distributions
  # the bounds are straight forward for the interval data.
  # for the missing data, the bounds are -Inf and +Inf,
  # which is equivalent to draw from an unbounded normal distribution
  mytry_interval <- msm::rtnorm(n = sum(indicator_imprecise | indicator_missing),
                                lower = lower_general_stand,
                                upper = upper_general_stand,
                                mean = mu_y[indicator_imprecise | indicator_missing],
                                sd = sigma_hat)

  # proposed values for imputation
  #do the backtransformation from standardised to unstandardised
  imp_tmp <- decomposed_y[, "precise"]
  imp_tmp[indicator_imprecise | indicator_missing] <-
    (mytry_interval - 1) * sd_of_y_precise + mean_of_y_precise

  ###############################################################################
  ########################### BEGIN UNROUNDING-IMPUTATION########################

  ###define bounds for the rounding basis
  bounds_for_g_hat <- c(-Inf, pars[1:(length(rounding_degrees) - 1)], Inf)

  ###define interval bounds for maximum possible rounding intervals
  #Principally this could be done without standardization, but it makes the following functions
  #work more reliably.
  #If standardization happens, it is important to adjust the parameters accordingly.
  y_lower <- (decomposed_y[indicator_precise, "precise"] -
                half_interval_length[as.numeric(as.character(p))] - mean_of_y_precise)/sd_of_y_precise + 1

  y_upper <- (decomposed_y[indicator_precise, "precise"] +
                half_interval_length[as.numeric(as.character(p))] - mean_of_y_precise)/sd_of_y_precise + 1

  g_upper <- bounds_for_g_hat[as.numeric(as.character(p)) + 1]

  #elements <- cbind(mymean, -Inf, y_lower, g_upper, y_upper)#ORIGINAL
  elements <- cbind(-Inf, mu_g[indicator_precise, 1], g_upper,
                    y_lower, mu_y[indicator_precise, 1], y_upper)
  # Note: we set g_lower to -Inf because we state that a value of 1500 is not necessarily
  # a multiple of 500; it could also be rounded to the next multiple of 10 or even 1.
  colnames(elements) <- c("g_lower", "mean_g","g_upper", "y_lower","mean_y", "y_upper")

  ###indicator which of the precise observations need to be imputed due to rounding
  #(and not because they are missing)
  rounded <- rep(TRUE, length(p))

  while(any(rounded)){
    ###draw values for g and y from a truncated multivariate normal
    ###drawn y must be between y_lower and y_upper
    ###drawn g must be between g_lower and g_upper
    mytry <- t(apply(elements[rounded, , drop = FALSE],
                     1, sampler, Sigma))

    # The truncated-normal sampler can fail (returning NaN) when the mean of g
    # or y lies outside its truncation interval.  For those rows we clamp the
    # offending mean onto the nearest interval bound and retry on the next
    # pass of the loop.
    problematic_draws <- is.na(mytry[, 1])
    problematic_elements <- elements[problematic_draws, , drop = FALSE]

    # check if there are problematic means of g. This is the case if the mean is outside
    # the interval for a possible g.
    toosmall_gs <- problematic_elements[, 2] < problematic_elements[, 1]
    toolarge_gs <- problematic_elements[, 2] > problematic_elements[, 3]

    elements[which(problematic_draws)[toosmall_gs], 2] <-
      elements[which(problematic_draws)[toosmall_gs], 1]

    elements[which(problematic_draws)[toolarge_gs], 2] <-
      elements[which(problematic_draws)[toolarge_gs], 3]

    toosmall_ys <- problematic_elements[, 5] < problematic_elements[, 4]
    toolarge_ys <- problematic_elements[, 5] > problematic_elements[, 6]

    elements[which(problematic_draws)[toosmall_ys], 5] <-
      elements[which(problematic_draws)[toosmall_ys], 4]

    elements[which(problematic_draws)[toolarge_ys], 5] <-
      elements[which(problematic_draws)[toolarge_ys], 6]

    ####get imputed rounding indicator
    round_int <- apply(mytry[, 1, drop = FALSE], 1,
                       function(x) sum(x > bounds_for_g_hat))

    ###get imputed income on original scale
    imp_precise_temp <- (mytry[, 2, drop = FALSE] - 1) * sd_of_y_precise + mean_of_y_precise

    #Store these results as imputation values...
    imp_tmp[indicator_precise][rounded] <- imp_precise_temp

    #... but test if estimated rounding degree and proposed y can explain the observed y.
    # E.g. the estimated rounding degree 10 and the proposed y 2063 doesn't match
    #to an observed value 2100. A degree of 100 would match in this case.
    #If degree and y do match set the value for rounded to FALSE.
    # The remaining (non-matching) observations get a new proposal y and rounding degree.
    domatch <- floor(imp_precise_temp[, 1]/rounding_degrees[round_int] + 0.5) * rounding_degrees[round_int] ==
      decomposed_y[indicator_precise, "precise"][rounded]
    rounded[rounded][domatch] <- FALSE
  }

  y_ret <- data.frame(y_ret = imp_tmp)

  return(y_ret)
}
|
5abc73712c70a776ee4938eb751b50c8cd493711 | ccb7a35f060cb6b03ff1bec980f4c46983d5ef8d | /combined_plots.R | 821b783a2707ea96682af6caf90da60b933aeaa1 | [] | no_license | PeerChristensen/UrBIOfuture_survey | 0816e6db41bf5058da121b99044d66ea2db5e3a5 | 0e9172f40a3360d999dfae6c2141046bc2cc655d | refs/heads/master | 2020-07-23T01:34:32.634762 | 2019-10-17T07:51:11 | 2019-10-17T07:51:11 | 207,402,066 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,997 | r | combined_plots.R | # combined plots
library(cowplot)
library(scales)
library(tidyverse)
df <- read_csv2("Survey_consolidated_filter_23092019.csv") %>%
mutate_if(is.character,
~replace(., grepl("Bachelor/engineer", .), "BA/engineer")) %>%
filter(n_complete > 90,
company_type == "i")
#########
theme_ubf <- function () {
theme_ipsum_rc(plot_title_size = 28) %+replace%
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
plot.margin = margin(1,1, 1,1, "cm"),
axis.text.y = element_text(size = 18,
margin = margin(r = .3, unit = "cm")),
axis.text.x = element_text(size = 18),
panel.grid.major.x = element_blank(),
)
}
theme_set(theme_ubf())
############################################
age <- df %>%
count(q_101) %>%
filter(!is.na(q_101)) %>%
mutate(pct = n/sum(n) * 100,
q_101 = fct_relevel(factor(q_101),"> 60", after = 6)) %>%
ggplot(aes(q_101,pct)) +
geom_col(fill="#98C455",color=NA) +
scale_y_continuous(labels=percent_format(scale=1)) +
ggtitle("Age")
# ----
# 4.1.5. Gender distribution "q_102"
# ----
gender <- df %>%
select(q_102_1,q_102_2,q_102_3) %>%
gather() %>%
count(key,value) %>%
filter(value != "Not selected") %>%
mutate(Gender = recode(key, q_102_1 = "Male",q_102_2 = "Female",q_102_3 = "Non-binary"),
pct = n/sum(n) * 100) %>%
ggplot(aes(Gender,pct)) +
geom_col(fill="#98C455",color=NA) +
scale_y_continuous(labels=percent_format(scale=1)) +
ggtitle("Gender")
cowplot::plot_grid(age,gender,align="v",ncol=2,rel_widths = c(7/11, 4/11))
ggsave("images/q_101_102.png", width=15,height=7)
# ----
# 4.1.6. Educational level q_103
# ----
education <- df %>%
count(q_103) %>%
filter(!is.na(q_103)) %>%
mutate(pct = n/sum(n) * 100,
q_103 = recode(q_103,
"VET - Vocational Education and Training" = "VET")) %>%
ggplot(aes(q_103,pct)) +
geom_col(fill="#98C455",color=NA) +
scale_y_continuous(labels=percent_format(scale=1)) +
ggtitle("Educational level") +
theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
# ----
# 4.1.7. Experiences in the bio-based industries q_104
# ----
experience <- df %>%
count(q_104) %>%
filter(!is.na(q_104)) %>%
mutate(pct = n/sum(n) * 100,
q_104 = fct_relevel(factor(q_104),"> 15 years", after = Inf)) %>%
ggplot(aes(q_104,pct)) +
geom_col(fill="#98C455",color=NA) +
scale_y_continuous(labels=percent_format(scale=1)) +
ggtitle("Industry experience") +
theme(axis.text.x=element_text(angle=45,vjust=1,hjust=1))
# ----
# 4.1.8. Job positions q_105
# ----
job_positions <- df %>%
count(q_105) %>%
filter(!is.na(q_105)) %>%
mutate(pct = n/sum(n) * 100,
q_105 = recode(q_105,
"Plant and machine operator and assembler" = "Operator and assembler",
"Technician and associate professional" = "Technician and assoc. professional",
"Clerical support (personal assistant, secretary, etc..)" = "Clerical support",
"Service and market sales worker" = "Service and market sales",
"Elementary (low education level or no qualifications at all)" = "Elementary")) %>%
ggplot(aes(reorder(q_105,pct),pct)) +
geom_col(fill="#98C455",color=NA) +
scale_y_continuous(labels=percent_format(scale=1)) +
ggtitle("Job positions") +
theme(axis.text.y = element_text(hjust=1),
panel.grid.major.y = element_blank(),
panel.grid.major.x = element_line()) +
coord_flip()
bottom_row <- cowplot::plot_grid(education, experience,rel_widths = c(6/11,5/11))
cowplot::plot_grid(job_positions,bottom_row,ncol=1, rel_heights = c(5/11,6/11))
ggsave("images/q_103_104_105.png", width=10,height=10)
# countries
# ----
# 4.1.9. Country of company headquarters distribution "q_109"
# ----
hq <- df %>%
filter(!is.na(q_109)) %>%
count(q_109) %>%
mutate(pct = n/sum(n)*100,
q_109 = recode(q_109, "Outside the European Union" = "Outside EU")) %>%
ggplot(aes(reorder(q_109,pct),pct)) +
geom_col(fill="#98C455",color=NA) +
labs(title="Country of company headquarters") +
theme(axis.text.y = element_text(hjust=1),
panel.grid.major.y = element_blank(),
panel.grid.major.x = element_line()) +
scale_y_continuous(labels=percent_format(scale=1)) +
coord_flip()
# ----
# 4.1.10. Country of workplace "q_110"
# ----
workplace <- df %>%
filter(!is.na(q_110)) %>%
count(q_110) %>%
mutate(pct = n/sum(n)*100) %>%
ggplot(aes(reorder(q_110,pct),pct)) +
geom_col(fill="#98C455",color=NA) +
labs(title="Country of workplace") +
theme(axis.text.y = element_text(hjust=1),
panel.grid.major.y = element_blank(),
panel.grid.major.x = element_line()) +
scale_y_continuous(labels=percent_format(scale=1)) +
coord_flip()
cowplot::plot_grid(hq,workplace,ncol=2)
ggsave("images/q_109_110.png", width=14,height=8)
|
87f973d205c7ef6bb1defbb1fd76ad60e74bd633 | fc07c3c93c58252ac6a13cf06a4a325862092c21 | /R/RcppExports.R | 61658b6c67ab7b9281d26c3f76443db0363ab7cf | [] | no_license | alexkowa/wrswoR | 998a2b226da76b115e5ed84d85a00a5862b69d19 | a4ea0c1cd3dfb17e14b60a330ac75adb43618cf6 | refs/heads/master | 2020-03-11T08:09:16.363544 | 2018-02-02T08:10:25 | 2018-02-02T08:10:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 535 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Auto-generated R wrappers that forward straight to the compiled C++ routines
# registered by Rcpp (see the "do not edit by hand" header above: any manual
# edits here are overwritten when Rcpp::compileAttributes() is re-run).
# Judging by the package name (wrswoR) and the function names, each presumably
# draws `size` integers from 1..n using selection weights `prob`, with a
# different algorithm per suffix -- confirm against the C++ sources.

sample_int_crank <- function(n, size, prob) {
    .Call(`_wrswoR_sample_int_crank`, n, size, prob)
}

sample_int_ccrank <- function(n, size, prob) {
    .Call(`_wrswoR_sample_int_ccrank`, n, size, prob)
}

sample_int_expj <- function(n, size, prob) {
    .Call(`_wrswoR_sample_int_expj`, n, size, prob)
}

sample_int_expjs <- function(n, size, prob) {
    .Call(`_wrswoR_sample_int_expjs`, n, size, prob)
}
|
fc16257837d8f947861809eb11eb2d954bdfd746 | 9182470bc9ff0562a879b6ea3e4da92e011bf509 | /recombination_intepolation.r | 62fcd6c46ad147b9c68a84969622f1be0b69661d | [] | no_license | cshukai/genwin_parallel | 9c19bcfcb8b3354adb49573418487c9e1f78a5b6 | 4f8ed6c7bea491327f702deaf86be61bed6e923d | refs/heads/master | 2020-05-17T10:35:27.578780 | 2017-12-09T21:22:49 | 2017-12-09T21:22:49 | 183,660,765 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,640 | r | recombination_intepolation.r | rec=read.table("GeneticMapFromNam.txt",header=T)
fst=read.table("FstTable.txt",header=T)
#clean out missing data and correct misplaced start/end
colnames(rec)=c("p.rs.","chr","start","end","cM")
rec2=NULL
for(i in 1:nrow(rec)){
if(!is.na(rec[i,"start"]) && !is.na(rec[i,"end"]) ){
if(rec[i,"start"]<rec[i,"end"]){
thisRow=c(rec[i,"p.rs."],rec[i,"chr"],rec[i,"start"],rec[i,"end"],rec[i,"cM"])
rec2=rbind(rec2,thisRow)
}
else{
thisRow=c(rec[i,"p.rs."],rec[i,"chr"],rec[i,"end"],rec[i,"start"],rec[i,"cM"])
rec2=rbind(rec2,thisRow)
}
}
}
# Transform the cumulative centimorgan positions per chromosome so that all
# cM values are positive: if a chromosome's minimum cM is below 1, shift the
# whole chromosome up so its minimum becomes exactly 1.
#
# NOTE(review): the original computed the minimum from the uncleaned `rec`
# table while rewriting `rec2`; using `rec2` keeps the shift consistent with
# the rows actually retained after cleaning.
colnames(rec2) <- c("p.rs.", "chr", "start", "end", "cM")
chrNum <- 10
for (chr in seq_len(chrNum)) {
  rows <- which(rec2[, "chr"] == chr)
  if (length(rows) == 0) next  # chromosome absent after cleaning
  cm_min <- min(rec2[rows, "cM"])
  if (cm_min < 1) {
    # Vectorized shift of the whole chromosome (original looped over rows).
    rec2[rows, "cM"] <- rec2[rows, "cM"] + (1 - cm_min)
  }
}
# Map each cleaned map interval (window) in rec2 onto the Fst markers whose
# position falls inside it, in parallel across windows.
library(doSNOW)
threads <- 11
cl <- makeCluster(threads)
registerDoSNOW(cl)

# BUGFIX: the original assigned into a shared `windows` object inside the
# %dopar% body; those assignments happen on the workers and are lost. With
# foreach, each iteration must *return* its rows, and the master combines
# them — here with .combine = rbind.
windows <- foreach(i = seq_len(nrow(rec2)), .combine = rbind) %dopar% {
  this_chr  <- rec2[i, "chr"]
  this_cm   <- rec2[i, "cM"]
  win_start <- rec2[i, "start"]
  win_end   <- rec2[i, "end"]

  # Markers on this chromosome whose position lies in [start, end).
  this_target <- fst[fst[, "Chromosome"] == this_chr, ]
  inside <- this_target[, "Position"] >= win_start &
    this_target[, "Position"] < win_end
  hits <- this_target[inside, , drop = FALSE]

  if (nrow(hits) == 0) {
    NULL  # contributes nothing to the rbind
  } else {
    # One row per marker: window id, chromosome, window bounds, cM, marker
    # position, and that marker's Fst. BUGFIX: the original concatenated the
    # entire Fst column (`this_target[, "Fst"]`) into each row instead of
    # the matching marker's value.
    cbind(window = i, chr = this_chr, start = win_start, end = win_end,
          cM = this_cm, Position = hits[, "Position"], Fst = hits[, "Fst"])
  }
}

stopCluster(cl)
# TODO: fit genwin to every window
8fcaafca5288b45dd309d054414ef069af04f957 | e1b16c8f51275fb7c3ac5c0b1259797db102466e | /R/searchMaxProbMultinom.R | 861c8348feb57c7e652fcb44146384cdf0a92758 | [] | no_license | cran/Exact | 2e40cd661c476ef63ba9d5f60853c01ade003c45 | dee64666d46fa2b993bdd0f515a731b981ef26f1 | refs/heads/master | 2022-10-05T23:27:33.407421 | 2022-09-25T22:40:02 | 2022-09-25T22:40:02 | 17,679,070 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,292 | r | searchMaxProbMultinom.R | searchMaxProbMultinom <-
function(Tbls, N, prob) {
  # Search for the nuisance-parameter value p1 (with p2 held fixed) that
  # maximizes the summed multinomial probability of the tables in `Tbls`.
  #
  # Args (inferred from usage — internal helper of the Exact package):
  #   Tbls: matrix whose first three columns are cell counts; the fourth
  #         cell is N - rowSums(Tbls).
  #   N:    total sample size.
  #   prob: matrix with column 1 = candidate p1 values, column 2 = p2
  #         (constant down the column — the return uses prob[1, 2]),
  #         column 3 = workspace for the computed probabilities.
  #         Assumed sorted by p1 and with each p1 appearing once, so that
  #         p1Index below has length 1 — TODO confirm against callers.
  #
  # Returns: list(np1, np2, maxProb) — the maximizing p1, the (single) p2,
  #          and the maximum probability found.
  #
  # Strategy: fill column 3 lazily. Rows proven not to contain the maximum
  # are stamped with the sentinel -999 so they are never evaluated; this
  # prune relies on the probability being unimodal in p1 (presumably — not
  # provable from this file).
  prob[, 3] <- NA
  nRows <- nrow(prob)
  # Calculate first record so max(..., na.rm = TRUE) below is well-defined.
  prob[1, 3] <- sum(multinom(Tbls[ , 1], Tbls[ , 2], Tbls[ , 3], N-rowSums(Tbls),
                             p1=prob[1, 1], p2=prob[1, 2]))
  # Repeat until every row is either evaluated or pruned (-999).
  while (any(is.na(prob[,3]))) {
    # Bisect the remaining unevaluated p1 values: probe (roughly) the middle.
    p1unique <- unique(prob[is.na(prob[ , 3]), 1])
    m <- floor(length(p1unique)/2) + 1 #Very slightly faster
    p1Index <- which(prob[, 1] == p1unique[m])
    prob[p1Index, 3] <- sum(multinom(Tbls[ , 1], Tbls[ , 2], Tbls[ , 3], N-rowSums(Tbls),
                                     p1=prob[p1Index, 1], p2=prob[p1Index, 2]))
    # If the probe is the running maximum, everything to its left cannot be
    # the max (under unimodality): prune rows 1..p1Index-1.
    # NOTE(review): when p1Index == 1, 1:(p1Index-1) is 1:0 = c(1, 0), which
    # would stamp row 1 itself with -999 — verify p1Index can never be 1 here.
    if ( prob[p1Index, 3] == max(prob[, 3], na.rm=TRUE) ) {
      prob[1:(p1Index-1), 3] <- -999
    }
    # Otherwise the maximum lies strictly to the left: prune the probe and
    # everything to its right.
    if ( prob[p1Index, 3] != max(prob[, 3], na.rm=TRUE)) {
      prob[p1Index:nRows, 3] <- -999
    }
  }
  # Original brute-force version, kept for reference:
  # for (probIndex in 1:nrow(prob)) {
  #   prob[probIndex, 3] <- sum(multinom(Tbls[ , 1], Tbls[ , 2], Tbls[ , 3], N-rowSums(Tbls),
  #                                      p1=prob[probIndex, 1], p2=prob[probIndex, 2]))
  # }
  # plot(prob[,1], prob[,3])
  return(list(np1 = prob[prob[, 3] == max(prob[, 3], na.rm=TRUE), 1],
              np2 = prob[1,2],
              maxProb = max(prob[, 3], na.rm=TRUE)))
}
|
14bf45f18c8b3438fc2f2d893d6b2148d0d98b0e | 34fd7065357a619bf994bc07fa7f5110e15c8278 | /scripts/study_area/fCreateSinglePolygon.R | 179ac6d75aea4fd4f4b1f39a180836311694a4b8 | [] | no_license | tropileaf/PacificProj | 9a1bc6cc641fc5ae67086ab38d84c2704d4f4d51 | da5fb2ab3a5dfcb11297ff645fc05f21f7f1a955 | refs/heads/main | 2023-07-04T05:44:11.782728 | 2021-08-13T08:29:08 | 2021-08-13T08:29:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 578 | r | fCreateSinglePolygon.R | fCreateSinglePolygon <- function (df, res){
library(raster)
library(fasterize)
# Creating a empty raster
rs <- raster(ncol = 360*(1/res), nrow = 180*(1/res))
rs[] <- 1:ncell(rs)
crs(rs) <- CRS(longlat)
# Fasterize the land object
df_rs <- fasterize(df, rs)
pol <- as(df_rs, "SpatialPolygonsDataFrame")
pol$layer <- seq(1, length(pol))
# Now to a sf object and create ONE BIG polygon that we can use to populate with PUs
pol_sf <- st_as_sf(pol) %>%
dplyr::select(layer) %>%
summarise(total_layer = sum(layer, do_union = TRUE))
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.