blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71a430fb09dfdf7dda00dc808bfa06a336b9f884 | 3425c9e6a3a1c526a27dcd9c9b0a2af7aeed2c56 | /handover/data/COMTRADE_API.R | 9759d617ed37a1d50467479e1a621113d06264ac | [] | no_license | Waztom/FSA | 4632e521c5066df5503f74402be3b5f4a2ab7a0d | dd21e611b82bbbcfb6e5958661fb336e95b7b738 | refs/heads/master | 2021-10-10T07:03:22.445821 | 2018-10-09T07:09:47 | 2018-10-09T07:09:47 | 164,501,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,396 | r | COMTRADE_API.R | #Use example: Rscript COMTRADE_API.R --year0 2008 --yeare 2017 --month0 1 --monthe 12 --com1 2206
library(optparse)
library(rjson)
library(tidyverse)
library(comtradr)
library(ggplot2)

# ---- Command-line options ---------------------------------------------------
# Usage: Rscript COMTRADE_API.R --year0 2008 --yeare 2017 --month0 1 --monthe 12 --com1 2206
option_list = list(
  make_option(c("-a", "--year0"), default=2010, type='integer', help="Start year"),
  make_option(c("-b", "--yeare"), default=2010, type='integer', help="End year"),
  make_option(c("-c", "--month0"), default=1, type='integer', help="Start month"),
  make_option(c("-d", "--monthe"), default=2, type='integer', help="End month"),
  make_option(c("-e", "--com1"), default="1006", type='character', help="Commodity 1"),
  make_option(c("-f", "--com2"), default=NA, type='character', help="Commodity 2"),
  make_option(c("-g", "--com3"), default=NA, type='character', help="Commodity 3"),
  make_option(c("-i", "--com4"), default=NA, type='character', help="Commodity 4"),
  make_option(c("-j", "--com5"), default=NA, type='character', help="Commodity 5"),
  make_option(c("-k", "--com6"), default=NA, type='character', help="Commodity 6"),
  make_option(c("-m", "--com7"), default=NA, type='character', help="Commodity 7")
)
opt = parse_args(OptionParser(option_list=option_list))

# Keep only the commodity codes that were actually supplied on the command line
uu <- c(opt$com1,opt$com2,opt$com3,opt$com4,opt$com5,opt$com6,opt$com7)
commodities <- uu[!is.na(uu)]

year0 <- opt$year0
mont0 <- opt$month0
yeare <- opt$yeare
monte <- opt$monthe
comco <- commodities

# Output name, e.g. "012008-122017_2206.RData"
outputfile <- paste(sprintf("%02d", mont0),year0,'-',sprintf("%02d", monte),yeare,'_',paste(comco, collapse = '_'),'.RData',sep="")
print(outputfile)

# NOTE(review): API token hard-coded in source; consider moving it to an
# environment variable or a config file kept out of version control.
FSA_token <- "yGa9ysvivTWUUteZVeQUY4rMsCRBcxGTkDbcFbL773EMywrn6cLEDgIq7Wg3vfwZbYkXyhGsblu0wjZjbiwc2EZC0kh/Zp8SmWsXansq3zNEG17gryZAZaRphkp1Mf95Zkjb3aMX/Rr/uAaiKLJbOOwkmv9X3NoA7TCDAA7Go8Y="
ct_register_token(FSA_token)

# Monthly queries are issued in windows of at most 5 months, so the requested
# range is padded out to a whole number of 5-month passes.
maxm <- 5
nmon <- (yeare - year0)*12 + monte - mont0 + 1   # months requested
pass <- as.integer(ceiling(nmon / maxm))         # number of API calls needed
lagm <- (pass * maxm)-nmon                       # padding months added at the end
monte <- monte + lagm
nmon <- (yeare - year0)*12 + monte - mont0 + 1

# Query the API one window at a time and accumulate the rows
mydf = data.frame()
mm <- mont0
yy <- year0
for(i in seq(from=1, to=nmon, by=maxm)){
  mmf <- sprintf("%02d", mm)
  d0 <- paste(yy,'-',mmf,sep="")   # window start, "YYYY-MM"
  # advance (mm, yy) to the start of the next window
  mm <- mm + maxm
  if(mm>12){
    yy <- yy + 1
    mm <- mm-12
  }
  # window end = one month before the next window's start
  yx <- yy
  mx <- mm-1
  if(mx<1){
    mx <- mx + 12
    yx<-yx-1
  }
  mxf <- sprintf("%02d", mx)
  de <- paste(yx,'-',mxf,sep="")
  print(paste('Querying period: ',d0,de))
  tmp <- ct_search("All", "All", trade_direction = c("imports","exports"), freq = c("monthly"),
                   start_date = d0, end_date = de, commod_codes = comco,
                   max_rec = NULL, type = c("goods"),
                   url = "https://comtrade.un.org/api/get?")
  mydf <- rbind(mydf,tmp)
}

# Partner/reporter labels that are aggregates rather than real countries
ignore <- c('World', 'EU-27', 'Other Asia, nes', 'Other Europe, nes', 'Areas, nes')

# Keep the columns of interest and add proper Date columns
dff <- mydf %>%
  select(period,trade_flow_code,trade_flow,reporter,partner,netweight_kg,trade_value_usd,year,commodity,commodity_code)
df <- rename(dff, net_weight_kg = netweight_kg) %>%
  mutate(period_date = as.Date(paste0(as.character(period), '01'), format='%Y%m%d')) %>%
  mutate(year_date = as.Date(paste0(as.character(year), '0101'), format='%Y%m%d'))

# BUG FIX: the second filter previously restarted from `df`, silently
# discarding the partner filter; it must be applied on top of the first one.
cc <- df[!(df$partner %in% ignore), ]
cc <- cc[!(cc$reporter %in% ignore), ]

# Saving the object in RData format
save(cc, file = outputfile)
|
dbf6900c6dd234493f82777ee37636982b28d6c8 | 1f5590d3276d541e8a916bc6d589d6dd98562854 | /man/interval.dist.Rd | c9affdcc6977d8a746fc68ea03e6778e94e91749 | [] | no_license | Frenchyy1/RSDA | 0e8c683e79c73c214b3487991ea3d6e2b8f8b621 | 60445a2749d8f009a748a158f89f53d022edb6f0 | refs/heads/master | 2020-04-14T14:27:57.888698 | 2018-10-10T19:36:39 | 2018-10-10T19:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,166 | rd | interval.dist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interval.dist.R
\name{interval.dist}
\alias{interval.dist}
\title{Interval Distance Matrix}
\usage{
interval.dist(sym.data, distance = c('hausdorff', 'centers', 'interscal'), p = 2)
}
\arguments{
\item{sym.data}{Symbolic data matrix with the variables of interval type.}
\item{distance}{The distance to be used.}
\item{p}{The p in the Hausdorff distance :
\deqn{d(w_{u_1},w_{u_2}) = \left( \sum_{j=1}^m \Phi_j(w_{u_1},w_{u_2})^p \right)^{1/p}}}
}
\value{
Return a R distance triangular matrix.
}
\description{
Compute a distance matrix from a symbolic interval data matrix.
}
\examples{
\dontrun{
data(VeterinaryData)
VD <- VeterinaryData
interval.dist(VD)
interval.dist(VD,distance='centers')
}
}
\references{
Groenen, P.J.F., Winsberg, S., Rodriguez, O., Diday, E. (2006). I-Scal: Multidimensional
scaling of interval dissimilarities. Computational Statistics and Data Analysis, 51,
360-378.
Billard L. and Diday E. (2006).
Symbolic data analysis: Conceptual statistics and data mining. Wiley, Chichester.
}
\author{
Oldemar Rodriguez Rojas
}
\keyword{Distance}
\keyword{Symbolic}
|
d423aac46b107b9a49b079e59f860abbf8351d77 | 6ac5e3e1930737182af9f110c688143959d8b412 | /venndiagram.R | 1751beec786f8d3a5dffd3f3d741ffcf58dbc1c6 | [] | no_license | AHdeRojas/Experimental-Design-and-Biometry-R- | faaa3642fc2182008e6df28c84cd2f5cf7f587ce | 5f11e9a6fbf54e2c04e67025b15c4c1ed917ae64 | refs/heads/main | 2023-01-03T06:51:06.534760 | 2020-11-01T20:53:17 | 2020-11-01T20:53:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 284 | r | venndiagram.R | #Independent
# 2x2 cross of tall/short x green/yellow pea counts where the two traits are
# independent (each cell equals the product of its row and column proportions).
peas <- matrix(c(900, 300, 300, 100), nrow = 2, ncol = 2,
               dimnames = list(c("Tall", "Short"), c("Green", "Yellow")))
mosaicplot(peas)

# Same totals, but the traits are associated (not independent).
peas <- matrix(c(900, 100, 300, 300), nrow = 2, ncol = 2,
               dimnames = list(c("Tall", "Short"), c("Green", "Yellow")))
mosaicplot(peas)
|
f04283b6a8b2d27211fa1f8f091e13df6b267815 | fc01fc89503fb6f4f87fb5f8d3315127ccf3a178 | /appres/R/script_boxsel_costas.r | b6b5ba02668c5f3fed46b0c4792a7141f2a0459c | [] | no_license | alejandro-reyesamaro/Memoire | 63d5a486242192d3d5cb15dc499485850206527a | 9a11af8c574612e9b6422bb1ef6ae2c517907e6f | refs/heads/master | 2021-01-17T02:50:37.928471 | 2017-02-13T11:28:32 | 2017-02-13T11:28:32 | 55,986,070 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,358 | r | script_boxsel_costas.r | script_boxsel_costas <- function(col_par_box, col_comm_box){
# Hard-coded benchmark timings (seconds): `sec` holds 12 sequential runs
# (NA-padded so both columns have equal length), `par` holds 30 parallel runs.
sec=c(55.500,177.520,122.810,191.430,87.350,51.950,42.590,243.340,127.080,121.750,296.950,74.560, rep.int(NA, 18))
par=c(49.190,27.590,22.370,27.660,16.220,22.550,2.730,21.000,15.310,31.520,16.940,27.770,20.900,14.310,47.250,34.830,26.210,28.700,59.630,11.790,16.350,15.610,12.200,5.600,19.710,75.710,31.740,25.880,30.440,7.610)
data<-data.frame(sec,par)
dev=pdf("c19_select_BP.pdf", height=14, width=7)
par(cex.axis=2)
par(mar=c(10,6,1,1)+.1)   # wide bottom margin for the stacked box labels
p_ylim=c(0,300);
col_boxes = c(col_par_box, col_comm_box)
boxplot(data, las = 2, names = c("", ""), cex = 2, ylim=p_ylim, col=col_boxes)
# Box labels drawn manually so the second box carries a three-line label
# ("Parallel / without / communication").
labels1<-as.vector(c("Sequential", "Parallel"))
labels2<-as.vector(c("", "without"))
labels3<-as.vector(c("", "communication"))   # BUG FIX: was misspelt "communciation"
# BUG FIX: `at` previously ran 1:3 (apparently left over from a three-box
# version); with two boxes, recycling the two labels over three positions
# drew a stray "Sequential" in the margin at x = 3.
mtext(at = c(1:2), labels1, side=1, line=2, cex=2)
mtext(at = c(1:2), labels2, side=1, line=3.5, cex=2)
mtext(at = c(1:2), labels3, side=1, line=5, cex=2)
# Dashed horizontal reference lines at the sequential and parallel means
x <- c(0:3)
lines(x, rep.int(mean(sec[1:12]), 4), col="red", lwd=3, lty=2)
lines(x, rep.int(mean(par), 4), col="blue", lwd=3, lty=2)   # stray empty argument removed
legend( 'topright',
        c("Sequential mean", "Parallel mean"),
        lty=2, lwd=4,
        col=c("red", "blue"),
        bty='n', # box type around legend
        pch = c(NA, NA),
        cex=1.8)
mtext("Solver strategy", side=1, line=7.5, cex=2.5)
mtext("Time (seconds)", side=2, line=4, cex=2.5)
dev.off()
}
|
e2b85f487ca3706d5cb07a1f00cbb1a2ec2b4894 | 0a36d7506471b1fb339eab56498ea9a4b50fa3fd | /K5 Suspensions.R | 7cb2daaa206557810ac0d9e21d6a07620e31fcad | [] | no_license | djrobillard/SCSD | 89db29783cf3c9bc49ce76a90d347ffea5e74a93 | d5b9d403bacfb6997eed2d403dc4eb04b0d97752 | refs/heads/master | 2021-08-20T09:36:59.867736 | 2017-11-28T20:53:28 | 2017-11-28T20:53:28 | 112,387,167 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,297 | r | K5 Suspensions.R | ehout<-ehout_all_years
library(lubridate)

### Parse dates and create time intervals
ehout$date <- mdy(ehout$incidentdate) # incidentdate arrives as m/d/y text
ehout$year<-year(ehout$date)
ehout$month<-sprintf("%02d", month(ehout$date))   # zero-padded for YYYYMMDD below
ehout$day<-sprintf("%02d", day(ehout$date))
# Numeric YYYYMMDD, convenient for the range comparisons that follow
ehout$date.num <- as.numeric(paste0(ehout$year, ehout$month, ehout$day))
# ytd = 1 when the incident falls in the fall "year to date" window of any
# school year (Sept 1 - Feb 1; the 2016 window opens Aug 21 -- presumably an
# earlier school start that year, TODO confirm)
ehout$ytd<-ifelse(ehout$date.num>20130901 & ehout$date.num<20140201, 1,
           ifelse(ehout$date.num>20140901 & ehout$date.num<20150201, 1,
           ifelse(ehout$date.num>20150901 & ehout$date.num<20160201, 1,
           ifelse(ehout$date.num>20160821 & ehout$date.num<20170201, 1, 0))))

##### Filter to the proper data set
library(plyr)   # attached before dplyr so dplyr's verbs mask plyr's
library(dplyr)
ehout2<-filter(ehout, incidenttype=="referral")
ehout3<-filter(ehout2, ytd==1)
ehout3<-filter(ehout3, grade!=15)   # drop grade code 15

#### Data analysis for all incidents
# Collapse the small race/ethnicity codes into "O" (other) -- TODO confirm
# what the single-letter codes stand for
ehout3$raceethnicity<-revalue(ehout3$raceethnicity, c("A" = "O", "I" = "O",
                                                      "M" = "O", "P" = "O"))

# One aggregate per suspension measure over the same demographic cells,
# then merge them into a single table
total.oss<-as.data.frame(aggregate(oss ~ schoolyear + schoolname + gender + raceethnicity + iep + ell + frpl + grade, data=ehout3, FUN=sum))
total.ossdays<-as.data.frame(aggregate(ossdays ~ schoolyear+ schoolname + gender + raceethnicity + iep + ell + frpl + grade, data=ehout3, FUN=sum))
total.iss<-as.data.frame(aggregate(iss ~ schoolyear+ schoolname + gender + raceethnicity + iep + ell + frpl + grade, data=ehout3, FUN=sum))
total.issdays<-as.data.frame(aggregate(issdays ~ schoolyear+ schoolname + gender + raceethnicity + iep + ell + frpl + grade, data=ehout3, FUN=sum))
total.susp<-as.data.frame(aggregate(totalsuspension ~ schoolyear+ schoolname + gender + raceethnicity + iep + ell + frpl + grade, data=ehout3, FUN=sum))
total.suspdays<-as.data.frame(aggregate(totalsuspduration ~ schoolyear+ schoolname + gender + raceethnicity + iep + ell + frpl + grade, data=ehout3, FUN=sum))
all.susp.categories<-merge(total.oss, total.ossdays)
all.susp.categories<-merge(all.susp.categories, total.iss)
all.susp.categories<-merge(all.susp.categories, total.issdays)
all.susp.categories<-merge(all.susp.categories, total.susp)
all.susp.categories<-merge(all.susp.categories, total.suspdays)

# Keep kindergarten ("KF") through grade 5 plus grade code 13, grouped in that
# order (replaces seven subset() calls chained together with rbind())
k5_grades <- c("KF", "1", "2", "3", "4", "5", "13")
k5suspytd <- do.call(rbind, lapply(k5_grades, function(g) subset(all.susp.categories, grade == g)))

# BUG FIX: K2 flags grades K-2. The previous test `grade < 3` compared the
# grade as *text*, so "KF" was flagged 0 and "13" was flagged 1.
k5suspytd$K2 <- ifelse(k5suspytd$grade %in% c("KF", "1", "2"), 1, 0)

# Write CSV (hard-coded output path retained from the original script)
write.csv(k5suspytd, "C:/Users/drobil66/Desktop/RFiles/k5suspytd.csv")
|
f01afc179f4ee52fa4361a245e9a8a82439511c3 | 8d9929e0393339e94042892799aeea6bc2596e7c | /HCB_obitos/obitos_02_frequencies.R | 4ff134c84544d9864853031adede6277937cf509 | [] | no_license | rsoren/201706_moz_research | 340b732e7e412bf66db367d98a3e7bd972d62046 | b43f8205a0880f97f503061bc1fc5a1a179fb19c | refs/heads/master | 2021-01-01T18:39:14.852006 | 2017-10-29T04:43:57 | 2017-10-29T04:43:57 | 98,392,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,383 | r | obitos_02_frequencies.R | #
#
# obitos_02_frequencies.R
#
# Reed Sorensen
# July 2017
#
# Builds frequency tables from the combined "obitos" data set and writes them
# to CSV: standard symptoms, other clinical variables, and disease indicators.
library(dplyr)
# NOTE(review): rm(list = ls()) wipes the calling workspace whenever this
# script is source()d from another session; fine for standalone Rscript use.
rm(list = ls())
#####
# SYMPTOM FREQUENCIES
# standard_symptom_list: column names to tabulate -- presumably a character
# vector, TODO confirm against the .RDS
standard_symptom_list <- readRDS("_intermediate_files/standard_symptom_list.RDS")
df3 <- read.csv("_data/obitos_data_combined_symptoms.csv", as.is = TRUE)
# Frequency table for a single (typically dichotomous) variable.
#
# Args:
#   var:  name of the column in `data` to tabulate.
#   data: data frame holding that column.
#   multiple_levels: unused; kept so existing call sites stay valid.
#
# Returns one row per observed level with columns variable, level, count,
# n_nonmissing, n_total, pct_among_nonmissing, pct_among_total. Returns NULL
# when the column has no non-missing values (same as the original behavior).
get_freq <- function(var, data, multiple_levels = FALSE) {
  values <- data[, var]
  n_total <- nrow(data)
  n_nonmissing <- sum(!is.na(values))
  # Tabulate once -- the previous version recomputed table() for every level
  # and rbind-ed one-row data frames, which is O(levels * n).
  level_counts <- table(values)
  if (length(level_counts) == 0) {
    return(NULL)
  }
  result <- data.frame(
    variable = var,
    level = names(level_counts),
    count = as.vector(level_counts),
    n_nonmissing = n_nonmissing,
    n_total = n_total )
  # Percentages with two decimal places (round the proportion to 4, then x100)
  result$pct_among_nonmissing <- round(result$count / n_nonmissing, digits = 4) * 100
  result$pct_among_total <- round(result$count / n_total, digits = 4) * 100
  return(result)
}
# Tabulate every standard symptom, drop the "FALSE" (symptom absent) rows,
# and write the remainder to CSV ordered from most to least frequent.
symptom_results <- lapply(standard_symptom_list, function(v) get_freq(v, data = df3))
symptom_results2 <- do.call(rbind, symptom_results) %>%
  filter(level != "FALSE") %>%
  arrange(desc(count))
write.csv(symptom_results2,
          file = "HCB_obitos/obitos_frequencies_symptoms.csv",
          row.names = FALSE)
#####
# FREQUENCIES OF OTHER VARIABLES

# Derive analysis variables from the raw columns:
# - on_TARV: TRUE when EstadoTARV == 0, FALSE when it is 1 or 2, NA otherwise
# - had_TB:  TRUE when AntecedentedeTB == 1, NA otherwise (never explicit FALSE)
# - Hb is forced non-negative, then binned; vitals and age binned with cut()
#   using left-closed intervals (999 serves as an open-ended upper bound)
df4 <- df3 %>%
  mutate(
    # principal complaint
    on_TARV = as.logical(NA),
    on_TARV = ifelse(EstadoTARV == 0, TRUE, on_TARV),
    on_TARV = ifelse(EstadoTARV %in% c(1,2), FALSE, on_TARV),
    had_TB = as.logical(NA),
    had_TB = ifelse(AntecedentedeTB == 1, TRUE, had_TB),
    Hb = abs(Hb),  # negative Hb presumably a sign-entry error -- TODO confirm
    Hb_categories = cut(Hb, breaks = c(0, 6, 9, 11, 999), right = FALSE),
    LocaldetestedeHIV = ifelse(LocaldetestedeHIV == "", NA, LocaldetestedeHIV),  # blank -> NA
    freq_card_cat = cut(FrequenciaCardiaca, breaks = c(0, 60, 100, 999), right = FALSE),
    freq_resp_cat = cut(FrequenciaRespiratoria, breaks = c(0, 12, 20, 999), right = FALSE),
    age_cat = cut(Idadeanos, breaks = c(0, 25, 35, 45, 999), right = FALSE)
  )
# Process blood pressure strings recorded as "systolic/diastolic". Some rows
# used "\" as the separator, and one corrupted value is blanked explicitly.
#-- systolic
table(df4$TensaoArterial) # start again
df4$TensaoArterial <- gsub("\\\\", "/", df4$TensaoArterial)
df4 <- df4 %>%
  mutate(
    TensaoArterial = ifelse(TensaoArterial == "12\0370-80", "", TensaoArterial),
    TensaoArterial = ifelse(TensaoArterial == "", NA, TensaoArterial) )
# vapply (not sapply) guarantees a numeric vector even for degenerate input;
# strsplit() passes NA through, so missing readings stay NA.
df4$TensaoArterial_systolic <- vapply(df4$TensaoArterial, function(bp) {
  as.numeric(strsplit(bp, split = "/")[[1]][1])
}, numeric(1))
df4$tensao_systolic_cat <- cut(
  df4$TensaoArterial_systolic, breaks = c(0, 60, 139, 999), right = FALSE )
#-- diastolic
df4$TensaoArterial_diastolic <- vapply(df4$TensaoArterial, function(bp) {
  as.numeric(strsplit(bp, split = "/")[[1]][2])
}, numeric(1))
df4$tensao_diastolic_cat <- cut(
  df4$TensaoArterial_diastolic, breaks = c(0, 60, 90, 999), right = FALSE )
# Non-symptom variables run through the same frequency helper
other_var_list <- c(
  "on_TARV",
  "Hb_categories",
  "oms_score",
  "EstadoTARV",
  "AntecedentedeTB",
  "LocaldetestedeHIV",
  "LinhadeTARVusada",
  "freq_card_cat",
  "freq_resp_cat",
  "tensao_systolic_cat",
  "tensao_diastolic_cat",
  "age_cat",
  "Sexo"
)
other_results <- lapply(other_var_list, function(v) get_freq(v, data = df4))
other_results2 <- do.call(rbind, other_results)
write.csv(other_results2,
          file = "HCB_obitos/obitos_frequencies_other.csv",
          row.names = FALSE)
"C:/Users/path/Analise R/graf_2cpp.jpg"
dev.off()
# percent with RxTorax among those with at least 1 sinais_pulmonar
df5 <- df4 %>%
dplyr::select(sinais_pulmonar, RxTorax) %>%
filter(sinais_pulmonar > 0)
get_freq("RxTorax", data = df5)
#####
# DISEASES

# Tabulate every disease indicator column (names prefixed "y_") and write the
# rows where the indicator is present, most frequent first.
df6 <- read.csv("_intermediate_files/obitos_data_symptoms_and_diseases.csv",
                as.is = TRUE)
# startsWith() is clearer than substr(...) == "y_" and selects the same columns
disease_vars <- names(df6)[startsWith(names(df6), "y_")]
disease_results <- lapply(disease_vars, get_freq, data = df6)
# CONSISTENCY FIX: compare against the *string* "FALSE" like the symptom block
# above; `level` is character, and `level != FALSE` only worked through the
# implicit coercion of FALSE to "FALSE".
disease_results2 <- do.call("rbind", disease_results) %>%
  filter(level != "FALSE") %>%
  arrange(desc(count))
write.csv(disease_results2, "HCB_obitos/obitos_frequencies_diseases.csv", row.names = FALSE)
|
deb702edacb52a78726637988676951d2543a6a3 | bc722f06af27474e2df4df312cd554460b280e58 | /Plot_Images_Scripts/Plot3.R | ae78ab192906106801ada9655b42673f67a0a286 | [] | no_license | Justin92/ExData_Plotting1 | 95fe4807d58dea7d8ce5e2d14a002cdf201d4383 | cabb66b2cbbc9b2db108d01f31e5bccf1f65a6ab | refs/heads/master | 2020-04-13T06:43:00.185836 | 2018-12-25T01:40:03 | 2018-12-25T01:40:03 | 163,029,051 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,282 | r | Plot3.R | ##Plot3
## Plot 3: daily energy sub-metering for 2007-02-01/02, saved to plot3.png.
# Assumes the data set has been unpacked into the "Downloads" directory.
library(dplyr)  # filter()/mutate() below are dplyr verbs; made explicit here
PowerUsage <- read.table("Downloads/household_power_consumption.txt", sep = ";", header = TRUE)
PowerUsage <- filter(PowerUsage, Date == "2/2/2007" | Date == "1/2/2007")
PowerUsage$Global_active_power <- as.numeric(as.character(PowerUsage$Global_active_power))
# BUG FIX: this mutate() previously referenced a non-existent `PowerUsage2`;
# the Date/Time columns belong to `PowerUsage` itself, so use bare column
# names inside the data mask.
PowerUsage <- mutate(PowerUsage, Date_Time =
                       as.POSIXct(paste(as.character(Date),
                                        as.character(Time), sep = " "),
                                  format = "%d/%m/%Y %H:%M:%S"))
PowerUsage$Sub_metering_1 <- as.numeric(as.character(PowerUsage$Sub_metering_1))
PowerUsage$Sub_metering_2 <- as.numeric(as.character(PowerUsage$Sub_metering_2))
PowerUsage$Sub_metering_3 <- as.numeric(as.character(PowerUsage$Sub_metering_3))
png(file = "plot3.png")
plot(PowerUsage$Date_Time, PowerUsage$Sub_metering_1, type = "l", col = "black", xlab = "",
     ylab = "Energy sub metering")
lines(PowerUsage$Date_Time, PowerUsage$Sub_metering_2, type = "l", col = "red")
lines(PowerUsage$Date_Time, PowerUsage$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub metering 1", "Sub metering 2", "Sub metering 3"), cex = 0.8)
dev.off()
|
403d85bfb88761099140c3cd448a26de7bc46afe | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/qgtools/examples/ad.reml.jack.Rd.R | a9e0612f3d01bb8046e9047edf92a8d795006505 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | ad.reml.jack.Rd.R | library(qgtools)
### Name: ad.reml.jack
### Title: AD model with REML analysis and jackknife resampling test
### Aliases: ad.reml.jack
### Keywords: AD model cotton REML cotf2 jackknife
### ** Examples
# Auto-extracted help-page example: fit an additive-dominance (AD) model by
# REML with a jackknife resampling test on the package's cotf2 data set.
library(qgtools)
data(cotf2)
dat=cotf2[which(cotf2$Env==1),]  # keep environment 1 only
Ped=dat[,c(1:5)]                 # first five columns -- presumably pedigree/design, TODO confirm
Y=dat[,-c(1:5)]                  # remaining columns: trait values
res=ad.reml.jack(Y,Ped)
res$Var           # estimated variance components (per the title above)
res$FixedEffect
res$RandomEffect
##End
|
1f5f4f1562babb109e51d711fcbb7be4c79fa9ef | 437b6455b3af3d258ab7df1eaaece82e4015cfaa | /R/int-impute_fn_knn.R | 9986c4a251cc6c09776f1e716ff9c9fb0b378015 | [] | no_license | alexWhitworth/imputation | 8e57825ffab037d450f2bfad477aeec74fb935bb | 7e939fcde999dcf60cbf6d28d9aa3f9a848fc82e | refs/heads/master | 2020-04-04T18:37:37.505515 | 2017-04-29T00:19:05 | 2017-04-29T00:19:05 | 42,534,932 | 3 | 4 | null | 2015-09-15T17:26:03 | 2015-09-15T17:26:02 | null | UTF-8 | R | false | false | 5,138 | r | int-impute_fn_knn.R |
# @description Wrapper around impute_fn_knn that imputes every missing column
#   of every row needing imputation. Serial (non-parallel) version.
# @param x_missing rows needing imputation, from impute_prelim(...)$x_missing;
#   column 1 carries the row's ID into x_complete, the rest are the data values
# @param x_complete The complete data matrix x (row names hold the row IDs)
# @param k ... input to kNN_impute
# @param q ... input to kNN_impute (the q of the q-norm distance)
# @param sigma ... calculated inside kNN_impute
# @param verbose if \code{TRUE} print status updates
impute_fn_knn_all.nonPar <- function(x_missing, x_complete, k, q, sigma,
                                     verbose) {
  # a single row arrives as a bare vector; normalize to a 1-row matrix
  if (!is.matrix(x_missing)) {x_missing <- matrix(x_missing, nrow=1)}
  # impute row-by-row -- non parallel
  x_missing_imputed <- t(apply(x_missing, 1, function(i) {
    rowID = as.numeric(i[1])      # ID linking back into x_complete
    i_original = unlist(i[-1])    # the row's data values (ID column dropped)
    x_comp_rowID <- which(as.integer(rownames(x_complete)) == rowID)
    # verbose option
    if(verbose) {print(paste("Imputing row", rowID, sep=" "))}
    missing_cols <- which(is.na(x_complete[x_comp_rowID,]))
    # calculate distances: the target row is stacked on top (ref = 1) of the
    # remaining rows of x_complete
    distances <- dist_q.matrix(rbind(x_complete[x_comp_rowID, ], x_complete[-x_comp_rowID,]),
                               ref= 1L, q= q)
    # within the given row, impute by column
    imputed_values <- unlist(lapply(missing_cols, function(j, distances) {
      # which neighbors have data on column j?
      neighbor_indices = which(!is.na(x_complete[,j]))
      # Map x_complete row numbers into `distances`, which excludes the target
      # row. NOTE(review): the offset direction looks inverted -- rows *after*
      # x_comp_rowID shift down by one in the stacked matrix, which would call
      # for ifelse(x_comp_rowID < neighbor_indices, -1, 0); confirm against
      # dist_q.matrix's return convention.
      # impute
      return(impute_fn_knn(x_complete[neighbor_indices, j], 
                           distances[neighbor_indices + ifelse(x_comp_rowID < neighbor_indices, 0, -1)],
                           k=k, sigma= sigma))
    }, distances= distances))
    i_original[missing_cols] <- imputed_values
    return(i_original)
  }))
  return(x_missing_imputed)
}
# @description Wrapper around impute_fn_knn that imputes every missing column
#   of every row needing imputation. Parallel counterpart of
#   impute_fn_knn_all.nonPar (one task per row via parRapply).
# @param x_missing rows needing imputation, from impute_prelim(...)$x_missing;
#   column 1 carries the row's ID into x_complete, the rest are the data values
# @param x_complete The complete data matrix x (row names hold the row IDs)
# @param k ... input to kNN_impute
# @param q ... input to kNN_impute
# @param sigma ... calculated inside kNN_impute
# @param leave_cores How many cores do you wish to leave open to other processing?
impute_fn_knn_all.Par <- function(x_missing, x_complete, k, q, sigma,
                                  leave_cores) {
  # a single row arrives as a bare vector; normalize to a 1-row matrix
  if (!is.matrix(x_missing)) {x_missing <- matrix(x_missing, nrow=1)}
  ### [AW 10/20] resolve edge case when nnodes > nrow(x_missing)
  nnodes <- min(nrow(x_missing), parallel::detectCores() - leave_cores)
  # FORK clusters share memory but are unavailable on Windows; fall back to
  # PSOCK there
  if (grepl("Windows", utils::sessionInfo()$running)) {cl <- parallel::makeCluster(nnodes, type= "PSOCK")}
  else {cl <- parallel::makeCluster(nnodes, type= "FORK")}
  # impute row-by-row -- parallel
  x_missing_imputed <- parallel::parRapply(cl= cl, x_missing, function(i, x_complete, sigma) {
    rowID = as.numeric(i[1])      # ID linking back into x_complete
    i_original = unlist(i[-1])    # the row's data values (ID column dropped)
    x_comp_rowID <- which(as.integer(rownames(x_complete)) == rowID)
    missing_cols <- which(is.na(x_complete[x_comp_rowID,]))
    # calculate distances (target row stacked on top, ref = 1)
    distances <- dist_q.matrix(x=rbind(x_complete[x_comp_rowID, ], x_complete[-x_comp_rowID,]),
                               ref= 1L, q= q)
    # within the given row, impute by column
    imputed_values <- unlist(lapply(missing_cols, function(j, distances) {
      # which neighbors have data on column j?
      neighbor_indices = which(!is.na(x_complete[,j]))
      # NOTE(review): same offset mapping as the serial version -- the
      # direction looks inverted; confirm against dist_q.matrix's convention.
      # impute
      return(impute_fn_knn(x_complete[neighbor_indices, j], 
                           distances[neighbor_indices + ifelse(x_comp_rowID < neighbor_indices, 0, -1)],
                           k=k, sigma= sigma))
    }, distances= distances))
    i_original[missing_cols] <- imputed_values
    return(i_original)
  }, x_complete= x_complete, sigma= sigma)
  # NOTE(review): not wrapped in on.exit(), so an error in the workers leaks
  # the cluster
  stopCluster(cl)
  # parRapply returns one long vector; reshape back to rows (ID column removed)
  x_missing_imputed <- matrix(x_missing_imputed, nrow= dim(x_missing)[1],
                              ncol= dim(x_missing)[2] - 1, byrow= TRUE)
  return(x_missing_imputed)
}
# @description wrapper to impute_all_knn (C++) for all observations and all columns.
# @param x_missing From impute_prelim(...)$x_missing
# @param x_complete The complete data matrix x
# @param k ... input to kNN_impute
# @param q ... input to kNN_impute
# @param sigma ... calculated inside kNN_impute
# @param leave_cores How many cores do you wish to leave open to other processing?
# [AW 11/11/2015] -- not needed. The c++ version of impute_all_knn is NOT Faster than the R version
# impute_fn_knn_all <- function(x_missing, x_complete, k, q, sigma,
# verbose, leave_cores= NULL) {
#
# if (is.null(leave_cores)) {
# x_missing_imputed <- .Call('imputation_impute_all_knn', PACKAGE = 'imputation',
# x_missing, x_complete, k, q, sigma, verbose)
# } else {
# nnodes <- min(nrow(x_missing), detectCores() - leave_cores)
# cl <- makeCluster(nnodes)
# x_missing_imputed <- do.call("rbind", clusterApply(cl,
# x= parallel:::splitRows(x_missing, nnodes),
# fun= impute_all_knn,
# x_complete = x_complete, k= k, q= q, sigma= sigma, verbose= verbose))
# stopCluster(cl)
# }
# return(x_missing_imputed)
# } |
8d50ecd0d8be9d3339f1a9a7739f6907eb5e4117 | f7f63838b028cc57a9824e575124b6f4ac0241a6 | /app/settings.R | 8c386817be9377cf92a32934110af83e0443b192 | [
"MIT"
] | permissive | RobinEchoAlex/flexTeaching | 3cfb01fb88b2c9dfdabdcfba4fd27e3f028ff542 | 033b8fb9959d05014d963429f1607a1147e810ca | refs/heads/master | 2023-08-16T13:27:17.208387 | 2021-07-19T13:18:58 | 2021-07-19T13:18:58 | 378,942,363 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,895 | r | settings.R | require(digest)
require(base64enc)
require(haven)
require(xlsx)
require(lubridate)
# Shared plotting palette (hex RGB)
cols1 = c("#edd9c0",
          "#c9d8c5",
          "#a8b6bf",
          "#7d4627",
          "#506C61",
          "#E06C50",
          "#004073")
# Default graphical parameters -- presumably applied via do.call(par, par.list)
# elsewhere in the app, TODO confirm. Horizontal axis labels (las = 1),
# heavier lines, slightly enlarged text, tight top/right margins.
par.list = list(bg = "white", #col = cols1[7], col.axis = cols1[7],
                #col.lab = cols1[7], col.main = cols1[7], col.sub = cols1[7],
                las = 1,
                lwd = 2,
                cex = 1.1,
                cex.axis = 1.1,
                cex.lab = 1.1,
                yaxs="i",mgp = c(2.5,.5,0), tcl = -0.25,
                mar=c(4.5,4.5,1,1))
# Variant with a narrow left margin
par.list2 = par.list
par.list2[['mar']] = c(4.5,1,1,1)
# Scan the assignments/ directory and return the assignment directory names,
# titled and ordered for display.
#
# Each assignment directory may carry a meta_info.csv whose first row can hold
# title, sortkey and group; missing fields fall back to the directory name
# (group falls back to "Other").
#
# Returns either a named character vector of directories (one assignment, or
# a single group; names are display titles) or a named list of such vectors,
# one per group, groups ordered by the mean sort rank of their members.
getAssignments <- function(sort_locale = "en_US"){
  a = dir("assignments/", no..=TRUE)
  # One column per assignment, rows = meta fields. Assumes every directory
  # yields the same set of fields so sapply returns a matrix (indexed below
  # as meta_info["field", ]) -- TODO confirm for heterogeneous meta_info.csv.
  meta_info = sapply(a, function(d){
    mpath = file.path("assignments",d,"meta_info.csv")
    if(file.exists(mpath)){
      meta_info0 = read.csv(mpath, header = TRUE, stringsAsFactors = FALSE)[1,]
      meta_info = unlist(meta_info0)
      names(meta_info) = names(meta_info0)
    }else{
      meta_info = c()
    }
    # BUG FIX: the title fallback previously tested is.null() twice, so an
    # assignment whose meta_info.csv lacked a title ended up titled NA. It now
    # mirrors the sortkey/group fallbacks below.
    meta_info["title"] = ifelse(is.null(meta_info["title"]) || is.na(meta_info["title"]),
                                d,
                                meta_info["title"])
    meta_info["sortkey"] = ifelse(is.null(meta_info["sortkey"]) || is.na(meta_info["sortkey"]),
                                  d,
                                  meta_info["sortkey"])
    meta_info["group"] = ifelse(is.null(meta_info["group"]) || is.na(meta_info["group"]),
                                "Other",
                                meta_info["group"])
    meta_info["dir"] = d
    return(meta_info)
  })
  # If there's only one assignment, return that
  if(length(a) == 1){
    names(a) = meta_info["title",]
    return(a)
  }
  # If there's more than one assignment, figure out all the groups
  groups = unique(meta_info["group",])
  meta_list = sapply(groups, function(el){
    a = meta_info["dir", meta_info["group",] == el]
    names(a) = meta_info["title", meta_info["group",] == el]
    sk = meta_info["sortkey", meta_info["group",] == el]
    # locale-aware ordering of the sort keys within the group
    ord = stringi::stri_order(sk, locale=sort_locale)
    a[ord]
  }, simplify = FALSE)
  # Only one group
  if(length(meta_list) == 1){
    meta_list = meta_list[[1]]
  }else{
    # Sort groups by average sort rank
    # ignore the tie problem for now
    # NOTE(review): ord0[ord0] permutes the order vector by itself; the rank
    # vector would be order(ord0). Confirm intent before changing.
    ord0 = stringi::stri_order(meta_info["sortkey",], locale=sort_locale)
    avg_ranks = sort(tapply(ord0[ord0], meta_info["group",], mean))
    meta_list = meta_list[names(avg_ranks)]
  }
  return(meta_list)
}
### http://stackoverflow.com/questions/10910698/questions-about-set-seed-in-r
# Seed the RNG deterministically from an arbitrary value (e.g. a string):
# crc32-hash x, read the 8 hex digits as a number, reduce modulo the largest
# representable integer, and pass that to set.seed().
set.seed.alpha <- function(x) {
  hexval <- paste0("0x",digest(x,"crc32"))
  # as.is = TRUE silences the R >= 4.0 "'as.is' should be specified" warning
  # without changing the hex-string -> numeric conversion.
  intval <- type.convert(hexval, as.is = TRUE) %% .Machine$integer.max
  set.seed(intval)
}
# Build a self-contained HTML snippet letting the user download the
# assignment's data set in the requested format.
#
# seed/secret select the data via assignment_list$getData(); `format` is one
# of "SPSS", "Excel", "R data", with CSV as the fallback. The data are written
# to a temp file, base64-embedded as a data URI, and wrapped in an <a> tag
# whose onclick saves the blob client-side. dataURItoBlob/saveAs are expected
# to exist in the page's JavaScript -- presumably injected elsewhere (e.g.
# writeHeaders()), TODO confirm. Returns "" when the assignment has no data.
getURIdata<-function(seed, secret, format, assignment_list, session = NULL){
  require(digest)
  # Name the download after the current page when a Shiny session is available
  if(!is.null(session)){
    url_pieces = strsplit(session$clientData[['url_pathname']],"/")[[1]]
    page_name = strsplit(url_pieces[length(url_pieces)], ".", fixed=TRUE)[[1]][1]
  }else{
    page_name = "NULL"
  }
  myData = assignment_list$getData(seed, secret, assignment_list$assignment)
  if(is.null(myData)){
    return("")
  }
  # getData may return either a data frame or a list carrying one under $data
  if(!is.data.frame(myData)){
    if(is.list(myData) & is.data.frame(myData[['data']])){
      myData = myData[['data']]
    }else{
      stop("Assignment configuration error. Data is not in correct format.")
    }
  }
  # Hash of the object makes the file name unique per generated data set
  obj_hash = digest::digest(myData)
  name_prefix = paste0("data_", page_name ,"_", seed , "_", assignment_list$assignment,"_", obj_hash)
  # Serialize to a temp file in the requested format (CSV is the fallback)
  if(format=="SPSS"){
    ext="sav"
    filenameWithExt = tempfile(name_prefix,fileext=paste0(".",ext))
    write_sav(myData, path=filenameWithExt)
  }else if(format=="Excel"){
    ext="xlsx"
    filenameWithExt = tempfile(name_prefix,fileext=paste0(".",ext))
    write.xlsx(myData, file=filenameWithExt)
  }else if(format=="R data"){
    ext="Rda"
    filenameWithExt = tempfile(name_prefix,fileext=paste0(".",ext))
    save(myData,file = filenameWithExt)
  }else{
    ext="csv"
    filenameWithExt = tempfile(name_prefix,fileext=paste0(".",ext))
    write.csv(myData, file=filenameWithExt)
  }
  divname = "dl.data.file"
  textHTML = "Click here to download the data."
  download_filename = paste0(name_prefix, ".", ext)
  # Embed the file as a base64 data URI so no server-side download endpoint is
  # needed; returned as the function's value (last expression)
  uri = dataURI(file = filenameWithExt, mime = "application/octet-stream", encoding = "base64")
  paste0("<a style='text-decoration: none; cursor: pointer;' id='",divname,"' title='",download_filename,"'></a>
         <script>
         var my_uri = '",uri,"';
         var my_blob = dataURItoBlob(my_uri);
         var div = document.getElementById('",divname,"');
         div.innerHTML = '",textHTML,"';
         div.setAttribute('onclick', 'saveAs(my_blob, \"",download_filename ,"\");');
         </script>")
}
# Gate access to an assignment by date.
#
# Reads <assignmentDir>/times.csv with columns `secret` (a regex) and `date`
# (ymd_hms text). A row applies when its regex matches the student's secret,
# in which case access requires the current time to be past that date. If no
# readable times.csv exists, the assignment is unrestricted.
#
# Returns TRUE when access is allowed; otherwise stops with a safeError
# (shiny is expected to be attached by the calling app -- TODO confirm).
assignment_time = function(assignmentDir, secret, tz = "Europe/London"){
  dates_fn = paste0(assignmentDir, "/times.csv")
  if(file.access(dates_fn, mode = 4) == -1){
    # no readable constraints file -> always allowed
    return(TRUE)
  }
  date.constraints = read.csv(dates_fn, header=TRUE)
  # hoisted out of the row loop: one timestamp for the whole check instead of
  # recomputing now(tz) per row
  cur = now(tz)
  # apply() coerces each row to character, which ymd_hms/grepl expect here
  check.dates = apply(date.constraints, 1, function(row){
    dl = ymd_hms(row['date'], tz = tz)
    if(grepl(row['secret'], secret)){
      cur > dl
    }else{
      TRUE  # this constraint does not apply to the given secret
    }
  })
  can_do = all(check.dates)
  if(!can_do){
    stop(safeError("You cannot access this resource at this time."))
  }
  return(can_do)
}
# Collect the app's page assets into a single headers file: top-level js/
# scripts plus each assignment's include/{css,js,html} files. CSS/JS are
# wrapped in <style>/<script> tags; HTML fragments are appended verbatim.
# The combined content is written to `file`; returns NULL invisibly.
writeHeaders = function( file ){
  require(htmltools)
  # NOTE(review): getAssignments() can return a list of grouped vectors;
  # iterating it here yields vectors rather than single names -- confirm.
  assignments = getAssignments()
  html.content = NULL
  allTags = tagList()
  # top js directory
  fs = dirFilesOnly(paste0("js/"), full.names = TRUE)
  for(f in fs){
    lns = paste(readLines(f),collapse="\n")
    allTags = tagAppendChild( allTags, tags$script(HTML(lns), type="text/javascript") )
  }
  for(a in assignments){
    # CSS
    fs = dirFilesOnly(paste0("assignments/",a,"/include/css/"), full.names = TRUE)
    for(f in fs){
      lns = paste(readLines(f),collapse="\n")
      allTags = tagAppendChild( allTags, tags$style(HTML(lns),type="text/css") )
    }
    # JS
    fs = dirFilesOnly(paste0("assignments/",a,"/include/js/"), full.names = TRUE)
    for(f in fs){
      lns = paste(readLines(f),collapse="\n")
      allTags = tagAppendChild( allTags, tags$script(HTML(lns), type="text/javascript") )
    }
    # HTML
    fs = dirFilesOnly(paste0("assignments/",a,"/include/html/"), full.names = TRUE)
    for(f in fs){
      # BUG FIX: this previously read `lns = pastereadLines(f)`, calling a
      # non-existent function; restored to the paste(readLines(...)) idiom
      # used in the CSS/JS loops above.
      lns = paste(readLines(f), collapse="\n")
      html.content = paste(html.content, lns,
                           sep = "\n", collapse="\n")
    }
  }
  all.content = paste(html.content,as.character(allTags),sep="\n")
  writeLines( all.content, con = file )
  invisible(NULL)
}
#https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r
# Convert number(s) to English words, e.g. 1234 ->
# "one thousand, two hundred and thirty four". Vectorized over x; x is rounded
# to the nearest integer first. Errors beyond the "trillion" scale.
numbers2words <- function(x){
  ## Function by John Fox found here:
  ## http://tolstoy.newcastle.edu.au/R/help/05/04/2715.html
  ## Tweaks by AJH to add commas and "and"
  # Recursive worker for a single number
  helper <- function(x){
    digits <- rev(strsplit(as.character(x), "")[[1]])   # least-significant digit first
    nDigits <- length(digits)
    if (nDigits == 1) as.vector(ones[digits])
    else if (nDigits == 2)
      if (x <= 19) as.vector(teens[digits[1]])
      else trim(paste(tens[digits[2]],
                      Recall(as.numeric(digits[1]))))
    else if (nDigits == 3) trim(paste(ones[digits[3]], "hundred and",
                                      Recall(makeNumber(digits[2:1]))))
    else {
      # Split off the leading group (thousands/millions/...) and recurse on both halves
      nSuffix <- ((nDigits + 2) %/% 3) - 1
      if (nSuffix > length(suffixes)) stop(paste(x, "is too large!"))
      trim(paste(Recall(makeNumber(digits[
        nDigits:(3*nSuffix + 1)])),
        suffixes[nSuffix],"," ,
        Recall(makeNumber(digits[(3*nSuffix):1]))))
    }
  }
  trim <- function(text){
    # Tidy leading/trailing whitespace, space before comma (plain-space
    # regexes; the original used the non-portable "\ " escape sequence)
    text=gsub("^ ", "", gsub(" *$", "", gsub(" ,",",",text)))
    # Clear any trailing " and"
    text=gsub(" and$","",text)
    # Clear any trailing comma
    gsub(" *,$","",text)
  }
  makeNumber <- function(...) as.numeric(paste(..., collapse=""))
  # Disable scientific notation while converting; restored on exit
  opts <- options(scipen=100)
  on.exit(options(opts))
  ones <- c("", "one", "two", "three", "four", "five", "six", "seven",
            "eight", "nine")
  names(ones) <- 0:9
  # BUG FIX: "seventeen" previously carried a stray leading space, so 17 came
  # out as " seventeen" and 117 as "one hundred and  seventeen".
  teens <- c("ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
             "sixteen", "seventeen", "eighteen", "nineteen")
  names(teens) <- 0:9
  tens <- c("twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty",
            "ninety")
  names(tens) <- 2:9
  x <- round(x)
  suffixes <- c("thousand", "million", "billion", "trillion")
  if (length(x) > 1) return(trim(sapply(x, helper)))
  helper(x)
}
# List only the non-directory entries of a directory.  All arguments are
# forwarded unchanged to dir() (e.g. path, full.names, pattern).
dirFilesOnly = function(...){
  entries = dir(...)
  is_directory = file.info(entries)$isdir
  entries[!is_directory]
}
926086f57fc5ad22a27ebf3b73aa610b4e05c9f0 | 8a97255cb66455dbef0cf01864a3b334cf20a66b | /data_table_basic/permutation.R | 25e1283f609fe23471a64f58f95d43f4cc93c531 | [] | no_license | AshutoshAgrahari/R_Practice | c56bbb3c0893e101305f150c0b74045f24cf5a44 | 4c31ce94f130b363f894177a1505ccac290547e0 | refs/heads/master | 2020-03-19T17:51:05.826260 | 2020-01-25T10:34:55 | 2020-01-25T10:34:55 | 136,781,266 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 150 | r | permutation.R |
library(data.table)
library(combinat)
# Four city codes whose orderings we enumerate.
a <- c("Mum","Blr","Hyd","Lko")
# combinat::permn(a) yields all 4! = 24 permutations as a list; each list
# element becomes a column of as.data.frame.list, so the transpose puts one
# permutation per row before writing data.csv (row names included by default).
write.csv(data.table(t(as.data.frame.list(combinat::permn(a)))),"data.csv")
35c0598ad80ce63cea24b3270fb8965b45b94349 | 8ce370da5bfb86eaea3cda891e74a01ac9ebdc1a | /examples/speed.r | 56ef72ffb48fa511e1588efc02497a76e70d556a | [] | no_license | damjad/OPI | d75515bc3b037e7c9df97ff7892536671b051a36 | b03d6c1f0fa1aec3c2c36b8b976a29f5014bf411 | refs/heads/master | 2021-01-02T09:00:25.918322 | 2017-08-10T15:46:19 | 2017-08-10T15:46:19 | 99,120,598 | 0 | 0 | null | 2017-08-10T15:46:20 | 2017-08-02T13:40:59 | R | UTF-8 | R | false | false | 3,600 | r | speed.r | #
# Measure reaction times to numStims presenations of
# Size III white-on-white stimuli at random locations
# in the central 30 degrees of the field.
# Save results in a file.
#
# This was used by me as a demo at my university's Open Day
# August 2012.
#
# Andrew Turpin 17/8/2012 (aturpin@unimelb.edu.au)
#
numStims <- 20 # number per go
# Device setup: the Octopus900 lines are kept (commented) for running on
# real hardware; this example targets the Gaussian response simulator.
# chooseOpi("Octopus900")
# opiInitialize(
# eyeSuiteJarLocation="C:/Program Files/EyeSuite/",
# eyeSuiteSettingsLocation="C:/Program Files/EyeSuite/",Mu
# eye="right"
# )
chooseOpi("SimGaussian")
opiInitialize(sd=2)
##########################################################################
# Helper function to create stim objects
##########################################################################
# Build an opiStaticStimulus: a Size III (26/60 degree) white stimulus at
# (x, y) degrees, 200 ms duration.  `db` is the level in dB (converted to
# cd/m^2 via dbTocd); `rt` is the response window in ms.
makeStim <- function(x, y, db, rt) {
    structure(
        list(x = x,
             y = y,
             level = dbTocd(db, 4000/pi),
             size = 26/60,
             color = "white",
             duration = 200,
             responseWindow = rt),
        class = "opiStaticStimulus")
}
##########################################################################
# Read high scores and get name of current user
##########################################################################
# This is the initial, empty file
#"name" "V1" "V2" "V3" "V4" "V5" "V6" "V7" "V8" "V9" "V10" "V11" "V12" "V13" "V14" "V15" "V16" "V17" "V18" "V19" "V20"
#"Dr T" "1" "2" "3" "4" "5" "6" "7" "8" "9" "10" "11" "12" "13" "14" "15" "16" "17" "18" "19" "20"
# Read the running high-score table (the file must already exist; its
# layout is shown in the commented example above) and append a blank row
# for this session.  NOTE(review): read.table() is called without header=,
# so the file is assumed to have no header row -- consistent with the
# col.names=FALSE used when it is written back below; confirm the initial
# file matches.
options(stringsAsFactors = FALSE)
globalScoreTable <- read.table("openDayReactionTimes.csv", colClasses=c("character",rep("numeric",numStims)))
globalScoreTable <- rbind(globalScoreTable, NA)
# Ask the player's name interactively and store it in column 1 of the new row.
cat("Enter Name (for high score table!): ")
globalScoreTable[nrow(globalScoreTable),1] <- readline()
##########################################################################
# Initialise matrix of stims and then present
##########################################################################
# Build numStims random stimulus locations in the central +-30 degrees and
# present them, recording one response time per stimulus in this session's
# high-score row (columns 2..numStims+1).
xy <- matrix(runif(2*numStims,-30,30), ncol=2) # random (x,y) coords
rt <- 1000 # response window (ms) per presentation
res <- NULL
for(i in 1:nrow(xy)) {
    s <- makeStim(xy[i,1], xy[i,2], 10, rt)
    # opiPresent() also takes the upcoming stimulus so hardware can
    # pre-load it; the final presentation simply reuses itself.
    if (i == nrow(xy))
        n <- s
    else
        n <- makeStim(xy[i+1,1], xy[i+1,2], 10, rt)
    r <- opiPresent(s,n)
    res <- c(res, list(r))
    # BUG FIX: the branches were inverted -- a *seen* stimulus was stored
    # as the flat 1000 ms value and an unseen one as r$time.  Per the
    # report printed at the end of this script, 1000 ms is the *penalty*
    # for missed (NotSeen) and early (NotValid) responses, while a seen
    # stimulus contributes its measured reaction time.
    if (r$seen)
        globalScoreTable[nrow(globalScoreTable),i+1] <- r$time
    else
        globalScoreTable[nrow(globalScoreTable),i+1] <- 1000
}
opiClose()
##########################################################################
# Store the high score
##########################################################################
# Persist the updated table (no header row; see the matching read above),
# then report this player's mean time, daily rank, penalty counts and the
# day's ten fastest mean times.
numberBits <- 2:(numStims+1)   # columns holding the per-stimulus times
hist(as.numeric(globalScoreTable[nrow(globalScoreTable), numberBits]))
write.table(globalScoreTable, file="openDayReactionTimes.csv", quote=TRUE, row.names=FALSE, col.names=FALSE)
# One mean per player (row); smaller is better.
means <- apply(globalScoreTable[,numberBits], 1, mean)
cat("====================================================")
cat("====================================================")
cat("Your time (on average) was ")
cat(means[nrow(globalScoreTable)])
cat(" which is ")
cat(rank(means)[nrow(globalScoreTable)])
cat("/")
cat(nrow(globalScoreTable))
cat(" fastest for the day.\n")
# Count the error codes returned by opiPresent over the session:
# "NotValid" = pressed before the stimulus, "NotSeen" = no response.
cat("You pressed early ")
cat(sum(unlist(lapply(res, "[", "err")) == "NotValid"))
cat(" times, which incurs max penalty of 1000 ms.\n")
cat("You did not see ")
cat(sum(unlist(lapply(res, "[", "err")) == "NotSeen"))
cat(" targets, which also incurs the max penalty.\n")
cat("\n\n----------------------\n")
cat("Fastest ten mean times\n")
cat("----------------------\n")
o <- head(order(means), 10)
cat(sprintf("%10.3fms %s\n",means[o], cbind(globalScoreTable[o,1])))
292656602bab9deb488a6b4d1dd49d04ef85c7ee | 0dca09f2b18e108d930b6ed20041f2a3737bd3cc | /man/sim_data.Rd | a98fd4e4fe197fcce2e3826b55ba44d3ca75e4d4 | [] | no_license | FocusPaka/SCBN | 39dee85e3e525bcd3e7221728dfab9c7f2bd0305 | 8d0142333da5054a21c35a0c151e899189169f64 | refs/heads/master | 2023-04-14T08:27:45.879997 | 2023-04-10T02:26:35 | 2023-04-10T02:26:35 | 145,002,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 583 | rd | sim_data.Rd | \name{sim_data}
\docType{data}
\alias{sim_data}
\title{A simulation dataset of orthologous genes between two different species.}
\description{
This data set gives 4149 orthologous genes, including read counts and
gene lengths, for the two different species.
}
\usage{sim_data}
\format{A data.frame containing 4149 orthologous genes.}
\source{
Zhou Y, Zhu JD, Tong TJ, Wang JH, Lin BQ, Zhang J (2018, pending publication).
A Novel Normalization Method and Differential Expression Analysis of RNA-seq
Data between Different Species.
}
\keyword{datasets}
|
78bf500b1fdcd399236e68de2ff58682353cf7b6 | 80ea90e1bf7a5651fa2153b1d897afc41e9f218a | /R/readpars.R | 7808500cb25e67924c6ee79f93b6053574a6ec86 | [
"MIT"
] | permissive | slphyx/MEEM | 4d8091210c4c3350ae0d1e81205c26bfaf859bfd | d54ec9314d2c9797ee297274b4c40897ed75813c | refs/heads/master | 2021-01-21T10:59:23.926578 | 2017-07-13T04:46:53 | 2017-07-13T04:46:53 | 83,507,141 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,211 | r | readpars.R |
# for removing NA from a data frame/list
# Drop NA entries from every element of a list (e.g. the result of
# as.list() on a data frame read with read.csv).
#
# Args:
#   inputDF: a list of vectors; elements may contain NAs and may end up
#     with different lengths after filtering.
# Returns:
#   A list with the same names where each element has its NAs removed.
removeNA <- function(inputDF){
  # Direct per-element subsetting replaces the previous eval(parse())
  # string-building, which was fragile (and broke for unnamed input).
  lapply(inputDF, function(x) x[!is.na(x)])
}
# Read a model parameter file (CSV, one column per parameter) and return
# the parameters as a named list, grouped into common, P. falciparum and
# P. vivax sections (see inline comments).
#
# Args:
#   parfile: path to the CSV parameter file; stops with an error when the
#     file does not exist.
# Returns:
#   A named list with one entry per parameter.  Per-column NAs are dropped
#   via removeNA(), so entries may have different lengths.
# NOTE(review): values are pulled with tmp$name, which partially matches
# names and silently yields NULL for a missing column -- confirm intended.
MM_readpars <- function(parfile){
  if(!file.exists(parfile)){
    stop("Your parameter file does not exist!")
  }
  tmp <- read.csv(parfile)
  tmp <- removeNA(as.list(tmp))
  return(list(
    # Common parameters to both species
    Pmaxv=tmp$Pmaxv,
    Pmaxf=tmp$Pmaxf,
    amp=tmp$amp,
    phi=tmp$phi,
    indexhet=tmp$indexhet,
    propgd=tmp$propgd,
    delta_m=tmp$delta_m,
    bites=tmp$bites,
    prob_h=tmp$prob_h,
    demog=tmp$demog,
    eff_itn=tmp$eff_itn,
    stableveff=tmp$stableveff,
    hl_net=tmp$hl_net,
    eff_vmw=tmp$eff_vmw,
    eff_his=tmp$eff_his,
    eff_oth=tmp$eff_oth,
    elneff=tmp$elneff,
    pgd=tmp$pgd,
    sensgd=tmp$sensgd,
    mask=tmp$mask,
    # Humans
    #falciparum parameters
    gamma_m=tmp$gamma_m,
    prob_m= tmp$prob_m,
    ps=tmp$ps,
    psn=tmp$psn,
    pr=tmp$pr,
    prn=tmp$prn,
    nuc=tmp$nuc,
    nua=tmp$nua,
    nus=tmp$nus,
    psev=tmp$psev,
    pmort=tmp$pmort,
    pmortt=tmp$pmortt,
    rep_fat=tmp$rep_fat,
    tausev=tmp$tausev,
    gamma_h=tmp$gamma_h,
    zeta_a=tmp$zeta_a,
    zeta_n=tmp$zeta_n,
    chi=tmp$chi,
    # this will change if the RDT detection limit changes chi=chi_old*(mn_c-dl_RDTold)/(mn_c-dl_RDTnew)
    omega=tmp$omega, # loss of immunity
    #control
    nut=tmp$nut, # recovery rate under treatment (r_t)
    nuq=tmp$nuq, # recovery rate under quinine treatment (r_q)
    ptf=tmp$ptf, #Probability of treatment failure
    ptfc=tmp$ptfc, # Probability of being clinical after treatment failure
    ptftr=tmp$ptftr, #Probability of seeking trt if clinical, after treatment failure
    # diagnostics
    dl_RDT=tmp$dl_RDT, # standard RDT detection limit
    dl_micro=tmp$dl_micro, # micro detection limit
    dl_qPCR=tmp$dl_qPCR, # standard PCR detection limit
    # dl_inf=log10(26), # detection limit for infectiousness
    mn_n=tmp$mn_n, # mean parasiteamia for sub micro
    mn_a=tmp$mn_a, # mean parasiteamia for asym
    mn_c=tmp$mn_c, # mean parasiteamia for clinical
    mn_s=tmp$mn_s, # mean parasiteamia for severe
    sd_n=tmp$sd_n, # sd parasiteamia for sub micro
    sd_a=tmp$sd_a, # sd parasiteamia for asym
    sd_c=tmp$sd_c, # sd parasiteamia for clinical
    sd_s=tmp$sd_s, # sd parasiteamia for severe
    #vivax parameters
    vgamma_m=tmp$vgamma_m, # latent period
    vprob_h=tmp$vprob_h, # probability that a bite will result in infection (human to mosquito)
    vps=tmp$vps, # probability of clinical if non-immune
    vpsn=tmp$vpsn, # probability of sub-patent given not clin, non-immune
    vpr=tmp$vpr, # probability of clinical if immune
    vprn=tmp$vprn, # probability of sub-patent given not clin, immune
    vnuc=tmp$vnuc, # recovery from clinical symptoms untreated (r_c)
    vnua=tmp$vnua, # recovery from asym untreated (r_a)
    vnus=tmp$vnus, # recovery from severe (r_s)
    vpsev=tmp$vpsev, # probability of severe disease given clinical
    vpmort=tmp$vpmort, # proportional of all untreated severe cases that die (theta)
    vpmortt=tmp$vpmortt, # proportional of all treated severe cases that die (theta)
    vtausev=tmp$vtausev, # probability that a severe infection is treated
    vgamma_h=tmp$vgamma_h, # latent period in humans
    vzeta_a=tmp$vzeta_a, # relative infectiousness of asym to clinical
    vzeta_n=tmp$vzeta_n, # relative infectiousness of submicro to clinical
    vomega=tmp$vomega, # loss of immunity
    vprel=tmp$vprel, # probability of relapse
    vincprel=tmp$vincprel, #Probabily of relapse due to triggering
    vrel=tmp$vrel, # rate of relapse
    vph=tmp$vph, # probability of recovering with hypnozoites under ACT
    vphprim=tmp$vphprim, # probability of recovering with hypnozoites under primaquine
    vkappa=tmp$vkappa , # hypnozoite death rate
    vnut=tmp$vnut, # recovery rate under treatment (r_t)
    vnuq=tmp$vnuq, # recovery rate under quinine treatment (r_q)
    vnup=tmp$vnup, # recovery rate under primaquine
    vptf=tmp$vptf, #Probability of treatment failure on FLT
    vptfp=tmp$vptfp, #Probability of treatment failure on Primaquine( clinical failure and adherance)
    vptfc=tmp$vptfc, # Probability of being clinical after treatment failure
    vptftr=tmp$vptftr, #Probability of seeking trt if clinical, after treatment failure
    vdl_RDT=tmp$vdl_RDT, # standard RDT detection limit
    vdl_micro=tmp$vdl_micro, # micro detection limit
    vdl_qPCR=tmp$vdl_qPCR, # standard PCR detection limit
    vmn_n=tmp$vmn_n, # mean parasiteamia for sub micro
    vmn_a=tmp$vmn_a, # mean parasiteamia for asym
    vmn_c=tmp$vmn_c, # mean parasiteamia for clinical
    vmn_s=tmp$vmn_s, # mean parasiteamia for severe
    vsd_n=tmp$vsd_n, # sd parasiteamia for sub micro
    vsd_a=tmp$vsd_a, # sd parasiteamia for asym
    vsd_c=tmp$vsd_c, # sd parasiteamia for clinical
    vsd_s=tmp$vsd_s, # sd parasiteamia for severe
    t1=tmp$t1, # Entanglement 1 - dual treatment switch
    t2=tmp$t2 # Entanglement 2 - triggering relapse from Pf infection switch
  ))
}
|
fc92cf586540646b60a3eebf714d75a830ef7e19 | 44da3f9d182ce027b2758f5919f1eaa91927363a | /Module-6-Example-4.R | 2c2f0e90161f78bb6e71dbbe9f36d187857b1061 | [
"Apache-2.0"
] | permissive | febikamBU/R-Examples | 1af609ec2bd0789cdff347431b67f9f785d067ae | 748789cbf37507ad0992efa78cf2117703081c90 | refs/heads/master | 2021-06-25T18:48:59.999242 | 2021-01-29T20:24:36 | 2021-01-29T20:24:36 | 204,198,013 | 0 | 0 | Apache-2.0 | 2019-08-24T18:41:13 | 2019-08-24T18:41:13 | null | UTF-8 | R | false | false | 1,918 | r | Module-6-Example-4.R | # setwd("SET THE Working Director to THE PATH TO THIS DIRECTORY")
# install.packages("aod")
# install.packages("pROC")
library(aod)
library(stats)
library(pROC)
data<-read.csv("Datasets/cevent.csv")
attach(data)
# print a small part of the data
head(data)
# multiple logistic regression
data$male <- ifelse(data$sex =="M", 1, 0)
m2<-glm(data$event ~ data$chol + data$male + data$age, family=binomial)
summary(m2)
# ROC curve
# install.package("pROC")
library(pROC)
# using model with chol and sex and age
data$prob <-predict(m2, type=c("response"))
roc.info <-roc(data$event ~ data$prob, legacy.axes=T)
# different thresholds
roc.info$thresholds
roc.df <-data.frame(
tpp=roc.info$sensitivities * 100,
fpp=(1-roc.info$specificities) * 100,
threshholds = roc.info$thresholds
)
# look at the head of this dataframe
head(roc.df)
# look at the tail of this dataframe
tail(roc.df)
# Filter all of the values bigger than 40 and smaller than 80
roc.df[roc.df$tpp > 40 & roc.df$tpp < 80, ]
#############
# Partial ROC curve
############
par(pty="s")
roc(data$event ~ data$prob, plot=TRUE, legacy.axes=T, percent=T,
xlab="False Positive (%)", ylab="True Positive (%)", col="blue", lwd=4,
print.auc=T, print.auc.x=45, partial.auc=c(100, 80),
auc.polygon = T, auc.polygon.col="gray"
)
#
# Compare ROC curves
#
# Let us do another classification like randomForest
# install.packages("randomForest")
library(randomForest)
# Build the model for randomForest
rf.model <- randomForest(formula = factor(event) ~ . , data=data, importance = TRUE)
rf.model
par(pty="s")
# Logistic
roc(data$event ~ data$prob, plot=TRUE, legacy.axes=T, percent=T,
xlab="False Positive (%)", ylab="True Positive (%)", col="blue", lwd=4, print.auc=T, print.auc.x=45)
# Random Forest
plot.roc(data$event, rf.model$votes[,1], percent=T, col="green", lwd=4, print.auc=T, add=T, print.auc.y=40)
|
a6d5a78c074879110aaf65a5bf2cbb15857ca7ef | d028256882be77ba6f4188da0b4b462c8db8eefb | /R/RcppExports.R | 19b9191f669c85256bd6a89685a8928e7f420dd1 | [] | no_license | njitclass/cidian | 810bc03e7c6aad414d27ea67b27eb904acff5935 | 56b32a9d5ae9efcc5c6a7932ccae412bf5ab3296 | refs/heads/master | 2021-01-18T14:49:05.740685 | 2016-03-14T00:38:22 | 2016-03-14T00:38:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 419 | r | RcppExports.R | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin wrapper over the compiled routine 'cidian_loadUserDict'; presumably
# loads a user dictionary file with default weight/tag values (name-based
# inference -- actual behavior lives in the C++ source).  Generated
# binding: regenerate with Rcpp::compileAttributes(), do not edit by hand.
loadUserDict <- function(filePath, defaultWeight, defaultTag) {
    .Call('cidian_loadUserDict', PACKAGE = 'cidian', filePath, defaultWeight, defaultTag)
}
# Thin wrapper over the compiled routine 'cidian_decode_scel_cpp';
# presumably decodes a Sogou .scel dictionary into `output` (name-based
# inference -- actual behavior lives in the C++ source).  Generated
# binding: regenerate with Rcpp::compileAttributes(), do not edit by hand.
decode_scel_cpp <- function(file, output, freq, disp) {
    .Call('cidian_decode_scel_cpp', PACKAGE = 'cidian', file, output, freq, disp)
}
|
4183e77e3e639f9036c23263aba84b4ab80ea072 | d75b7bc015b47d94254bcc9334ba15972d3ec9a1 | /1. FIRST YEAR/Introduction to Computing/Exercices_Laura/exercici102.R | 39229b4f04ecaad4c4e22ac8c9acbe0f7193666f | [] | no_license | laurajuliamelis/BachelorDegree_Statistics | a0dcfec518ef70d4510936685672933c54dcee80 | 2294e3f417833a4f3cdc60141b549b50098d2cb1 | refs/heads/master | 2022-04-22T23:55:29.102206 | 2020-04-22T14:14:23 | 2020-04-22T14:14:23 | 257,890,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 199 | r | exercici102.R | v <- c(1,3,2,5,6,4,7,8)
# Sample median of a numeric vector.
#
# Args:
#   v: numeric vector (length >= 1).
# Returns:
#   The middle value for odd lengths, or the mean of the two central
#   values for even lengths.
mediana <- function(v){
  v <- sort(v)
  N <- length(v)
  if (N%%2 ==0){
    # Even length: average the two central order statistics.
    return(mean(c(v[N/2], v[N/2 + 1])))
  } else {
    # Odd length: use exact integer arithmetic for the middle index.
    # (The previous code indexed v[N/2 + 1] and relied on R silently
    # truncating the fractional index.)
    return(v[(N + 1) %/% 2])
  }
}
346dfd4c3c45d7716003cf80c7722eb88c4186d4 | dab82520f422d58dbaca5b1e4cb7db74cc3dec98 | /R/new_form.R | cca8d4818e600d7c018fc9a232ef832a212828fb | [] | no_license | urodoct/iuform | 7d9c437fb729ffeffe24d799e4d0467c6423c543 | 689e51289e9a65020085cfa3965cf2074fa9d643 | refs/heads/master | 2023-01-20T16:29:52.783748 | 2020-11-30T15:19:23 | 2020-11-30T15:19:23 | 302,903,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,422 | r | new_form.R | #' to start the form, ensures only one form
#'
#' @param name the title/name of the form
#' @param chaptered if many section will be in the form
#'
#'
#' @export
# Create the scaffolding for a new form.  Reads ./bobo.R to verify the
# project does not already contain a form (the "@" marker is written once
# per form); if it does, an explanatory message is shown and nothing is
# written.  Otherwise the shiny fluidPage() template is appended to ./ui.R
# and the form name is recorded in ./bobo.R.  Both files must already
# exist.  Relies on the package helper freq_word() plus glue/readr/rlang.
new_form <- function(name,chaptered=FALSE){
  #quoting the user arguments using enquo
  # enlabel <- rlang::enquo(label)
  enname <- rlang::enquo(name)
  #quoting them again using paste and quo_name
  # label <- paste(rlang::quo_name(enlabel))
  name <- paste(rlang::quo_name(enname))
  form_name <- name
  # firstsection_name <-label
  ##### END of quoting/unquoting
  #######################################################
  #check if there is already a form name in bobo.R
  #read it
  bobo <- readr::read_file(glue::glue("./bobo.R"))
  # @ is unique for new form
  form_exist <- freq_word(bobo, "@" )
  # BUG FIX: the error text below was built with glue() but its value was
  # discarded (it was only wrapped in parentheses), so a second new_form()
  # call refused to write anything *silently*.  message() surfaces the
  # explanation without aborting the session.
  if(form_exist>0) message(glue::glue("\n\n\n",
                      "ERROR!!\n\n",
                      "Sorry but you can only do one form per project!\n\n",
                      "Alternatevily, you can try to:\n",
                      "1- Start a new_section() or\n",
                      "2- Start new project:\n\n",
                      "File => New Project => New directory => Start new IUform\n\n")))
  else
  {
    part1 <- glue::glue(# add here
      "!1\n\n",
      "!A\n\n",
      "ui <- fluidPage(\n",
      "  tags$style(formCSS),\n",
      "  shinyjs::useShinyjs(),\n",
      "  shinyjs::inlineCSS(appCSS),\n",
      "  tags$fieldset('{form_name}'),\n\n",
      ######Start of the main div
      "  div(\n",
      "id = ","'form'", ",",
      "   !E",",\n\n",
      "   actionButton(","'submit'",",","'Submit'",",","class=", "'btn-primary')",",\n\n",
      "   shinyjs::hidden(\n",
      "     span(id =","'submit_msg'",",","'Submitting...')",",\n\n",
      "     div(id=","'error'",",\n",
      "        div(br(), tags$b(","'Error: ')",","," span(id =","'error_msg'))\n",
      "     )\n",
      "   )\n",
      "  ),\n",
      ##### End of the main div
      #####Adding JAVASCRIPT CODE
      "  shinyjs::hidden(\n",
      "    div(\n",
      "      id =","'thankyou_msg'",",\n\n",
      "      h3(","'Thanks, your response was submitted successfully')",",\n\n",
      "      actionLink(","'submit_another'",",","'Submit another response')\n",
      "    )\n",
      "  ),\n",
      ##### END of JAVASCRIPT CODE
      ##### Adding place holder for the table of the data
      "  DT::dataTableOutput(","'responsesTable'),\n\n",
      ##### Download data place holder
      "  downloadButton(","'downloadBtn'",",","'Download responses')\n\n",
      #adding a section holder !S
      "  {if (chaptered) return ('!S') else return ('')}\n\n",
      # add more here if needed
      "",
      ")")
    contents <- paste0(part1)
    form_names <- glue::glue("\n\n" ,'@"{form_name}"\n','&"form"\n\n',"\n\n" )
    readr::write_file(contents, glue::glue("./ui.R"), append = T)
    readr::write_file(form_names, glue::glue("./bobo.R"), append = T)}
}
|
10e8a47741eaedf250b8f5d05b042af5fc8d4f59 | 86195dac7c6d203b5d83b6e0b17c2a7140bddfdb | /case_control_env_power.R | ab99bb4c8875b3c528cd2aa4840c65d7f4791a77 | [
"MIT"
] | permissive | adams-cam/power_GxE | 1334ba4b0fdf9333e0246a444a32ac98600e1333 | 1f18c088c2bd4592ef6c2f1bb81c5d054c903850 | refs/heads/master | 2023-03-16T02:11:14.727649 | 2023-03-11T01:41:06 | 2023-03-11T01:41:06 | 292,380,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,649 | r | case_control_env_power.R |
####create fluid row####
fluidRow(
#### put input area here ####
column(4,
style = "background-color: #E8E8E8",
##change the title here
div(style=paste0("display: inline-block; vertical-align:top; ",
"text-align:center; width: 100%;"),
strong(h3("Environment Effect"))),
br(),
##put input boxes here
splitLayout(cellWidths = c("50%", "50%"),
textInput("ccp_e_n_case", "N case", value = "1000", width = 100),
textInput("ccp_e_k", "Controls:Case", value = "1",
width = 1000)),
splitLayout(cellWidths = c("34%", "33%", "33%"),
textInput("ccp_e_pg", "Pe", value = "0.5", width = 60),
textInput("ccp_e_pg_ll", "From:", width = 60),
textInput("ccp_e_pg_ul", "To:", width = 60)),
splitLayout(cellWidths = c("34%", "33%", "33%"),
textInput("ccp_e_org", "ORe", value = "1.1", width = 60),
textInput("ccp_e_org_ll", "From:", width = 60),
textInput("ccp_e_org_ul", "To:", width = 60)),
textInput("ccp_e_alpha", "Alpha", value = "0.05", width = 60),
textInput("ccp_e_num_tests", "# Tests", value = "1", width = 100),
#br(),
#submitButton("Calculate")
actionButton("ccp_e_do", "Submit")
), ## close column 1
#### put output here ####
column(8,
tabsetPanel(
tabPanel("Table:",
tableOutput("tbl_cc_env_power")),
tabPanel("Plot:",
plotOutput("cc_env_power_plot", click = "cc_env_power_plot_click",
dblclick = "cc_env_power_plot1_dblclick",
brush = brushOpts(
id = "cc_env_power_plot1_brush",
resetOnNew = TRUE
)),
verbatimTextOutput("cc_env_power_info")),
tabPanel("Help:",
withMathJax(),
HTML(markdown::markdownToHTML(knit("case_control_env_power.Rmd",
quiet = T))))
)## close tabset panel
) ## close column
) ##close fluid row
|
9e2025b44cdbcef4f7f06781867a4e2fb07a5c28 | d7a6536d38949396ef6eefe4de44fe6579b13b42 | /analysis/TF_Enrichment_near_SVs/src/PlotDistanceFromPeaksToSVs-d.R | 196c2dbd8bb06e19f0fe5a73d6d8f4fad39554e0 | [] | no_license | RoccoDLucero/epig_str_mut_germ | 3834350ceb4f0392fbe291160f4d662da6aa4092 | 8f0db553e0f1d92a3c18923561a7616547ddbe2a | refs/heads/master | 2021-01-25T04:42:27.430354 | 2017-06-09T23:33:10 | 2017-06-09T23:33:10 | 93,470,128 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,639 | r | PlotDistanceFromPeaksToSVs-d.R |
ks.test(x = RARE_AFR_CNV.SV.bed.sampled$V1,y = RARE_EUR_CNV.SV.bed.sampled$V1)
# Pairwise two-sample Kolmogorov-Smirnov tests on the distance values
# (column V1) of the population-specific rare-CNV tables.  These *.sampled
# data frames are assumed to already exist in the workspace -- TODO
# confirm where they are created.
ks.test(x = RARE_AFR_CNV.SV.bed.sampled$V1,y = RARE_EUR_CNV.SV.bed.sampled$V1)
ks.test(x = RARE_AFR_CNV.SV.bed.sampled$V1,y = RARE_EAS_CNV.SV.bed.sampled$V1)
ks.test(x = RARE_AFR_CNV.SV.bed.sampled$V1,y = RARE_AMR_CNV.SV.bed.sampled$V1)
ks.test(x = RARE_AFR_CNV.SV.bed.sampled$V1,y = RARE_SAS_CNV.SV.bed.sampled$V1)
ks.test(x = RARE_AMR_CNV.SV.bed.sampled$V1,y = RARE_EAS_CNV.SV.bed.sampled$V1)
# Whitespace-separated distance file for the AFR rare CNVs.
# NOTE(review): absolute Windows path -- not portable.
RARE_AFR_CNV.SV.bed <- read.csv(
"C:/Users/Rocco/Dropbox/BRL/Methylation_Struct_Mut/ROI_Data/BED_Format/Output/tmpdist/RARE_AFR_CNV.SV.bed.dist",
sep="", stringsAsFactors=FALSE
)
# ECDF of the AFR distances, overlaying the ECDF of each matching file.
# NOTE(review): list.files() treats `pattern` as a regex, not a shell
# glob -- the trailing "*" only makes the previous character optional;
# confirm the intended matches.
RAR_CNV_List = list.files(pattern = "RAR.*AFR.*CNV.SV.bed.sampled*")
plot(ecdf(RARE_AFR_CNV.SV.bed[,1]))
for(file in RAR_CNV_List ){
lines(ecdf(read.table(file)[,1]),col = 'purple')
}
# Shuffled-control files drawn in black.
RAR_CNV_Control_List = list.files(pattern = "RAR.*AFR.*CNV.SV.bed.sampled.*toshuff")
for(file in RAR_CNV_Control_List ){
# NOTE(review): this bare expression reads the file and discards the
# result (dead code); the next line re-reads the same file.
read.table(file)[,1]
lines(ecdf(read.table(file)[,1]),col = 'black')
}
# Common CNVs in aquamarine, re-drawing the AFR reference curve first.
COM_CNV_List = list.files(pattern = "COM.*CNV.SV.bed.sampled*")
lines(ecdf(RARE_AFR_CNV.SV.bed[,1]))
for(file in COM_CNV_List ){
lines(ecdf(read.table(file)[,1]),col = 'aquamarine')
}
# Duplications; RARE_AFR_DUP.SV.bed.shuffleChIP must be defined elsewhere
# in the workspace -- TODO confirm.
DUP_List = list.files(pattern = "*DUP.SV.bed.sampled*")
lines(ecdf(RARE_AFR_DUP.SV.bed.shuffleChIP),col ='hotpink2')
for(file in DUP_List ){
lines(ecdf(read.table(file)[,1]),col = 'red')
}
# Deletions; RARE_AFR_DEL.SV.bed.shuffleChIP likewise comes from elsewhere.
DEL_List = list.files(pattern = "*DEL.SV.bed.sampled*")
lines(ecdf(RARE_AFR_DEL.SV.bed.shuffleChIP[,1]),col ='goldenrod')
for(file in DEL_List ){
#later add code to plot line color by population
lines(ecdf(read.table(file)[,1]),col = 'chartreuse') ##rgb(runif(5),runif(5),runif(5)))
}
49c360a9ab89f57ed9b3c6dd766b6d73344361f4 | ec3a7973db5d8c855e3f1d30afbf27906ecb41d2 | /slides-pdf/splice_pdf.R | e31eeb6098f66af54ad0fe9f378c4e9f6dd52c46 | [] | no_license | migariane/workshops-1 | 3da2e59979fa88457af633e6d30dd93eface5e88 | 755eff9d98c18d20a068703cfebb273dfb1652fe | refs/heads/master | 2020-03-17T07:22:21.721995 | 2018-05-14T06:58:48 | 2018-05-14T06:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 639 | r | splice_pdf.R | library(staplr)
setwd()
remove_pages(c(1:24, 26:29), input_filepath = "2-packages.pdf",
output_filepath = "use_github.pdf")
remove_pages(c(1:28), input_filepath = "2-packages.pdf",
output_filepath = "license_page.pdf")
remove_pages(c(2:13), input_filepath = "2b-github.pdf",
output_filepath = "title_page.pdf")
remove_pages(c(1:2, 13), input_filepath = "2b-github.pdf",
output_filepath = "content.pdf")
staple_pdf(input_files = c("title_page.pdf", "use_github.pdf",
"content.pdf", "license_page.pdf"),
output_filepath = "../eRum2018/2b-github.pdf")
|
50cf3173eacf416c547344e2f0e186fa36e9fe0d | 6ee35707d967a44a9fcdebb24a406e2048886413 | /Additional_Files/PartialDependence_Extreme_Cases.R | 63330a80e5aa23bab5cb16a65632a114e67a0dc1 | [] | no_license | RaphaelCouronne/Benchmark_RF-LR_OpenML | 31d9c1f140abd451d9b06034b9fece2fed6349ff | 67a2dd2d6afba5033fbbe25f5ba7a6970eb1405f | HEAD | 2018-09-12T01:12:57.810464 | 2018-06-05T13:59:29 | 2018-06-05T13:59:29 | 65,920,602 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,152 | r | PartialDependence_Extreme_Cases.R | # requires
# Build the Delta-PartialDependence vs Delta-accuracy scatter over all
# benchmark datasets in the batchtools registry `regis`, highlight three
# extreme datasets (min/max partial-dependence difference, max accuracy
# difference), save the figure, and render each extreme case via
# extrem_cases().  Relies on the global `clas_used` (dataset metadata) and
# on batchtools/ggplot2 being attached -- TODO confirm.
partialDependenceAnalysis_extremCases = function(regis, seed = 1) {
  police.size = 18
  # Get the datasets without errors
  ids = 1:259
  ids = ids[-which(ids %in% c(14))] # error on dataset 14
  result.pdp = reduceResultsList(ids = ids, reg = regis)
  result.pdp.df = do.call("rbind", result.pdp)
  # NOTE(review): assigned but never used below (dead code).
  Partiaresult.pdp = result.pdp.df
  #clas_used.pdp = clas_used[ids,]
  # Plot of the Partial Dependence for all datasets
  # df.all = data.frame(result.pdp.df, data.id = clas_used.pdp$data.id,
  # n = clas_used.pdp$number.of.instances,
  # p = clas_used.pdp$number.of.features,
  # logpn = log(clas_used.pdp$number.of.features/clas_used.pdp$number.of.instances))
  #
  df.all = data.frame(result.pdp.df, data.id = regis$defs$problem[ids])
  df.all = df.all[df.all$data.id %in% clas_used$data.id,]
  df.all$data.id = as.integer(sapply(df.all$data.id, toString))
  # get the extrem cases
  data.id.highDiff = df.all$data.id[order(df.all$difference.imp.weight.all)][length(df.all$data.id)-2] # max diff model
  data.id.lowDiff = df.all$data.id[order(df.all$difference.imp.weight.all)][2] # min diff model
  data.id.highAcc = df.all$data.id[order(df.all$diff.acc)][length(df.all$diff.acc)-2] # max diff acc
  id.highDiff = which(df.all$data.id==data.id.highDiff)
  id.lowDiff = which(df.all$data.id==data.id.lowDiff)
  id.highAcc = which(df.all$data.id==data.id.highAcc)
  # Plot the Difference of Partial Dependence vs Difference Accuracy
  # NOTE(review): duplicate of the assignment at the top of the function.
  police.size = 18
  # Case labels: 0 = ordinary dataset, 1/2/3 = the three extreme cases.
  df.all$case = 0
  df.all$case[id.highDiff]=2
  df.all$case[id.lowDiff]=1
  df.all$case[id.highAcc]=3
  df.all$is.case = df.all$case >0
  df.all$case = as.factor(df.all$case)
  p <- ggplot(df.all, aes(difference.imp.weight.all, diff.acc))
  p = p + geom_point(aes(colour=factor(case),
  fill = factor(is.case),
  shape = is.case,
  size = is.case) )
  p = p + labs(x = bquote(paste(Delta, "PartialDependence")), y = bquote(paste(Delta, "acc")))
  p = p + theme(legend.title=element_blank(), text = element_text(size=police.size))
  p = p + guides(shape = FALSE, fill = FALSE, size = FALSE)
  p = p + scale_colour_manual(breaks=c("1","2","3"),
  labels = c("Case 1", "Case 2", "Case 3"),
  values=c("black", "#D55E00","#0072B2","#009E73"))
  p = p + scale_shape_manual(values = c(1, 15))
  p = p + scale_size_manual(values = c(2, 3))
  print(p)
  jpeg(filename = "Data/Pictures/AdditionalFigures/AdditionalFigure2_PDP.jpeg", width = 700, height = 400)
  plot(p)
  dev.off()
  ## Before : 1479, 923, 1460
  ## Now Clas Used 1443, 846, 720
  ## Now real class
  print(paste(data.id.lowDiff, data.id.highDiff, data.id.highAcc))
  # plot the extrem cases
  # Case 1 Low difference in Partial Dependence
  extrem_cases(data.id.lowDiff, seed = 1, path.out = "Data/Pictures/AdditionalFigures/AdditionalFigure3_PDPcase1.jpeg")
  # Case 2 High difference in Partial Dependence
  extrem_cases(data.id.highDiff, seed = 1, path.out = "Data/Pictures/AdditionalFigures/AdditionalFigure4_PDPcase2.jpeg")
  # Case 3 High difference in Accuracy
  extrem_cases(data.id.highAcc, seed = 1, path.out = "Data/Pictures/AdditionalFigures/AdditionalFigure4_PDPcase3.jpeg")
}
# Further analysis for the extreme cases
# Case 1 : High Accuracy Low Difference
# For one OpenML dataset id: fit a random forest and a logistic regression
# (mlr learners), plot partial dependence for the two most important RF
# features, and save a 2x2 cowplot grid (PDP curves + feature scatter) to
# `path.out`.  Depends on OpenML/mlr/ggplot2/cowplot being attached.
# NOTE(review): set.seed(1) on entry ignores the `seed` argument except
# for the generatePartialDependenceData() calls further down -- confirm.
# NOTE(review): library()/detach() inside the loop (reshape2) is fragile;
# prefer namespaced calls only.
extrem_cases = function(data.id, seed = 1, path.out = "Data/Pictures/AdditionalFigures/AdditionalFigure2_PDP.jpeg") {
  print(data.id)
  set.seed(1)
  police.size = 18
  # load the task
  omldataset = getOMLDataSet(data.id)
  #omldataset = getOMLDataSet(334) test
  # Some OpenML datasets lack a declared target; default it to "Class".
  if (identical(omldataset$target.features, character(0))) {
    omldataset$target.features="Class"
    omldataset$desc$default.target.attribute="Class"
  }
  task = convertOMLDataSetToMlr(omldataset)
  task$task.desc$id = paste("dataset")
  task
  task$env$data
  # ## See which features are numeric or symbolic
  # target = task$task.desc$target
  # X.train = task$env$data[!colnames(task$env$data) %in% c(target)]
  #
  # # get the number and type of features
  # type.list = sapply(X.train, class)
  #
  # # get the index of types
  # index.numeric = which(type.list == "numeric")
  # index.factor = which(type.list == "factor")
  # features.list.numeric = features.list[index.numeric]
  # features.list.factor = features.list[index.factor]
  # train the models
  lrn.classif.rf = makeLearner("classif.randomForest",
  importance = TRUE, predict.type = "prob", fix.factors.prediction = TRUE)
  fit.classif.rf = train(lrn.classif.rf, task)
  lrn.classif.lr = makeLearner("classif.logreg", predict.type = "prob", fix.factors.prediction = TRUE)
  fit.classif.lr = train(lrn.classif.lr, task)
  # Column 3 of the RF importance matrix; negatives are clamped to zero
  # before converting to relative importance percentages.
  fv = fit.classif.rf$learner.model$importance[,3]
  fv[fv<0]=0
  feature_importance_order = order(-fv)
  fv.percentage = fv/sum(fv)
  # Do the PDP between 5% and 95% of the feature data
  gridsize = 20
  features.list = row.names(fit.classif.rf$learner.model$importance)
  target = task$task.desc$target
  plot_pdp_list = NULL
  feature.chosen = NULL
  fv.percentage.chosen = NULL
  # Loop over the two most important features.
  for (i in c(1:2)) {
    print(i)
    index.temp = feature_importance_order[i]
    feature.temp = features.list[index.temp]
    feature.chosen[i] = feature.temp
    fv.percentage.chosen[i] = fv.percentage[index.temp]
    # quantiles = quantile(task$env$data[[feature.temp]], probs = c(0.05, 0.95))
    # quantiles_list = as.list(quantiles)
    # names(quantiles_list) = c(feature.temp, feature.temp)
    # fmin=(quantiles_list[1])
    # fmax=(quantiles_list[2])
    #
    set.seed(seed)
    pd.rf = generatePartialDependenceData(fit.classif.rf, task,
    features.list[index.temp], uniform = FALSE)#,
    #fmin = fmin, fmax = fmax)
    set.seed(seed)
    pd.lr = generatePartialDependenceData(fit.classif.lr, task,
    features.list[index.temp], uniform = FALSE)#,
    #fmin = fmin, fmax = fmax)
    library(ggplot2)
    df.plot = data.frame(grid = pd.rf$data[[feature.temp]],
    rf = pd.rf$data[,1],
    lr = pd.lr$data[,1])
    library(reshape2)
    df.plot.reshaped = reshape2::melt(df.plot, "grid")
    detach(package:reshape2, unload = TRUE)
    p = ggplot(df.plot.reshaped, aes_string(x = "grid", y="value", colour = "variable"))
    p = p+geom_line(size=1) + geom_point(size=3) +
    theme(legend.position="none", text = element_text(size=police.size), legend.title=element_blank()) +
    scale_colour_grey(start = 0,end = 0.7) + ylim(0,1) + xlab(feature.temp)+ylab("Probability")
    print(p)
    plot_pdp_list[[i]] = p
  }
  library(cowplot)
  #quantiles1 = quantile(task$env$data[[features.list[1]]], probs = c(0.05, 0.95))
  #quantiles2 = quantile(task$env$data[[features.list[2]]], probs = c(0.05, 0.95))
  # Scatter of the two chosen features, coloured by the target class.
  p = ggplot(task$env$data, aes_string(x = feature.chosen[1], y=feature.chosen[2], colour = target))
  p = p + geom_point(size=1) + #coord_cartesian(xlim = quantiles2, ylim = quantiles1) +
  theme(legend.position="none", text = element_text(size=police.size), legend.title=element_blank())
  print(p)
  print("Partial dependence of the 2 main feaures according to RF importance", quote = FALSE)
  print(paste(" Relative importance of feature 1 (",feature.chosen[1],") is ",
  format(round(fv.percentage.chosen[1], 3), nsmall = 3), "%" ), quote = FALSE)
  print(paste(" Relative importance of feature 2 (",feature.chosen[2],") is ",
  format(round(fv.percentage.chosen[2], 3), nsmall = 3), "%" ), quote = FALSE)
  # Assemble: PDP of feature 2 (flipped), scatter, empty cell, PDP of feature 1.
  p.grid = plot_grid(plot_pdp_list[[2]] +
  coord_flip(),
  p , NULL, plot_pdp_list[[1]],
  ncol = 2, nrow = 2, align = 'v')
  print(p.grid)
  jpeg(filename = path.out, width = 500, height = 500)
  plot(p.grid)
  dev.off()
}
|
d82e313e037921ba5331a7e7e8e2bfef804d4807 | 18e2b8080aae52e221fba4b47c3bd5087a0c0362 | /DBS_Acoustics_analysis.R | b6f5490473b2712f6f70949b8e78544a957eb108 | [] | no_license | troettge/Muecke-et-al-Thalamic-Deep-Brain-Stimulation-changes-speech-dynamics | b9c63bd680362f1c58778396e7119dbf255e5b55 | f20b0c7db31bc2cf04d4c61833362cf1e062f0b9 | refs/heads/master | 2020-12-02T16:13:11.061431 | 2017-10-08T10:20:16 | 2017-10-08T10:20:16 | 96,520,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,175 | r | DBS_Acoustics_analysis.R |
## Analysis of acoustics data for Muecke et al. "Thalamic Deep Brain Stimulation changes speech dynamics in patients with Essential Tremor"
## Author: Timo B. Roettger
## Contact: timo.roettger@uni-koeln.de
## Date: May 30th, 2016
## Load in necessary packages
library(lme4)
## define function for correction of multiple testing
dunn.sidak <- function(alpha, n) {
  ## Dunn-Sidak correction: per-comparison significance threshold that keeps
  ## the family-wise error rate at `alpha` across `n` independent tests.
  1 - (1 - alpha)^(1 / n)
}
dunn.sidak(0.05, 4) ## so the new threshold for significance is 0.0127
## (four outcome measures are tested below, hence n = 4)
#############################################################
################## P R E P R O C E S S ######################
#############################################################
## set working directory:
setwd("~")
## load in data
xdata <- read.csv("DBS_acoustic_overall_reduced.csv")
## scale continous predictors
xdata$syllable.scale = scale(xdata$syllable)
## scale and log-transform measurements
xdata$syll_dur_2 = scale(log(xdata$syll_dur))
## two subsets for Control vs. Off and Off vs. On
## (DBS has three levels: "C" = control, "OFF", "ON")
xdata.CvsOff = xdata[xdata$DBS != "ON",]
xdata.OffvsOn = xdata[xdata$DBS != "C",]
## re-factor so the dropped level is removed from each subset
xdata.CvsOff$DBS = factor(xdata.CvsOff$DBS)
xdata.OffvsOn$DBS = factor(xdata.OffvsOn$DBS)
xdata.OffvsOn$dbs.group = factor(xdata.OffvsOn$dbs.group)
## contrast code categorical predictors
## sum-to-zero contrasts scaled to +/-0.5 so fixed-effect estimates can be
## read as main effects (differences between condition means)
contrasts(xdata.CvsOff$POA) = contr.sum(3)/2
contrasts(xdata.CvsOff$DBS) = contr.sum(2)/2
contrasts(xdata.OffvsOn$POA) = contr.sum(3)/2
contrasts(xdata.OffvsOn$DBS) = contr.sum(2)/2
contrasts(xdata.OffvsOn$dbs.group) = contr.sum(2)/2
#############################################################
################# M O D E L I N G ###########################
#############################################################
## First, testing for possible interactions of the control variables POA with the test variable DBS.
## If we come across a significant interaction, we report the interaction model,
## If not we drop the interaction term and test whether inclusion of DBS improves the model significantly.
## All models are fit with REML=FALSE (maximum likelihood) because the
## anova() calls below are likelihood-ratio tests on fixed effects.
#########
## syllable duration
#########
## model CvsOff
syll.CvsOff.POA = lmer(syll_dur_2 ~ syllable.scale + DBS * POA + (1|subject), xdata.CvsOff, REML=FALSE)
syll.CvsOff = lmer(syll_dur_2 ~ syllable.scale + DBS + POA + (1|subject), xdata.CvsOff, REML=FALSE)
## likelihood ratio tests
anova(syll.CvsOff, syll.CvsOff.POA) ## X2(2)=10.315; p=0.005756 ** --> interaction with POA
summary(syll.CvsOff.POA)
## main effect of DBS?
#syll.CvsOff.null = lmer(syll_dur_2 ~ syllable.scale + POA + (1|subject), xdata.CvsOff, REML=FALSE)
#anova(syll.CvsOff.null, syll.CvsOff) ## X2(1)=6.9183; p=0.008532 **
## model OffvsOn
syll.OffvsOn.POA = lmer(syll_dur_2 ~ syllable.scale + DBS * POA + (1|subject), xdata.OffvsOn, REML=FALSE)
syll.OffvsOn = lmer(syll_dur_2 ~ syllable.scale + DBS + POA + (1|subject), xdata.OffvsOn, REML=FALSE)
## likelihood ratio tests
anova(syll.OffvsOn, syll.OffvsOn.POA) ## X2(2)=15.78; p=0.0003744 *** --> interaction with POA
summary(syll.OffvsOn.POA)
## main effect of DBS?
#syll.OffvsOn.null = lmer(syll_dur_2 ~ syllable.scale + POA + (1|subject), xdata.OffvsOn, REML=FALSE)
#anova(syll.OffvsOn.null, syll.OffvsOn) ## X2(1)=124.51; p< 2.2e-16 ***
#########
## voi-to-syll
#########
## model CvsOff
voi_to_syll.CvsOff.POA = lmer(voi_to_syll ~ syllable.scale + DBS * POA + (1|subject), xdata.CvsOff, REML=FALSE)
voi_to_syll.CvsOff = lmer(voi_to_syll ~ syllable.scale + DBS + POA + (1|subject), xdata.CvsOff, REML=FALSE)
## likelihood ratio tests
anova(voi_to_syll.CvsOff, voi_to_syll.CvsOff.POA) ## X2(2)=1.6282; p=0.443 --> no interaction with POA
## main effect of DBS?
voi_to_syll.CvsOff.null = lmer(voi_to_syll ~ syllable.scale + POA + (1|subject), xdata.CvsOff, REML=FALSE)
anova(voi_to_syll.CvsOff.null, voi_to_syll.CvsOff) ## X2(1)=0.6292; p=0.4277
summary(voi_to_syll.CvsOff)
## model OffvsOn
voi_to_syll.OffvsOn.POA = lmer(voi_to_syll ~ syllable.scale + DBS * POA + (1|subject), xdata.OffvsOn, REML=FALSE)
voi_to_syll.OffvsOn = lmer(voi_to_syll ~ syllable.scale + DBS + POA + (1|subject), xdata.OffvsOn, REML=FALSE)
## likelihood ratio tests
anova(voi_to_syll.OffvsOn, voi_to_syll.OffvsOn.POA) ## X2(2)=5.1707; p=0.07537 --> no interaction with POA
## main effect of DBS?
voi_to_syll.OffvsOn.null = lmer(voi_to_syll ~ syllable.scale + POA + (1|subject), xdata.OffvsOn, REML=FALSE)
anova(voi_to_syll.OffvsOn.null, voi_to_syll.OffvsOn) ## X2(1)=36.481; p=1.541e-09 ***
summary(voi_to_syll.OffvsOn)
#########
## frication
#########
## binary outcome --> logistic mixed models (glmer, family=binomial)
## model CvsOff
fri_dur_clo.CvsOff.POA = glmer(fri_dur_clo ~ syllable.scale + POA * DBS + (1|subject), xdata.CvsOff, family=binomial)
fri_dur_clo.CvsOff = glmer(fri_dur_clo ~ syllable.scale + DBS + POA + (1|subject), xdata.CvsOff, family=binomial)
## likelihood ratio tests
anova(fri_dur_clo.CvsOff, fri_dur_clo.CvsOff.POA) ## X2(2)=24.348; p=5.162e-06 *** --> interaction with POA
summary(fri_dur_clo.CvsOff.POA)
## main effect of DBS?
## NOTE(review): the commented null model below is named .OffvsOn but is fit
## on xdata.CvsOff -- confirm the intended subset before reusing it.
#fri_dur_clo.OffvsOn.null = glmer(fri_dur_clo ~ syllable.scale + POA + (1|subject), xdata.CvsOff, family=binomial)
#anova(fri_dur_clo.OffvsOn.null, fri_dur_clo.CvsOff) ## X2(1)=1.4727; p=0.2249
## model OffvsOn
fri_dur_clo.OffvsOn.POA = glmer(fri_dur_clo ~ syllable.scale + POA * DBS + (1|subject), xdata.OffvsOn, family=binomial)
fri_dur_clo.OffvsOn = glmer(fri_dur_clo ~ syllable.scale + DBS + POA + (1|subject), xdata.OffvsOn, family=binomial)
## likelihood ratio tests
anova(fri_dur_clo.OffvsOn, fri_dur_clo.OffvsOn.POA) ## X2(2)=3.5318; p=0.171 --> no interaction with POA
## main effect of DBS?
fri_dur_clo.OffvsOn.null = glmer(fri_dur_clo ~ syllable.scale + POA + (1|subject), xdata.OffvsOn, family=binomial)
anova(fri_dur_clo.OffvsOn.null, fri_dur_clo.OffvsOn) ## X2(1)=41.116; p=1.434e-10 ***
summary(fri_dur_clo.OffvsOn)
#########
## voi_dur_clo
#########
## model CvsOff
voi_dur_clo.CvsOff.POA = glmer(voi_dur_clo ~ syllable.scale + POA * DBS + (1|subject), xdata.CvsOff, family=binomial)
voi_dur_clo.CvsOff = glmer(voi_dur_clo ~ syllable.scale + DBS + POA + (1|subject), xdata.CvsOff, family=binomial)
## likelihood ratio tests
anova(voi_dur_clo.CvsOff, voi_dur_clo.CvsOff.POA) ## X2(2)=1.415; p=0.4929 --> no interaction with POA
## main effect of DBS?
voi_dur_clo.CvsOff.null = glmer(voi_dur_clo ~ syllable.scale + POA + (1|subject), xdata.CvsOff, family=binomial)
anova(voi_dur_clo.CvsOff.null, voi_dur_clo.CvsOff) ## X2(1)=1.4341; p=0.2311
summary(voi_dur_clo.CvsOff)
## model OffvsOn
voi_dur_clo.OffvsOn.POA = glmer(voi_dur_clo ~ syllable.scale + POA * DBS + (1|subject), xdata.OffvsOn, family=binomial)
voi_dur_clo.OffvsOn = glmer(voi_dur_clo ~ syllable.scale + DBS + POA + (1|subject), xdata.OffvsOn, family=binomial)
## likelihood ratio tests
anova(voi_dur_clo.OffvsOn, voi_dur_clo.OffvsOn.POA) ## X2(2)=7.9246; p=0.01902 * --> no interaction with POA (after correction)
## main effect of DBS?
voi_dur_clo.OffvsOn.null = glmer(voi_dur_clo ~ syllable.scale + POA + (1|subject), xdata.OffvsOn, family=binomial)
anova(voi_dur_clo.OffvsOn.null, voi_dur_clo.OffvsOn) ## X2(1)=23.762; p=1.09e-06 ***
summary(voi_dur_clo.OffvsOn)
|
442c83491207811e31631df9b477926578621cec | 67c544e7059f67a956029865ec1482d83b7fa484 | /man/eigSdat.Rd | 241cfeaf8203cb31de9e5b0bb1d2698a621081f6 | [] | no_license | StoreyLab/eigenR2 | 1b27983074cf840a1e8b336159c8fee44ba38979 | fdddea07b13c469769adabee79c48443a6904333 | refs/heads/master | 2020-07-03T13:01:18.512134 | 2015-07-19T17:34:24 | 2015-07-19T17:34:24 | 39,342,324 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 839 | rd | eigSdat.Rd | % --- Source file: man/eigSdat.Rd ---
\name{eigSdat}
\docType{data}
\alias{eigSdat}
\title{A simulated data set with independent variables of interest and a gene expression matrix}
\description{This simulated data set consists of two parts. The first
part "\code{varList}" contains three variables: age, genotype and ID. The second part "\code{exp}" is an expression matrix, in which all the gene expression levels are affected by age and genotype with varying effect sizes.}
\usage{data(eigSdat)}
\format{"\code{varList}" is a data set consisting of 50 rows and 3 columns,
each row is one observation, and "\code{exp}" is a 200 x 50 gene
expression matrix with genes in rows and arrays in columns. }
\references{
Chen LS and Storey JD (2008) Eigen-R2 for dissecting variation in high-dimensional studies.
}
\keyword{datasets} |
f4c38b669ddcff3aa4443fa2c0f2fe1322e6893c | a8903875bd24b53c0e4d2e771c054f2b144960c4 | /man/coltype.Rd | 889977b1db88e281d897fedb6e99f5f96ec76f58 | [] | no_license | chang70480/YCfun | ceef82a02e55ad16a0c0a50e7e9aeedb7b2de254 | a055dc63946e2d9f012b67bf497c73d6b9b69065 | refs/heads/master | 2021-03-01T20:34:18.422508 | 2020-11-30T07:50:52 | 2020-11-30T07:50:52 | 245,812,658 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 622 | rd | coltype.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coltype.R
\name{coltype}
\alias{coltype}
\title{\code{coltype} set reading datatype}
\usage{
coltype(
len,
at = list(),
default = c("?", "guess"),
readfun = c("read_csv", "read_excel")
)
}
\arguments{
\item{len}{data types to set for the selected variables}

\item{at}{a list giving the positions (indices) of the variables whose types are set}

\item{default}{default data type for the remaining columns: \code{read_csv} uses "?" and \code{read_excel} uses "guess"}

\item{readfun}{reading function; specifies which function will be used to read the file ("read_csv" or "read_excel")}
}
\value{
coltype
}
\description{
Sets column data types more conveniently when using read_csv or read_excel.
}
|
42012cf7f0bdeb819deddff5d1c7c50a3a6bbaa7 | 3aad0bc10eac99131c5d62590cb078ca3a540dc9 | /app/modules/mod_call_react.R | 3bc3eca4b05a60832b17a324373f7486276b9676 | [
"MIT"
] | permissive | moore-datascience/ais-ships | e44c34a23ec04ad7cd5632d0a4a554c28ab211ef | 9db8097c060d8afd3fa794948371a19527adc19d | refs/heads/main | 2023-06-30T10:54:59.338970 | 2021-08-02T05:50:39 | 2021-08-02T05:50:39 | 391,443,481 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,106 | r | mod_call_react.R | #'
#' file: mod_call_react.R
#' loc: /efs/ECS/Shiny/ais-ships/app/modules
#' date: 7.30.21
#'
#' The purpose of this file is to be a kind of 'hosting' area for React code.
#' The reason that this is best served in a module format is to ensure that
#' the app UI is flush before the babel transformed React code attempts to
#' render. If there isn't this kind of delay, we will get an error saying
#' that the DOM element for our target divs do no exist.
#'
#' This is also the point of adding our process scss -> css code, which takes
#' a second to register in the browser, which is why i have the loading screen
#' with the spinning sun.
#'
#--------------------------/
# Module UI: exposes a single dynamic output slot ("app_ui"), namespaced by
# the module id, into which the server half renders the React markup + CSS.
mod_react_ui <- function(id) {
  ns <- NS(id)
  tagList(uiOutput(ns("app_ui")))
}
# Module server: renders the processed scss -> css (rv$scss) together with the
# babel-transformed React markup (rv$react1) into the "app_ui" slot.
# `pripas()` is a project-level logging helper (defined elsewhere in the app).
mod_react_server <- function(input, output, session, id, rv) {
  output$app_ui <- renderUI({
    pripas('output$app_ui <- renderUI({ triggered')
    react_markup <- rv$react1
    compiled_css <- rv$scss
    tagList(
      tags$style(compiled_css),
      HTML(react_markup)
    )
  })
}
|
49db849a84f8a65082e98792a9589fc6698f01fa | 1ff941c5bfdca03767877c468cb5a9c482fbe1b4 | /man/filter_endpoints_conditions.Rd | e632bfd67a2f6df4ba8b8a0d76d5f5a694647cec | [] | no_license | Sentewolf/edeaR | 0727415f808739670940dd1168cbceb962379e06 | 19ba09107f068ce40805b98c69c5803ef4d2b0b8 | refs/heads/master | 2022-11-09T16:38:43.010212 | 2020-06-18T19:31:25 | 2020-06-18T19:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,516 | rd | filter_endpoints_conditions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_endpoints_conditions.R
\name{filter_endpoints_conditions}
\alias{filter_endpoints_conditions}
\title{Filter: Start and end conditions}
\usage{
filter_endpoints_conditions(
eventlog,
start_condition,
end_condition,
reverse,
...
)
}
\arguments{
\item{eventlog}{The dataset to be used. Should be a (grouped) eventlog object.}
\item{start_condition}{A logical condition}
\item{end_condition}{A logical condition}
\item{reverse}{Logical, indicating whether the selection should be reversed.}
\item{...}{Deprecated arguments.}
}
\value{
When given an eventlog, it will return a filtered eventlog. When given a grouped eventlog, the filter will be applied
in a stratified way (i.e. each separately for each group). The returned eventlog will be grouped on the same variables as
the original event log.
}
\description{
Filters cases where the first and/or last activity adhere to the specified conditions
}
\details{
The filter_endpoints_conditions method filters cases based on logical conditions evaluated on the first and last event of each case:
a case is selected when its first event satisfies \code{start_condition} and its last event satisfies \code{end_condition}.
Setting \code{reverse = TRUE} inverts the selection.
}
\seealso{
\code{vignette("filters", "edeaR")}
}
|
9f907d72b954fa8a97cf7788d6be7e4a3809e369 | 05e056455d4fe1a32bfd93e2ed73c008227012d2 | /R/cooperam.R | 3bd3b4a5ca7b479689a10b9406edb827df16b9b5 | [] | no_license | APatman2017/caplist | 3f5e95b17db93e3998fbc2482f21c3d1e1d3e830 | 24143669d68b840e62e9162ffc63cc5337f871d7 | refs/heads/master | 2021-01-24T09:34:07.702698 | 2018-02-26T19:52:21 | 2018-02-26T19:52:21 | 123,021,462 | 0 | 0 | null | 2018-02-26T19:44:55 | 2018-02-26T19:44:55 | null | UTF-8 | R | false | false | 1,508 | r | cooperam.R | # Load libraries
library(stringr)
library(dplyr)
library(readr)
# Sets working directory, reads file and creates a nickname
setwd("Z:/Cap List/")
wd <- getwd()
date <- format(Sys.Date(), "%B%Y")
# Reads in files in format below
#epic <- read.csv(paste(wd,"/CooperAM_180223", ".csv", sep=""), header=TRUE, stringsAsFactors = FALSE)
epic <- read_csv(paste0(wd,"/CooperAM_180223", ".csv"))
# `Patient Name` needs to be split into MEMB_FIRST_NAME and MEMB_LAST_NAME
# `Home Address` needs to be split into MEMB_ADDRESS_LINE_1, MEMB_CITY, MEMB_STATE
epic2 <- epic %>%
mutate(MEMB_LAST_NAME = str_split(`Patient Name`, ",") %>% sapply("[", 1),
MEMB_FIRST_NAME = str_split(`Patient Name`, ",") %>% sapply("[", 2),
MEMB_ADDRESS_LINE_1 = str_split(`Patient Address`, ",") %>% sapply("[", 1),
MEMB_CITY = str_split(`Patient Address`, ",") %>% sapply("[", 2) %>% str_replace(" CAMDEN", "CAMDEN"),
MEMB_STATE = str_split(`Patient Address`, ",") %>% sapply("[", 3),
MEMB_ZIP = str_split(MEMB_STATE, " ") %>% sapply("[", 3))
epic2$MEMB_STATE <- epic2$MEMB_STATE %>% str_trim() %>% str_replace_all('[[:space:]]{1}[[:digit:]]{5,}', "")
# Dplyr: new_col = existing_col
epic3 <- epic2 %>% select(MEMB_FIRST_NAME, MEMB_LAST_NAME, MEMB_MRN = MRN, MEMB_DOB = DOB, MEMB_ADDRESS_LINE_1, MEMB_CITY, MEMB_STATE, MEMB_ZIP,
MEMB_INSURANCE = Insurance)
savedate <- (strftime(Sys.Date(),format="%Y-%m-%d"))
write_csv(epic3, path=paste0(savedate,"-CooperAM.csv"), na="")
|
9099620a797419f3dfcdf8d4f5e862969b52a103 | da36d96f0231290b53345c06896cd8f543712548 | /my_first_mousetracking_analysis/my_first_save.R | b12416dcc6d9df61b140a2769b02d857688ffdb2 | [
"MIT"
] | permissive | dwulff/EADM_SS2018 | 09f3a126815191577b77f316311b426bc8d68bd3 | 92881a69680aa198b4b8ba85b6ab4a51bb927e64 | refs/heads/master | 2020-03-22T17:38:45.462371 | 2018-07-14T08:20:19 | 2018-07-14T08:20:19 | 140,406,712 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 298 | r | my_first_save.R | install.packages("mousetrap")
# Beginner live-coding transcript: loading a package, inspecting help,
# and basic vector assignment / arithmetic.
library(mousetrap)
# Open the help page for mt_import_mousetrap
?mt_import_mousetrap
# NOTE(review): `mousetrap` is a package name; evaluating the bare name only
# works if an object called `mousetrap` exists in the session -- confirm.
mousetrap
# NOTE(review): `some_vector` is printed here before it is assigned below --
# in a fresh session this line errors (object not found).
some_vector
some_vector <- c(1, 2, 3)
some_vector
print(some_vector)
# Arithmetic on a vector is elementwise; the next line prints but does not save
some_vector + 2
some_vector
some_vector <- some_vector + 2
some_vector
# Overwrite the vector with its mean (a single number)...
some_vector <- mean(some_vector)
# ...and then with a character string -- R variables can change type freely
some_vector <- 'hello world'
|
8256f9727fc302f5128d6ef3b67bcddada838eb4 | 652f658c2ca8cadf7e749985856cfcc08d09bf4a | /Circular Plot.R | 1f996e32aad8acc02e59cda6204e5b8a039aa9e7 | [] | no_license | ahmedaquib/sorghum-meta-QTL | e5eb3897f62a50297d628ab27ae1fff427eadfc5 | 90a23d5070a7969f2739aae3c97813976d77f40d | refs/heads/main | 2023-06-23T22:05:37.414197 | 2021-07-29T17:02:43 | 2021-07-29T17:02:43 | 382,636,342 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,421 | r | Circular Plot.R | library(circlize)
library(dplyr)
library(tidyr)
library(GenomicRanges)
library(RColorBrewer)
library(yarrr)
############################# DATA PREPARATION #######################################################
# Loads objects (p95, GR, P95, ...) used further below
source("C:/Users/Shabana Usmani/sorghum-meta/sorghum-meta-QTL/Physical Interval.R")
## Consensus Map Data
## Read every "map" file in the folder; the chromosome name is parsed out of
## the file name ("<prefix> <chr>.txt") and each file is stacked into Consensus.
setwd("C:/Users/Shabana Usmani/sorghum-meta/sorghum-meta-QTL/Results/Consensus Maps")
files <- list.files()
i <- grep("map",files)
j <- 1
Consensus <- data.frame()
# NOTE(review): the loop variable reuses the name of the index vector
# (`for(i in i)`) -- legal in R but easy to misread; `j` is incremented but unused.
for(i in i){
  Chromosome <- strsplit(strsplit(files[i], split = " ")[[1]][2],split = ".txt")[[1]]
  f <- read.table(paste0(files[i]), sep="\t", skip=13, row.names = 1, fill = TRUE)
  f$Chromosome <- rep(Chromosome,dim(f)[1])
  Consensus <- rbind(Consensus,f)
  j=j+1
}
remove(files,i,j,f, Chromosome)
# Keep only chromosome, position (cM) and lower-cased marker name
Consensus <- data.frame(chr=as.factor(Consensus$Chromosome),cM=Consensus$V3,Marker=tolower(Consensus$V2))
# Thin markers for labelling: applied three times, i.e. roughly every 27th marker kept
Consensus1 <- filter(Consensus,row_number() %% 3 == 1) ## Select every 3rd row starting from first row
Consensus1 <- filter(Consensus1,row_number() %% 3 == 1) ## Select every 3rd row starting from first row
Consensus1 <- filter(Consensus1,row_number() %% 3 == 1) ## Select every 3rd row starting from first row
## QTL Overview Data
setwd("C:/Users/Shabana Usmani/sorghum-meta/sorghum-meta-QTL/Results/Consensus Maps")
qtl <- read.table("ALLQTL.txt",sep = "\t",skip = 1) %>% group_by(V6)
colnames(qtl)[6] <- "chr"
# Per-QTL normal approximation: mean = V10, sd derived from the V11..V12
# interval treated as a 95% CI (width = 2 * 1.96 * sd)
Sb <- dplyr::select(qtl,chr,V10,V11,V12) %>%
  transmute(chr=chr,mean=V10,sd=(V12-V11)/(2*1.96))
result <- data.frame()
output <- data.frame()
# Limit of x axis on all chromosomes
mx <- c()
for(j in unique(Consensus$chr)){mx <- c(mx,filter(Consensus, chr==j)%>%dplyr::select(cM)%>%max())}
# Values of y every cM on x axis
# (sum of the per-QTL normal densities = QTL "overview" curve per chromosome)
for(j in unique(Sb$chr))
{
  S <- filter(Sb, chr==j)
  x <- seq(0,mx[which(unique(Sb$chr)==j)])
  y <- 0
  for(i in 1:nrow(S)){
    y <- y + dnorm(x,mean = S$mean[i],sd=S$sd[i])
  }
  output <- cbind(rep(j,length(x)),x,y)
  result <- rbind(result,output)
}
# cbind() above produced character columns; coerce back to factor/numeric
result[,1] <- as.factor(result[,1])
result$x <- as.numeric(result$x)
result$y <- as.numeric(result$y)
# Scale the density curve down for plotting
result$y <- result$y/10
## Meta QTL Locations
setwd("C:/Users/Shabana Usmani/sorghum-meta/sorghum-meta-QTL/Results")
meta <- read.table("Meta-weight.txt", header = TRUE)
colnames(meta)[3] <- "chr"
## Physical Map Data Prep
## p95, GR and P95 come from the sourced "Physical Interval.R" script above.
pp95<-data.frame(reduce(p95))[,1:3]
# Extend region 1's end to the end of gene #24621
pp95[1,3] <- end(ranges(GR[24621]))
pp95$seqnames <- c("10","02","02","03","03","03","04")
pp95 <- arrange(pp95, seqnames)
pp95$regions <- 1:7
# Region x-limits (start and end of each of the 7 regions), long format
reg_lim<-data.frame(regions=rep(1:7,2),x=c(pp95$start,pp95$end))
p95_red <- makeGRangesFromDataFrame(pp95, keep.extra.columns = TRUE)
seqlevels(GR) <- c("01","02","03","04","07","10")
GR <- makeGRangesFromDataFrame(arrange(data.frame(GR),data.frame(GR)$seqnames))
meta$MQTL <- row.names(meta)
meta$MQTL <- as.integer(meta$MQTL)
P95[13,2] <- end(ranges(GR[24621]))
# Number of Genes in a 250KB Bins in each region
all_counts <- NULL
for(i in 1:7){
  den <- GRanges(seqnames = pp95[i,1], ranges = IRanges(start=seq(pp95[i,2],pp95[i,3],by=250000), width = 250000), region=pp95[i,4])
  den$count <- countOverlaps(den, GR)
  all_counts <- rbind(all_counts, data.frame(den))
}
# Correspondance data for nested zooming
# (links each genetic-map MQTL interval to its physical region on the zoom track)
merge <- left_join(data.frame(P95),meta, by=c("MQTL"="MQTL"))
merge$region <- c(1,2,2,2,2,3,4,4,4,4,5,6,7)
merge <- merge[,c(7,5,6,9,1,2)]
merge2 <- NULL
for(j in unique(merge$region)){
  mr <- NULL
  # Collapse all MQTLs of a region to one row: min start / max end on both maps
  mr <- split(merge, merge$region)[[j]] %>% summarise(across(c(2,3,5,6),c(min,max))) %>%
    dplyr::select(start.y_1,end.y_2,start.x_1,end.x_2)
  mr <- cbind(mr,split(merge, merge$region)[[j]][1,c(1,4)])
  merge2 <- rbind(merge2,mr)
}
merge2 <- merge2[,c(5,1,2,6,3,4)]
## SETTING COLOURS
tr_col <- brewer.pal(11, "BrBG")[6]
ov_col <- brewer.pal(11, "BrBG")[10:11]
# MQTL heat colour keyed on the Wt column (green -> black -> red)
col_fun = colorRamp2(c(min(meta$Wt), mean(meta$Wt), max(meta$Wt)), c("green", "black", "red"))
# Gene-density heat colour keyed on the per-bin gene count
col_fun2 = colorRamp2(c(min(all_counts$count), max(all_counts$count)), c("#D9F0A3", "#004529"))
ht <- brewer.pal(9, "Greens")[c(1,4)]
gn <- brewer.pal(9, "Blues")
ni = brewer.pal(9,"Greens")
############################# THE CIRCULAR PLOT #######################################################
# f1: outer circle -- consensus map ideogram with marker ticks/labels,
# QTL-overview density track, and MQTL heat track.
f1 <- function(){
  circos.par(cell.padding= c(0,0,0,0))
  circos.initialize(Consensus$chr, x=Consensus$cM)
  # Track 1: chromosome bars with marker ticks and thinned marker labels
  circos.track(Consensus$chr,x=Consensus$cM,ylim = c(0, 1), panel.fun = function(x, y) {
    xlim = CELL_META$xlim
    xcenter=CELL_META$xcenter
    ycenter=CELL_META$ycenter
    circos.rect(xlim[1], 0, xlim[2], 0.3, col = rand_color(10, hue = "green", luminosity = "dark"), border=NA)
    # Ticks for every marker on this chromosome
    circos.segments(x, rep(0,length(x)),x, rep(0.3,length(x)), col="black")
    # Longer ticks + italic labels only for the thinned Consensus1 markers
    x = Consensus1$cM[Consensus1$chr==CELL_META$sector.index]
    circos.segments(x, rep(0,length(x)),x, rep(0.4,length(x)), col="black")
    circos.text(x, rep(0.75,length(x)),Consensus1$Marker[Consensus1$chr==CELL_META$sector.index],niceFacing = TRUE,facing = "clockwise",cex=0.8, font = 3)
    circos.text(xcenter,ycenter/3,labels = CELL_META$sector.index, col = "floralwhite", font = 2)
  },bg.border = NA)
  circos.par(track.height=0.12)
  # Track 2: QTL-overview density curve (`result`) with dotted grid lines
  circos.track(result$V1,x=result$x,ylim = c(0, 0.15),panel.fun = function(x, y){
    circos.axis(h="top",direction = "inside" ,labels.facing = "clockwise",labels.cex = 0.5, labels.col = "grey17", col="grey17")
    circos.lines(result[result$V1==CELL_META$sector.index,2],result[result$V1==CELL_META$sector.index,3], area = TRUE, type = 'l', col= ov_col[1],border = ov_col[2])
    for(h in seq(0, 0.15, by = 0.05)) {
      circos.lines(CELL_META$cell.xlim, c(h, h), lty = 3, col = "#AAAAAA")
    }
  }, bg.border = NA, bg.col=tr_col)
  circos.par(track.height=0.05)
  # Track 3: MQTL intervals coloured by weight via col_fun
  circos.track(result$V1,x=result$x,ylim = c(0,1),panel.fun = function(x, y) {
    chr = CELL_META$sector.index
    circos.rect(meta$start[meta$chr==chr], rep(0,length(meta$start[meta$chr==chr])), meta$end[meta$chr==chr], rep(1,length(meta$start[meta$chr==chr])), col = col_fun(meta$Wt[meta$chr==chr]), border=NA)
  }, bg.border = ov_col[2], bg.col=ht[1])
}
# f2: inner (zoomed) circle -- one sector per physical region, with a label/axis
# track and a 250 kb gene-density heat track; combined with f1 via circos.nested().
f2 <- function(){
  circos.par(start.degree=+60)
  circos.par(track.height=0.10)
  circos.par(track.margin=c(0,0))
  circos.initialize(reg_lim$regions, x=reg_lim$x)
  # Track 1: region label ("SBI-<chr>") and a physical axis in Mb
  circos.track(c(all_counts$region,all_counts$region),x=c(all_counts$start,all_counts$end),ylim = c(0, 1), bg.col=ni[reg_lim$regions], track.index=1 ,panel.fun = function(x, y) {
    reg = CELL_META$sector.index
    xlim = CELL_META$xlim
    circos.text(CELL_META$xcenter, y=CELL_META$ycenter, labels=paste0("SBI-",pp95$seqnames[which(pp95$regions==reg)]),niceFacing = TRUE,facing = "inside",cex=0.6, col = "grey17")
    circos.axis(h="bottom",direction = "outside" ,labels.facing = "clockwise",major.at = reg_lim$x[reg_lim$regions==reg] ,labels = paste0(round(reg_lim$x[reg_lim$regions==reg]/10^6, digits = 2)),labels.cex = 0.7, labels.col = "grey17", col="grey17")
  },bg.border = NA)
  circos.par(track.height=0.15)
  # Track 2: gene-count heat per 250 kb bin (columns 2/3 = bin start/end,
  # column 7 = count; coloured via col_fun2)
  circos.track(c(all_counts$region,all_counts$region),x=c(all_counts$start,all_counts$end),ylim = c(0, 1),track.index=2 ,panel.fun = function(x, y) {
    reg = CELL_META$sector.index
    circos.rect(all_counts[all_counts$region==reg,2], 0, all_counts[all_counts$region==reg,3], 1, col = col_fun2(all_counts[all_counts$region==reg,7]), border=NA)
  },bg.border = NA)
}
# Draw the nested plot: f1 outside, f2 inside, with region connections from merge2
circos.nested(f1,f2,merge2, connection_height = mm_h(7), adjust_start_degree = FALSE, connection_col = ni[merge2[[4]]], connection_border = NA)
|
5f974126065a65bce53c653206e0d2ff982319ab | 8d4dfa8b6c11e319fb44e578f756f0fa6aef4051 | /man/getIdLevelQvals.Rd | 5b351199494f2eac9c03758b26ed650b51720ce2 | [] | no_license | eahrne/SafeQuant | ce2ace309936b5fc2b076b3daf5d17b3168227db | 01d8e2912864f73606feeea15d01ffe1a4a9812e | refs/heads/master | 2021-06-13T02:10:58.866232 | 2020-04-14T10:01:43 | 2020-04-14T10:01:43 | 4,616,125 | 4 | 4 | null | 2015-11-03T20:12:03 | 2012-06-10T15:35:25 | R | UTF-8 | R | false | true | 713 | rd | getIdLevelQvals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IdentificationAnalysis.R
\name{getIdLevelQvals}
\alias{getIdLevelQvals}
\title{Calculates identification level q-values based on target-decoy score distributions}
\usage{
getIdLevelQvals(scores, isDecoy)
}
\arguments{
\item{scores}{peptide/protein identification scores}
\item{isDecoy}{vector of TRUE/FALSE}
}
\value{
vector of q.values
}
\description{
Calculates identification level q-values based on target-decoy score distributions
}
\details{
q-value = (Nb. Decoy Entries at idScore Threshold S*) / (Nb. Target Entries at idScore Threshold S). (* idScore >= S)
}
\note{
No note
}
\examples{
print("No examples")
}
\references{
NA
}
|
36687bd72b07261a0d4de96f63c8ba50b40eca94 | faa2f0bc274f3d7708a010510b281932996dae6a | /data-raw/data_acquisition.R | b6720d06d00436ede90137b30c0361f432549985 | [
"MIT"
] | permissive | martijnvanattekum/cleanse | 3628ce0113e9cf3228f473bf4a0af4081761f16c | 092d695e30399d80dc088599fe30f8ab58396622 | refs/heads/master | 2023-08-16T23:05:25.261398 | 2023-08-16T08:05:32 | 2023-08-16T08:05:32 | 191,123,816 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,645 | r | data_acquisition.R | # data were downloaded from https://depmap.org/portal/download/. The processing below of each data source
# should yield similar data structures, with variable "value" representing the final data returned to the user
# data are in long format; a sample data frame can be found in the file sample.csv
library(devtools)
library(tidyverse)
library(DESeq2)
#####################################################################
######################### INTERNAL DATA #############################
#####################################################################
#save internal data
#usethis::use_data(#, internal = TRUE, overwrite = TRUE)
#####################################################################
########################### USER DATA ###############################
#####################################################################
#example se
# Simulated SummarizedExperiment: 10 genes x 48 samples, two assays
# (480 = 10 * 48 values per matrix). Seed fixed for reproducibility.
set.seed(15)
expr <- matrix(runif(480,0,25), ncol = 48)
cn <- matrix(runif(480,1,5), ncol = 48)
# Row annotation: 10 genes in three groups (5 IL, 3 NOTCH, 2 TLR)
genes <- data.frame(gene_group = rep(c("IL", "NOTCH", "TLR"), c(5,3,2)),
                    gene_name = c("IL1R1", "IL1R2", "IL2RA", "IL2RB", "IL2RG", "DLL1", "DLL3", "JAG1", "TLR1", "TLR2"))
# Column annotation: full factorial design, 4 * 3 * 2 * 2 = 48 samples
samples <- expand.grid(patient = 1:4,
                       site = c("skin", "brain", "liver"),
                       time = c(0,4),
                       treatment = c("A", "B"))
seq_se <- SummarizedExperiment(assays = list(expression = expr, copy_number = cn),
                               rowData = genes,
                               colData = samples)
# SAVE USER DATA ----------------------------------------------------------
usethis::use_data(seq_se, overwrite = TRUE)
|
c83542b3ede764adfb1c997d4f85d5fd0d5be752 | 2d5553595d54c42907645f0fbe19fb6fef41b098 | /week-2/complete.R | 96d4ee158a352f8d74a1bd447299cb0a9d5a8f6c | [] | no_license | gaocegege/rprog-in-coursera | ee2407955006d0e7848df356bf7c8cd42e155791 | 9ed40de7ae8fccf0abeaccff6381edadd76f9912 | refs/heads/master | 2016-09-02T05:49:03.407521 | 2014-12-07T03:39:00 | 2014-12-07T03:39:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 834 | r | complete.R | complete <- function(directory, id = 1:332) {
## 'directory' 是长度为1的字符向量,指明
## CSV 文件的位置
directory <- as.character(directory)
## 'id' 是正整数向量,指明监测点的ID号,
## 将要被使用的
id <- as.integer(id)
## calculate
filenameArray <- paste("./", directory, "/", formatC(id, width=3, flag="0"), ".csv", sep="")
data <- data.frame(id = id, nobs = numeric(length(id)))
for (i in seq_along(filenameArray)) {
buf <- read.csv(filenameArray[i])
data[i,"nobs"] <- sum(!is.na(buf[["sulfate"]]) & !is.na(buf[["nitrate"]]))
}
## 返回以下格式的数据帧:
## id nobs
## 1 117
## 2 1041
## ...
## 其中'id' 是监测点ID编号,而'nobs'是
## 完整案例的数量
data
} |
9f65008e01e72ea68747d56155a9d5b4f5344622 | 92343742887e2957b7d6d2b1f42c70f93d7deece | /IMPUTACION.R | 1cdac6fb4324e79c85c35358aa9090fe58360505 | [] | no_license | GITInformacion/Dps-Scripts | 3d8bd7a4d69c396381be92dd485c08e360057571 | 0f9229a77816bdea9232bcbc9e307dbea2e6bfb3 | refs/heads/main | 2023-01-01T00:09:33.640391 | 2020-10-21T15:03:09 | 2020-10-21T15:03:09 | 305,685,667 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 1,808 | r | IMPUTACION.R | ###########################
#      IMPUTACION         #
#                         #
#   Andres Romero Parra   #
###########################
# Imputation helper script: flags household members associated with the
# "I05" field in the error table so missing achievement data can be imputed.
library("ggplot2", lib.loc="~/R/win-library/3.4")
library("readxl", lib.loc="~/R/win-library/3.4")
library("dplyr", lib.loc="~/R/win-library/3.4")
library("tidyverse", lib.loc="~/R/win-library/3.4")
library("viridisLite", lib.loc="~/R/win-library/3.4")
library("tidyr", lib.loc="~/R/win-library/3.4")
library("ggthemes", lib.loc="~/R/win-library/3.4")
# NOTE(review): rm(list = ls()) wipes the workspace, yet the objects
# Integrantes.Hogares_Acumulado / Entrega_Hogares_Acumulado used below are
# never loaded in this script -- they must come from elsewhere; confirm.
rm(list = ls())
setwd("~/Datos/ciclo2/DATOS INICIALES/datos_04062018")
# Error table of households (Excel export)
Archivo_tabla_de_hogares_con_error_1_ <- read_excel("Datos/PQRS/Revisión consistencia logros/Sin dato/Archivo tabla de hogares con error (1).xlsx")
# Member IDs in the error table that are absent from the accumulated member table
b<-setdiff(Archivo_tabla_de_hogares_con_error_1_$idIntegranteHogar, Integrantes.Hogares_Acumulado$ID_INTEGRANTE)
# NOTE(review): `c` and `d` shadow base functions c() and d-named helpers;
# household IDs absent from / shared with the accumulated table
c<-setdiff(Archivo_tabla_de_hogares_con_error_1_$idHogar, Integrantes.Hogares_Acumulado$X.U.FEFF.HOGAR)
d<-intersect(Archivo_tabla_de_hogares_con_error_1_$idHogar, Integrantes.Hogares_Acumulado$X.U.FEFF.HOGAR)
Entrega_Hogares_Acumulado_2 <-Entrega_Hogares_Acumulado[Entrega_Hogares_Acumulado$logro19=="SIN DATO",]# Households with no data for achievement ("logro") 19
Integrantes.Hogares_Acumulado_2<-Integrantes.Hogares_Acumulado[Integrantes.Hogares_Acumulado$X.U.FEFF.HOGAR %in% Entrega_Hogares_Acumulado_2$X.U.FEFF.HOGAR,]# Members of the households with no data for achievement 19
# Data sets without missing values
# NOTE(review): the next assignment is immediately overwritten by the one after
# it, so the column-4 subset on this line is discarded -- confirm intent.
campo_prueba <- Archivo_tabla_de_hogares_con_error_1_[!(Archivo_tabla_de_hogares_con_error_1_$idIntegranteHogar %in% b),4]
campo_prueba <- Archivo_tabla_de_hogares_con_error_1_[Archivo_tabla_de_hogares_con_error_1_$campo=="I05",]
# Flag members whose ID appears in campo_prueba (1) or not (0)
# NOTE(review): campo_prueba is a data frame here, so %in% matches against its
# columns, not a vector of IDs -- verify this is the intended comparison.
Integrantes.Hogares_Acumulado_1$vacio<- as.factor(ifelse(Integrantes.Hogares_Acumulado_1$ID_INTEGRANTE %in% campo_prueba,1,0))
|
3abc776b67597c36878f482017fe88f73b2e2f00 | 7cd5ec4afdf109a8f13da0269180a573afc4da19 | /man/get_circle.Rd | 4749c2731e169490183713eb6cbf61870b6b85aa | [] | no_license | SimonCoulombe/cangetpop | c37e47c116bb02963c02a5fbcafd03b2a1b54ef3 | 71c6a7c525821244a865d0bfa7d154e82aed5068 | refs/heads/master | 2023-07-16T15:48:32.955117 | 2021-09-01T12:51:44 | 2021-09-01T12:51:44 | 386,512,668 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 379 | rd | get_circle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_utm_crs.R
\name{get_circle}
\alias{get_circle}
\title{get_circle returns a circle of radius "radius" kilometers around lat/lon}
\usage{
get_circle(lat, lon, radius)
}
\arguments{
\item{radius}{}
}
\value{
}
\description{
get_circle returns a circle of radius "radius" kilometers around lat/lon
}
|
d27eaed1eecb9428c1b96befa10af780ae0f7b95 | ded254a14f62f6705715c719f126ff6b557a661e | /R/run_app_ui.R | 58f170af05c02d2f867f994bdf83fa5c2cc768ab | [] | no_license | pedroivo000/lambdaPrimeR | 9a2269fc967242ed79b3a032e0c2cb1b33db1898 | 0a40261fabea18075fc953a2c62e26620c57b65a | refs/heads/master | 2021-06-04T07:57:45.801757 | 2020-01-20T05:34:36 | 2020-01-20T05:34:36 | 119,300,044 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 392 | r | run_app_ui.R | #' Open lambdaPrimeR Shiny user-interface
#'
#' @return Opens Shiny app on a new browser window
#' @export
#'
run_app_ui <- function() {
app_dir <- system.file("lambdaPrimeRui", "shiny_app", package = 'lambdaPrimeR')
if(app_dir == "") {
stop("Could not find Shiny app directory. Try re-installing `lambdaPrimeR`.", call. = F)
}
shiny::runApp(app_dir, display.mode = 'normal')
} |
59b53b076c31775d52c04e390f25d22eb0d48b63 | 898886e1247e9a00868e07b4fae0c16daff520f6 | /Basic Statistics_Level 1/Q9_b.R | a25faaaaca3ce2be5d66dfd4e1144f2d9d326328 | [] | no_license | ShahajiJadhav/R-Codes-For-Data-Science | 56492b71c4082828d7792be25179c0b30a5b82cf | 8eea86faa209ef55e946efd20073be933413256b | refs/heads/master | 2022-09-26T01:21:44.994065 | 2020-06-09T10:02:23 | 2020-06-09T10:02:23 | 269,337,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 547 | r | Q9_b.R | #Reading DWTa
Q9 <- read.csv(file.choose(),header = TRUE,sep = ",", stringsAsFactors = TRUE)
Q9 <- Q9[-1]
View(Q9)
names(Q9)#"SP" "WT"
plot(Q9)
attach(Q9)
#Ploting
plot(SP)
plot(SP, type = 'o',xlab ="Frequancy",ylab = 'SP', main = 'SP')
boxplot(SP)
boxplot(SP, horizontal = T, col = "orange")
hist(SP)
hist(SP, col = "blue", border = T, breaks = 15,labels = T, density = 7)
#WT
plot(WT, xlab ="Frequancy",ylab = 'WT', main = 'WT')
boxplot(WT)
hist(WT, border = F, breaks =10, labels = T, main = "HIST WT")
plot( WT, SP)
|
57c81aa5aa077e5861cba7cb7c2f207f2ee90eed | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/ggip/tests/testthat/test-geom-hilbert-outline.R | b3f0a49413799b85e29448c3ed7dd7b5d62fbd57 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,726 | r | test-geom-hilbert-outline.R | test_that("input validation", {
address_data <- data.frame(address = ip_address("0.0.0.0"))
network_data <- data.frame(network = ip_network("0.0.0.0/16"))
expect_error(
print(ggplot(network_data) + geom_hilbert_outline(aes(ip = network))),
class = "ggip_error_missing_coord"
)
expect_error(
print(ggplot(address_data) + coord_ip() + geom_hilbert_outline(aes(ip = address))),
class = "ggip_error_bad_aes_type"
)
expect_error(
print(ggplot(network_data) + coord_ip(curve = "morton") + geom_hilbert_outline(aes(ip = network))),
'`geom_hilbert_outline()` requires `coord_ip(curve = "hilbert")`.',
fixed = TRUE
)
})
test_that("alternative ways to specify data/aesthetics", {
dat <- data.frame(
ip = ip_network(c("0.0.0.0/2", "128.0.0.0/4"))
)
p1 <- ggplot() +
coord_ip() +
geom_hilbert_outline(aes(ip = ip), data = dat)
p2 <- ggplot(dat) +
coord_ip() +
geom_hilbert_outline(aes(ip = ip))
p3 <- ggplot(dat, aes(ip = ip)) +
coord_ip() +
geom_hilbert_outline()
g1 <- layer_grob(p1)[[1]]
g2 <- layer_grob(p2)[[1]]
g3 <- layer_grob(p3)[[1]]
expect_s3_class(g1, "segments")
expect_s3_class(g2, "segments")
expect_s3_class(g3, "segments")
expect_equal(g1$x0, g2$x0)
expect_equal(g1$x0, g3$x0)
expect_equal(g1$y0, g2$y0)
expect_equal(g1$y0, g3$y0)
expect_equal(g1$x1, g2$x1)
expect_equal(g1$x1, g3$x1)
expect_equal(g1$y1, g2$y1)
expect_equal(g1$y1, g3$y1)
})
test_that("works without data", {
p <- ggplot() + coord_ip() + geom_hilbert_outline()
g <- layer_grob(p)[[1]]
expect_s3_class(g, "segments")
})
test_that("validate drawn segments", {
expect_segments <- function(curve_order, closed) {
n_segments <- (2^curve_order + 1)^2
n_segments <- ifelse(closed, n_segments, n_segments - 2)
p <- ggplot() +
coord_ip() +
geom_hilbert_outline(curve_order = curve_order, closed = closed)
g <- layer_grob(p)[[1]]
expect_length(g$x0, n_segments)
}
expect_segments(1, FALSE)
expect_segments(2, TRUE)
expect_segments(3, FALSE)
expect_segments(4, TRUE)
})
test_that("networks outside 2D grid raise warning", {
dat <- data.frame(ip = ip_network("128.0.0.0/4"))
p <- ggplot(dat, aes(ip = ip)) +
coord_ip(canvas_network = ip_network("0.0.0.0/2"))
expect_warning(layer_grob(p + geom_hilbert_outline()))
expect_silent(layer_grob(p + geom_hilbert_outline(na.rm = TRUE)))
})
test_that("networks without outline are silently ignored", {
dat <- data.frame(ip = ip_network("128.0.0.0/4"))
p <- ggplot(dat, aes(ip = ip)) +
coord_ip() +
geom_hilbert_outline(curve_order = 2)
expect_silent(layer_grob(p))
expect_s3_class(layer_grob(p)[[1]], "zeroGrob")
})
|
3b6bbaa04e08c8af4364fee6cbe21a16cc604917 | 7bed3886e5258d7a0a36f509d762b7859ed63732 | /R/import.R | 45258a7bd440fd707dcb9bd6e8e9c694210a33aa | [] | no_license | JonasGlatthorn/APAtree | a584bd72e35414deea564aea1e6a901ca35e9190 | 383cd9fb95a8396a66a61ae1dae75a962b54df97 | refs/heads/main | 2023-04-19T03:25:58.997681 | 2021-08-20T13:01:47 | 2021-08-20T13:01:47 | 394,584,651 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 142 | r | import.R | # Importing the most frequently used functions of other packges:
#' @useDynLib APAtree
#' @importFrom Rcpp sourceCpp
#' @import sf raster
NULL |
e3b22f06af0c991a6eebebb92f8d9694c600fe37 | df7a77bdbcedb1d272c5a2346cacb57c73b6e846 | /r_modules/analysisPackages/GeneSetTTest/tests/runTests.R | 1ff794707dacc5e3b35646d9fc35457fb58560bd | [
"MIT"
] | permissive | pablopunk/Oncoscape | 72200b482fc0e2350fc361e9b1f1f784fa28c11b | d96276931c45b121a49642a0f713d509a0376233 | refs/heads/develop | 2021-01-21T17:50:48.605435 | 2016-04-05T11:28:32 | 2016-04-05T11:28:32 | 55,501,595 | 0 | 0 | MIT | 2020-04-28T20:43:09 | 2016-04-05T11:15:45 | C++ | UTF-8 | R | false | false | 118 | r | runTests.R | require("GeneSetTTests") || stop("unable to load GeneSetTTests package")
BiocGenerics:::.testPackage("GeneSetTTests")
|
36624fbe14c0c6a412fe53ec6112d4c00cd40a0c | 279049e52fedc26f8a5d47eae470c42ab1cb5d2c | /man/par_discretenum.Rd | 40a216943b6f58905b88bf3d69812d76a001442b | [] | no_license | CollinErickson/comparer | 5e947b63634e03c21a86f4d7fd76aba08dd9783a | b8340a4d165bf22a00d0d7847aa92601edb51e96 | refs/heads/master | 2023-03-06T06:33:25.266036 | 2023-02-20T18:57:59 | 2023-02-20T18:57:59 | 94,040,471 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 542 | rd | par_discretenum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hype_par_discretenum.R
\name{par_discretenum}
\alias{par_discretenum}
\title{Parameter with uniform distribution for hyperparameter optimization}
\usage{
par_discretenum(name, values)
}
\arguments{
\item{name}{Name of the parameter, must match the input to `eval_func`.}
\item{values}{Values, discrete numeric}
}
\description{
Parameter with uniform distribution for hyperparameter optimization
}
\examples{
p1 <- par_discretenum('x1', 0:2)
class(p1)
print(p1)
}
|
533103d8fe5abcc50a4e888dd63943db33629061 | 7ab1cf068cc836549a001219a93a7305eb33297e | /R/02_Evaluate_Projection.R | 712e1f99582a15a1db0b55cda819423ddeb8f683 | [
"MIT"
] | permissive | davnovak/SingleBench | 16912eab32f18afd68b992d219166873f7c7efa4 | 34751d16e85605e197153eec62ce69018ff33b71 | refs/heads/main | 2023-05-29T02:53:18.052308 | 2021-06-08T12:30:54 | 2021-06-08T12:30:54 | 368,499,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,081 | r | 02_Evaluate_Projection.R |
EvalProjection <- function(
benchmark, verbose
) {
seed <- benchmark$seed.projection
## Iterate over subpipelines
purrr::walk(
seq_len(benchmark$n_subpipelines), function(idx.subpipeline) {
subpipeline_name <- GetSubpipelineName(benchmark, idx.subpipeline)
proj <- benchmark$subpipelines[[idx.subpipeline]]$projection
if (!is.null(proj)) {
if (verbose) { .msg('Evaluating subpipeline '); .msg_alt(idx.subpipeline); .msg(' of ' ); .msg_alt(benchmark$n_subpipelines); .msg('\n') }
cloned <- IsClone(proj)
if (cloned)
proj <- benchmark$subpipelines[[proj$ref]]$projection
## See if there is an n-parameter and get the parameter value range if there is
n_param_values <- benchmark$n_params[[idx.subpipeline]]$projection
n_param_range <- seq_along(n_param_values)
if (length(n_param_range) == 0)
n_param_range <- 'NoNParameter'
if (IsClone(proj) && n_param_range == 'NoNParameter') {
## If this projection step was used before and there is no n-parameter, write a reference to the result
if (verbose) { .msg('\t-> cloning projection result from sub-pipeline'); .msg_alt(proj$ref); .msg('\n') }
.h5writeProjectionReference(benchmark, idx.subpipeline = idx.subpipeline, idx.subpipeline_ref = proj$ref)
} else {
if (IsClone(proj) && n_param_range != 'NoNParameter') {
## If this projection step was used before and there is an n-parameter, check if there are n-parameter values for which we already have the result
proj_result_locations <-
purrr::map(
seq_along(n_param_values),
function(idx)
FindProjectionResultIfAlreadyGenerated(
benchmark,
idx.subpipeline = idx.subpipeline,
idx.n_param = idx,
n_param = n_param_values[idx]
)
)
which_proj_results_not_available_already <-
purrr::map_lgl(proj_result_locations, function(x) is.null(x))
## Write reference to those n-parameter iteration results that we already have
for (idx_res in seq_along(proj_result_locations)) {
loc <- proj_result_locations[[idx_res]]
if (!is.null(loc)) {
.h5writeProjectionReference(
benchmark,
idx.subpipeline = idx.subpipeline,
idx.n_param = idx_res,
idx.subpipeline_ref = loc$idx.subpipeline,
idx.n_param_ref = loc$idx.n_param
)
}
}
## Only those n-parameter iterations that have not been evaluated yet will be evaluated in this parameter sweep
n_param_range <- n_param_range[which_proj_results_not_available_already]
}
if (length(n_param_range) > 0) { # if there are any unevaluated n-parameter iterations left...
## Get model-building functions
train <- fTrain.ModuleChain(proj)
extract <- fExtract.ModuleChain(proj)
map <- fMap.ModuleChain(proj)
## Retrieve input expression data (and k-NNG if needed)
exprs <- GetExpressionMatrix(benchmark)
knn <- if (proj$uses_knn_graph) GetkNNMatrix(benchmark) else NULL
## Iterate over n-parameter values
purrr::walk(
n_param_range,
function(idx.n_param) {
if (idx.n_param != 'NoNParameter' && is.na(n_param_values[idx.n_param])) {
## Skip the projection step if n-parameter value is set to NA
if (verbose) { .msg('\t-> omitting projection step (since n-parameter value set to NA)\n') }
.h5writeProjectionReference(
benchmark,
idx.subpipeline = idx.subpipeline,
idx.n_param = idx.n_param,
idx.subpipeline_ref = 0,
idx.n_param_ref = NA
)
} else {
if (verbose) { .msg('\t-> evaluating projection step: '); .msg_alt(GetNParameterIterationName_Projection(benchmark, idx.subpipeline, idx.n_param)) }
if (idx.n_param != 'NoNParameter') {
## Check if n-parameter iteration was evaluated already in *this* subpipeline
loc <- FindProjectionResultIfAlreadyGenerated(
benchmark,
idx.subpipeline = idx.subpipeline,
idx.n_param = idx.n_param,
n_param = n_param_values[idx.n_param]
)
if (!is.null(loc)) {
## Write reference to previous n-parameter iteration if it has
.h5writeProjectionReference(
benchmark,
idx.subpipeline = idx.subpipeline,
idx.n_param = idx.n_param,
idx.subpipeline_ref = loc$idx.subpipeline,
idx.n_param_ref = loc$idx.n_param
)
if (verbose) { .msg_alt_good(' cloned from subpipeline ', loc$idx.subpipeline, ' n-param iteration ', loc$idx.n_param, '\n') }
}
}
if (
idx.n_param == 'NoNParameter' ||
(idx.n_param != 'NoNParameter' && is.null(loc))
) { # if the result has not not been generated before in this nor another subpipeline...
# ...get n-parameter value
n_param <- if (idx.n_param == 'NoNParameter') NULL else n_param_values[idx.n_param]
if (idx.n_param == 'NoNParameter') idx.n_param <- NULL
# ...get copy of expression data if needed
this_exprs <- if (proj$uses_original_expression_matrix) exprs else NULL
# ...deploy the projection tool
res <- DeployProjection(exprs, train, extract, map, seed, benchmark$projection.training_set, knn, this_exprs, n_param, benchmark$h5_path, idx.subpipeline = idx.subpipeline, idx.n_param = idx.n_param)
if (verbose) .msg_alt_good(' done in ', round(res$Timing, 2), ' seconds\n')
# ...if there were separate samples on input, separate out the projection also
res$Projection <- SeparateIntoSamples(res$Projection, benchmark)
# ...write the projection result
.h5writeProjectionResult(res, benchmark, idx.subpipeline, idx.n_param)
# ...score the projection result if needed
if (benchmark$score_projections) {
distmat <- if (benchmark$row_count <= benchmark$projection_collapse_n) GetDistanceMatrix(benchmark) else NULL
knn <- if (benchmark$row_count > benchmark$projection_collapse_n) GetkNNMatrix(benchmark) else NULL
scores <- ScoreProjection(exprs, res, distmat, benchmark$projection_collapse_n, benchmark$projection_neighbourhood, knn, benchmark$knn.k, benchmark$knn.algorithm, benchmark$knn.distance, verbose)
.h5writeProjectionScoring(scores, benchmark, idx.subpipeline, idx.n_param)
}
}
}
}
)
} # endif length(n_param_range) > 0
}
} # endif !is.null(proj)
}
)
invisible(benchmark)
}
DeployProjection <- function(
input, fTrain, fExtract, fMap, seed, idcs_training, knn, exprs, n_param, h5_path = NULL, idx.subpipeline = NULL, idx.n_param = NULL, out.intermediates = NULL
) {
systime <- system.time({
set.seed(seed)
intermediates <- NA
res <-
fTrain(
input = if (is.null(idcs_training)) input else input[idcs_training],
n_param = n_param,
knn = knn,
exprs = exprs,
save_intermediates = TRUE,
h5_path = h5_path,
idx.subpipeline = idx.subpipeline,
idx.n_param = idx.n_param,
out.intermediates = if (!is.null(out.intermediates)) intermediates else NULL
)
if (!is.null(out.intermediates))
eval.parent(substitute(out.intermediates <- intermediates))
res <- if (is.null(idcs_training)) fExtract(res) else fMap(res, input)
})
colnames(res) <- paste0('component_', seq_len(ncol(res)))
list(Projection = res, Timing = systime['elapsed'])
}
|
0a651c7b8dcef7e5016440932b060fb787480d1c | 483018f5af9ba30b8c26add9168f85052891f1fc | /homework1.R | 33b8b319cb5eff20f03c44f46d1d581bd1556a51 | [] | no_license | hscarter/QMECgitrepo | d1682272275d6cbf38bd8c15cd87a03502f91f70 | 2697fa48c7122e4a13ff3574cf28735b5a56124a | refs/heads/master | 2020-12-11T21:21:15.274185 | 2020-01-31T18:00:21 | 2020-01-31T18:00:21 | 233,962,537 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,568 | r | homework1.R | #Homework Assignment 1
#by Haley Carter
#I am not very good at shuffling cards generally and I wanted to see if it made a difference
#which hand I used to initiate the shuffle. The subject is my shuffling, the treatments are
#starting with my right hand or my left hand. Treatment assignment was done randomly
#(see code below). And the response was a successful shuffle (all cards could be pushed together)
#or an unsuccessfull shuffle (cards could not be combined after shuffling attempt).
#make dataframe
trial <- c(1:50) #there will be 50 trials
trial <- sample(trial, 50) #random assigment requires randomly selecting from trial number
assignment <- rep(T:F, each = 25) #first 25 randomly sampled trial numbers assigned treatment 1
#treatment 1 is starting the shuffle with my right hand
df <- data.frame(trial = trial, assignment = assignment) #combine them
df <- df[order(df$trial),] #sort in order of trial number
#record data, 1 means successfull shuffle
results <- c(
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
0,
1
)
#add results to the dataframe
df$results <- results
#save the data
write.csv(df, "hw1_data.csv") #I'm working from a git repo so also saves online
#read this data in remotely from github
hw1_data <- read.csv("https://raw.githubusercontent.com/hscarter/QMECgitrepo/master/hw1_data.csv")
#how did I do?
table(df$assignment, df$results)
|
b4544381d51485ab21c1802231fd637f22a75159 | e2f5c377b0507310f9fe4233fc816d660b73dc1b | /man/testDesignShiny.Rd | f4be98e709140a8b865ba6a37f52f75f655de01e | [] | no_license | SpencerButt/IDPS-LAAD | 6e79d8f3f71e400c51c23c1b6fba4fe7b1d54c56 | 3348e0ba32056637dba72dcb595d1e0eaf69954a | refs/heads/master | 2022-04-22T10:06:53.566404 | 2020-04-20T15:06:38 | 2020-04-20T15:06:38 | 117,839,416 | 0 | 4 | null | 2020-04-20T15:00:28 | 2018-01-17T13:23:35 | R | UTF-8 | R | false | true | 2,270 | rd | testDesignShiny.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rShinyDatasetDocumentation.R
\docType{data}
\name{testDesignShiny}
\alias{testDesignShiny}
\title{Default hyperparameter experimental test design for a single-hidden layer undercomplete
autoencoder neural netowrk.}
\format{A dataset with 600 rows and 12 features; 9-test design features, 1-response
variable feature, and 1-design point index feature
\itemize{
\item{\strong{Subset_Split: }}{Categorical 3-level factor; describes the subset to use
in the Autoencoder}
\item{\strong{Design_Point: }}{An index of each test point}
\item{\strong{Activatino_Function: }}{Categroical 2-level factor; describes the activation
function within each neuron in the neural network}
\item{\strong{Input_DO_Rate: }}{Continuous multi-level factor; describes the Dropout
rate of the input layer neurons}
\item{\strong{Hidden_DO_Rate: }}{Continuous multi-level factor; describes the Dropout
rate of the hidden layer neurons}
\item{\strong{Initial_Weight_Distribution: }}{Categorical 3-level factor; describes the
distribution by which the initial weights of the autoencoder neural network
are generated}
\item{\strong{Data_Scale: }}{Categorical 3-level factor; describes the range to which
the training and test subset data is scaled.}
\item{\strong{Rho: }}{Continuous multi-level factor; describes the value of the rho parameter
for the ADADELTA learning procedure}
\item{\strong{Epsilon: }}{Continuous multi-level factor; describes the value of the epsilon
parameter for the ADADELTA learning procedure}
\item{\strong{Shuffle_Train_Data: }}{Categorical 2-level factor; boolean value indicating
if the training data should be randomly shuffled during neural network training}
\item{\strong{Y: }}{Placeholder for the response value measurement}
}}
\usage{
testDesignShiny
}
\description{
Default hyperparameter experimental test design for a single-hidden layer undercomplete
autoencoder neural netowrk.
}
\section{Information}{
A .rda dataset containing the factors and levels of 600-test trial designed experiment
generated in JMP Pro v12.1. The designed experiment is a flexible space filling design
for 9 test design factors.
}
\keyword{datasets}
|
67fc409362f8a771b1bc90baf29e827d7a967e57 | 8bfb07d4492bf8901070e678846991f497794199 | /man/fit_sncm.Rd | b3e8d95d3ffdf2f206365e5d80dd7a33fbe83444 | [] | no_license | DanielSprockett/reltools | 5b38577e1a3257af40d47c19c39778a32214107d | f693109c03ebd6944fdb42b7bff29d18cb49341b | refs/heads/master | 2021-08-23T23:04:48.558234 | 2017-12-07T00:49:13 | 2017-12-07T00:49:13 | 113,137,957 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,586 | rd | fit_sncm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeling_tools.R
\name{fit_sncm}
\alias{fit_sncm}
\title{Fits \code{sncm} to an OTU table}
\usage{
fit_sncm(spp, pool = NULL, taxon = NULL)
}
\arguments{
\item{spp}{A community table for communities of interest with local communities/samples as rows and taxa as columns.
All samples must be rarefied to the same depth.}
\item{pool}{(optional) A community table for defining source community.}
\item{taxon}{(optional) A table listing the taxonomic calls for each otu, with OTU ids as row names
and taxonomic classifications as columns.}
}
\value{
This function returns list of two elements.
The first element, spp.out$fitstats, contains various fitting stats.
The second element contains the predicted occurrence frequencies for each OTU/ASV, as well as their \code{fit_class}
}
\description{
Fits the neutral model from
\href{http://onlinelibrary.wiley.com/doi/10.1111/j.1462-2920.2005.00956.x/abstract}{Sloan \emph{et al.} (2006)}
to an OTU table and returns several fitting statistics as well as predicted occurrence frequencies for each OTU
based on their abundance in the metacommunity. The author of this function is Adam Burns (\email{aburns2@uoregon.edu}),
and was originally published in \href{https://www.nature.com/articles/ismej2015142}{Burns \emph{et al.} (2016)}.
}
\examples{
spp <- otu_table(ps)@.Data
spp2 <- otu_table(ps2)@.Data
spp.out <- fit_sncm(spp, pool=NULL, taxon=NULL)
spp.out <- fit_sncm(spp, pool=spp2, taxon=data.frame(tax_table(ps)))
}
\seealso{
\code{\link{plot_sncm_fit}}
}
|
8f226754a7e005d213e0fd51d5d2e79e6e011abd | 13e8bd394fc0b606ceb0cf73d0be8ef720910758 | /inst/ConfintLMApp.r | a9a09549204bb3ad04b2088096a9632b12d8b486 | [] | no_license | cran/lmviz | 7e6634a084a5469cf56505dae9c4577f0aba62b5 | ed9121c3f19c7112815107bcab1189c41f46db00 | refs/heads/master | 2021-07-04T08:41:16.480716 | 2020-08-24T19:40:02 | 2020-08-24T19:40:02 | 158,114,582 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,884 | r | ConfintLMApp.r | #library(shiny)
#library(shinyjs)
library(car)
# UI definition: a left sidebar collects the true model parameters, the design
# of the explanatory variable and the action buttons; the main panel shows the
# simulated sample with its fitted line, the confidence intervals / joint
# confidence region, and the sampling behaviour of the related test statistics.
ui <- fluidPage(
  shinyjs::useShinyjs(),
  withMathJax(),   # required for the LaTeX (\\( ... \\)) labels used below
  # tags$img(width=300,height=50,src="deams.png"),
  # tags$img(width=300,height=50,src="logoUnits2.jpg"),
  h1("Confidence intervals for the coefficients"),
  #tabsetPanel(
  # tabPanel("a",
  # Input functions
  sidebarLayout(
    sidebarPanel(
      sliderInput("n","Number of observations",min=8,max=1000,value=25,step=1),
      hr(),
      # True model parameters.  NOTE: the input id "beta0" is labelled
      # \(\beta_1\) (intercept) and "beta1" is labelled \(\beta_2\) (slope);
      # the server code relies on these ids, not on the displayed labels.
      p(strong("Model: \\(y_i=\\beta_1+\\beta_2x_i+\\varepsilon_i\\), \\(\\varepsilon_i\\thicksim IID(\\mathcal N(0,\\sigma^2))\\)")),
      splitLayout(
        numericInput("beta0","\\(\\beta_1\\)",0,-10,10,0.1,width="100%"),
        numericInput("beta1","\\(\\beta_2\\)",1,-10,10,0.1,width="100%"),
        numericInput("sdeverr","\\(\\sigma\\)",1,0,10,0.1,width="100%")
      ),
      hr(),
      # Design of the explanatory variable: standard deviation and mean.
      p(strong("Explanatory variable")),
      splitLayout(
        numericInput("sdevx","\\(\\sqrt{V(x)}\\)",0.5,0,1,0.1,width="100%"),
        numericInput("xbar","\\(\\bar{x}\\)",0,-3,3,0.1,width="100%")
      ),
      checkboxInput("mostra","show estimates on plot"),
      hr(),
      # Action buttons: single draw, cycling auto-draw, and data download.
      actionButton("aggiorna","Draw Y"),
      p("A new sample for Y is drawn based on the input parameters."),
      actionButton("aggiorna2","Draw Y repeatedly"),
      p("A new simulation will be drawn every 0.5/0.1 seconds"),
      hr(),
      p("Sample distributions and c.i. plots are reset if any parameter is changed."),
      hr(),
      actionButton("scarica","Download data"),
      p("Saves data in temp.csv in the working directory"),
      width=2),
    mainPanel(
      numericInput("livconf","Confidence level",0.95,0.5,0.999,0.001,width="100%"),
      fluidRow(
        # Top-left: scatter of the simulated sample with true and fitted lines.
        column(width = 8,{
          verticalLayout(
            p("Below you see a realization of the linear model with the parameters you specified on the left panel.
              The red line is the least squares (maximum likelihood) estimate."),
            plotOutput("scatter",height="300px")
          )
        }),
        # Top-right: marginal c.i. and joint confidence region plot.
        column(width = 4,
               p("The plot below represents the confidence intervals for \\(\\beta_1\\) and \\(\\beta_2\\) and the confidence region for the pair \\((\\beta_1,\\beta_2)\\)."),
               plotOutput("stim", width = "100%", height="300px")
        ),
        # column(width = 4,{
        #   verticalLayout(
        #     plotOutput("beta0ic", width = "100%", height="300px"),
        #     plotOutput("qp1plot", width = "100%", height="300px"),
        #     p(textOutput("beta0ictesto"))
        #   )
        # }),
        # Bottom-left: history of slope confidence intervals and the t-type
        # pivotal statistic across repeated simulations.
        column(width = 6,{
          verticalLayout(
            p(textOutput("testolivconf")),
            p(textOutput("testo2")),
            plotOutput("beta1ic", width = "100%", height="300px"),
            p(textOutput("beta1ictesto")),
            p("The above c.i. are based on the fact that \\(T_2=\\frac{\\hat\\beta_2-\\beta_2}{\\hat{V}(\\hat{\\beta_2})}\\thicksim t_{n-2}\\) and so \\(P(t_{n-2,1-\\alpha/2}\\leq T_2 \\leq t_{n-2,1-\\alpha/2})=1-\\alpha\\)."),
            p("in other words, the c.i. includes those values that would be accepted as null hypotheses in a t-test on \\(\\beta_2\\), this means that those c.i. that do not cover the true value correspond to samples where a null hypotheses on \\(\\beta_2\\) would reject the true value. Below we depict the test statistics for that hypothesis."),
            plotOutput("qp2plot", width = "100%", height="300px")
          )
        }),
        # Bottom-right: history of joint confidence regions and the F-type
        # statistic across repeated simulations.
        column(width = 6,{
          verticalLayout(
            p(textOutput("testolivconf2")),
            p(textOutput("testo22")),
            plotOutput("beta0beta1ic", width = "100%", height="300px"),
            p(textOutput("beta01ictesto")),
            p("The above c.r. are based on the fact that \\(F(\\beta)=\\frac{(\\hat\\beta-\\beta)^TX^TX(\\hat\\beta-\\beta)/(2n)}{\\hat\\sigma^2/(n-2)} \\thicksim F_{2,n-2} \\) and so \\(P(F(\\beta)\\leq F_{2,n-2,1-\\alpha})=1-\\alpha\\)."),
            p("in other words, the c.r. includes those values that would be accepted as null hypotheses in a F-test on \\(\\beta_1,\\beta_2\\), this means that those c.r. that do not cover the true value correspond to samples where a null hypotheses on \\(\\beta_1,\\beta_2\\) would reject the true value. Below we depict the test statistics for that hypothesis."), plotOutput("Fplot", width = "100%", height="300px")
          )
        })
      ),
      width=10)
  )
)
# Heuristic bucket count for a sample of the size of `x` (presumably a number
# of histogram intervals -- confirm against callers, none visible here).
#
# Returns 5 when length(x) < 50, 20 when 50 <= length(x) < 500, and 30 when
# length(x) >= 500.
#
# Bug fixed: the original body was three *independent* `if` statements, so
# only the value of the last one could ever be returned; any input with
# length(x) < 500 made the function return invisible NULL instead of 5 or 20.
# Rewritten as a single if/else chain.
nintf <- function(x) {
  n <- length(x)
  if (n < 50) {
    5
  } else if (n < 500) {
    20
  } else {
    30
  }
}
server <- function(input, output, session) {
  # input$<id> available
  # data <- reactive ({}) #respond to every value in input, to be used as data()
  # output$<nomeoutput> <- render<TIPO>({ <codice> })
  # Simulation history: each vector grows by one element per drawn sample of Y
  # (see observeEvent(y(), ...)) and is cleared when any parameter changes.
  rv=reactiveValues()
  rv$beta0.v=c()      # intercept estimates (labelled beta_1 in the UI)
  rv$beta1.v=c()      # slope estimates (labelled beta_2 in the UI)
  rv$sigma.v=c()      # residual variance estimates, RSS/(n-2)
  rv$sd.v=c()         # initialised but never filled in the code shown here
  rv$ctrlellipse=c()  # initialised but never filled in the code shown here
  rv$qp=c()           # pivotal quantity for the slope, one value per sample
  rv$qpx=c()          # uniform random draws paired with qp (presumably plot
                      # x-coordinates for qp2plot -- confirm downstream)
  rv$ftest=c()        # F-type statistic for (beta1, beta2) jointly
  rv$ftestx=c()       # uniform random draws paired with ftest
  # Least-squares fit of the current sample and its summary (the inner
  # `fit=` assignment is redundant: the reactive returns the lm object anyway).
  fit=reactive({fit=lm(y()~x())})
  summfit=reactive({summary(fit())})
  # Refresh delay in milliseconds for the auto-redraw loop; this huge value
  # means "effectively never", i.e. automatic redrawing starts switched off.
  tempo=reactiveVal(30000000000000000)
  # Cycle the auto-redraw mode on each press of "Draw Y repeatedly":
  # off -> every 500 ms -> every 50 ms -> off, relabelling the button each
  # time to describe the next action in the cycle.
  observeEvent(input$aggiorna2,{
    if ((input$aggiorna2 %% 3)==0){
      tempo(30000000000000000)   # effectively disables the auto-redraw loop
      updateActionButton(session, "aggiorna2",label="Draw Y repeatedly")
    } else {
      if ((input$aggiorna2 %% 3)==1){
        tempo(500)
        updateActionButton(session, "aggiorna2",label="Accelerate drawings")
      } else {
        tempo(50)
        updateActionButton(session, "aggiorna2",label="Stop automatic drawings")
      }
    }
  })
  # Auto-redraw loop: programmatically click "Draw Y" (which triggers a new
  # sample via the eventReactive for y), then schedule the next run after
  # tempo() milliseconds.
  observe({
    shinyjs::click("aggiorna")
    invalidateLater(tempo())
  })
  # Every time a new sample of Y is drawn, append the current estimates and
  # test statistics to the history vectors in rv.
  observeEvent(y(),{
    # Slope estimate.
    temp=rv$beta1.v
    temp=c(temp,fit()$coefficients[2])
    rv$beta1.v=temp
    # Intercept estimate.
    temp=rv$beta0.v
    temp=c(temp,fit()$coefficients[1])
    rv$beta0.v=temp
    # Residual variance estimate, RSS/(n-2).
    temp=rv$sigma.v
    stimasigma=sum(fit()$resid^2)/(input$n-2)
    temp=c(temp,stimasigma)
    rv$sigma.v=temp
    # Pivotal quantity for the slope against its true value.
    # NOTE(review): the denominator is the VARIANCE estimate times sqrt(1/Dx),
    # whereas the T_2 statistic displayed in the UI uses the standard error
    # sqrt(stimasigma/Dx()) -- confirm the intended scaling before relying on
    # this being t-distributed.
    temp=rv$qp
    temp=c(temp,(fit()$coefficients[2]-input$beta1)/(sum(fit()$resid^2)/(input$n-2)*sqrt(1/Dx())))
    rv$qp=temp
    temp=rv$qpx
    temp=c(temp,runif(1))   # random jitter paired with the statistic
    rv$qpx=temp
    # F-type statistic for H0: (beta1, beta2) equal to the true input values.
    temp=rv$ftest
    fit1=lm(I(y()-input$beta0-input$beta1*x())~x())
    fit0=lm(I(y()-input$beta0-input$beta1*x())~ -1)
    aa=anova(fit0,fit1)$F[2]   # NOTE(review): computed but never used here
    vec=fit()$coefficients-c(input$beta0,input$beta1)
    # NOTE(review): this matches the UI formula only if 'stimasigma' stands
    # for sigma-hat^2; since stimasigma already includes the /(n-2) factor,
    # the extra /(input$n-2) below divides by (n-2) twice -- confirm.
    controllo=((vec %*% xtx() %*% vec)/(2*input$n))/(stimasigma/(input$n-2))
    temp=c(temp,controllo)
    rv$ftest=temp
    temp=rv$ftestx
    temp=c(temp,runif(1))   # random jitter paired with the statistic
    rv$ftestx=temp
  })
  # Clear the whole simulation history whenever any model or design parameter
  # changes.  (The product of the inputs is used as a single trigger; it
  # changes whenever any factor does, barring coincidental cancellations.)
  observeEvent(input$n*input$beta0*input$beta1*input$sdeverr*input$sdevx*input$xbar,{
    rv$beta1.v=c()
    rv$beta0.v=c()
    rv$sigma.v=c()
    rv$sd.v=c()
    rv$ctrlellipse=c()
    rv$qp=c()
    rv$qpx=c()
    rv$ftest=c()
    rv$ftestx=c()
  })
observeEvent(input$scarica,{
#dati=data.frame(x=x(),y=y())
#.GlobalEnv$results.SimpleLM=dati
write.table(dati,sep=",",file=paste0(.wd,"/temp.csv"))
})
  # Caption texts reporting how many simulated samples the displayed sample
  # distributions are based on.  (numsim/numsim2/numsim3 have no matching
  # element in the visible UI layout -- presumably used by the commented-out
  # panels; confirm before removing.)
  output$numsim=renderText(
    paste("Sample distributions of estimators based on ",
          length(rv$beta1.v),
          "simulations; green: true values, gray: theorical distribution, triangle: mean of simulated estimates.")
  )
  output$numsim2=renderText(
    paste("Sample distributions of estimators based on ",
          length(rv$beta1.v),
          "simulations; green: true values, gray: theorical distribution.")
  )
  output$numsim3=renderText(
    paste("Sample distributions of pivotal quantities based on",
          length(rv$beta1.v),
          "simulations; green: theorical distribution.")
  )
  # X'X for the design matrix (1, x) and its inverse; used by the confidence
  # ellipse plot and the F-type statistic.
  xtx=reactive({ t(cbind(1,x())) %*% cbind(1,x()) })
  invxtx=reactive({ solve(xtx()) })
  # Plot of the two marginal confidence intervals (thick dark-red segments on
  # the plot edges) and the joint confidence ellipse for (beta1, beta2), with
  # the true parameter pair marked in green.
  output$stim=renderPlot({
    par(mar=c(5,4,0.5,0.5),cex=1)
    # Marginal confidence intervals at the level chosen in the UI.
    ci=confint(fit(),level=input$livconf)
    # A near-maximal (99.99%) ellipse around the TRUE parameters, used only to
    # pick plot limits wide enough to contain any plausible estimate.
    a=ellipse(c(input$beta0,input$beta1),shape=invxtx()[1:2,1:2],
              radius=sqrt(2*qf(0.9999,2,input$n-3)*input$sdeverr^2))
    lim2=range(input$beta0+6*c(-1,1)*input$sdeverr/sqrt(input$n),a[,1])
    lim3=range(input$beta1+6*c(-1,1)*input$sdeverr/sqrt(input$n),a[,2])
    # Point estimate (beta1-hat, beta2-hat).
    plot(fit()$coef[1],fit()$coef[2],xlim=lim2,ylim=lim3,xaxs="i",yaxs="i",
         xlab=expression(hat(beta)[1]),ylab=expression(hat(beta)[2]),las=1)
    # Joint confidence region at the requested level (car package).
    confidenceEllipse(fit(),which.coef=c(1,2),add=TRUE,col="darkred",level=input$livconf)
    # Marginal c.i. drawn along the bottom (intercept) and left (slope) edges.
    segments(ci[1,1],lim3[1],ci[1,2],lim3[1],lwd=5,col="darkred")
    segments(lim2[1],ci[2,1],lim2[1],ci[2,2],lwd=5,col="darkred")
    # True parameter pair with dashed guide lines to both axes.
    points(input$beta0,input$beta1,pch=20,cex=2,col="darkgreen")
    segments(input$beta0,input$beta1,input$beta0,lim3[1],col="darkgreen",lty=2)
    segments(input$beta0,input$beta1,lim2[1],input$beta1,col="darkgreen",lty=2)
  })
  # Explanatory variable: an n-vector standardised so that its sample mean and
  # sd are EXACTLY the requested xbar and sdevx; redrawn only when n, sdevx or
  # xbar changes (again the product of inputs is used as a combined trigger).
  x=eventReactive(input$n*input$sdevx*input$xbar,{
    req(is.numeric(input$xbar) & is.numeric(input$sdevx) & (input$sdevx>0))
    x=rnorm(input$n,0,1)
    x=(x-mean(x))/sd(x)   # exact standardisation: mean 0, sd 1
    x=input$xbar+input$sdevx*x
    return(x)
  })
  # Sum of squared deviations of x.  Uses input$xbar rather than mean(x());
  # the two coincide here because x() is built to have mean exactly input$xbar.
  Dx=reactive({sum((x()-input$xbar)^2)})
  # Response: a fresh draw from the true model.  Triggered by "Draw Y" and by
  # any parameter change; ignoreNULL = FALSE produces an initial draw at
  # startup before the button is ever pressed.
  y=eventReactive(
    input$aggiorna*input$n*input$beta0*input$beta1*input$sdeverr*input$sdevx*input$xbar, {
      req(is.numeric(input$beta1) & is.numeric(input$beta0)& is.numeric(input$sdeverr) & (input$sdeverr>0))
      #autoInvalidate()
      y=input$beta0+input$beta1*x()+rnorm(input$n,0,input$sdeverr)
      return(y)
    },ignoreNULL = FALSE)
# Scatter plot of the simulated (x, y) data with the estimated regression
# line (dark red) and the true regression line (gray) overlaid.  When
# input$mostra ("show") is on, the axes are extended to include x = 0 and
# x = 1 so the intercept and slope can be annotated geometrically.
output$scatter=renderPlot({
  par(mar=c(4,4,1,1))
  if (input$mostra){
    limx=c(min(c(0,1,x())),max(c(0,1.1,x())))
    limy=range(c(input$beta0+input$beta1*(input$xbar+c(-1,1)*4*input$sdevx)+c(-1,1)*3*input$sdeverr,input$beta0+c(-1,1)*qnorm(0.95)*input$sdeverr*sqrt(1/input$n+mean(x())^2/Dx()),input$beta0+c(-1,1,-1,1)*qnorm(0.95)*input$sdeverr*sqrt(1/input$n+mean(x())^2/Dx())+input$beta1+c(-1,-1,1,1)*qnorm(0.95)*input$sdeverr*sqrt(1/Dx())))
  } else {
    limx=c(min(x()),max(x()))
    limy=range(input$beta0+input$beta1*(input$xbar+c(-1,1)*4*input$sdevx))+c(-1,1)*3*input$sdeverr
  }
  a=plot(x(),y(),xlim=limx,
         ylim=limy,las=1,xlab="x",ylab="y",las=1,pch=20,yaxt="n",bty="n")
  # Invisible tick labels past the data range suppress default axis clutter;
  # the second axis() call prints the informative y labels.
  axis(1,at=c(limx[1]-1,limx[2]+1),labels=c("",""))
  axis(2,las=1,
       at=c(limy[1]-4,limy[2]+4),
       labels=c("",""))
  axis(2,las=1,
       at=c(limy,input$beta0),
       labels=signif(c(limy,input$beta0),3))
  rug(x())
  # Legend text placed on the side opposite the slope direction so it does
  # not overlap the fitted line.
  text(ifelse(input$beta1>0,limx[1],limx[2]),limy[2],adj=c(0+(input$beta1<=0),1),col="darkred",
       label=substitute(paste(hat(y),"=",b0,segno,b1,"x"," (est. reg. line)"),
                        list(b0=signif(fit()$coef[1],3),b1=signif(fit()$coef[2],3),r2=signif(summfit()$r.squared,3),segno=ifelse(fit()$coef[2]>0,"+","")))
  )
  text(ifelse(input$beta1>0,limx[1],limx[2]),limy[2]-0.1*(limy[2]-limy[1]),adj=c(0+(input$beta1<=0),1),col="darkgray",
       label=substitute(paste(E(y),"=",b0,segno,b1,"x"," (true reg. line)"),
                        list(b0=signif(input$beta0,3),b1=signif(input$beta1,3),segno=ifelse(input$beta1>0,"+","")))
  )
  #  expression(paste("Retta stimata:",hat(y),"=",signif(fit()$coef[1],3)," + ",signif(fit()$coef[2],3),"x")))
  curve(input$beta0+input$beta1*x,col=gray(0.5),lwd=2,add=TRUE)
  curve(fit()$coef[1]+fit()$coef[2]*x,col="darkred",lwd=2,add=TRUE)
  if (input$mostra){
    # Geometric annotation of the estimates: a point at (0, intercept) and a
    # vertical segment of height slope between x = 0 and x = 1.
    par(xpd=NA)
    text(0,fit()$coef[1],adj=c(fit()$coef[2]>=0,0),
         labels=expression(hat(beta)[1]),col="darkgreen",cex=1.5)
    text(1,mean(c(fit()$coef[1],fit()$coef[1]+fit()$coef[2])),pos=4,labels=expression(hat(beta)[2]),col="darkgreen",cex=1.5)
    par(xpd=FALSE)
    points(0,fit()$coef[1],pch=20,cex=1.5,col="darkgreen")
    abline(v=c(0,1),lty=2)
    segments(1,fit()$coef[1],1,fit()$coef[1]+fit()$coef[2],lwd=2,col="darkgreen")
    segments(0,fit()$coef[1],1,fit()$coef[1],lty=2)
  }
})
# One horizontal t-based confidence interval for beta0 per simulated dataset,
# stacked vertically.  Intervals that miss the true beta0 (lower bound above
# it, or upper bound below it) are colored dark red; the index expression
# 1 + (miss-low) + (miss-high) selects "black" (covers) or "darkred" (misses).
output$beta0ic=renderPlot({
  req(length(rv$beta1.v)>0)
  par(mar=c(5,1,1,1))
  plot(rv$beta0.v,1:length(rv$beta1.v),
       bty="n",
       yaxt="n",
       xaxt="n",
       xlim=c(input$beta0-2*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/input$n+mean(input$xbar)^2/(input$n*input$sdevx^2)),
              input$beta0+2*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/input$n+mean(input$xbar)^2/(input$n*input$sdevx^2))),
       ylim=c(0,max(100,length(rv$beta1.v))),
       xlab=expression(hat(beta)[1]))
  axis(1,at=signif(c(input$beta0-4*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/input$n+mean(x())^2/Dx()),
                     input$beta0+c(-1,1,-0.5,0.5)*2*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/input$n+mean(x())^2/Dx()),
                     input$beta0,
                     input$beta0+4*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/input$n+mean(x())^2/Dx())),2))
  # Vertical reference at the true beta0.
  abline(v=input$beta0,col=gray(0.8),lwd=2)
  # segments() with 3 positional args draws horizontal lines (y1 defaults to y0).
  segments(rv$beta0.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()),
           1:length(rv$beta0.v),
           rv$beta0.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()),
           col=c("black","darkred")[1+
                                      ((rv$beta0.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))>input$beta0)+((rv$beta0.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))<input$beta0)
           ])
})
# Text summary: how many (and what fraction of) simulated intervals for beta0
# miss the true value, plus their average length.
output$beta0ictesto=renderText({
  req(length(rv$beta1.v)>0)
  paste0(sum(((rv$beta0.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))>input$beta0)+((rv$beta0.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))<input$beta0)),
         " out of ",
         length(rv$beta1.v)," intervals do not include the true value (",
         round(100*sum(((rv$beta0.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))>input$beta0)+((rv$beta0.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))<input$beta0))/length(rv$beta1.v),1),"%), their average length is approximately",signif(mean(2*qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx())),3))
})
# Same visualization as beta0ic, but for the slope beta1: one horizontal
# t-based confidence interval per simulated dataset, dark red when the
# interval misses the true beta1.
output$beta1ic=renderPlot({
  req(length(rv$beta1.v)>0)
  par(mar=c(5,1,1,1))
  plot(rv$beta1.v,1:length(rv$beta1.v),
       bty="n",
       yaxt="n",
       xaxt="n",
       xlim=c(input$beta1-2*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/(input$n*input$sdevx^2)),
              input$beta1+2*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/(input$n*input$sdevx^2))),
       ylim=c(0,max(100,length(rv$beta1.v))),
       xlab=expression(hat(beta)[2]))
  axis(1,at=signif(c(input$beta1-4*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/Dx()),
                     input$beta1+c(-1,1,-0.5,0.5)*2*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/Dx()),
                     input$beta1,
                     input$beta1+4*qnorm(1-(1-input$livconf)/5)*input$sdeverr*sqrt(1/Dx())),2))
  # Vertical reference at the true beta1.
  abline(v=input$beta1,col=gray(0.8),lwd=2)
  segments(rv$beta1.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()),
           1:length(rv$beta1.v),
           rv$beta1.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()),
           col=c("black","darkred")[1+
                                      ((rv$beta1.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))>input$beta1)+((rv$beta1.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))<input$beta1)
           ])
})
# Text summary for the beta1 intervals: miss count, miss percentage and
# average interval length.  (Note: uses paste, so fields are space-separated.)
output$beta1ictesto=renderText({
  req(length(rv$beta1.v)>0)
  paste(sum(((rv$beta1.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))>input$beta1)+((rv$beta1.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))<input$beta1)),
        " out of ",
        length(rv$beta1.v)," intervals do not include the true value (",
        round(100*sum(((rv$beta1.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))>input$beta1)+((rv$beta1.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))<input$beta1))/length(rv$beta1.v),1),"%), their average length is approximately",signif(mean(2*qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx())),3))
})
# Summary of the joint confidence ellipses: rv$ctrlellipse (set by the
# beta0beta1ic plot below) counts ellipses that cover the true pair, so the
# difference is the miss count.
output$beta01ictesto=renderText({
  req(length(rv$beta1.v)>0)
  paste0(length(rv$beta1.v)-rv$ctrlellipse,
         " out of ",
         length(rv$beta1.v)," ellipses do not include the true value (",
         round(100*(length(rv$beta1.v)-rv$ctrlellipse)/length(rv$beta1.v),1),"%)")
})
# Explanatory captions with MathJax-style LaTeX (backslashes doubled inside
# R strings) describing the nominal coverage of intervals and ellipses.
output$testolivconf=renderText({
  req(input$livconf)
  paste0("The random interval for \\(\\beta_r\\) is built so that the probability that it includes \\(\\beta_r\\) is ",100*input$livconf,"%.")
})
output$testolivconf2=renderText({
  req(input$livconf)
  paste0("The random ellipse for \\((\\beta_1,\\beta_2)\\) is built so that the probability that it includes \\((\\beta_1,\\beta_2)\\) is ",100*input$livconf,"%.")
})
# Captions stating the expected number of non-covering intervals/ellipses.
output$testo2=renderText({
  req(rv$beta1.v)
  paste0("Below we depict ",length(rv$beta1.v)," random confidence intervals, we expect that ",100*(1-input$livconf),"% of them (which is approximately ",round((1-input$livconf)*length(rv$beta1.v),0),") do not cover the true value.")
})
output$testo22=renderText({
  req(rv$beta1.v)
  paste0("Below we depict ",length(rv$beta1.v)," random confidence ellipses, we expect that ",100*(1-input$livconf),"% of them (which is approximately ",round((1-input$livconf)*length(rv$beta1.v),0),") do not cover the true value.")
})
# output$testoempirical=renderText({
#   req(length(rv$beta1.v)>0)
#   coverellipse=rv$ctrlellipse
#   coverb1=length(rv$beta1.v)-sum(((rv$beta1.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))>input$beta1)+((rv$beta1.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/Dx()))<input$beta1))
#   coverb0=length(rv$beta1.v)-sum(((rv$beta0.v-qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))>input$beta0)+((rv$beta0.v+qt(1-(1-input$livconf)/2,input$n-2)*sqrt(rv$sigma.v)*sqrt(1/input$n+mean(x())^2/Dx()))<input$beta0))
# })
# Theory captions: the t pivotal quantity behind the c.i. and the F statistic
# behind the joint confidence regions, with the relevant quantiles inlined.
output$beta1ictestoTeo=renderText({
  paste0("The above c.i. are based on the fact that \\(T_2=\\frac{\\hat\\beta_2-\\beta_2}{\\hat{V}(\\hat{\\beta_2})}\\thicksim t_{",input$n-2,"}\\) and so \\(P(t_{",input$n-2,",",(1-input$livconf)/2,"}\\leq T_2 \\leq t_{",input$n-2,",",1-(1-input$livconf)/2,"})=",input$livconf,"\\).")
})
output$FtestoTeo=renderText({
  paste0("The above c.r. are based on the fact that \\(F(\\beta)=\\frac{(\\hat\\beta-\\beta)^TX^TX(\\hat\\beta-\\beta)/(2n)}{\\hat\\sigma^2/(n-2)} \\thicksim F_{2,n-2} \\) and so \\(P(F(\\beta)\\leq F_{2,",input$n-2,"})=",input$livconf,"\\).")
})
# Joint confidence ellipses for (beta0, beta1), one per simulated dataset,
# drawn around each simulated estimate.  Ellipses covering the true pair are
# gray, misses dark red.  Italian identifiers: colore = colors,
# contatore = counter (covering ellipses), controllo = coverage indicator.
# Side effect: stores the cover count in rv$ctrlellipse, which the
# beta01ictesto text output reads.
output$beta0beta1ic=renderPlot({
  req(length(rv$beta1.v)>0)
  par(mar=c(5,4,0.5,0.5),cex=1)
  ci=confint(fit(),level=input$livconf)
  # Wide ellipse used only to pick axis limits (same trick as output$stim).
  a=ellipse(c(input$beta0,input$beta1),shape=invxtx()[1:2,1:2],
            radius=sqrt(2*qf(0.9999,2,input$n-3)*input$sdeverr^2))
  lim2=range(input$beta0+6*c(-1,1)*input$sdeverr/sqrt(input$n),a[,1])
  lim3=range(input$beta1+6*c(-1,1)*input$sdeverr/sqrt(input$n),a[,2])
  plot(input$beta0,input$beta1,xlim=lim2,ylim=lim3,xaxs="i",yaxs="i",
       xlab=expression(hat(beta)[1]),ylab=expression(hat(beta)[2]),las=1)
  #confidenceEllipse(fit(),which.coef=c(1,2),add=TRUE,col="darkred",level=input$livconf)
  #colore=hsv(4/360,.13,.98,alpha=0.3)
  colore=c("darkred","gray")
  contatore=0
  for (i in 1:length(rv$beta1.v)){
    # Coverage check: quadratic form of (estimate - truth) against the
    # F-based radius of the confidence region.
    vec=c(rv$beta0.v[i],rv$beta1.v[i])-c(input$beta0,input$beta1)
    controllo=(vec %*% xtx() %*% vec)<=(2*input$n*qf(input$livconf,2,input$n-2)*rv$sigma.v[i]/(input$n-2))
    ellipse(c(rv$beta0.v[i],rv$beta1.v[i]),shape=invxtx()[1:2,1:2],
            radius=sqrt(2*input$n*qf(input$livconf,2,input$n-2)*rv$sigma.v[i]/(input$n-2)),col=colore[1+controllo],fill=FALSE,lwd=0.5,center.pch=FALSE)
    contatore=contatore+controllo
  }
  rv$ctrlellipse=contatore
  # Marginal intervals of the current fit plus the true value in green.
  segments(ci[1,1],lim3[1],ci[1,2],lim3[1],lwd=5,col="darkred")
  segments(lim2[1],ci[2,1],lim2[1],ci[2,2],lwd=5,col="darkred")
  points(input$beta0,input$beta1,pch=20,cex=2,col="darkgreen")
  segments(input$beta0,input$beta1,input$beta0,lim3[1],col="darkgreen",lty=2)
  segments(input$beta0,input$beta1,lim2[1],input$beta1,col="darkgreen",lty=2)
})
# Histogram of the pivotal quantity for beta0, computed here from the stored
# simulation results, overlaid with the t(n-2) density in green.
output$qp1plot=renderPlot({
  req(length(rv$beta0.v)>0)
  par(mar=c(5,1,1,1))
  qp=(rv$beta0.v-input$beta0)/(sqrt(rv$sigma.v)*sqrt(1/input$n+input$xbar^2/Dx()))
  # nintf() chooses the number of histogram bins (defined elsewhere in the app).
  a= hist(qp,n=nintf(qp))
  plot(a,main="",yaxt="n",border="white",col="darkred",xlab=expression(t[1]),freq=FALSE,
       xlim=range(c(qt(c(0.001,0.999),df=input$n-2),qp)),ylim=c(0,max(1.2*dt(0,df=input$n-2),0*a$density)))
  curve(dt(x,df=input$n-2),add=TRUE,col="darkgreen",lwd=2)
})
# Histogram of the simulated pivotal quantity T2 (rv$qp) overlaid with the
# t(n-2) density.  Bug fix: the original passed `xlab` twice to
# plot.histogram (expression(t[2]) and "Sample values of T2"), which raises
# "formal argument 'xlab' matched by multiple actual arguments" and made this
# renderPlot fail on every render; only the descriptive label is kept.
output$qp2plotB=renderPlot({
  req(length(rv$beta0.v)>0)
  par(mar=c(5,1,1,1))
  a= hist(rv$qp,n=nintf(rv$qp))
  plot(a,main="",yaxt="n",border="white",col="darkred",freq=FALSE,
       xlim=range(c(qt(c(0.001,0.999),df=input$n-2),rv$qp)),ylim=c(0,max(1.2*dt(0,df=input$n-2),0*a$density)),
       xlab="Sample values of T2")
  # Theoretical t density for visual comparison.
  curve(dt(x,df=input$n-2),add=TRUE,col="darkgreen",lwd=2)
})
# Strip plot of the simulated T2 values: points outside the two-sided t
# critical bounds (+/- bb) are dark red, the bounds are drawn as vertical
# lines.  rv$qpx supplies jittered y positions in (0, 1).
output$qp2plot=renderPlot({
  req(length(rv$beta0.v)>0)
  par(mar=c(5,1,1,1))
  bb=c(-1)*qt((1-input$livconf)/2,df=input$n-2)
  plot(rv$qp,rv$qpx,col=c("black","darkred")[1+(abs(rv$qp)>bb)],yaxt="n",
       xlim=c(-1,1)*pmax(qt(0.999,df=input$n-2),max(abs(rv$qp))),ylim=c(0,1),
       xlab="Simulated values of T2",
       xaxt="n")
  axis(1,at=c(0,c(-1,1)*round(bb,2)))
  abline(v=c(-1,1)*bb)
},height=200)
# Analogous strip plot for the simulated F statistics: values above the
# one-sided F(2, n-2) critical value bb are dark red.
output$Fplot=renderPlot({
  req(length(rv$beta0.v)>0)
  par(mar=c(5,1,1,1))
  bb=qf(input$livconf,df1=2,df2=input$n-2)
  plot(rv$ftest,rv$ftestx,col=c("black","darkred")[1+(rv$ftest>bb)],yaxt="n",
       xlim=c(0,pmax(qf(0.999,df1=2,df2=input$n-2),max(rv$ftest))),ylim=c(0,1),
       xlab="Simulated values of F",
       xaxt="n")
  axis(1,at=c(0,round(bb,2)))
  abline(v=bb)
},height=200)
}
shinyApp(ui = ui, server = server)
|
b927f0674d083553e55014a09224210035dc8f01 | c1303d49667599235e32afb94f00f68d09eab47f | /process_utilization_data_util.R | f19d55e5501347ba2d981c018b2d88a8471644e8 | [] | no_license | cloudish-ufcg/cloudish-statistics | f037c342a0983bc2b011dba62c1950409480b5c0 | 2742b47a0f962abdf1af00c7762f1f0bb7b64651 | refs/heads/master | 2020-05-21T05:33:43.104740 | 2017-03-15T18:15:15 | 2017-03-15T18:15:15 | 84,579,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,862 | r | process_utilization_data_util.R | # Default interval.size is 5 min
# Return the usage rows whose `time` falls inside interval `interval.index`.
# Interval k covers [k * interval.size, (k + 1) * interval.size]; the default
# size is described in the file as 5 minutes (300000000 time units).
SummariseUsageInterval <- function(usage, interval.index, interval.size = 300000000) {
  window.start <- interval.index * interval.size
  window.end <- (interval.index + 1) * interval.size
  usage %>% filter(between(time, window.start, window.end))
}
# Default interval.size is 5 min
# Summarise host usage per fixed-size interval over the whole interested
# time range: for each interval, the mean of the priority usage columns,
# VM counts and available MIPS, tagged with the interval index and the
# latest timestamp seen.
#
# Improvement over the original: per-interval summaries are accumulated in a
# preallocated list and combined once with do.call(rbind, ...) instead of
# growing a data.frame with rbind inside the loop (quadratic behavior).
ProcessHostUsageData <- function(usage, interval.size = 300000000, max.interested.time = 2506179281020){
  # the number of intervals depends on the size of interval
  max.interval = floor(max.interested.time / interval.size)
  summaries <- vector("list", max.interval + 1)
  for (index in 0:max.interval) {
    intervalUsageEntries <- SummariseUsageInterval(usage, index, interval.size)
    summaries[[index + 1]] <- as.data.frame(intervalUsageEntries %>%
      summarise(interval.index = index, time = max(time), p0Usage = mean(p0Usage), p1Usage = mean(p1Usage),
                p2Usage = mean(p2Usage), p0Vms = mean(p0Vms), p1Vms = mean(p1Vms), p2Vms = mean(p2Vms), availableMips = mean(availableMips)))
  }
  do.call(rbind, summaries)
}
# Mean utilization and mean availability, both expressed as fractions of
# host.capacity.  Returns a one-row summary with columns utilization.mean
# and av.mean.
SummariseUtilization <- function(usage, host.capacity) {
  enriched <- usage %>%
    mutate(total.usage = p0Usage + p1Usage + p2Usage) %>%
    mutate(utilization = total.usage/host.capacity, av = availableMips/host.capacity)
  enriched %>% summarise(utilization.mean = mean(utilization), av.mean = mean(av))
}
# Per-interval mean host utilization and availability, optionally pulling the
# raw usage from a remote source (dplyr tbl) in one shot or in batches of
# `collecting.intervals` intervals.  Within each interval, only the latest
# record per host is kept before averaging.
#
# NOTE(review): `previous` is computed at the end of each iteration but
# `previousUsage` is never reassigned, so the rbind at the top of the loop
# always prepends an empty frame — the carry-over of the last host state
# appears to be unfinished; confirm intent.
# NOTE(review): the batched re-collect filters between(time, next.collecting,
# ...) where `next.collecting` is an interval index, not a time; the initial
# collect multiplies by interval.size — this lower bound looks like it should
# be next.collecting * interval.size.
CollectHostUsage <- function(usage, initial.interval = 0, interval.size = 300000000, max.interested.time = 2506179281020,
                             from.db = T, collecting.intervals = -1) {
  hostUsageSummary <- data.frame()
  # the number of intervals depends on the size of interval
  max.interval = floor(max.interested.time / interval.size)
  previousUsage <- data.frame()
  collectedUsage <- data.frame()
  # Sentinel: with collecting.intervals == -1 everything is fetched up front
  # and the in-loop re-collect never triggers.
  next.collecting <- max.interval + 1
  if (from.db) {
    if (collecting.intervals == -1) {
      print(paste("collecting from DB until interval", max.interval + 1))
      collectedUsage <- usage %>% filter(between(time, 0, (max.interval + 1) * interval.size)) %>% collect(n = Inf)
    } else {
      print(paste("collecting from DB until interval", collecting.intervals))
      collectedUsage <- usage %>% filter(between(time, 0, (collecting.intervals + 1) * interval.size)) %>% collect(n = Inf)
      next.collecting <- collecting.intervals + 1
    }
  }
  for (index in initial.interval:max.interval) {
    print(paste("index=", index))
    if (index == next.collecting) {
      print("collecting more usage from database")
      collectedUsage <- usage %>% filter(between(time, next.collecting, (next.collecting + collecting.intervals + 1) * interval.size)) %>% collect(n = Inf)
      next.collecting <- next.collecting + collecting.intervals + 1
    }
    # Rows belonging to the current interval, plus any carried-over state.
    intervalEntries <- collectedUsage %>% filter(between(time, (index) * interval.size, (index + 1) * interval.size)) %>% collect(n = Inf)
    intervalEntries <- rbind(previousUsage, intervalEntries)
    # Latest record per host within the interval.
    hostUsage <- as.data.frame(intervalEntries %>% group_by(hostId) %>% filter(time == max(time)) %>% mutate(interval.index = index))
    previous <- hostUsage %>% select(hostId, time, usage, vms, priority, availableMips)
    # Sum usage per host, then average across hosts.
    usageMean <- hostUsage %>% group_by(interval.index, hostId) %>% summarise(usage.total = sum(usage), available.total = mean(availableMips)) %>%
      group_by(interval.index) %>% summarise(utilization.mean = mean(usage.total), availability.mean = mean(available.total))
    hostUsageSummary <- rbind(hostUsageSummary, usageMean)
  }
  return(hostUsageSummary)
}
# Variant of CollectHostUsage that additionally groups by VM priority: the
# result has one row per (interval.index, priority) with the mean summed
# usage and mean availability across hosts.  The DB-batching logic is a
# near-duplicate of CollectHostUsage — keep the two in sync if either changes.
#
# NOTE(review): `previousUsage` is declared empty and never updated, so the
# rbind below always prepends an empty frame (same unfinished carry-over as
# in CollectHostUsage).  The batched re-collect also uses `next.collecting`
# (an interval index) as a time lower bound — confirm units.
CollectHostUsageByPriority <- function(usage, initial.interval = 0, interval.size = 300000000, max.interested.time = 2506179281020,
                                       from.db = T, collecting.intervals = -1) {
  hostUsageSummary <- data.frame()
  # the number of intervals depends on the size of interval
  max.interval = floor(max.interested.time / interval.size)
  previousUsage <- data.frame()
  collectedUsage <- data.frame()
  # Sentinel: with collecting.intervals == -1 everything is fetched up front.
  next.collecting <- max.interval + 1
  if (from.db) {
    if (collecting.intervals == -1) {
      print(paste("collecting from DB until interval", max.interval + 1))
      collectedUsage <- usage %>% filter(between(time, 0, (max.interval + 1) * interval.size)) %>% collect(n = Inf)
    } else {
      print(paste("collecting from DB until interval", collecting.intervals))
      collectedUsage <- usage %>% filter(between(time, 0, (collecting.intervals + 1) * interval.size)) %>% collect(n = Inf)
      next.collecting <- collecting.intervals + 1
    }
  }
  for (index in initial.interval:max.interval) {
    print(paste("index=", index))
    if (index == next.collecting) {
      print("collecting more usage from database")
      collectedUsage <- usage %>% filter(between(time, next.collecting, (next.collecting + collecting.intervals + 1) * interval.size)) %>% collect(n = Inf)
      next.collecting <- next.collecting + collecting.intervals + 1
    }
    #collecting interval entries
    intervalEntries <- collectedUsage %>% filter(between(time, (index) * interval.size, (index + 1) * interval.size)) %>% collect(n = Inf)
    intervalEntries <- rbind(previousUsage, intervalEntries)
    # Latest record per host within the interval.
    hostUsage <- as.data.frame(intervalEntries %>% group_by(hostId) %>% filter(time == max(time)) %>% mutate(interval.index = index))
    # Sum usage per (host, priority), then average across hosts per priority.
    usageMean <- as.data.frame(hostUsage %>% group_by(interval.index, hostId, priority) %>% summarise(usage.total = sum(usage), available.total = mean(availableMips)) %>%
      group_by(interval.index, priority) %>% summarise(utilization.mean = mean(usage.total), availability.mean = mean(available.total)))
    hostUsageSummary <- rbind(hostUsageSummary, usageMean)
  }
  return(hostUsageSummary)
}
# Collapse a usage table to one row per interval.index, averaging the
# per-priority usage columns, VM counts and available MIPS.
SummariseUsageByInterval <- function(usage) {
  grouped <- group_by(usage, interval.index)
  summarise(grouped,
            p0Usage = mean(p0Usage), p1Usage = mean(p1Usage), p2Usage = mean(p2Usage),
            p0Vms = mean(p0Vms), p1Vms = mean(p1Vms),
            availableMips = mean(availableMips), p2Vms = mean(p2Vms))
}
bdbc5f2ab3217716635c54e6c8f534ca14082897 | a289946a3b069f7e343e9f54590989926ebe7f32 | /man/angle_from_positions.Rd | 3e6e57ef070b6b0c3d8e2296d13bdcc5cb97f83c | [] | no_license | hejtmy/navr | ffdfc1e9e5a6b2c54c35d026a34e39c4a3f9a55b | 2bcc4eba8dbed2d1cb4835139966770cfc6b64cb | refs/heads/master | 2023-03-08T16:49:38.487959 | 2023-03-01T02:21:56 | 2023-03-01T02:21:56 | 119,145,726 | 0 | 0 | null | 2020-03-08T04:06:41 | 2018-01-27T08:10:24 | R | UTF-8 | R | false | true | 575 | rd | angle_from_positions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/navr-angle-calculations.R
\name{angle_from_positions}
\alias{angle_from_positions}
\title{Calculates angle from two 2d positions}
\usage{
angle_from_positions(pos_from, pos_to, zero_vec = c(0, 1))
}
\arguments{
\item{pos_from}{numeric(2) vector of original position}
\item{pos_to}{numeric(2) vector of position towards the target}
\item{zero_vec}{defines which axis should correspond to 0 degrees. defaults to c(0,1) (Y axis)}
}
\value{
A numeric value: the angle of the vector pointing from \code{pos_from} to
\code{pos_to}, measured relative to \code{zero_vec} (which corresponds to
0 degrees).
}
\description{
Calculates angle from two 2d positions
}
|
67b12ff114c658311d3e8c9d7c77c7afa7d9765d | 1c319230bfa1064066b2930f329dc4a82437799a | /man/WEE.quantile.Rd | bd1a5760e7c61e5857aefcd38896ebea04c2ef6b | [] | no_license | cran/WEE | 73771c2eae919326fea00f916a79ce2d4ecc4b17 | 3c83105698ce059e6175d6aa4353601ade70365f | refs/heads/master | 2020-07-19T14:01:56.833280 | 2016-11-15T01:19:43 | 2016-11-15T01:19:43 | 73,762,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,575 | rd | WEE.quantile.Rd | \name{WEE.quantile}
\alias{WEE.quantile}
\title{
WEE quantile regression
}
\description{
Returns an object of class "WEE.quantile" that is generated by quantile regression with WEE approach for continuous secondary traits in genetic case-control studies.
}
\usage{
WEE.quantile(formula, D, data, pd_pop, tau, iter = 5, boot = 0, ...)
}
\arguments{
\item{formula}{
The secondary trait given SNPs and covariates. e.g. y~x+z
}
\item{D}{
Primary disease (case-control status), must be specified.
}
\item{data}{
Dataset with the observed (real) data.
}
\item{pd_pop}{
The population disease prevalence of the primary disease.
}
\item{tau}{
The quantile level to be estimated. Multiple taus can be chosen.
}
\item{iter}{
Number of times pseudo observations are generated. (iter = 5 by default, as in the usage above)
}
\item{boot}{
Number of bootstrap samples. (boot = 0 by default)
}
\item{\dots}{
Optional arguments to be passed through to rq.
}
}
\details{
The quantile regression package "quantreg" is required before calling this function
}
\value{
\item{Coefficients}{Point estimates}
\item{StdErr}{Bootstrap standard errors, returned if boot > 0}
\item{Wald}{Wald test statistics, returned if boot > 0}
\item{p.value}{p-values, returned if boot > 0}
\item{Covariance}{Covariance matrix, returned if boot > 0}
}
\section{Warning}{
If boot = 0, point estimates are plotted. If boot > 0, bootstrap standard errors, Wald test statistics, p-values, and covariance matrix are also returned.
Optional arguments from rq can be passed to this function, but arguments 'subset' and 'weights' should be used with caution.
}
\examples{
## Generate simulated data
# set population size as 500000
n = 500000
# set parameters
beta = c(0.12, 0.1) # P(Y|X,Z)
gamma = c(-4, log(1.5), log(1.5), log(2) ) #P(D|X,Y,Z)
# generate the genetic variant X
x = rbinom(n,size=2,prob=0.3)
# generate the continuous covariate Z
z = rnorm(n)
# generate the continuous secondary trait Y
y= 1 + beta[1]*x + beta[2]*z + (1+0.02*x)*rnorm(n)
# generate disease status D
p = exp(gamma[1]+x*gamma[2]+z*gamma[3]+y*gamma[4])/
(1+exp(gamma[1]+x*gamma[2]+z*gamma[3]+y*gamma[4]))
d = rbinom(n,size=1,prob=p)
# form population data dataset
dat = as.data.frame(cbind(x,y,z,d))
colnames(dat) = c("x","y","z","d")
# Generate sample dataset with 200 cases and 200 controls
dat_cases = dat[which(dat$d==1),]
dat_controls= dat[which(dat$d==0),]
dat_cases_sample = dat_cases[sample(sum(dat$d==1),
200, replace=FALSE),]
dat_controls_sample = dat_controls[sample(sum(dat$d==0),
200, replace=FALSE),]
dat_quantile = as.data.frame(rbind(dat_cases_sample,
dat_controls_sample))
colnames(dat_quantile) = c("x","y","z","D")
D = dat_quantile$D # Disease status
pd = sum(d==1)/n # population disease prevalence
# WEE quantile regressions:
WEE.quantile(y ~ x, D, tau = 0.5,
data = dat_quantile, pd_pop = pd)
\donttest{
WEE.quantile(y ~ x + z, D, tau = 1:9/10,
data = dat_quantile, pd_pop = pd, boot = 500)
}
}
\references{
[1] Ying Wei, Xiaoyu Song, Mengling Liu, Iuliana Ionita-Laza and Joan Reibman
(2016). Quantile Regression in the Secondary Analysis of Case Control Data. \emph{Journal of the American Statistical Association}, 111:513, 344-354; DOI: 10.1080/01621459.2015.1008101
[2] Xiaoyu Song, Iuliana Ionita-Laza, Mengling Liu, Joan Reibman, Ying Wei (2016). A General and Robust Framework for Secondary Traits Analysis. \emph{Genetics}, vol. 202 no. 4 1329-1343; DOI: 10.1534/genetics.115.181073
}
|
9904af16d71dd60ffbbbab0ff929819dce221f11 | 297cd2ac4f06548e569daf5325c44e66cd3578a0 | /assesment of model.R | 2011903e60ba89d0a5d2395fb6eaede123663004 | [] | no_license | wanghan0501/DPLRadiomics | 7bfe77c8f10a4742647661b9e274571f26c052d1 | b671701dfca10fbe49de7adff912dd563d4585ac | refs/heads/main | 2023-09-03T13:47:37.131400 | 2021-10-29T10:03:33 | 2021-10-29T10:03:33 | 422,521,537 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,334 | r | assesment of model.R | #library packages
library(rms)
library(calibrate)
library(export)
#calibration curve#
# All three calibration fits use the same 20 deep-learning (DPL) features;
# the formula is defined once so the three models cannot drift apart.
# Other fixes: T -> TRUE (T is reassignable), and library(openxlsx) is
# attached explicitly before read.xlsx (its `sheet` argument) is used.
dpl.formula <- Label~DPL15+DPL73+DPL230+DPL67+DPL317+DPL461+DPL329+DPL475+DPL361+DPL183+DPL504+DPL60+DPL394+
  DPL489+DPL155+DPL228+DPL473+DPL215+DPL212+DPL392
# Training cohort: fit, bootstrap-calibrate, plot, export to PowerPoint.
train <- read.csv("train.csv")
f1 <- lrm(dpl.formula, x = TRUE, y = TRUE, data = train)
cal1 <- rms::calibrate(f1, cmethod = "KM", method = "boot", m = 115, B = 1000, legend = FALSE)
plot(cal1, lwd = 2, lty = 1, xlab = " Predicted", ylab = list("Actual "), xlim = c(0, 1), ylim = c(0, 1))
export::graph2ppt(file = './339DPL-5612.1se calibration curve train.pptx', width = 5, height = 5, append = TRUE)
# Test cohort.
test <- read.csv("test.csv")
f2 <- lrm(dpl.formula, x = TRUE, y = TRUE, data = test)
cal2 <- rms::calibrate(f2, cmethod = "KM", method = "boot", m = 115, B = 1000, legend = FALSE)
plot(cal2, lwd = 2, lty = 1, xlab = " Predicted", ylab = list("Actual "), xlim = c(0, 1), ylim = c(0, 1))
export::graph2ppt(file = './100DPL-5612.1se calibration curve test.pptx', width = 5, height = 5, append = TRUE)
# External validation cohort (84 cases, sheet 5 of the radiomics workbook).
library(openxlsx)
valid84 <- read.xlsx("106radiomic-Label is metastasis.xlsx", sheet = 5)
f3 <- lrm(dpl.formula, x = TRUE, y = TRUE, data = valid84)
cal3 <- rms::calibrate(f3, cmethod = "KM", method = "boot", m = 115, B = 1000, legend = FALSE)
plot(cal3, lwd = 2, lty = 1, xlab = " Predicted", ylab = list("Actual "), xlim = c(0, 1), ylim = c(0, 1))
export::graph2ppt(file = './84DPL-5612.1se calibration curve valid.pptx', width = 5, height = 5, append = TRUE)
#decision curve#
library(rmda)
# Decision curve for the DPL-only model on the training cohort.
train<-read.csv("train.csv")
DPL<- decision_curve(Label~DPL15+DPL73+DPL230+DPL67+DPL317+DPL461+DPL329+DPL475+DPL361+DPL183+DPL504+DPL60+DPL394+
                       DPL489+DPL155+DPL228+DPL473+DPL215+DPL212+DPL392,data = train,
                     family = binomial(link ='logit'), thresholds = seq(0,1, by = 0.01),
                     confidence.intervals= 0.95,study.design = 'case-control',
                     population.prevalence= 0.3)
plot_decision_curve(DPL,curve.names= c('complex'),
                    cost.benefit.axis =FALSE,col = c('red'),
                    confidence.intervals =FALSE,standardize = FALSE)
# Combined model: DPL score plus the CEA and CA199 serum markers.
# NOTE(review): read.xlsx requires openxlsx (the `sheet` argument matches
# its signature) — confirm it is attached before this section runs.
traincom<-read.xlsx("train+CEA+CA199.xlsx",sheet=3)
combined<- decision_curve(Label ~ DPL.score+CEA+CA199,data = traincom,
                          family = binomial(link ='logit'), thresholds = seq(0,1, by = 0.01),
                          confidence.intervals= 0.95,study.design = 'case-control',
                          population.prevalence= 0.3)
plot_decision_curve(combined,curve.names= c('Combined'),
                    cost.benefit.axis =FALSE,col = c('red'),
                    confidence.intervals =FALSE,standardize = FALSE)
#plot two decision curves
# NOTE(review): `List` is assigned but never used; the plot call below builds
# the list inline.
List<- list(DPL,combined)
plot_decision_curve(list(DPL,combined),
                    curve.names = c("DPL model", "Combined model"),
                    confidence.intervals = FALSE,
                    cost.benefit.axis = FALSE,
                    lwd = 1.2,
                    col = c("blue", "red"),
                    cex.lab = 1.5,
                    cex.axis = 1.5,
                    mgp = c(4,1,0))
graph2ppt(file = 'decision curve DPL and combined.pptx', width = 5, height =5, append = T)
735a3f75b25966b496b03d8f2f79baace162d6ee | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/selectMeta/examples/pPool.Rd.R | 5503e5f8d935efc584af1c2dde613644a9de0c74 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 229 | r | pPool.Rd.R | library(selectMeta)
### Name: pPool
### Title: Pool p-values in pairs
### Aliases: pPool
### Keywords: distribution htest nonparametric
### ** Examples
# This function is used in the help file for the function DearBegg().
|
5d2daf6c373efc7e7eefbb762519ca3416bb21d5 | 43c714b49291d0781209b8a2ef7965481906d866 | /man/V.PS.nov1.Rd | a0d19bb54cdeaaefc78d66bc28b973b154368187 | [
"MIT"
] | permissive | dosreislab/mcmc3r | b61afeae325695a5ba2a8df9140bbce317bfc682 | 918806995085c279941ba433bd87b404222608ad | refs/heads/master | 2023-05-13T09:55:37.203740 | 2023-05-03T00:48:21 | 2023-05-03T00:48:21 | 108,122,699 | 4 | 3 | MIT | 2023-01-30T11:03:05 | 2017-10-24T12:20:30 | R | UTF-8 | R | false | true | 609 | rd | V.PS.nov1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/V.PS.nov1.R
\docType{data}
\name{V.PS.nov1}
\alias{V.PS.nov1}
\title{Object of class array output by Morpho after PA}
\format{
Object procSym
\describe{
\item{...}{Check \code{\link[Morpho]{procSym}} for more details}
}
}
\usage{
V.PS.nov1
}
\description{
Object of class array output by Morpho after being aligned to the
mean shape of the 19 carnivoran species previously generated (object "C.PS").
Please take a look at the description in morpho/data-raw/V.R to understand
how this object was generated.
}
\keyword{datasets}
|
cfcab1cfa6edc2b2e8f4b342dc463837c3e2fa5f | b5b9a757a04a083e6cea43832a5af7d9b5ecfb54 | /hw4.R | ac286bbfec992b1cc3b0dc99ce0a6c84232d449d | [] | no_license | nockbarry/ASM-580-Stasitical-Learning | 8a00d6a5d89cc2c571a0e556494d5bb89ebf4b13 | aca13ae7ca9848ebdddd2d767ea1f14436f8b14f | refs/heads/main | 2023-07-07T19:28:19.289453 | 2021-08-16T17:26:12 | 2021-08-16T17:26:12 | 396,868,392 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,661 | r | hw4.R | library(tidyverse)
library(caret)
library(randomForest)
# Load the Titanic data and drop identifier-like columns that carry no
# predictive signal for survival.
data <- read.csv('Titanic.csv')
data <- subset(data, select = -c(PassengerId,Name,Ticket,Cabin))
# Remember we have added "PassengerId" into the removal list
# Keep only rows with a known Age, and make the target a factor so caret
# treats this as classification.
data <- subset(data, is.na(Age) == FALSE)
data$Survived <- as.factor(data$Survived)
str(data)
dim(data)[1] #714 passengers
# Stratified 75/25 train/test split on the outcome.
set.seed(123)
split <- data$Survived %>%
  createDataPartition(p = 0.75, list = FALSE)
train <- data[split,]
test <- data[-split,]
str(train) #536
str(test) #178
#Q2
# Random forest via caret with 10-fold cross-validation; importance = TRUE
# stores variable importance for the varImpPlot calls below.
set.seed(123)
model <- train( Survived ~., data = train, method = "rf",
                trControl = trainControl("cv", number = 10),
                importance = TRUE )
# Best tuning parameter
model$bestTune
model$finalModel
cm = model$finalModel$confusion
# Sensitivity, specificity and accuracy from a 2x2 confusion matrix laid out
# as table(actual, predicted), so in column-major order the cells are
# [1] TN, [2] FN, [3] FP, [4] TP.  Returns a named numeric vector.
my.ssa <- function(cm){
  tn <- cm[1]
  fn <- cm[2]
  fp <- cm[3]
  tp <- cm[4]
  c(sens = tp / (fn + tp),                    # true positive rate
    specif = tn / (tn + fp),                  # true negative rate
    accu = (tn + tp) / (tn + fn + fp + tp))   # overall agreement
}
# Out-of-bag performance of the CV-tuned forest, then variable importance.
my.ssa(cm)
varImpPlot(model$finalModel,type = 1)
varImpPlot(model$finalModel,type = 2)
varImp(model)
#Q3
# Held-out test-set performance from the confusion table of predictions.
pred <- model %>% predict(test)
cm = table(test$Survived,pred)
cm
my.ssa(cm)
#Q4
# Plot MeanDecreaseAccuracy
varImpPlot(model$finalModel, type = 1)
# Plot MeanDecreaseGini
varImpPlot(model$finalModel, type = 2)
#Q5
varImp(model)
#Q6
# Default mtry for classification is sqrt(p); with 36 candidate predictors
# that is 6.
sqrt(36)
# Different approach below: caTools split + randomForest called directly
# (instead of caret::train with cross-validation).
library(caTools)
set.seed(123)
split <- sample.split(data$Survived, SplitRatio = 0.75)
train <- data[split,]
test <- data[!split,]
str(train) #536
str(test) #178
fit <- randomForest(Survived ~ ., data = train, importance = TRUE)
fit
cm = fit$confusion
my.ssa(cm)
|
5161499c6f1b7e4f77fc1569e4928d35aeb7f712 | b15fdcb6d9cd5da486da4c6631157fb84be184ce | /plot2.R | 3fb3c7839871031880d1ab98525c82ecad08d61d | [] | no_license | bilklo/Exploratory-Data-Analysis | d0b528436ee8dbe314d70b864b515c0a93a10be3 | 427d5850bde69ec8dc54723114cb09de624d3908 | refs/heads/master | 2021-01-01T06:50:20.851212 | 2015-03-22T11:59:28 | 2015-03-22T11:59:28 | 32,668,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 581 | r | plot2.R | plot2 <- function()
{
  # Load the national emissions inventory (NEI); SCC is read for parity with
  # the assignment's other plots but is not used in this figure.
  NEI <- readRDS("summarySCC_PM25.rds")
  SCC <- readRDS("Source_Classification_Code.rds")
  # Keep only Baltimore City records (fips == "24510").  Base subsetting is
  # used so the function no longer silently depends on dplyr being attached,
  # and the local name no longer shadows select().
  baltimore <- NEI[NEI$fips == "24510", ]
  # Total emissions per year from all sources, with an explicit column name
  # instead of the fragile positional "..1" column produced by the old
  # unnamed ddply/summarise call (which also required plyr to be attached).
  summOfEmissions <- aggregate(Emissions ~ year, data = baltimore, FUN = sum)
  # Create the plot in a png file.
  png("plot2.png", width = 480, height = 480, bg = "transparent")
  plot(summOfEmissions$year, summOfEmissions$Emissions, type = 'l',
       ylab = "Total Emissions", xlab = "Years")
  # Close the graphics device so the file is flushed to disk.
  dev.off()
}
acfe8d9748a51a29d5ad28549937e9e309caee63 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/nws/examples/nwsFetchTry-methods.Rd.R | d9a0d1a9f11a2a56f534bb58460fc8a9865f7889 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 530 | r | nwsFetchTry-methods.Rd.R | library(nws)
### Name: nwsFetchTry
### Title: Fetch a Value from a NetWorkSpace (Non-Blocking Version)
### Aliases: nwsFetchTry nwsFetchTry-methods
### nwsFetchTry,netWorkSpace-method
### Keywords: methods
### ** Examples
## Not run:
##D ws <- netWorkSpace('nws example')
##D # If variable 'x' is not found in the shared netWorkSpace,
##D # return default value, NULL.
##D nwsFetchTry(ws, 'x')
##D # If variable 'x' is not found in the shared netWorkSpace,
##D # return 10.
##D nwsFetchTry(ws, 'x', 10)
## End(Not run)
|
f86be5439d7157383eb115bd41ece5950c539dc8 | 023131dc04e74ed662b892099996fc3972de093a | /Paper Draft/remake_figures/Fig5/distance_deposition/distance_deposition.R | acbeb2cceaef76fdabab29bcccb1bf5209e74116 | [
"MIT"
] | permissive | Broccolito/LAPDmouse_Analysis | 13614d4efe1d68b0d77a14f6ccc3fc196a2139cc | 8aef0b295f2272d478066a4003fe28b7db57bf13 | refs/heads/master | 2022-11-28T20:20:51.622353 | 2020-08-04T08:51:33 | 2020-08-04T08:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,125 | r | distance_deposition.R | rm(list = ls())
# Close any open graphics devices and trigger garbage collection before the run.
graphics.off()
gc()
library(ggplot2)
library(dplyr)
library(rgl)
get_directory = function(){
  # Locate the directory of the currently running script, whether it is open
  # in RStudio, invoked via `Rscript --file=...`, or loaded with source().
  cmd_args <- commandArgs(trailingOnly = FALSE)
  file_flag <- "--file="
  # Interactive RStudio session: ask the IDE for the active document's path.
  if (length(grep("RStudio", cmd_args)) > 0) {
    return(dirname(rstudioapi::getSourceEditorContext()$path))
  }
  # Rscript invocation: the script path follows the --file= flag.
  file_hits <- grep(file_flag, cmd_args)
  if (length(file_hits) > 0) {
    script_path <- sub(file_flag, "", cmd_args[file_hits])
    return(dirname(normalizePath(script_path)))
  }
  # Fallback for source(): the sourcing frame records the file path.
  dirname(normalizePath(sys.frames()[[1]]$ofile))
}
find_projection = function (p, l1 = NULL, l2 = NULL){
  # Orthogonal projection of point(s) p onto the infinite line through l1 and l2.
  #
  # p  : numeric vector (one point) or matrix (one point per row), 2D or 3D.
  # l1 : a point on the line, or a list describing the line:
  #        list(l1 =, l2 =)     two points on the line;
  #        list(m =, b =)       slope/intercept form y = m*x + b (2D);
  #        list(a =, b =, c =)  general form a*x + b*y + c = 0 (2D).
  # l2 : second point on the line (ignored when l1 is a list).
  #
  # Returns the projected point(s) with the same shape and dimension as p,
  # or NA when the two line points coincide (degenerate line).
  if(is.list(l1)){
    if("l1" %in% names(l1)){
      l2 = l1$l2
      l1 = l1$l1
    }
    if("m" %in% names(l1)){
      # slope/intercept: use the two points (0, b) and (1, m + b)
      l2 = c(1, l1$m + l1$b)
      l1 = c(0, l1$b)
    }
    if("a" %in% names(l1)){
      # general form: points on the line at x = 0 and x = 1
      l2 = c(1, -(l1$a + l1$c)/l1$b)
      l1 = c(0, -l1$c/l1$b)
    }
  }
  if(is.vector(p)){
    p = matrix(p, nrow = 1, ncol = length(p))
    return_vector = TRUE
  }
  else {
    return_vector = FALSE
  }
  return_2d = FALSE
  if(ncol(p) == 2){
    # pad 2D input with a zero z-coordinate so one 3D code path serves both
    p = cbind(p, rep(0, nrow(p)))
    l1 = c(l1, 0)
    l2 = c(l2, 0)
    return_2d = TRUE
  }
  if(is.null(l2))
    l2 = 2 * l1
  # Degenerate line (both points coincide): projection is undefined.
  # Hoisted out of the loop — it does not depend on the loop variable.
  if(sum((l2 - l1)^2) == 0)
    return(NA)
  r = matrix(NA, nrow = nrow(p), ncol = ncol(p))
  for (i in seq_len(nrow(p))){
    # scalar position of the foot of the perpendicular along l1 -> l2
    u = sum((p[i, ] - l1) * (l2 - l1))/sum((l2 - l1)^2)
    r[i, ] = l1 + u * (l2 - l1)
  }
  if(return_vector && return_2d)
    return(as.vector(r)[1:2])
  if(return_vector && !return_2d)
    return(as.vector(r))
  if(!return_vector && return_2d)
    return(r[, 1:2])
  # BUG FIX: the original had no branch for 3D matrix input and fell through,
  # returning NULL; return the full projection matrix instead.
  r
}
get_depo_dist = function(sample_id){
  # For one sample (e.g. "m01"), read the airway tree and near-acini deposition
  # tables and return one row per deposition site with its distance from the
  # tracheal bifurcation, measured along the axis through the bifurcation point
  # and the averaged left/right main-bronchus endpoints.
  #
  # sample_id : character sample code substituted into the CSV paths.
  # Returns   : data.frame with columns id, depo, distance, centroidX/Y/Z.
  aw_wd = gsub("mxx", sample_id, "CSV_Data/mxx/mxx_AirwayTreeTable.csv")
  dp_wd = gsub("mxx", sample_id, "CSV_Data/mxx/mxx_NearAciniDeposition.csv")
  aw = read.csv(aw_wd)
  dp = read.csv(dp_wd)
  # Left/right main bronchi: children of the trachea (parent == 1).
  lmb = filter(aw, parent==1, name=="LMB")
  rmb = filter(aw, parent==1, name=="RMB")
  # Proximal end of each main bronchus = centroid minus half its length along
  # its direction vector; the two ends should nearly coincide at the carina.
  itxl = lmb$centroidX - 0.5*lmb$length*lmb$directionX
  ityl = lmb$centroidY - 0.5*lmb$length*lmb$directionY
  itzl = lmb$centroidZ - 0.5*lmb$length*lmb$directionZ
  itxr = rmb$centroidX - 0.5*rmb$length*rmb$directionX
  ityr = rmb$centroidY - 0.5*rmb$length*rmb$directionY
  itzr = rmb$centroidZ - 0.5*rmb$length*rmb$directionZ
  # BUG FIX: the original called mean(itxl, itxr), which passes the right-
  # bronchus coordinate as mean()'s `trim` argument and therefore averages
  # nothing (only the left value was used). Average the pair explicitly.
  itx = mean(c(itxl, itxr))
  ity = mean(c(ityl, ityr))
  itz = mean(c(itzl, itzr))
  # Axis endpoint: step the shorter of the two bronchus lengths down each
  # bronchus direction from the bifurcation, then average the two endpoints.
  l = min(c(lmb$length, rmb$length))
  lx = itx + l*lmb$directionX
  ly = ity + l*lmb$directionY
  lz = itz + l*lmb$directionZ
  rx = itx + l*rmb$directionX
  ry = ity + l*rmb$directionY
  rz = itz + l*rmb$directionZ
  x1 = mean(c(lx, rx))
  y1 = mean(c(ly, ry))
  z1 = mean(c(lz, rz))
  # Distance of each deposition site: project its centroid onto the axis and
  # take the Euclidean distance from the bifurcation to that projection.
  # Preallocated instead of growing with c() inside the loop.
  dis = numeric(nrow(dp))
  for(i in seq_len(nrow(dp))){
    depo = dp[i, ]
    projection = find_projection(p = c(depo$centroidX, depo$centroidY, depo$centroidZ),
                                 l1 = list(l1 = c(itx, ity, itz), l2 = c(x1, y1, z1)))
    dis[i] = ((projection[1]-itx)^2 + (projection[2]-ity)^2 + (projection[3]-itz)^2)^0.5
  }
  # Assemble the per-site table; `mean` is the deposition column of dp,
  # renamed to `depo` in the output.
  depo_distro = select(dp, mean, centroidX, centroidY, centroidZ) %>%
    mutate(distance = dis) %>%
    mutate(id = sample_id) %>%
    mutate(depo = mean) %>%
    select(id, depo, distance, centroidX, centroidY, centroidZ)
  return(depo_distro)
}
# Run the per-sample distance extraction for all 34 mice and write one CSV.
wd = get_directory()
setwd(wd)
# Sample ids m01..m34 (zero-padded below 10 to match the folder names).
samples = c(paste0("m0",seq(1,9)),paste0("m",seq(10,34)))
depo_distro_overall = vector()
# NOTE(review): growing the data frame with rbind inside the loop is O(n^2);
# acceptable for 34 samples but worth replacing with a list + bind_rows.
for(s in samples){
depo_distro_overall = rbind.data.frame(depo_distro_overall, get_depo_dist(s))
cat(paste0("Sample ", s, " Processed ..\n"))
}
write.csv(depo_distro_overall,"depo_distro_overall.csv",quote = FALSE,row.names = FALSE)
e3b88153a14e95107c6101d47f818fa2890784d6 | de2e025edb41e8acf2efef84b0deae4d5a5cf3a3 | /src/multiNMF_residue_comodule.R | 91a173be890d356cb01e3df78ee60a55cb0314d4 | [] | no_license | ThallingerLab/IamComparison | 4ec0af83ec9b3708ef8016c9e6bec75cccfd284f | 52f48eecef51addd9564946145290f79b1782809 | refs/heads/master | 2021-09-10T11:26:34.881433 | 2018-03-25T16:28:24 | 2018-03-25T16:28:24 | 114,128,981 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 958 | r | multiNMF_residue_comodule.R | multiNMF_residue_comodule = function(Xlist, K, nloop, maxiter){
# Multi-restart wrapper around multiNMF_mm: run the factorization `nloop`
# times and keep the W/H set with the lowest squared residual.
# Returns list(W = ..., H1 = ..., H2 = ...).
verbose = 1
# nloop = 100
# maxiter = 1000
ni = lapply(Xlist, nrow); mi = lapply(Xlist, ncol)
# All matrices in Xlist are assumed to share the same row count n.
n = ni[[1]]
bestW = matrix(0, nrow = n, ncol = K)
# One zero H matrix (K x m_i) per input matrix.
# NOTE(review): mapply with default SIMPLIFY=TRUE may collapse this to an
# array when all m_i are equal — confirm the callers handle both shapes.
bestHlist = mapply(function(X,m) { matrix(0, nrow=K, ncol=m)}, Xlist, mi)
names(bestHlist) = paste0("H", 1:length(bestHlist))
bestobj = rep(1000000000, length(bestHlist))
for (iloop in 1:nloop) {
if(verbose) {
cat("iteration ", iloop, "\n")
}
speak = 1
WHlist = multiNMF_mm(Xlist, K, maxiter, speak)
# compute residue: squared Frobenius error of each reconstruction X - W H
newobj = mapply(function(X,H) { sum((X-WHlist$W%*%H)^2) },
Xlist, WHlist[grep("H", names(WHlist))])
# Keep this restart if ANY matrix improved its residual.
if (any(mapply("<", newobj, bestobj))) {
bestobj = newobj
bestW = WHlist$W
bestHlist = WHlist[grep("H", names(WHlist))]
}
}
W = bestW; Hlist = bestHlist
return(c(W = list(W), Hlist))
}
|
6653fd17948e61f7a7c7151013a7fe3010a2b189 | 04c7f9dbc8447b6626b0b96cd6d9f84608213e95 | /knn sucess.R | 6dd46059b136a2625fd1d5b6c7c46e1cd37d180b | [] | no_license | Loshiga/LungCancerDetection_DataAnalytics | 8edc548709b6aa02d236d38ce0ceb1a93b56c1af | 1dedfb7f22ebcfafff54f830534db00b2b9b4ce2 | refs/heads/main | 2023-04-27T01:28:06.564923 | 2021-05-17T20:32:22 | 2021-05-17T20:32:22 | 366,520,650 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,227 | r | knn sucess.R | cancer <- read.csv("~/Desktop/LosProject/cancer-patient-data-sets.csv")
View(cancer)
dim(cancer)
table(cancer$Level)
x = subset(cancer, select = -c(Patient.Id,Level))
View(x)
dim(x)
data_norm <- function(x) {
  # Min-max normalisation of a numeric vector to [0, 1].
  # BUG FIX: the original wrote `return((x) - min(x))/(max(x) - min(x))`;
  # return() exits the function immediately, so the division was never
  # executed and values were only shifted, never scaled.
  (x - min(x)) / (max(x) - min(x))
}
data_norm
#wdbc =
#data(wdbc)
# Normalise columns 2:24 (numeric predictors) to [0, 1] for kNN.
wdbc_norm <-as.data.frame(lapply(cancer[2:24], data_norm))
View(wdbc_norm)
summary(wdbc_norm[,2:5])
#summary(wdbc_norm[,1:4])
# 80/20 train/test split on row indices.
smp_size <- floor(0.8*nrow(cancer))
set.seed(123)
train_ind<-sample(seq_len(nrow(cancer)),size = smp_size)
cancer_train<-wdbc_norm[train_ind,]
cancer_test<-wdbc_norm[-train_ind,]
dim(cancer_train)
dim(cancer_test)
# Column 25 of the raw table is the class label (Level).
cancer_train_labels<-cancer[train_ind,25]
cancer_test_labels<-cancer[-train_ind,25]
length(cancer_train_labels)
length(cancer_test_labels)
library(class)
# k = 21 nearest neighbours on the normalised predictors.
wdbc_test_pred <-knn(train = cancer_train,test = cancer_test, cl = cancer_train_labels,k=21)
length(cancer_train)
#install.packages("gmodels")
library(gmodels)
library(ggplot2)
library(caret)
#table(wdbc_pred,cancer[8001:10000,1])
# Both vectors must be factors with matching levels for confusionMatrix.
cancer_test_labels=factor(c(cancer_test_labels))
str(cancer_test_labels)
str(wdbc_test_pred)
confusionMatrix(wdbc_test_pred,cancer_test_labels)
CrossTable(x= cancer_test_labels, y = wdbc_test_pred, prop.chisq = FALSE)
12e9f796a2b6268a00f475685141e43a92ea3a8b | a481c6b19b700981a7ea880868f0f3e445fe3488 | /Cleaning/PHCleaning15.R | 84b0543e921b5f34a86cee732779d0c8a20bf16a | [] | no_license | MarianneLawless/Irish-Suicide-Data-Analyis | f2a26d99f805834fa372382f4856318731f359ac | d23610318ceccba1032319428e5d01d34fbd8024 | refs/heads/master | 2023-03-17T18:01:35.797447 | 2019-05-14T10:49:10 | 2019-05-14T10:49:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,704 | r | PHCleaning15.R | #set directory
# --- Gender 2015: load, drop trailing row, rename, date-stamp, save ---
setwd("C:/Users/Alexander/OneDrive - National College of Ireland/4th Year/4thYR Project/Datasets/Final Code/DATA - CSO&PIETAHOUSE/PietaHouse")
#load csv
Gender2015 <- read.csv("2015Gender.csv", stringsAsFactors = FALSE, header = TRUE, sep = ",")
#print dataframe
print(Gender2015)
# Count NA's per column.
sapply(Gender2015,function(x) sum(is.na(x)))
#Delete Columns or Rows not needed
ncol(Gender2015)
# Drop row 4 (presumably a totals/blank row — confirm against the raw CSV).
nrow(Gender2015)
Gender2015 <-Gender2015[-4,]
# Rename the unlabelled first column to Gender.
names(Gender2015)[names(Gender2015) == "X"] <- "Gender"
# Add the reporting year as a constant column (3 remaining rows).
Gender2015$Date <- c(2015, 2015, 2015)
#check class of columns
class(Gender2015$Gender)
class(Gender2015$Ballyfermot)
class(Gender2015$North.Dublin)
class(Gender2015$Lucan)
class(Gender2015$Limerick)
class(Gender2015$Tallaght)
class(Gender2015$Roscrea)
class(Gender2015$West)
class(Gender2015$Tralee)
class(Gender2015$Kerry)
#display data
print(Gender2015)
#save to new csv
write.csv(Gender2015, 'Gender2015Cleaned.csv', row.names = FALSE)
#######################
# --- Age 2015: same cleaning pipeline as the Gender section ---
#set directory
setwd("C:/Users/Alexander/OneDrive - National College of Ireland/4th Year/4thYR Project/Datasets/Final Code/DATA - CSO&PIETAHOUSE/PietaHouse")
#load csv
Age2015 <- read.csv("2015Age.csv", stringsAsFactors = FALSE, sep = ",")
#print dataframe
print(Age2015)
# Count NA's per column.
sapply(Age2015,function(x) sum(is.na(x)))
#Delete Columns or Rows not needed
ncol(Age2015)
# Drop row 6 (trailing row not needed — confirm against the raw CSV).
nrow(Age2015)
Age2015 <-Age2015[-6,]
# Rename the unlabelled first column to Age.
names(Age2015)[names(Age2015) == "X"] <- "Age"
# Add the reporting year (5 remaining rows).
Age2015$Date <- c(2015, 2015, 2015, 2015, 2015)
#check class of columns
class(Age2015$Age)
class(Age2015$Ballyfermot)
class(Age2015$North.Dublin)
class(Age2015$Lucan)
class(Age2015$Limerick)
class(Age2015$Tallaght)
class(Age2015$Roscrea)
class(Age2015$West)
class(Age2015$Tralee)
class(Age2015$Kerry)
class(Age2015$Date)
#display data
print(Age2015)
#save to new csv
write.csv(Age2015, 'Age2015Cleaned.csv', row.names = FALSE)
############################
# --- Cause 2015: same cleaning pipeline; see legend below for cause codes ---
setwd("C:/Users/Alexander/OneDrive - National College of Ireland/4th Year/4thYR Project/Datasets/Final Code/DATA - CSO&PIETAHOUSE/PietaHouse")
#load csv
Cause2015 <- read.csv("2015Cause.csv", stringsAsFactors = FALSE, sep = ",")
#print dataframe
print(Cause2015)
#########################################################
# Table Legend for cause of suicide
#
# DSH = Deliberate Self-Harm
# SA/DSH = Suicide Attempt and Deliberate Self-Harm
# SA Only = Suicide Attempt Only
# SI/SA/DSH = Suicidal Ideation, Suicidal Attmpt, and Deliberate Self-Harm
# SI/DSH = Suicidal Ideation and Deliberate Self-Harm
# SI/SA = Suicidal Ideation and and Suicide Attempt
# SI Only = Suicidal Ideation Only
# DNAA = Did Not Attend Assessment
#
#########################################################
# Count NA's per column.
sapply(Cause2015,function(x) sum(is.na(x)))
#Delete NA's/Columns or Rows not needed
ncol(Cause2015)
# Drop row 9 (trailing row not needed — confirm against the raw CSV).
nrow(Cause2015)
Cause2015 <-Cause2015[-9,]
# Rename the unlabelled first column to Cause.
names(Cause2015)[names(Cause2015) == "X"] <- "Cause"
# Add the reporting year (8 remaining rows).
Cause2015$Date <- c(2015, 2015, 2015, 2015, 2015, 2015, 2015, 2015)
#check class of columns
class(Cause2015$Cause)
class(Cause2015$Ballyfermot)
class(Cause2015$North.Dublin)
class(Cause2015$Lucan)
class(Cause2015$Limerick)
class(Cause2015$Tallaght)
class(Cause2015$Roscrea)
class(Cause2015$West)
class(Cause2015$Tralee)
class(Cause2015$Kerry)
class(Cause2015$Date)
#display data
print(Cause2015)
#save to new csv
write.csv(Cause2015, 'Cause2015Cleaned.csv', row.names = FALSE)
312c601ccd2aae0cae0f77c3ed0495a8c56ec386 | c8c6fa007193c26b4a93cab4dc40b81350a5a23c | /1.RF_Bruvs_dominant.R | 0fb24f495a94054d67ab5edd3ed28901ee3e96a2 | [] | no_license | anitas-giraldo/GB_Habitat_Classification | ba15fa35deae813f6bdb787e76965962eb9579bc | e3def2c2d6fefe84af436e629dc8de771f92e16c | refs/heads/master | 2023-07-13T05:08:24.823385 | 2021-08-15T22:54:33 | 2021-08-15T22:54:33 | 253,971,022 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 23,175 | r | 1.RF_Bruvs_dominant.R | ########################################################
###### Script -- Random Forest - BRUVs data - v1.0 ##############
### Load libraries ----
library(FactoMineR)
library(factoextra)
library(ggplot2)
library(ggthemes)
library(cowplot)
library(randomForest)
library(sp)
library(rgdal)
library(raster)
library(caTools)
library(reshape2)
library(tidyr)
library(car)
library(lattice)
library(dplyr)
library(raster)
library(rasterVis)
library(zoo)
library(sf)
library(fields)
library(ROCR)
library(caret)
library(geoR)
library(gstat)
#library(elsa)
#install.packages("corrplot")
library(corrplot)
library(broman)
library(VSURF)
# Clear memory ----
rm(list=ls())
### Set directories ----
# Directories are resolved relative to the open script (requires RStudio).
w.dir <- dirname(rstudioapi::getActiveDocumentContext()$path)
d.dir <- paste(w.dir, "data", sep='/')
s.dir <- paste(w.dir, "spatial_data", sep='/')
p.dir <- paste(w.dir, "plots", sep='/')
o.dir <- paste(w.dir, "outputs", sep='/')
### Load data ----
# BRUV drops with dominant broad habitat class and fine-bathymetry derivatives.
df <- read.csv(paste(d.dir, "tidy", "GB_Bruvs_fine_bathy_habitat_dominant_broad.csv", sep='/'))
head(df)
str(df) # check the factors and the predictors
any(is.na(df)) # check for NA's in the data
# Predictor raster stack; band names come from the companion CSV.
p <- stack(paste(s.dir, "predictors.tif", sep='/'))
namesp <- read.csv(paste(s.dir, "namespredictors.csv", sep='/'))
namesp
names(p) <- namesp[,2]
names(p)
## Prepare data ----
# Keep columns 5:14: the dominant-habitat class plus the 9 bathymetry predictors.
names(df)
df2 <- df[,c(5:14)]
head(df2)
# Rename the dominant-habitat column to Class (the RF response).
names(df2)
colnames(df2)[colnames(df2)=="Max_if_2_habitats_have_same"] <- "Class"
names(df2)
str(df2)
levels(df2$Class)
summary(df2)
head(df2)
## Plot predictors correlations by class -----
# matrix scatterplot of the 9 predictor variables, coloured by habitat class --
scatterplotMatrix(df2[2:10], col = df2$Class)
plot(df2[2:10], col = df2$Class)
legend("center",
legend = levels(df2$Class))
## using corrplot ----
# compute correlation matrix --
C <- cor(df2[2:10])
head(round(C,2))
# correlogram : visualizing the correlation matrix --
# http://www.sthda.com/english/wiki/visualize-correlation-matrix-using-correlogram#:~:text=Correlogram%20is%20a%20graph%20of%20correlation%20matrix.&text=In%20this%20plot%2C%20correlation%20coefficients,corrplot%20package%20is%20used%20here.
#Positive correlations are displayed in blue and negative correlations in red color.
#Color intensity and the size of the circle are proportional to the correlation coefficients
corrplot(C, method="circle")
corrplot(C, method="pie")
corrplot(C, method="color")
corrplot(C, method="number", type = "upper")
corrplot(C, method="color", type = "lower", order="hclust") # “hclust” for hierarchical clustering order is used in the following examples
# compute the p-value of correlations --
# mat : is a matrix of data
# ... : further arguments to pass to the native R cor.test function
cor.mtest <- function(mat, ...) {
  # Symmetric matrix of cor.test p-values for every pair of columns of `mat`.
  # Extra arguments are forwarded to cor.test(); the diagonal is fixed at 0.
  m <- as.matrix(mat)
  k <- ncol(m)
  pvals <- matrix(NA, k, k)
  diag(pvals) <- 0
  for (col_a in 1:(k - 1)) {
    for (col_b in (col_a + 1):k) {
      test_res <- cor.test(m[, col_a], m[, col_b], ...)
      pvals[col_b, col_a] <- test_res$p.value
      pvals[col_a, col_b] <- pvals[col_b, col_a]
    }
  }
  dimnames(pvals) <- list(colnames(m), colnames(m))
  pvals
}
# matrix of the p-value of the correlation
p.mat <- cor.mtest(df2[2:10])
head(p.mat[, 1:5])
# customize correlogram: upper triangle, clustered order, coefficients shown,
# cells blanked where the correlation is not significant at 0.01 --
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(C, method="color", col=col(100),
type="upper", order="hclust",
addCoef.col = "black", # Add coefficient of correlation
tl.col="black", tl.srt=45, #Text label color and rotation
# Combine with significance
p.mat = p.mat, sig.level = 0.01, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag=FALSE
)
# save - it doesn't work (ggsave only saves ggplot objects, not corrplot output)
#ggsave(paste(p.dir, "BRUV-fine-corr.png", sep='/'), device = "png", width = 6.23, height = 4.18, dpi = 300)
### Check predictor correlations ---
# define function mosthighlycorrelated --
# https://little-book-of-r-for-multivariate-analysis.readthedocs.io/en/latest/src/multivariateanalysis.html
# linear correlation coefficients for each pair of variables in your data set,
# in order of the correlation coefficient. This lets you see very easily which pair of variables are most highly correlated.
mosthighlycorrelated <- function(mydataframe, numtoreport) {
  # Report the `numtoreport` variable pairs of `mydataframe` with the largest
  # absolute linear correlation, sorted from strongest to weakest.
  corr_mat <- cor(mydataframe)
  # Zero the diagonal and lower triangle so each pair is reported once.
  diag(corr_mat) <- 0
  corr_mat[lower.tri(corr_mat)] <- 0
  # Flatten to a long data frame (one row per ordered variable pair).
  pairs_df <- as.data.frame(as.table(corr_mat))
  names(pairs_df) <- c("First.Variable", "Second.Variable", "Correlation")
  # Rank by absolute correlation and keep the top n.
  ranked <- pairs_df[order(abs(pairs_df$Correlation), decreasing = TRUE), ]
  head(ranked, n = numtoreport)
}
mosthighlycorrelated(df2[2:10], 9) # This results in only depth, rough and slope 4 not being correlated above 0.95
## MAKE BETTER PLOT -- TO do still -----
# NOTE(review): `train` is not defined until the split below — this bplot call
# only works on a re-run, and fields::bplot expects numeric input; verify.
b <- bplot(train$aspect8, train$Class)
plot(b)
### Get train and test data ----
# 75/25 split. NOTE(review): sample.split stratifies on its first argument;
# splitting on the continuous flowdir rather than Class is unusual — confirm.
set.seed(777)
sample <- sample.split(df2$flowdir, SplitRatio = 0.75)
train <- subset(df2, sample == TRUE)
test <-subset(df2, sample == FALSE)
dim(train) # [1] 109 10
dim(test) # [1] 35 10
### MODEL 1 ----
### RF - 5 habitat classes ---
# this is using all the habitat classes = 5 in total
# Used only the preds that were not correlated: depth, slope4, roughness
model <- randomForest(Class ~ ., data=train %>% select(c(Class, depth, slope4, roughness)) , ntree=501, proximity=TRUE)
model # OOB = 55.05%
model$importance
model$classes
# Keep only the raster bands the model was trained on.
ptest <- p
names(ptest)
ptest <- dropLayer(p, c(3:7,9))
## Predict ----
# NOTE(review): this overwrites the held-out `test` data frame with a raster;
# the validation section near the end of the script depends on the original
# data frame and will break unless the split is re-created first.
test <- raster::predict(ptest, model)
## Plot ----
plot(test)
e <- drawExtent()
testx <- crop(test, e)
plot(testx)
# basic plot using lattice --
# https://pjbartlein.github.io/REarthSysSci/rasterVis01.html
lp <- levelplot(testx)
lp
class(lp) # trellis
#### MODEL 2 ----
### RF - 5 habitat classes ---
# try using all predictors ---
# remove all the Classes that are not SG or MA
levels(df2$Class)
# NOTE(review): trained on the FULL df2 (no hold-out), unlike the other models.
model2 <- randomForest(Class ~ ., data=df2, ntree=501, proximity=T, mtry=3)
model2 # this is OOB = 56.94% for 2001 trees / OOB = 56.94% for 501 trees
model2$importance
# Predict ----
test <- raster::predict(p, model2)
# plot ----
plot(test)
e <- drawExtent()
testx <- crop(test, e)
plot(testx)
# Basic plot using lattice --
lp <- levelplot(testx)
lp
### MODEL 3 ----
### RF - 5 habitat classes ---
# this is using all the habitat classes = 5 in total
# Used all preds except flowdir
model3 <- randomForest(Class ~ ., data=train %>% select(-flowdir) , ntree=501, proximity=TRUE, mtry = 3)
model3 # OOB = 53.21%
model3$importance
# Drop the flowdir band (layer 9) to match the training predictors.
ptest <- p
names(ptest)
ptest <- dropLayer(p, c(9))
## Predict ----
test <- raster::predict(ptest, model3)
## Plot ----
plot(test)
e <- drawExtent()
testx <- crop(test, e)
plot(testx)
# basic plot using lattice --
# https://pjbartlein.github.io/REarthSysSci/rasterVis01.html
lp <- levelplot(testx)
lp
class(lp) # trellis
### MODEL 4 ----
### RF - 3 habitat classes : unvegetated, seagrass, macroalgae ---
# Collapse the 5 classes to 3 (Unvegetated / Seagrass / Algae) via car::recode.
# Using all preds
# to manipulate factors: https://stackoverflow.com/questions/35088812/combine-sets-of-factors-in-a-dataframe-with-dplyr
model4 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")),
ntree=1001, proximity=TRUE, mtry = 3)
model4 # OOB = 53.21%
model4$importance
# Remove predictors if needed --
#ptest <- p
#names(ptest)
#ptest <- dropLayer(p, c(9))
## Predict ----
test <- raster::predict(p, model4)
## Plot ----
plot(test)
e <- drawExtent()
testx <- crop(test, e)
plot(testx)
# basic plot using lattice --
# https://pjbartlein.github.io/REarthSysSci/rasterVis01.html
lp <- levelplot(testx)
lp
class(lp) # trellis
### MODEL 5 ----
### RF - 3 habitat classes : unvegetated, seagrass, macroalgae ---
# Same 3-class recode as model 4, but without the flowdir predictor.
# Using all preds
# to manipulate factors: https://stackoverflow.com/questions/35088812/combine-sets-of-factors-in-a-dataframe-with-dplyr
model5 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(-flowdir),
ntree=501, proximity=TRUE, mtry = 3)
model5 # OOB = 53.21%
model5$importance
# Remove predictors if needed --
ptest <- p
names(ptest)
ptest <- dropLayer(p, c(9))
## Predict ----
test <- raster::predict(ptest, model5)
## Plot ----
plot(test)
e <- drawExtent()
testx <- crop(test, e)
plot(testx)
# basic plot using lattice --
# https://pjbartlein.github.io/REarthSysSci/rasterVis01.html
lp <- levelplot(testx)
lp
class(lp) # trellis
### MODEL 6 and 7 ----
### RF - 3 habitat classes : unvegetated, seagrass, macroalgae ---
# Model 6: randomForest on the 3 uncorrelated predictors; model 7: the same
# problem fit through caret::train with CV and random hyperparameter search.
# Using all preds
# to manipulate factors: https://stackoverflow.com/questions/35088812/combine-sets-of-factors-in-a-dataframe-with-dplyr
model6 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, slope4, roughness)),
ntree=2001, proximity=TRUE, mtry = 3, importance=TRUE)
model6 # OOB = 55.05%
model6$importance
varImpPlot(model6)
# using different code --
# https://www.edureka.co/blog/random-forest-classifier/
# Training using ‘random forest’ algorithm
# Converting ‘Survived’ to a factor
train$Class <- factor(train$Class)
# Set a random seed
set.seed(51)
# had to use less classes, otherwise it wouldn't run, I think because not enough replicates per class
t=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'"))
head(t)
# Predictors = columns 2, 3, 9 of t; response = column 1 (Class).
TrainData <- t[,c(2,3,9)]
TrainClasses <- t[,1]
model7 <- caret::train(TrainData, TrainClasses, # Class is a function of the variables we decided to include
#data = train, # Use the train data frame as the training data
#preProcess = c("center", "scale"),
method = 'rf',# Use the 'random forest' algorithm
trControl = trainControl(method = 'cv', # Use cross-validation
search = 'random')) # Use 5 folds for cross-validation
model7
model7$finalModel
#model7$importance
v<- varImp(model7, scale =F)
v
varImp(model7)
plot(v, top = 9)
# AREA UNDER THE CURVE --
# ROC-based univariate variable importance over all 9 predictors.
roc_imp <- filterVarImp(x = train[, 2:10], y = train$Class)
roc_imp
# Remove predictors if needed --
ptest <- p
names(p)
ptest <- dropLayer(p, c(3:7,9))
ptest2 <- dropLayer(p, c(3:5,9))
names(ptest2)
## Predict ----
test <- raster::predict(ptest, model6)
test2 <- raster::predict(ptest, model7)
## Plot ----
plot(test)
#e <- drawExtent()
# Fixed extent over the Geographe Bay study area (avoids interactive drawExtent).
e <- extent(115.1187, 115.5686 , -33.6169, -33.32534)
testx <- crop(test, e)
plot(testx)
plot(test2)
e <- drawExtent()
test2 <- crop(test2, e)
plot(test2)
# basic plot using lattice --
# https://pjbartlein.github.io/REarthSysSci/rasterVis01.html
# https://stat.ethz.ch/pipermail/r-sig-geo/2013-March/017893.html
# pick colors -- the last `sg` assignment wins (Fern); earlier ones are trials.
sg <- brocolors("crayons")["Jungle Green"] # "#78dbe2"
sg <- brocolors("crayons")["Forest Green"] # "#78dbe2"
sg <- brocolors("crayons")["Fern"] # "#78dbe2"
alg <- brocolors("crayons")["Raw Umber"] # "#1dacd6"
sand <- brocolors("crayons")["Unmellow Yellow"] # "#f75394"
# read the Geographe Bay CMR boundary shapefile for the overlay
gb <- readOGR(dsn="C:/Users/00093391/Dropbox/UWA/Research Associate/PowAnalysis_for1sParksMeeting/Desktop/shapefiles")
plot(gb)
lp <- levelplot(testx, col.regions=c(alg, sg, sand))
lp
class(lp) # trellis
# https://oscarperpinan.github.io/rastervis/FAQ.html
lp2 <- levelplot(testx, col.regions=c(alg, sg, sand), xlab = list("Longitude", fontface = "bold"),
ylab = list("Latitude", fontface = "bold"))
# with the gb polygon
lp2 <- levelplot(testx, col.regions=c(alg, sg, sand), xlab = list("Longitude", fontface = "bold"),
ylab = list("Latitude", fontface = "bold")) + layer(sp.polygons(gb))
lp2
#print(lp2)
# Write the trellis plot to PNG; print() is required inside a device.
trellis.device(device ="png", filename = paste(p.dir, "Bruv-fine.png", sep='/'), width = 1000, height = 670, res = 200)
print(lp2)
dev.off()
#### Model 8 ####
# Models 8-11: 3-class RF over different predictor subsets; model 11
# (depth, tri, aspect4, tpi) has the lowest OOB error and is used below.
model8 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, slope4, aspect4, tpi, flowdir)),
ntree=2001, proximity=TRUE, mtry = 3, importance=TRUE)
model8 # OOB = 58.72%
model8$importance
varImpPlot(model8)
### Model 9 ####
model9 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, roughness, aspect4, tpi, flowdir)),
ntree=2001, proximity=TRUE, mtry = 3, importance=TRUE)
model9 # OOB = 55.05%
model9$importance
varImpPlot(model9)
### Model 10 ####
model10 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, tri, aspect4, tpi, flowdir)),
ntree=2001, proximity=TRUE, mtry = 3, importance=TRUE)
model10 # OOB = 54.13%
model10$importance
varImpPlot(model10)
### Model 11 ####
model11 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, tri, aspect4, tpi)),
ntree=2001, proximity=TRUE, mtry = 3, importance=TRUE)
model11 # OOB = 50.46%
model11$importance
varImpPlot(model11)
# predict ----
# Keep only the model-11 bands (depth, tri, aspect4, tpi) in the stack.
ptest <- p
names(p)
ptest <- dropLayer(p, c(2,3,5,8,9))
names(ptest)
pred.m11 <- raster::predict(ptest, model11)
# plot ----
plot(pred.m11)
#e <- drawExtent()
# Fixed Geographe Bay extent (replaces the interactive drawExtent call).
e <- extent(115.1187, 115.5686 , -33.6169, -33.32534)
testx <- crop(pred.m11, e)
plot(testx)
# save prediction ---
writeRaster(testx, paste(o.dir, "GBpred-Fine-Bruvs.tif", sep='/'))
# basic plot using lattice --
# https://pjbartlein.github.io/REarthSysSci/rasterVis01.html
# https://stat.ethz.ch/pipermail/r-sig-geo/2013-March/017893.html
# pick colors -- earlier seagrass candidates left commented; Fern is used.
#sg <- brocolors("crayons")["Jungle Green"] # "#78dbe2"
#sg <- brocolors("crayons")["Forest Green"] # "#78dbe2"
sg <- brocolors("crayons")["Fern"] # "#78dbe2"
alg <- brocolors("crayons")["Raw Umber"] # "#1dacd6"
sand <- brocolors("crayons")["Unmellow Yellow"] # "#f75394"
# read the Geographe Bay CMR boundary shapefile for the overlay
gb <- readOGR(dsn="C:/Users/00093391/Dropbox/UWA/Research Associate/PowAnalysis_for1sParksMeeting/Desktop/shapefiles")
plot(gb)
lp <- levelplot(testx, col.regions=c(alg, sg, sand))
lp
class(lp) # trellis
# https://oscarperpinan.github.io/rastervis/FAQ.html
# plot without CMR--
lp2 <- levelplot(testx, col.regions=c(alg, sg, sand), xlab = list("Longitude", fontface = "bold"),
ylab = list("Latitude", fontface = "bold"))
# with the CMR polygon
lp2 <- levelplot(testx, col.regions=c(alg, sg, sand), xlab = list("Longitude", fontface = "bold"),
ylab = list("Latitude", fontface = "bold")) + layer(sp.polygons(gb))
lp2
#print(lp2)
trellis.device(device ="png", filename = paste(p.dir, "Bruv-fine-CMR.png", sep='/'), width = 1000, height = 670, res = 200)
print(lp2)
dev.off()
### Model 12 ####
# Single-predictor model (tri only), for comparison with model 11.
model12 <- randomForest(Class ~ ., data=train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, tri)),
ntree=2001, proximity=TRUE, importance=TRUE)
model12 # OOB = 51.38%
model12$importance
varImpPlot(model12)
# predict ----
# Keep only the tri band in the stack.
ptest <- p
names(p)
ptest <- dropLayer(p, c(1:6,8,9))
names(ptest)
pred.m12 <- raster::predict(ptest, model12)
# plot ----
plot(pred.m12)
#e <- drawExtent()
e <- extent(115.1187, 115.5686 , -33.6169, -33.32534)
testx <- crop(pred.m12, e)
plot(testx)
# basic plot using lattice --
# https://pjbartlein.github.io/REarthSysSci/rasterVis01.html
# https://stat.ethz.ch/pipermail/r-sig-geo/2013-March/017893.html
# pick colors -- the last `sg` assignment wins (Fern).
sg <- brocolors("crayons")["Jungle Green"] # "#78dbe2"
sg <- brocolors("crayons")["Forest Green"] # "#78dbe2"
sg <- brocolors("crayons")["Fern"] # "#78dbe2"
alg <- brocolors("crayons")["Raw Umber"] # "#1dacd6"
sand <- brocolors("crayons")["Unmellow Yellow"] # "#f75394"
# read the Geographe Bay CMR boundary shapefile for the overlay
gb <- readOGR(dsn="C:/Users/00093391/Dropbox/UWA/Research Associate/PowAnalysis_for1sParksMeeting/Desktop/shapefiles")
plot(gb)
lp <- levelplot(testx, col.regions=c(alg, sg, sand))
lp
class(lp) # trellis
# https://oscarperpinan.github.io/rastervis/FAQ.html
# plot without CMR--
lp2 <- levelplot(testx, col.regions=c(alg, sg, sand), xlab = list("Longitude", fontface = "bold"),
ylab = list("Latitude", fontface = "bold"))
# with the CMR polygon
lp2 <- levelplot(testx, col.regions=c(alg, sg, sand), xlab = list("Longitude", fontface = "bold"),
ylab = list("Latitude", fontface = "bold")) + layer(sp.polygons(gb))
lp2
#print(lp2)
# NOTE(review): this overwrites the model-11 figure of the same name.
trellis.device(device ="png", filename = paste(p.dir, "Bruv-fine-CMR.png", sep='/'), width = 1000, height = 670, res = 200)
print(lp2)
dev.off()
#### Validation set assessment model 6: looking at confusion matrix ----
# NOTE(review): this section assumes `test` is still the held-out DATA FRAME,
# but earlier code reassigned `test` to raster predictions; re-create the
# train/test split before running this block.
#prediction_for_table <- raster::predict(model6, test[,-c(1,4:8,10)])
prediction_for_table6 <- raster::predict(model11, test %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, aspect4, tpi, tri)))
#table(observed=test[,-c(2:10)], predicted=prediction_for_table)
table(observed=test$Class %>%
car::recode("c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'"),
predicted=prediction_for_table6)
# confusion matrix
caret::confusionMatrix(test$Class %>%
car::recode("c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'"),
prediction_for_table6)
# Validation set assessment #2: ROC curves and AUC
# Needs to import ROCR package for ROC curve plotting:
install.packages("ROCR")
library(ROCR)
# Calculate the probability of new observations belonging to each class
# prediction_for_roc_curve will be a matrix with dimensions data_set_size x number_of_classes
prediction_for_roc_curve <- predict(model11,
test %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, aspect4, tpi, tri)),
type="prob")
# Plot ROC curve ----
# Use pretty colours:
pretty_colours <- c("#F8766D","#00BA38","#619CFF")
# Specify the different classes
classes <- levels(test$Class %>%
car::recode("c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'"))
for (i in 1:3)
{
# Define which observations belong to class[i]
true_values <- ifelse(test$Class %>%
car::recode("c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")==classes[i],1,0)
# Assess the performance of classifier for class[i]
pred <- prediction(prediction_for_roc_curve[,i],true_values)
perf <- performance(pred, "tpr", "fpr")
if (i==1)
{
plot(perf,main="ROC Curve",col=pretty_colours[i])
}
else
{
plot(perf,main="ROC Curve",col=pretty_colours[i],add=TRUE)
}
# Calculate the AUC and print it to screen
auc.perf <- performance(pred, measure = "auc")
print(auc.perf@y.values)
}
# Confusion matrix Model 7 ----
prediction_for_table7 <- raster::predict(model7, test %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'")) %>%
select(c(Class, depth, slope4, roughness)))
caret::confusionMatrix(test$Class %>%
car::recode("c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'"),
prediction_for_table7)
# # # # # # # #
#### VARIOGRAM ###### this hasn't work yet
# https://stats.idre.ucla.edu/r/faq/how-do-i-generate-a-variogram-for-spatial-data-in-r/
# https://www.aspexit.com/en/implementing-variograms-in-r/
# https://cran.r-project.org/web/packages/elsa/vignettes/elsa.html
df <- read.csv(paste(d.dir, "tidy", "GB_Bruvs_fine_bathy_habitat_dominant_broad.csv", sep='/'))
head(df)
str(df) # check the factors and the predictors
any(is.na(df)) # check for NA's in the data
names(df)
# rename column
names(df)[names(df) == "Max_if_2_habitats_have_same"] <- "class"
names(df)
#dataset is a dataframe (a table) with three columns: the longitude (x), the latitude (y) and the variable of interest
# need to convert the classes to numeric
# https://www.researchgate.net/post/What_could_be_the_most_appropriate_approach_for_applying_spatial_interpolation_to_categorical_variables
str(df)
# NOTE(review): `class` shadows base::class() here (class(lp) is used earlier in this
# file); also levels() returns NULL unless df$class is a factor -- since R 4.0
# read.csv no longer converts strings to factors by default. Confirm.
class <- levels(df$class)
class.no <- c("1", "2", "3", "4", "5")
# build a lookup table class label -> numeric code, then join it onto df
class.df <- cbind(class.no, class)
class.df
class.df <- as.data.frame(class.df)
df2 <- merge(df, class.df, by = "class")
head(df2)
str(df2)
df2$class.no <- as.numeric(df2$class.no)
# transform df in to spatial points data frame ----
coordinates(df2) <- ~longitude+latitude
# empirical semivariogram of the numeric class codes (degrees, not metres)
variog3 <- variogram(class.no~1, df2, cutoff = 0.5, width = 0.02)
plot(variog3)
# fit a semivariogram model to the data ----
# NOTE(review): `variog1` is not defined in this section -- probably meant variog3.
v.fit <- fit.variogram(variog1, vgm("Exp"))
v <- variog(df2, max.dist = 0.5)
### # Feature selection using VSURF ----
train
# collapse the habitat classes to three groups before feature selection
# NOTE(review): `t` shadows base::t() (matrix transpose) -- harmless here but fragile.
t <- train %>% mutate(Class = car::recode(Class, "c('Unconsolidated', 'Consolidated')='Unvegetated';'Seagrasses' = 'Seagrass'; c('Turf.algae','Macroalgae')='Algae'"))
head(t)
# columns 2:10 are the candidate predictors, column 1 the response class
TrainData <- t[,c(2:10)]
TrainClasses <- t[,1]
# VSURF: three-step random-forest variable selection (thresholding / interpretation / prediction)
rf.def <- VSURF(TrainData, TrainClasses)
plot(rf.def)
summary(rf.def)
rf.def$varselect.pred # [1] 7 : TRI
rf.def$varselect.thres
head(TrainData) # 7 : TRI
|
5b345a60c594b4ee1ab8af8f237b62f6b5d683be | e11f24d2b742a6b775035b950486ca2a0a598c9c | /Sales/ui.R | 2fb0a45d5b60304c33a8ceefebd87efd83e248d9 | [] | no_license | jdmeyer73/ddp_shinyapp | dd99991e3327861af45b45fcdab800c9af02d43b | 988dfb9e4f02d1f99d5bd283696bf542f4d04608 | refs/heads/main | 2023-08-08T07:41:44.930177 | 2021-09-07T15:39:36 | 2021-09-07T15:39:36 | 404,027,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,467 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
library(shiny)
# Define UI for application
# Sidebar: model/plot options plus static app information.
# Main panel: correlation plot, regression summary, and diagnostic plot, the
# two plots shown conditionally on the "plots" checkbox group.
shinyUI(fluidPage(
    # CSS style
    # red horizontal rule separating Options from Application Information
    tags$head(
        tags$style(HTML("hr {border-top: 5px solid #FF0000;}"))
    ),
    # Application title
    titlePanel("Advertising Media and Sales"),
    # Sidebar with a checkbox inputs (for predictors and plots)
    # and for app information
    sidebarLayout(
        sidebarPanel(
            h3("Options"),
            h4("Select the advertising media to be included in the model:"),
            # input$variable: which media budgets enter the regression
            checkboxGroupInput(inputId="variable", label=NULL,
                               choices=list("YouTube"="youtube",
                                            "Facebook"="facebook",
                                            "Newspaper"="newspaper")),
            h4("Select plots to show:"),
            # input$plots: toggles the optional plots in the main panel
            checkboxGroupInput(inputId="plots", label=NULL,
                               choices=list("Correlation"="correlation",
                                            "Outlier and Leverage Diagnostic"="outlev")),
            hr(),
            h3("Application Information"),
            p("This Shiny application uses a marketing dataset with sales and
              advertising budgets for various media. The user selects which
              predictors to include (if any) in linear regression model and
              if any plots are desired."),
            h4("How to use:"),
            p("1. Select the predictors to include in the model"),
            p("2. Select if a correlation plot of the selected predictors is desired"),
            p("3. Select if a outlier and leverage plot of the fitted model is desired"),
            p("4. View results"),
            p(),
            h4("Author: Jeffrey Meyer"),
            tags$a(href="https://github.com/jdmeyer73/ddp_shinyapp",
                   "Link to GitHub repository")
        ),
        mainPanel(
            h3("Results"),
            # shown only when "Correlation" is ticked (JS condition on input.plots)
            conditionalPanel(
                "input.plots.indexOf('correlation') > -1",
                h4("Correlation Matrix"),
                plotOutput("corr")),
            h4("Regression Results"),
            verbatimTextOutput("model"),
            # shown only when "Outlier and Leverage Diagnostic" is ticked
            conditionalPanel(
                "input.plots.indexOf('outlev') > -1",
                h4("Outlier and Leverage Diagnostic Plot"),
                plotOutput("lev"))
        )
    )
))
|
8f7069f7ef20a3c56b9716b189395167e4f8d0f0 | 87e7afad6293e214dfdf9aa4d75f4937bdd5e36e | /R_scripts/Generate_reports.R | 6f0b40059c6ba37e826baeb3fbae6606288b5db4 | [] | no_license | eleinis/SCBImortality | 9f798d2a7b72ea25b3138c2e28852ef3ac5503ab | 90070954e06975634baad0daba32f7d6e44a7d78 | refs/heads/main | 2023-07-05T08:51:33.197247 | 2021-08-03T13:57:44 | 2021-08-03T13:57:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,560 | r | Generate_reports.R | # Generate reports looking at latest mortality census raw data ####
## this script is run automatically when there is a push
# clear environment ####
rm(list = ls())
# load libraries ####
library(here)
library(readxl)
# load latest mortality data ####
## get the name of latest excel form
## NOTE(review): list.files() may return several paths; read_xlsx below assumes a
## single FFF .xlsx file exists -- confirm.
latest_FFFs <- list.files(here("raw_data/FFF_excel/"), pattern = ".xlsx", full.names = T)
## load the latest mortality survey
mort <- as.data.frame(read_xlsx(latest_FFFs, sheet = "subform_1", .name_repair = "minimal" ))
mort_root <- as.data.frame(read_xlsx(latest_FFFs, sheet = "Root", .name_repair = "minimal" ))
## attach the surveyor of each record, matched from the Root sheet by Submission Id
mort <- cbind(SurveyorID = mort_root$Personnel[match(mort$`Submission Id`, mort_root$`Submission Id`)], mort)
# load and clean up the 3rd main census ####
main_census <- read.csv(paste0("https://raw.githubusercontent.com/SCBI-ForestGEO/SCBI-ForestGEO-Data/master/tree_main_census/data/census-csv-files/scbi.stem3.csv"))
## convert dbh to numeric
main_census$dbh <- as.numeric(main_census$dbh)
## only keep trees > 10cm except for fraxinus and Chionanthus virginicus
main_census <- main_census[grepl("^fr..|^ch..", main_census$sp) | (!is.na(main_census$dbh) & main_census$dbh >= 100), ]
## remove trees that are dead
# main_census <- main_census[!main_census$status %in% "D",]
main_census <- main_census[!grepl("DC|DN|DT", main_census$codes),] # see https://github.com/SCBI-ForestGEO/SCBImortality/issues/31#issuecomment-881702404
# load species table ####
spptable <- read.csv("https://raw.githubusercontent.com/SCBI-ForestGEO/SCBI-ForestGEO-Data/master/tree_main_census/data/census-csv-files/scbi.spptable.csv")
# fix empty lines in mort ####
idx_empty_line <- which(is.na(mort$Quad))
EAB_columns <- c("Crown thinning","Epicormic growth","EABF","D-shaped exit hole count","Crown position < 10 cm DBH")
# replace empty EAB column of line before by the EAB of empty lines
# NOTE(review): assumes each empty-Quad row holds the EAB fields belonging to the
# row directly above it (form export spills them onto a continuation row) -- confirm.
mort[idx_empty_line-1, EAB_columns] <- mort[idx_empty_line, EAB_columns]
# remove empty lines
mort <- mort[!is.na(mort$Quad), ] # fix empty lines
# remove repeated columns
mort <- mort[, unique(names(mort))]
# give a % completion status ####
# share of main-census stems (tag + StemTag) already present in the mortality survey
percent_completion <- round(sum(paste(main_census$tag, main_census$StemTag) %in% paste(mort$Tag, mort$StemTag)) / nrow(main_census) * 100)
# render the percentage as a tiny png image
png(file.path(here("testthat"), "reports/percent_completion.png"), width = 1, height = 1, units = "in", res = 150)
par(mar = c(0,0,0,0))
plot(0,0, axes = F, xlab = "", ylab = "", type = "n")
text(0,0, paste(percent_completion, "%"))
dev.off()
# write.table(percent_completion, file = file.path(here("testthat"), "reports/percent_completion.txt"), col.names = F, row.names = F)
# --- PERFORM CHECKS ---- ####
# prepare log files #####
# Accumulators for the three report types; each check below rbind()s the
# offending mort rows plus an error/warning name column onto one of these.
require_field_fix_error_file <- NULL
will_auto_fix_error_file <- NULL
warning_file <- NULL
# for each quadrat censused, check all expected trees were censused ####
# filename <- file.path(here("testthat"), "reports/requires_field_fix/quadrat_censused_missing_stems.csv")
error_name = "missing_stem"
# idx_quadrat_censused <- main_census$quadrat %in% as.numeric(mort$Quad)
# main-census stems whose quadrat was surveyed but which are absent from mort
idx_errors <- !paste(main_census$tag, main_census$StemTag) %in% paste(mort$Tag, mort$StemTag) & main_census$quadrat %in% as.numeric(mort$Quad)
if(sum(idx_errors) > 0) {
  # write.csv(main_census[paste(main_census$tag, main_census$StemTag) %in% idx_errors, ], file = filename, row.names = F)
  # build all-NA rows shaped like mort, then fill the identifying fields from the
  # main census so missing stems can be listed in the field-fix report
  # NOTE(review): 9 destination columns (incl. "HOM") but only 8 source columns --
  # confirm this assignment behaves as intended when missing stems exist.
  data_to_add <- mort[1:sum(idx_errors),]
  data_to_add[1:sum(idx_errors), ] <- NA
  data_to_add[, c("Quad",
                  "Tag",
                  "StemTag",
                  "Species",
                  "QX",
                  "QY",
                  "DBH",
                  "Status 2018",
                  "HOM")] <-
    main_census[idx_errors, c("quadrat", "tag", "StemTag", "sp", "gx", "gy", "dbh", "status")]
  require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(data_to_add, error_name))
}
# else {
#   if(file.exists(filename) ) file.remove(filename)
# }
# remove any tree with current status DN as we don't need to check errors on those ####
# the last "Status" column in sheet order is the current-census status
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- !mort[, status_column] %in% c("DN")
mort <- mort[idx_trees, ]
# check if all species exist in species table, if not save a file, if yes, delete that file ####
error_name <- "species_code_error"
idx_error <- !mort$Species %in% spptable$sp
if(sum(idx_error) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_error,], error_name))
# for each quadrat censused, check that there is no duplicated stems ####
# all rows sharing a duplicated tag+StemTag pair are written out (not just the repeats)
filename <- file.path(here("testthat"), "reports/will_auto_fix/quadrat_censused_duplicated_stems.csv")
idx_errors <- paste(mort$Tag, mort$StemTag)[duplicated(paste(mort$Tag, mort$StemTag))]
if(length(idx_errors) > 0) {
  write.csv(mort[paste(mort$Tag, mort$StemTag) %in% idx_errors, ], file = filename, row.names = F)
} else {
  if(file.exists(filename) ) file.remove(filename)
}
# check that all censused trees have a crown position recorded ####
error_name <- "missing_crown_position"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("A", "AU", "DS")
idx_errors <- is.na(mort$'Crown position') & idx_trees
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that all censused trees have a percent of crown intact recorded ####
error_name <- "missing_percent_crown_intact"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("A", "AU", "DS")
idx_errors <- is.na(mort$'Percentage of crown intact') & idx_trees
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that all censused trees have a percent of crown living recorded ####
error_name <- "missing_percent_crown_living"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("A", "AU", "DS")
idx_errors <- is.na(mort$'Percentage of crown living') & idx_trees
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ] , error_name))
# check percent of crown living <= percent of crown intact####
error_name <- "crown_living_greater_than_crown_intact"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("A", "AU", "DS")
idx_errors <- !is.na(mort$'Percentage of crown living') & !is.na(mort$'Percentage of crown intact') & mort$'Percentage of crown living' > mort$'Percentage of crown intact' & idx_trees
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ] , error_name))
# check percent newly censused trees (DS or DC) have percentage of crown living = 0####
error_name <- "dead_but_crown_living_not_zero"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("DS", "DC")
idx_errors <- !is.na(mort$'Percentage of crown living') & mort$'Percentage of crown living'> 0 & idx_trees
if(sum(idx_errors) > 0) will_auto_fix_error_file <- rbind(will_auto_fix_error_file, data.frame(mort[idx_errors, ] , error_name))
# check that newly censused alive trees have no FAD selected; no record of wounded main stem, canker, or rotting trunk; DWR (dead with resprouts) not selected ####
error_name <- "status_A_but_unhealthy"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% "A"
idx_FAD <- !is.na(mort$FAD)
idx_wound <- !is.na(mort$'Wounded main stem')
idx_canker <- !is.na(mort$'Canker; swelling, deformity')
idx_rot <- !is.na(mort$'Rotting trunk')
idx_DWR <- !mort$'DWR' %in% "False"
# NOTE(review): idx_wound appears twice in this expression (here and in the
# vice-versa check below) -- redundant, or a missed condition? Confirm.
idx_errors <- idx_trees & (idx_FAD | idx_wound | idx_wound | idx_canker | idx_rot | idx_DWR)
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
## and vice-versa ####
error_name <- "unhealthy_but_wrong_status"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("AU", "DC", "DS")
idx_FAD <- !is.na(mort$FAD)
idx_wound <- !is.na(mort$'Wounded main stem')
idx_canker <- !is.na(mort$'Canker; swelling, deformity')
idx_rot <- !is.na(mort$'Rotting trunk')
idx_DWR <- !mort$'DWR' %in% "False"
idx_errors <- !idx_trees & (idx_FAD | idx_wound | idx_wound | idx_canker | idx_rot | idx_DWR)
if(sum(idx_errors) > 0) will_auto_fix_error_file <- rbind(will_auto_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that status 'AU' does not have DWR (dead with resprouts) selected ####
error_name <- "status_AU_but_DWR_selected"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% "AU"
idx_DWR <- !mort$'DWR' %in% "False"
idx_errors <- idx_trees & idx_DWR
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that status 'DS' or 'DC' have a dbh measured ####
error_name <- "status_DS_or_DC_but_DBH_not_measured"
status_column <- rev(grep("Status", names(mort), value = T))[1]
# second-to-last "Status" column is the previous census's status
previous_status_column <- rev(grep("Status", names(mort), value = T))[2]
idx_trees <- mort[, status_column] %in% c("DS", "DC")
# only require a dead DBH for trees that died since the previous census
idx_previously_dead <- !mort[,previous_status_column] %in% c("AU","A") & !is.na(mort[,previous_status_column])
idx_no_DBH_if_dead <- is.na(mort$'Dead DBH')
idx_errors <- idx_trees & idx_no_DBH_if_dead & !idx_previously_dead
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that status 'DS' or 'DC' have a dbh within 2cm of most recent census DBH ####
warning_name <- "DBH_dead_suspicious"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("DS", "DC")
# 20 (mm) here corresponds to the 2 cm tolerance in the header comment
idx_DBH_ouside_range <- !is.na(mort$'Dead DBH') & !is.na(as.numeric(mort$DBH)) & (abs(mort$'Dead DBH' - as.numeric(mort$DBH)) > 20)
idx_errors <- idx_trees & idx_DBH_ouside_range
if(sum(idx_errors) > 0) warning_file <- rbind(warning_file, data.frame(mort[idx_errors, ], warning_name))
# check that newly censused 'AU', 'DS' or 'DC trees that were alive in previous census have at least one FAD is selected (OR level selected for `wounded main stem`,`canker,swelling,deformity`, `rotting main stem`) ####
error_name <- "status_AU_DS_or_DC_but_no_FAD"
status_column <- rev(grep("Status", names(mort), value = T))[1]
previous_status_column <- rev(grep("Status", names(mort), value = T))[2]
idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
# NOTE(review): doubled assignment ("idx_previously_dead <- idx_previously_dead <-")
# is harmless but looks like an editing leftover.
idx_previously_dead <- idx_previously_dead <- grepl("D", mort[,previous_status_column]) & !is.na(mort[,previous_status_column])
idx_no_FAD <- is.na(mort$FAD)
idx_wound <- is.na(mort$'Wounded main stem')
idx_canker <- is.na(mort$'Canker; swelling, deformity')
idx_rot <- is.na(mort$'Rotting trunk')
# idx_living_crown <- mort$"Percentage of crown living" == 100 # this was for OR **status is AU and `percentage of crown living`<100**
idx_errors <- idx_trees & (idx_no_FAD & idx_wound & idx_canker & idx_rot) & !idx_previously_dead
# idx_errors[mort[, status_column] %in% "AU" & !idx_living_crown] <- FALSE # overwrite to FALSE for trees that are AU and crown not intact
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name)) # this was for OR **status is AU and `percentage of crown living`<100**
# check that newly censused 'AU', 'DS' or 'DC trees have at one photo taken ####
# filename <- file.path(here("testthat"), "reports/status_AU_DS_or_DC_but_no_photo.csv") # edit file name here
#
# status_column <- rev(grep("Status", names(mort), value = T))[1]
#
# idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
# idx_no_FAD <- is.na(mort$FAD)
#
# idx_errors <- idx_trees & idx_no_FAD
#
#
# if(sum(idx_errors) > 0) {
#   write.csv(mort[idx_errors, ], file = filename, row.names = F)
# } else {
#   if(file.exists(filename) ) file.remove(filename)
# }
#
#
# check that newly censused 'AU', 'DS' or 'DC with "wound" selected as FAD have selected a level for wounded main stem ####
error_name <- "wounded_but_no_level"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
idx_wounded <- !is.na(mort$FAD) & grepl("W", mort$FAD)
idx_wnd_main_stem <- !is.na(mort$'Wounded main stem')
idx_errors <- idx_trees & idx_wounded & !idx_wnd_main_stem
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
## and vice versa ####
error_name <- "wounded_level_but_wrong_status_or_FAD"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
idx_wounded <- !is.na(mort$FAD) & grepl("W", mort$FAD)
idx_wnd_main_stem <- !is.na(mort$'Wounded main stem')
idx_errors <- idx_trees & !idx_wounded & idx_wnd_main_stem
if(sum(idx_errors) > 0) {
  # widen to any row failing either condition before logging
  idx_errors <- (!idx_trees | !idx_wounded) & idx_wnd_main_stem
  will_auto_fix_error_file <- rbind(will_auto_fix_error_file, data.frame(mort[idx_errors, ], error_name))
}
# check that newly censused 'AU', 'DS' or 'DC with "canker" selected as FAD have selected a level for canker,swelling,deformity ####
error_name <- "canker_but_no_level"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
idx_canker <- !is.na(mort$FAD) & grepl("K", mort$FAD)
# NOTE(review): column name 'canker,swelling,deformity' differs from
# 'Canker; swelling, deformity' used in earlier checks; if the column does not
# exist this index is logical(0) and the check silently never fires -- confirm
# against the Excel form's actual column names.
idx_ckr_level <- !is.na(mort$'canker,swelling,deformity')
idx_errors <- idx_trees & idx_canker & !idx_ckr_level
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
## and vice versa ####
error_name <- "canker_level_but_wrong_status_or_FAD"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
idx_canker <- !is.na(mort$FAD) & grepl("K", mort$FAD)
idx_ckr_level <- !is.na(mort$'canker,swelling,deformity')
# NOTE(review): uses & where the wound vice-versa check uses | -- inconsistent; confirm intent.
idx_errors <- (!idx_trees & !idx_canker) & idx_ckr_level
if(sum(idx_errors) > 0) will_auto_fix_error_file <- rbind(will_auto_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that newly censused 'AU', 'DS' or 'DC with "rotting stem" selected as FAD have selected a level for rotting main stem ####
error_name <- "rot_but_no_level"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
idx_rot <- !is.na(mort$FAD) & grepl("R\\>", mort$FAD)
# NOTE(review): column 'rotting main stem' vs 'Rotting trunk' used earlier --
# same concern as for the canker level column above.
idx_rot_level <- !is.na(mort$'rotting main stem')
idx_errors <- idx_trees & idx_rot & !idx_rot_level
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
## and vice versa ####
error_name <- "rot_level_but_wrong_status_or_FAD"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort[, status_column] %in% c("AU","DS", "DC")
idx_rot <- !is.na(mort$FAD) & grepl("R\\>", mort$FAD)
idx_rot_level <- !is.na(mort$'rotting main stem')
idx_errors <- (!idx_trees & !idx_rot) & idx_rot_level
if(sum(idx_errors) > 0) will_auto_fix_error_file <- rbind(will_auto_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that there is D.shaped.exit.hole.count when EABF is DE ####
error_name <- "DE_but_no_exit_hole_count"
idx_DE <- !is.na(mort$EABF) & grepl("DE", mort$EABF)
idx_exit_count <- !is.na(mort$"D-shaped exit hole count") & mort$"D-shaped exit hole count">0
idx_errors <- idx_DE & !idx_exit_count
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
## and vice versa ####
error_name <- "exit_hole_count_no_DE_EABF"
idx_errors <- !idx_DE & idx_exit_count
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that newly censused 'A' or 'AU', were A or AU in previous year ####
warning_name <- "Dead_but_now_alive"
status_column <- rev(grep("Status", names(mort), value = T))[1]
previous_status_column <- rev(grep("Status", names(mort), value = T))[2]
idx_trees <- mort[, status_column] %in% c("AU","A")
idx_previously_dead <- !mort[,previous_status_column] %in% c("AU","A") & !is.na(mort[,previous_status_column])
idx_errors <- idx_trees & idx_previously_dead
if(sum(idx_errors) > 0) warning_file <- rbind(warning_file, data.frame(mort[idx_errors, ], warning_name))
# check that newly censused 'A' or 'AU' or 'DS', were not 'DC' in previous year ####
warning_name <- "DC_but_now_A_AU_or_DS"
status_column <- rev(grep("Status", names(mort), value = T))[1]
previous_status_column <- rev(grep("Status", names(mort), value = T))[2]
idx_trees <- mort[, status_column] %in% c("AU","A", "DS")
idx_previously_dead <- mort[,previous_status_column] %in% c("DC") & !is.na(mort[,previous_status_column])
idx_errors <- idx_trees & idx_previously_dead
if(sum(idx_errors) > 0) warning_file <- rbind(warning_file, data.frame(mort[idx_errors, ], warning_name))
# check that newly censused trees (FRAM, FRNI, FRPE, FRSP, or CHVI), have Crown thinning, Epicormic growth, Crown position < 10 cm DBH (for stems <10cm) all recorded ####
# EAB = emerald ash borer; these fields are only required for ash/fringetree species
error_name <- "missing_EAB_info"
idx_trees <- mort$Species %in% c( "fram", "frni", "frpe", "frsp", "chvi")
idx_missing_EAB_info <- !complete.cases(mort[, c("Crown thinning", "Epicormic growth") ])
idx_missing_crwn_pos <- !complete.cases(mort[, c("Crown position < 10 cm DBH")])
# DBH is in mm, so 100 corresponds to the 10 cm threshold in the header comment
idx_trees_less_10cm <- !is.na( as.numeric(mort$DBH)) & as.numeric(mort$DBH) <100
status_column <- rev(grep("Status", names(mort), value = T))[1]
previous_status_column <- rev(grep("Status", names(mort), value = T))[2]
idx_status <- !mort[, status_column] %in% c("DN")
idx_previously_dead <- idx_previously_dead <- grepl("D", mort[,previous_status_column]) & !is.na(mort[,previous_status_column])
idx_errors <- ((idx_trees & idx_missing_EAB_info) | (idx_trees & idx_missing_crwn_pos & idx_trees_less_10cm)) & (idx_status& !idx_previously_dead)
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that, for newly censused trees (FRAM, FRNI, FRPE, FRSP, or CHVI), if Epicormic growth>0, tree is AU ####
error_name <- "epicormic_growth_but_not_AU"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort$Species %in% c( "fram", "frni", "frpe", "frsp", "chvi")
idx_epicormic <- !is.na(mort$`Epicormic growth`) & mort$`Epicormic growth` > 0
idx_status <- mort[, status_column] %in% c("AU")
idx_errors <- idx_trees & idx_epicormic & !idx_status
if(sum(idx_errors) > 0) will_auto_fix_error_file <- rbind(will_auto_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that, for newly censused trees (FRAM, FRNI, FRPE, FRSP, or CHVI), if Crown thinning>1, tree is AU or dead ####
error_name <- "crown_thinning_more_than_1_but_not_AU_or_dead"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort$Species %in% c( "fram", "frni", "frpe", "frsp", "chvi")
idx_crown <- !is.na(mort$`Crown thinning`) & mort$`Crown thinning` > 1
# status "A" means neither AU nor dead, i.e. the disallowed combination
idx_status <- mort[, status_column] %in% c("A")
idx_errors <- idx_trees & idx_crown & idx_status
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that, for newly censused trees (FRAM, FRNI, FRPE, FRSP, or CHVI), if any EABF recorded, tree is AU or dead ####
error_name <- "EABF_recorded_but_not_AU_or_dead"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort$Species %in% c( "fram", "frni", "frpe", "frsp", "chvi")
idx_EABF <- !is.na(mort$EABF) & !mort$EABF %in% "none"
idx_status <- mort[, status_column] %in% c("A")
idx_errors <- idx_trees & idx_EABF & idx_status
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that, for newly censused trees (FRAM, FRNI, FRPE, FRSP, or CHVI), if D-shaped exit hole count>0, tree is AU or dead ####
error_name <- "exit_hole_count_but_not_AU_or_dead"
status_column <- rev(grep("Status", names(mort), value = T))[1]
idx_trees <- mort$Species %in% c( "fram", "frni", "frpe", "frsp", "chvi")
idx_exit_hole <- mort$`D-shaped exit hole count` > 0 & !is.na(mort$`D-shaped exit hole count`)
idx_status <- mort[, status_column] %in% c("A")
idx_errors <- idx_trees & idx_exit_hole & idx_status
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that, for newly censused trees (FRAM, FRNI, FRPE, FRSP, or CHVI), if tree is dead, Epicormic growth=0 ####
error_name <- "dead_but_epicormic_more_than_0"
status_column <- rev(grep("Status", names(mort), value = T))[1]
previous_status_column <- rev(grep("Status", names(mort), value = T))[2]
idx_trees <- mort$Species %in% c( "fram", "frni", "frpe", "frsp", "chvi")
idx_epicormic <- !is.na(mort$`Epicormic growth`) & mort$`Epicormic growth` > 0
idx_status <- !mort[, status_column] %in% c("A", "AU")
# only flag trees that died since the previous census
idx_previously_dead <- idx_previously_dead <- grepl("D", mort[,previous_status_column]) & !is.na(mort[,previous_status_column])
idx_errors <- idx_trees & idx_epicormic & (idx_status & !idx_previously_dead)
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# check that, for newly censused trees (FRAM, FRNI, FRPE, FRSP, or CHVI), if tree is dead, Crown thinning=5 ####
error_name <- "dead_but_crown_thinning_less_than_5"
status_column <- rev(grep("Status", names(mort), value = T))[1]
previous_status_column <- rev(grep("Status", names(mort), value = T))[2]
idx_trees <- mort$Species %in% c( "fram", "frni", "frpe", "frsp", "chvi")
idx_crown <- !is.na(mort$`Crown thinning`) & mort$`Crown thinning` <5
idx_status <- !mort[, status_column] %in% c("A", "AU")
idx_previously_dead <- grepl("D", mort[,previous_status_column]) & !is.na(mort[,previous_status_column])
idx_errors <- idx_trees & idx_crown & (idx_status & !idx_previously_dead)
if(sum(idx_errors) > 0) require_field_fix_error_file <- rbind(require_field_fix_error_file, data.frame(mort[idx_errors, ], error_name))
# clean and save files ####
## remove empty tags
require_field_fix_error_file <- require_field_fix_error_file[!is.na(require_field_fix_error_file$Tag),]
will_auto_fix_error_file <- will_auto_fix_error_file[!is.na(will_auto_fix_error_file$Tag),]
warning_file <- warning_file[!is.na(warning_file$Tag),]
## order by quadrat and tag
require_field_fix_error_file <- require_field_fix_error_file[order(as.numeric(require_field_fix_error_file$Quad), require_field_fix_error_file$Tag, require_field_fix_error_file$StemTag),]
will_auto_fix_error_file <- will_auto_fix_error_file[order(will_auto_fix_error_file$Quad, will_auto_fix_error_file$Tag, will_auto_fix_error_file$StemTag),]
warning_file <- warning_file[order(warning_file$Quad, warning_file$Tag, warning_file$StemTag),]
# save
# each csv is written with the error/warning name moved to the first column
if(!is.null(require_field_fix_error_file))
  write.csv(require_field_fix_error_file[, c(ncol(require_field_fix_error_file), 1:(ncol(require_field_fix_error_file) -1))], file = file.path(here("testthat"), "reports/requires_field_fix/require_field_fix_error_file.csv"), row.names = F)
if(!is.null(will_auto_fix_error_file))
  write.csv(will_auto_fix_error_file[, c(ncol(will_auto_fix_error_file), 1:(ncol(will_auto_fix_error_file) -1))], file = file.path(here("testthat"), "reports/will_auto_fix/will_auto_fix_error_file.csv"), row.names = F)
if(!is.null(warning_file))
  write.csv(warning_file[, c(ncol(warning_file), 1:(ncol(warning_file) -1))], file = file.path(here("testthat"), "reports/warnings/warnings_file.csv"), row.names = F)
# KEEP TRACK OF ALL THE ISSUES ####
# accumulate every report csv into testthat/reports/trace_of_reports/, merging
# (via unique + rbind) with any trace file already present
all_reports <- list.files(here("testthat/reports/", c("requires_field_fix", "will_auto_fix", "warnings")), recursive = T, pattern = ".csv", full.names = T)
for(f in all_reports) {
  new_f <- gsub("/reports/", "/reports/trace_of_reports/", f)
  new_f <- gsub("/requires_field_fix/|/will_auto_fix/|/warnings/", "",new_f)
  if(file.exists(new_f)) write.csv(unique(rbind(read.csv(new_f), read.csv(f))), file = new_f, row.names = F)
  else write.csv(read.csv(f), file = new_f, row.names = F)
}
# generate a file with summary for each quadrat ####
# quadrat_censused_missing_stems <- read.csv(file.path(here("testthat"), "reports/requires_field_fix/quadrat_censused_missing_stems.csv"))
# NOTE(review): this read.csv errors if the duplicated-stems csv was removed by the
# earlier check (no duplicates found) -- confirm the file always exists at this point.
quadrat_censused_duplicated_stems <- read.csv(file.path(here("testthat"), "reports/will_auto_fix/quadrat_censused_duplicated_stems.csv"))
# NOTE(review): `$quadrat` here vs `$Quad` in n_duplicated_tags below -- one of the
# two column names is probably wrong for this csv; confirm.
quad_with_any_issue <- sort(unique(c(require_field_fix_error_file$Quad, will_auto_fix_error_file$Quad, warning_file$Quad, quadrat_censused_duplicated_stems$quadrat)))
# per-quadrat counts of each issue type, indexed by quadrat id
quad_summary <- data.frame(Quad = quad_with_any_issue,
                           n_tag_error_field_fix = c(table(require_field_fix_error_file$Quad[!require_field_fix_error_file$error_name %in% "missing_stem"]))[as.character(quad_with_any_issue)],
                           n_tag_error_auto_fix = c(table(will_auto_fix_error_file$Quad))[as.character(quad_with_any_issue)],
                           n_tag_warnings = c(table(warning_file$Quad))[as.character(quad_with_any_issue)],
                           n_missing_tags = c(table(require_field_fix_error_file$Quad[require_field_fix_error_file$error_name %in% "missing_stem"]))[as.character(quad_with_any_issue)],
                           n_duplicated_tags = c(table(quadrat_censused_duplicated_stems$Quad))[as.character(quad_with_any_issue)])
quad_summary$sum_missing_and_errors <- quad_summary$n_missing_tags + quad_summary$n_tag_error_field_fix
write.csv(quad_summary[order(as.numeric(quad_summary$Quad)), ], file.path(here("testthat"), "reports/quadrat_n_errors_summary.csv"), row.names = F)
|
b0c5d1bf4323ebc0220d93bf6e1dc60fbda0bc2e | 324f644bc81e1152da777bbb6972ea98a18ba945 | /R/Gffreader.r | 9afe971c98d5e753ed06538e1f853e078af3f455 | [
"MIT"
] | permissive | krishb11/NatTuretzek | 2b4677181239085c0fbeb1b1debe7f896be25462 | 5df2f770d3ddf3cc4adb9bd81d77752d556d3b9b | refs/heads/main | 2023-05-04T00:24:10.856560 | 2021-05-13T04:38:47 | 2021-05-13T04:38:47 | 341,111,042 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,991 | r | Gffreader.r | library("dplyr")
library("readr")
library("Biostrings")
#' Construct a MyDeep object from an exonerate gff annotation
#'
#' Builds the S3 object, then populates it in three steps: parse the gff
#' (`read_gff`), extract per-gene exon tables (`gffeditor`), and pull the CDS
#' fasta sequences of the longest isoforms (`gff2fastaprinter`).
#'
#' @param file Path to the gff file produced by exonerate.
#' @param species Name of the annotated species.
#' @param style gff dialect; "exonerate" is the supported value.
#' @param genomefile Path to the genome fasta used for CDS extraction.
#' @return A list of class "MyDeep" holding the gff data, the per-gene exon
#'   tables, and the fasta sequences.
MyDeep <- function(file, species, style, genomefile) {
  obj <- structure(
    list(
      pathtofile = file,
      species = species,
      data = NULL,
      edited_gff_object = list(),
      style = style,
      fasta = NULL,
      genomefile = genomefile
    ),
    class = "MyDeep"
  )
  # Populate the object stepwise; each step consumes the fields set so far.
  obj$data <- read_gff(obj)
  obj$edited_gff_object <- gffeditor(obj)
  obj$fasta <- gff2fastaprinter(DeepObject = obj)
  obj
}
#' Read the exonerate gff output into a data frame
#'
#' Keeps only `exonerate:est2genome` feature lines (via system `grep`), drops
#' the `source-version` header lines, then parses the remaining tab-separated
#' records and names up to 9 standard gff columns.
#'
#' @param DeepObject An object of class MyDeep; its `pathtofile` element is
#'   used when `pathtofile` is not supplied.
#' @param pathtofile Optional explicit path to a gff file, so the function can
#'   be used outside of the MyDeep constructor. Initially set to NULL.
#' @return The gff data as a data frame (tibble) with standardized column names.
read_gff <- function(DeepObject, pathtofile=NULL) {
  #read gff file content
  # resolve the input file: an explicit path overrides the object's path
  if(is.null(pathtofile)) {
    file <- DeepObject$pathtofile
  } else {
    file <- pathtofile
  }
  # shQuote() protects paths containing spaces or shell metacharacters
  # NOTE: requires a POSIX shell with grep/rm available on the PATH
  x <- paste("grep -i exonerate:est2genome", shQuote(file), "> new.gff", sep=" ")
  system(x)
  # drop the 'source-version' comment lines emitted by exonerate
  x <- paste("grep -v 'source-version' new.gff > final.gff")
  system(x)
  system("rm -rf new.gff")
  file <- "final.gff"
  suppressWarnings(gff.input <- readr::read_delim(file = file, delim = "\t", col_names = FALSE, comment = "#"))
  if (ncol(gff.input) > 9)
    stop("The gff file format can not store more than 9 columns!", call. = FALSE)
  #name standardized columns (only as many as are actually present)
  gffNames <- c("seqid", "source", "type", "start", "end", "score",
                "strand", "phase", "attribute")
  names(gff.input)[seq_len(ncol(gff.input))] <- gffNames[seq_len(ncol(gff.input))]
  return(gff.input)
}
#data <- read_gff("~/Downloads/inshallah/lol3.gff")
# Split an exonerate gff table into one exon-only data frame per gene.
# @param DeepObject: An object of class MyDeep (uses $data and $style).
# @return A named list, one element per gene, each a data frame holding
#   only that gene's exon rows.
gffeditor <- function(DeepObject) {
  style <- DeepObject$style
  if(style=="exonerate") {
    # Row indices at which a new gene record starts.
    gene_starts <- which(DeepObject$data$type %in% "gene")
    n_rows <- nrow(DeepObject$data)
    gene_chunks <- vector("list", length(gene_starts))
    for (i in seq_along(gene_starts)) {
      # Each chunk runs from one gene row up to (and including) the next
      # gene row, or to the end of the table for the last gene. The extra
      # leading row of the following gene is harmless because only exon
      # rows survive the filter below.
      chunk_end <- if (i == length(gene_starts)) n_rows else gene_starts[i + 1]
      gene_chunks[[i]] <- DeepObject$data[gene_starts[i]:chunk_end, ]
    }
    # Derive gene names: third space-separated token of the second
    # ';'-separated field of each gene row's attribute column.
    gene_rows <- DeepObject$data %>%
      filter(type=="gene")
    attr_fields <- strsplit(gene_rows$attribute, split=";")
    second_field <- unlist(lapply(attr_fields, function(x) x[2]))
    tokens <- strsplit(second_field, split=" ")
    gene_names <- unlist(lapply(tokens, function(x) x[3]))
    names(gene_chunks) <- gene_names
  } else {
    # Fixed typo: stop()'s argument is 'call.', not '.call' — the old
    # spelling appended "FALSE" to the error message instead of
    # suppressing the call.
    stop("code for Standard gff is in process", call. = FALSE)
  }
  # Keep only the exon rows within each gene chunk.
  gene_chunks <- lapply(gene_chunks, function(x) {
    x %>%
      filter(type=="exon")
  })
  return(gene_chunks)
}
#' Extract per-gene exon sequences from the genome, concatenate them into
#' transcript (CDS) sequences and write them to
#' "<species>.transcripts.fasta"; the sequences are also returned.
#' @param DeepObject: An object of class MyDeep (uses $edited_gff_object,
#'   $genomefile and $species).
#' @param edited_gff_object: Optional pre-edited gff list.
#'   NOTE(review): because DeepObject is a required argument, the branch
#'   below always overwrites this parameter with
#'   DeepObject$edited_gff_object, so a user-supplied value is never
#'   actually used — confirm the intended precedence.
gff2fastaprinter <- function(DeepObject, edited_gff_object=NULL) {
  fasta <- DNAStringSet()
  if(!(is.null(DeepObject))) {
    edited_gff_object <- DeepObject$edited_gff_object
  } else {
    edited_gff_object <- edited_gff_object
  }
  #genomefile <- DeepObject$genomefile
  # Load the genome and trim each fasta header to its first token so that
  # gff seqid values can be matched against sequence names.
  genome <- readDNAStringSet(DeepObject$genomefile)
  names <- names(genome)
  names <- strsplit(names, split=" ")
  names <- unlist(lapply(names, function(x) {
    return(x[1])
  }))
  names(genome) <- names
  #print(names)
  #head(DeepObject$edited_gff_object)
  print("starting sequence extraction")
  fastac <- DNAStringSet()
  # For each gene, pull out every exon's subsequence from its chromosome.
  # NOTE(review): the guard only checks that the exon *start* lies within
  # the chromosome; an end coordinate past the sequence end would make
  # subseq() fail — confirm coordinates are always in range.
  # NOTE(review): exons are taken in table order and no strand handling is
  # done (no reverse complement for minus-strand genes) — confirm.
  newdata <- lapply(edited_gff_object, function(x) {
    for(i in 1:length(x$type)) {
      if(x$start[i] < width(genome[names(genome) %in% x$seqid[1]])) {
        fastac[i] <- subseq(genome[names(genome) %in% x$seqid[1]], start=x$start[i], end=x$end[i])
      }
    }
    #print(fastac)
    return(as.list(fastac))
  })
  #return(newdata)
  print("done with extraction now merginig....")
  # Concatenate each gene's exon strings into a single transcript string.
  set <- newdata
  dat <- c()
  count <- 1
  for(i in 1:length(set)) {
    lol <- as.character(DNAStringSet(set[[i]][[1]]))
    if(length(set[[i]]) > 1) {
      for(j in 2:length(set[[i]])) {
        dummy <- as.character(DNAStringSet(set[[i]][[j]]))
        lol <- paste(lol, dummy, sep="")
      }
    }
    dat[count] <- lol
    count <- count + 1
  }
  print("done merginig....")
  # Rebuild a DNAStringSet, name it by gene, and write the fasta output.
  dat <- DNAStringSet(dat)
  names(dat) <- names(DeepObject$edited_gff_object)
  writeXStringSet(dat, paste(DeepObject$species, "transcripts", "fasta", sep="."))
  return(dat)
}
#####lol#######
# list <- list()
# count <- 1
# for(i in 1:length(d)) {
# if(d[i] == tail(d, n=1)) {
# list[[count]] <- da[d[i]:length(da$type), ]
# }
# else {
# list[[count]] <- da[d[i]:(d[i+1]-1), ]
# count <- count + 1
# }
# }
# names <- lapply(data_mrna, function(x) {
# x <- strsplit(as.character(x[9]), split=";")
# x <- unlist(lapply(x, function(x) {
# return(tail(x, n=2))
# }))
# x <- strsplit(x, split="=", fixed=TRUE)
# x <- unlist(lapply(x, function(x) {
# return(x[2])
# }))
# return(x)
# })
# count <- 1
# for(i in 1:length(list)) {
# if(!("intron" %in% list[[i]]$type)) {
# count <- count + 1
# }
# } |
1143772c7950dcb0757a508ca062e05f9d3293c9 | 2d2491b342b737d2801cb964b3e4ccf960ee5fed | /man/rp.logistic.Rd | d3c99551f92cf362bfa1d3a46604711a5b19a1c5 | [] | no_license | cran/rpanel | 7f64c93985c228f909f313bdf283b816b084f3ec | b2d139fedc899592e8cd9132c4bf7f1c9b16bc0d | refs/heads/master | 2023-02-21T17:57:55.911293 | 2023-02-07T07:21:06 | 2023-02-07T07:21:06 | 17,699,324 | 1 | 3 | null | 2015-12-05T19:08:29 | 2014-03-13T06:10:11 | R | UTF-8 | R | false | false | 2,595 | rd | rp.logistic.Rd | \name{rp.logistic}
\alias{rp.logistic}
\title{Interactive display of logistic regression with a single covariate}
\description{
The function \code{rp.logistic} plots a binary or binomial response variable
against a single covariates and creates a panel which controls the position of
a logistic curve and allows a logistic regression to be fitted to the data and
displayed on the plot.
}
\usage{
rp.logistic(x, y, xlab = NA, ylab = NA, panel.plot = TRUE, panel = TRUE,
hscale = NA, vscale = hscale, alpha = 0, beta = 0,
display = c("jitter" = FALSE, "regression line" = FALSE,
"fitted model" = FALSE))
}
\arguments{
\item{x}{a vector of covariate values.}
\item{y}{a vector of response values with two levels, or a two-column matrix whose first column is the number of `successes' and the second column is the number of `failures' at each covariate value.}
\item{xlab}{a character variable used for the covariate axis label.}
\item{ylab}{a character variable used for the response axis label.}
\item{panel.plot}{a logical variable which determines whether the plot is placed inside the control panel.}
\item{panel}{a logical variable which determines whether an interactive panel is created.}
\item{hscale, vscale}{horizontal and vertical scaling factors for the size of the plots. It can be useful to adjust these for projection on a screen, for example. The default values are 1 on Unix platforms and 1.4 on Windows platforms.}
\item{alpha}{the initial value of the intercept parameter.}
\item{beta}{the initial value of the slope parameter.}
\item{display}{the initial settings of the checkboxes which control whether the data are `jittered' for visual effect and whether the movable and fitted regression lines are displayed.}
}
\details{
The control panel allows a logistic regression line to be drawn on the plot and the intercept and slope of the linear predictor altered interactively. The fitted logistic regression can also be displayed.
If \code{y} is a vector of responses with two values, these are treated as a factor which is then converted to the (0,1) scale by \code{as.numeric}.
The values of the response variable can be `jittered'.
}
\value{Nothing is returned.}
\references{
rpanel: Simple interactive controls for R functions using the tcltk package.
Journal of Statistical Software, 17, issue 9.
}
\seealso{\code{\link{rp.regression}}}
\examples{
\dontrun{
rp.logistic(river$Temperature, river$Low)
}}
\keyword{iplot}
\keyword{dynamic}
|
6195a0411af234b0bf2ff4e697ff7b5a1c033069 | 24b88b18ee7108bf133b074ee0a97970077413a8 | /dataAnalysis/dataAnalysis.r | 9cc212bde8fcd08ade1a0e5b4287ad48ca57f330 | [
"MIT"
] | permissive | afarcome/LMrectangular | 7b9c9c3d4a81baf4d50130dc62805eee079c5503 | a726ff32158e975d45e11d57c3a0a5a37ece0274 | refs/heads/master | 2020-03-10T07:09:06.343238 | 2018-06-19T13:28:34 | 2018-06-19T13:28:34 | 129,256,309 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,020 | r | dataAnalysis.r | rm(list=ls())
# Analysis script: fit latent Markov models to the HDI panel data over a
# grid of trimming fractions x, warm-starting each fit from the previous
# one, then compare the resulting log-likelihoods.
load("HDIdata.rda")
# Response array: 164 units x 6 occasions x 3 indicators.
# NOTE(review): assumes HDIdata stores the occasion in column 1, the unit
# name in column 2 and the three indicators in columns 3-5 — confirm.
y=array(NA,c(164,6,3))
nams=HDIdata[1:164,2]
u=unique(HDIdata[,1])
for(j in 1:6) {
  for(h in 1:3) {
    y[,j,h]=HDIdata[HDIdata[,1]==u[j],h+2]}}
# Estimation code: defines rlm() and rlm.fixed() used below.
source("codeKvariable.r")
# Grid of trimming proportions to profile over.
x=c(0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5)
library(snipEM)
library(mvtnorm)
set.seed(12345)
rl=list()
# pre-training for good inits
inits0=rlm.fixed(y,c(4,4,4,3,3,3))
inits0$k=rep(4,6)
pt=rlm.fixed(y,c(2,3,2,2,4,2))
# Splice selected parameter slices of the alternative fit into inits0.
inits0$pi[2,]=pt$pi[2,]
inits0$PI[2,2,,]=pt$PI[2,2,,]
inits0$PI[3,2,,]=pt$PI[3,2,,]
inits0$PI[2,3,,]=pt$PI[2,3,,]
inits0$PI[2,4,,]=pt$PI[2,4,,]
inits0$PI[4,2,,]=pt$PI[4,2,,]
inits0$xi[2,,]=pt$xi[2,,]
inits0$sigma[2,,]=pt$sigma[2,,]
pt=rlm.fixed(y,c(3,4,3,4,3,4))
inits0$pi[3,]=pt$pi[3,]
inits0$PI[3,4,,]=pt$PI[3,4,,]
# Fit at x = 0 first, then warm-start each subsequent trimming level from
# the previous solution.
rl[[1]]=rlm(y,0,4,inits=inits0)
for(whl in 2:length(x)) {
  inits=rl[[whl-1]]
  inits$k=rep(3,6)
  rl[[whl]]=rlm(y,x[whl],4,inits=inits,hits=50)
}
# Log-likelihood profile over x and its scaled finite-difference slope.
liks=sapply(1:length(x),function(j) rl[[j]]$lik)
crit=diff(liks)/(mean(liks)*diff(x))
save.image(file="rl.rda")
fe952e376d1c566d4bbbd9114762f46c0adfb492 | 9fda538efa5f6cd2a5cecc2df29600871b537d6a | /simbionet/cf_cl.R | d9994c43217d148c226ae9632b1cb4556b031b70 | [] | no_license | mfalda/gen-net-sim | f4b84e8215e4ffcf117099e2d096ee4e61b50915 | 3018f43d4e8dfbf4e37593af8d7d22171061f3ea | refs/heads/master | 2021-03-12T20:00:20.007538 | 2015-08-25T07:34:52 | 2015-08-25T07:34:52 | 41,350,192 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 109 | r | cf_cl.R | library(SimBioNeT)
# Minimal SimBioNeT example: build a 4-module specification, then simulate
# a 20-node network with the Cf.cl (clustering) argument set to 0.1.
MOD<-createMOD(m=4,auto=FALSE)
net2<-SBNT(N=20,MODULES=MOD,Cf.cl=0.1,sepgraph=FALSE)
# Auto-print the simulated network object.
net2
f6c1c9550a7d125e6ee0fd783e8557f5dcb312fc | 242a06fd956601e453d59869b8df690cbef09223 | /tests/testthat.R | 8757f0cd40ea3cdc23ea36186c62f35447d38b5d | [] | no_license | chambm/proBAMr | d890b0458075b7f44c4181debe3a9c5a40765b56 | a03edf68f51215be40717c5374f39ce67bd2e68b | refs/heads/master | 2021-01-19T14:40:08.539433 | 2017-09-25T14:57:36 | 2017-09-25T14:57:36 | 88,178,516 | 0 | 1 | null | 2017-04-13T15:17:07 | 2017-04-13T15:17:07 | null | UTF-8 | R | false | false | 58 | r | testthat.R | library(testthat)
# Attach the package under test, then run its testthat suite.
library(proBAMr)
test_check("proBAMr")
|
e7c942575d65ed776404a750d95e062155477ff7 | 29585dff702209dd446c0ab52ceea046c58e384e | /nFactors/R/eigenComputes.r | 0f381ab8688a68e2efd5e9392519f1b328a520dd | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 779 | r | eigenComputes.r | eigenComputes <-
function(x, cor=TRUE, model="components", ...) {
  # Decide how to interpret 'x': a vector of eigenvalues, a
  # correlation/covariance matrix, or a raw data matrix.
  dataType <- eigenFrom(x)
  if (model == "components") {
    # Principal-components case: decompose the matrix as supplied, or its
    # correlation rescaling / the correlation (covariance) of the data,
    # depending on 'cor'. Pre-computed eigenvalues pass straight through.
    values <- switch(dataType,
      eigenvalues = as.vector(x),
      correlation = {if (cor == FALSE) eigen(x)$values else eigen(cov2cor(x))$values},
      data        = {if (cor == TRUE) eigen(cor(x, ...))$values else eigen(cov(x, ...))$values}
    )
  }
  if (model == "factors") {
    # Common-factor case: same branching, but the matrix is first reduced
    # via corFA(..., method = "ginv") before the eigendecomposition.
    values <- switch(dataType,
      eigenvalues = as.vector(x),
      correlation = {if (cor == FALSE) eigen(corFA(x, method="ginv"))$values else eigen(cov2cor(corFA(x, method="ginv")))$values},
      data        = {if (cor == TRUE) eigen(corFA(cor(x, ...),method="ginv"))$values else eigen(corFA(cov(x, ...),method="ginv"))$values}
    )
  }
  return(values)
}
|
09dac0c2c31e8b7f9168ce697d16bc55e151de69 | 017414614b3d26ea10faa775fc3d4e630752ddd1 | /R/class_aws_fst_dt.R | 3769770b9616e2a759deff26cfbb239418ec6b66 | [
"MIT"
] | permissive | krlmlr/targets | 4822815b7ae412af115296e5010de2edc61d1c50 | a8cbf46ce5d2274bd623085be749af3059ce6083 | refs/heads/main | 2023-04-13T16:43:18.213413 | 2021-04-21T20:29:07 | 2021-04-21T20:29:07 | 360,385,953 | 1 | 0 | NOASSERTION | 2021-04-22T03:55:53 | 2021-04-22T03:55:53 | null | UTF-8 | R | false | false | 474 | r | class_aws_fst_dt.R | #' @export
store_new.aws_fst_dt <- function(class, file = NULL, resources = NULL) {
  # S3 method: delegate construction to the format-specific helper.
  aws_fst_dt_new(file, resources)
}
# Construct the store object for the "aws_fst_dt" format.
aws_fst_dt_new <- function(file = NULL, resources = NULL) {
  # Evaluate the promises now so the environment captured below holds
  # concrete values rather than unevaluated arguments.
  force(file)
  force(resources)
  # The function's own environment (containing exactly 'file' and
  # 'resources') doubles as the store object; enclass() tags it with the
  # S3 class chain used for dispatch. Deliberately no other locals are
  # created here, so the captured environment stays minimal.
  enclass(
    environment(),
    c(
      "tar_aws_fst_dt",
      "tar_aws",
      "tar_external",
      "tar_fst_dt",
      "tar_fst",
      "tar_store"
    )
  )
}
#' @export
store_assert_format_setting.aws_fst_dt <- function(class) {
  # Intentionally empty: the "aws_fst_dt" format needs no validation
  # beyond what the generic performs.
}
|
61cbcab8b8f48ee4a8e66c40c6e7e326a9cdacf3 | 182077890d03295aa56c4b6a320184e42da190c8 | /man/modulesil.Rd | 2e2526d2143739d74abd38ed59590d1c6ce6dc6e | [] | no_license | xiaowenchenjax/epihet | c5bd97c1e2d6cb06031fde866103cb8e28e07729 | 76da52b7b395233bc8f0de269cdeff02e6103196 | refs/heads/master | 2020-03-12T15:37:25.806931 | 2018-04-23T13:24:49 | 2018-04-23T13:24:49 | 130,694,955 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 487 | rd | modulesil.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{modulesil}
\alias{modulesil}
\title{modulesil}
\format{A data frame with 501 rows and 3 variables:
\describe{
\item{gene}{gene of module CEBPA_sil}
\item{label}{label of module CEBPA_sil}
\item{color}{color of module CEBPA_sil}
}}
\usage{
data(modulesil)
modulesil
}
\value{
A data frame
}
\description{
Module information for CEBPA-sil mutation samples.
}
\keyword{datasets}
|
34d588c8c2804516f0f23043975e29b8a0f1ac75 | 8fa69232aa51e5de4d9cc0646eb37fa8a31fa3c9 | /conversion-rate/analysis/logistic-regression.R | d3c18641556bf36e8efaa05023d4e57ea9fc03d6 | [] | no_license | sabman83/data-analysis | e8ef5baf8a3613a34eccbf9c4289824162123100 | bd8e11406b039b3cc3117faa2f2305aafb295bba | refs/heads/master | 2020-12-24T08:37:37.152127 | 2017-01-07T16:49:44 | 2017-01-07T16:49:44 | 73,334,283 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,993 | r | logistic-regression.R | glm.model <- glm(converted~.,data = conversion_rate_table, family = binomial)
summary(glm.model)
# In-sample predictions: threshold fitted probabilities at 0.5.
glm.probs = predict(glm.model, type="response")
glm.pred = rep(0, nrow(conversion_rate_table))
glm.pred[glm.probs>0.5] = 1
# Confusion matrix and training error rate.
table(glm.pred,conversion_rate_table$converted)
1 - mean(glm.pred == conversion_rate_table$converted) #0.013811
#validation set
set.seed(17)
# Hold out one third of the rows for testing.
train <- sample(nrow(conversion_rate_table), 2 * (nrow(conversion_rate_table)/3))
glm.train.model <- glm(converted ~ ., data = conversion_rate_table, subset = train, family = binomial)
summary(glm.train.model)
glm.test.probs <- predict(glm.train.model, newdata = conversion_rate_table[-train,], type = "response")
glm.test.pred <- rep(0, length(-train))
glm.test.pred[glm.test.probs > 0.5] = 1
# Test error rate.
# NOTE(review): 'conversion_rate_table[-train]' has no row comma, unlike
# the '[-train,]' used above — correct for a data.table but, for a plain
# data.frame, this indexes columns instead of rows. Confirm the class.
mean(glm.test.pred != conversion_rate_table[-train]$converted) #0.01386161
#k-fold cross validation
cv.error.10=rep(0,10)
# Cost = misclassification rate at a 0.5 threshold.
cost <- function(r, pi = 0) mean(abs(r-pi) > 0.5)
for (i in 1:10) {
  cv.error.10[i] = cv.glm(conversion_rate_table,glm.model, cost = cost, K=10)$delta[1]
}
cv.error.10
#rocr
# NOTE(review): 'pred' is never created in this script — presumably an
# ROCR::prediction(...) object built elsewhere; this section will fail if
# the script is run top to bottom.
perf <- performance(pred, "cost")
pred@cutoffs
pred@cutoffs[[1]][which.min(perf@y.values[[1]])]
# Cost-sensitive cutoff: false negatives weighted twice as heavily.
cost.perf = performance(pred, "cost", cost.fp = 1, cost.fn = 2)
pred@cutoffs[[1]][which.min(cost.perf@y.values[[1]])]
#Ridge and Lasso Regression
set.seed(17)
# Lambda grid spanning 10^10 down to 10^-2.
grid=10^seq(10,-2,length=100)
x=model.matrix(conversion_rate_table$converted~., conversion_rate_table)[,-1]
y=conversion_rate_table$converted
lasso.model <- glmnet(x[train,], y[train], family = "binomial", alpha = 1, lambda = grid)
# Pick lambda by cross-validated misclassification rate.
cv.lasso.model = cv.glmnet(x[train,], y[train], family = "binomial", type.measure = "class", alpha = 1)
lasso.bestlam <- cv.lasso.model$lambda.min
lasso.prob <- predict(lasso.model, newx = x[-train,], s = lasso.bestlam, type="response")
lasso.pred <- rep(0, length(lasso.prob))
lasso.pred[lasso.prob > 0.5] <- 1
1 - mean(lasso.pred == y[-train]) #0.01647074
# Coefficients at the selected lambda (lasso may zero some out).
lasso.coef <- predict(lasso.model, s = lasso.bestlam, type="coefficients")
lasso.coef
|
8fecea123c4b5b7a72e3fe60fdafde4a5ec2c317 | ba173389ff874bc1c48437625a48d75905630629 | /SA.r | 35d8643922b03765fd097525f24a7e995c7e5f64 | [] | no_license | mina20/soec | b32c9d435c038be281d618c8e3f1342423a7653e | a8d617f464f76223cd5a6dc6fae498436a00d91f | refs/heads/master | 2020-12-02T22:48:20.707832 | 2017-07-04T06:52:31 | 2017-07-04T06:52:31 | 96,184,605 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 692 | r | SA.r | #simulation anneling
# Objective for the simulated-annealing demo below: the Booth function,
# (x1 + 2*x2 - 7)^2 + (2*x1 + x2 - 5)^2, whose global minimum 0 sits at
# (x1, x2) = (1, 3). Vectorized in both arguments.
func <- function(x1, x2) {
  term_a <- (x1 + 2 * x2 - 7)^2
  term_b <- (2 * x1 + x2 - 5)^2
  term_a + term_b
}
# --- Simulated annealing driver ---------------------------------------------
# NOTE(review): several points here look unintentional and should be checked:
#   * 'T <- 1000' is immediately shadowed by the loop variable below, and
#     'T' also masks the built-in alias for TRUE;
#   * 'r' is drawn once, before both loops, and never refreshed, so the
#     same acceptance threshold is reused on every iteration;
#   * the Metropolis acceptance rule is normally 'r < exp(-delE/T)'; the
#     exponential is missing here;
#   * x1/x2 are re-sampled at the top of every inner iteration, so an
#     accepted state is never carried forward.
T<-1000
step<-0.05
r<-runif(1,0,1)
eps<-0.001
# Cooling schedule: temperature decreases linearly from 1000 to 1.
for(T in 1000:1){
  # A handful of candidate moves per temperature level.
  for(i in 1:10){
    x1=runif(1,-1,1)
    x2=runif(1,4,5)
    # Propose a small random step from the current point.
    x1_new=x1+step*runif(1)
    x2_new=x2+step*runif(1)
    E_old=func(x1,x2)
    E_new=func(x1_new,x2_new)
    delE=E_old-E_new
    if(E_new<E_old || r<(-delE/T)){
      # Accept the move. NOTE(review): overwriting E_new with E_old here
      # looks inverted — confirm which energy should be kept.
      E_new=E_old
      x1=x1_new
      x2=x2_new
    }
    if(E_new-E_old<eps){
      solution=E_new
    }
    print(E_new)
  }
}
|
8b4c5456b0799119116efdb0f0bdb52aeaccb6e3 | b891263728fc0108d3701ec9723b627a6d6f484a | /R/createFormulaFromDF.R | cd3803d05b762ae6e3b2273cf2f4750d1d9495f2 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | limnoliver/GSqwsr | 2218a40432f75107b31eb1a50168858032635b85 | 0a59fe050eb851de54b0567c317bd07a1b0b099a | refs/heads/master | 2021-01-01T18:47:17.291369 | 2015-10-14T20:57:49 | 2015-10-14T20:57:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,060 | r | createFormulaFromDF.R | #'Creates text for upperBoundFormula from a dataframe
#'
#'Creates the text for the upperBoundFormula used by prelimModelDev. Takes a
#'dataframe (df) with one column named 'Scalar', one named 'variableNames',
#'and one column per available parameter; each row describes one parameter.
#'Every 1 in the 'Scalar' column adds that row's parameter as a main effect;
#'every 1 in a parameter column adds an interaction between that column's
#'variable and the row's variable.
#'
#'@param df dataframe as described above
#'@return modelFormula text string of formula that includes variables and log variables in df
#'@keywords formula creation
#'@export
#'@examples
#'parameters <- sampleParameters
#'formulaText <- createFormulaFromDF(parameters)
createFormulaFromDF <- function(df){
  # Main-effect terms: every parameter flagged with a 1 in 'Scalar'.
  scalarVariables <- as.character(df$variableNames[which(1 == df$Scalar)])
  scalarVariables <- paste(scalarVariables, collapse = " + ")
  # Drop the Scalar column by name. (The previous positional df[,-2]
  # silently assumed 'Scalar' was the second column.) 'variableNames' is
  # kept because it is read inside the loop below.
  interactionSet <- df[, colnames(df) != "Scalar", drop = FALSE]
  interactionTerms <- character(0)
  for (i in colnames(interactionSet)) {
    if (i == "variableNames") next  # bookkeeping column, not a predictor
    # Rows flagged with 1 in this parameter's column become its partners.
    var1 <- as.character(interactionSet$variableNames[which(1 == interactionSet[[i]])])
    if (substring(i, 1, 4) == "log.") {
      # Column names like 'log.X.' come from make.names("log(X)"); restore
      # the functional form for the formula text.
      i <- paste("log(", substring(i, 5, (nchar(i) - 1)), ")", sep = "")
    }
    if (length(var1) > 0) {
      interactionTerms <- c(interactionTerms, paste(paste(i, var1, sep = ":"), collapse = " + "))
    }
  }
  interactionVariables <- paste(interactionTerms, collapse = " + ")
  # Assemble: interactions alone, main effects alone, or both joined.
  modelFormula <- scalarVariables
  if (nchar(interactionVariables) > 0) {
    modelFormula <- interactionVariables
  }
  if (nchar(interactionVariables) > 0 & nchar(scalarVariables) > 0) {
    modelFormula <- paste(scalarVariables, interactionVariables, sep = " + ")
  }
  # Seasonal terms are expected to come as a sin/cos pair.
  if (any(grepl("sinDY", modelFormula) | grepl("cosDY", modelFormula))) {
    if (!any(grepl("sinDY", modelFormula) & grepl("cosDY", modelFormula))) {
      warning("Attempting to create model with only sinDY or cosDY, not both")
    }
  }
  return(modelFormula)
}
e2a86f21a92dab054bb047855a4b0e0873e5cf40 | b648e8fc3c21cb8779855d0bbd180bcdcc5a0fcd | /R/qqmathFitted.R | 9fc730bbbf0ac18bda29d845d8fa48536273b091 | [] | no_license | christopherggreen/cggmisc | e65651b889cdc17c8c44175197fc71377fef24af | 69d089ef02f2cd24ebb13e24fcda6373eb9b7abf | refs/heads/master | 2020-05-28T14:01:21.059600 | 2016-08-22T10:09:43 | 2016-08-22T10:09:43 | 20,460,039 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,771 | r | qqmathFitted.R | ### Copyright (c) 2012 Christopher G. Green <christopher.g.green@gmail.com>
###
### This file is part of the cggmisc package for R.
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### Based on qqmath.formula from the lattice package written by
### Deepayan Sarkar and the lmfmResQQPlot function from the robust
### package maintained by Kjell Konis.
###
### See the COPYRIGHTS file in the top-level package directory for
### a full list of copyright and authorship information.
###
## qqmathFitted: build a "trellis" object for a Q-Q plot of 'formula'
## against a theoretical distribution, with per-panel fitted distribution
## parameters, optional reference lines (lm/lmRob), Monte-Carlo
## confidence envelopes, and labelling of the id.n most extreme points.
## Mirrors lattice's qqmath.formula; the extra arguments
## (distribution.fit, quantile.type, qqstyle, add.qqline*,
## envelope/rdist/rdist.args/sig.level/mc.samples/seed, envstyle, id.n,
## labelargs, verbose) are forwarded to panel.qqmath.fitted /
## prepanel.qqmath.fitted through panel.args.common.
## NOTE(review): 'seed = .Random.seed' errors if no RNG call has happened
## yet in the session (.Random.seed does not exist then) — confirm.
"qqmathFitted" <- function(formula,
                           data = NULL,
                           allow.multiple = is.null(groups) || outer,
                           outer = !is.null(groups),
                           distribution = qnorm,
                           f.value = NULL,
                           auto.key = FALSE,
                           aspect = "fill",
                           panel = panel.qqmath.fitted,
                           prepanel = prepanel.qqmath.fitted,
                           scales = list(),
                           strip = TRUE,
                           groups = NULL,
                           xlab,
                           xlim,
                           ylab,
                           ylim,
                           drop.unused.levels = lattice.getOption("drop.unused.levels"),
                           distribution.fit = function(y) list(mean=0, sd=1),
                           quantile.type = 7,
                           qqstyle = NULL,
                           add.qqline = c("none", "lm", "lmRob"),
                           add.qqline.par = if ( !is.null(groups) )
                              trellis.par.get("panel.superpose") else
                              trellis.par.get("add.line"),
                           envelope = TRUE,
                           rdist = rnorm,
                           rdist.args = NULL,
                           sig.level = 0.95,
                           mc.samples = 100,
                           seed = .Random.seed,
                           envstyle = NULL,
                           id.n = 3,
                           labelargs = FALSE,
                           verbose = FALSE,
                           ...,
                           lattice.options = NULL,
                           default.scales = list(),
                           subscripts = !is.null(groups),
                           subset = TRUE
                           )
{
    #formula <- x
    dots <- list(...)
    groups <- eval(substitute(groups), data, environment(formula))
    subset <- eval(substitute(subset), data, environment(formula))
    if (!is.null(lattice.options))
    {
        oopt <- lattice.options(lattice.options)
        on.exit(lattice.options(oopt), add = TRUE)
    }

    ## Step 1: Evaluate x, y, etc. and do some preprocessing

    form <-
        latticeParseFormula(formula, data, subset = subset,
                            groups = groups, multiple = allow.multiple,
                            outer = outer, subscripts = TRUE,
                            drop = drop.unused.levels)
    groups <- form$groups

    if (!is.function(panel)) panel <- eval(panel)
    if (!is.function(strip)) strip <- eval(strip)

    if ("subscripts" %in% names(formals(panel))) subscripts <- TRUE
    if (subscripts) subscr <- form$subscr

    prepanel <-
        if (is.function(prepanel)) prepanel
        else if (is.character(prepanel)) get(prepanel)
        else eval(prepanel)

    cond <- form$condition
    ## number.of.cond <- length(cond)
    x <- form$right
    if (length(cond) == 0)
    {
        strip <- FALSE
        cond <- list(gl(1, length(x)))
        ## number.of.cond <- 1
    }

    dist.name <- paste(deparse(substitute(distribution)), collapse = "")
    if (missing(xlab)) xlab <- dist.name
    if (missing(ylab)) ylab <- form$right.name

    ## create a skeleton trellis object with the
    ## less complicated components:

    # cgg trellis.skeleton is private to lattice
    trellis.skeleton <- lattice:::trellis.skeleton
    foo <-
        do.call("trellis.skeleton",
                c(list(formula = formula,
                       cond = cond,
                       aspect = aspect,
                       strip = strip,
                       panel = panel,
                       xlab = xlab,
                       ylab = ylab,
                       xlab.default = dist.name,
                       ylab.default = form$right.name,
                       lattice.options = lattice.options),
                  dots))

    dots <- foo$dots # arguments not processed by trellis.skeleton
    foo <- foo$foo
    foo$call <- sys.call(sys.parent()); foo$call[[1]] <- quote(qqmath)

    ## Step 2: Compute scales.common (leaving out limits for now)

    if (is.character(scales)) scales <- list(relation = scales)
    scales <- lattice:::updateList(default.scales, scales)
    construct.scales <- lattice:::construct.scales
    foo <- c(foo, do.call("construct.scales", scales))

    ## Step 3: Decide if limits were specified in call:

    have.xlim <- !missing(xlim)
    if (!is.null(foo$x.scales$limit))
    {
        have.xlim <- TRUE
        xlim <- foo$x.scales$limit
    }
    have.ylim <- !missing(ylim)
    if (!is.null(foo$y.scales$limit))
    {
        have.ylim <- TRUE
        ylim <- foo$y.scales$limit
    }

    ## Step 4: Decide if log scales are being used:

    have.xlog <- !is.logical(foo$x.scales$log) || foo$x.scales$log
    have.ylog <- !is.logical(foo$y.scales$log) || foo$y.scales$log

    ## This is slightly weird because 'x' is eventually plotted in the
    ## Y-axis

    if (have.xlog)
    {
        warning("Can't have log X-scale")
        have.xlog <- FALSE
        foo$x.scales$log <- FALSE
    }
    if (have.ylog)
    {
        ylog <- foo$y.scales$log
        ybase <-
            if (is.logical(ylog)) 10
            else if (is.numeric(ylog)) ylog
            else if (ylog == "e") exp(1)

        x <- log(x, ybase)
        if (have.ylim) ylim <- lattice:::logLimits(ylim, ybase)
    }

    ## Step 5: Process cond

    cond.max.level <- unlist(lapply(cond, nlevels))

    ## Step 6: Determine packets

    # cgg force argument matching for add.qqline
    add.qqline <- match.arg(add.qqline)

    # cgg modify this to include all panel function arguments
    # 'storeframe' hands the panel/prepanel functions a reference to this
    # call frame so that the prepanel can write the per-packet fitted
    # distribution parameters into 'fitted.args' (created further below).
    foo$panel.args.common <-
        c(list(distribution = distribution,
               f.value = f.value,
               distribution.fit = distribution.fit,
               quantile.type = quantile.type,
               qqstyle = qqstyle,
               add.qqline = add.qqline,
               add.qqline.par = add.qqline.par,
               envelope = envelope,
               rdist = rdist,
               rdist.args = rdist.args,
               sig.level = sig.level,
               mc.samples = mc.samples,
               seed = seed,
               envstyle = envstyle,
               id.n = id.n,
               labelargs = labelargs,
               storeframe = sys.frame(sys.nframe()),
               verbose = verbose
          ), dots)
    if (subscripts)
    {
        foo$panel.args.common$groups <- groups
    }
    npackets <- prod(cond.max.level)
    if (npackets != prod(sapply(foo$condlevels, length)))
        stop("mismatch in number of packets")

    # setting up arguments for each panel
    foo$panel.args <- vector(mode = "list", length = npackets)
    foo$packet.sizes <- numeric(npackets)
    if (npackets > 1)
    {
        dim(foo$packet.sizes) <- sapply(foo$condlevels, length)
        dimnames(foo$packet.sizes) <- lapply(foo$condlevels, as.character)
    }
    cond.current.level <- rep(1, length(cond))
    for (packet.number in seq_len(npackets))
    {
        id <- lattice:::compute.packet(cond, cond.current.level)
        foo$packet.sizes[packet.number] <- sum(id)
        foo$panel.args[[packet.number]] <-
            list(x = x[id])
        if (subscripts)
            foo$panel.args[[packet.number]]$subscripts <-
                subscr[id]
        cond.current.level <-
            lattice:::cupdate(cond.current.level,
                              cond.max.level)
    }

    # fitted values for each panel
    fitted.args <- vector(mode = "list", length = npackets)
    more.comp <-
        c(lattice:::limits.and.aspect(prepanel, #prepanel.default.qqmath,
                                      prepanel = NULL, #prepanel,
                                      have.xlim = have.xlim, xlim = xlim,
                                      have.ylim = have.ylim, ylim = ylim,
                                      x.relation = foo$x.scales$relation,
                                      y.relation = foo$y.scales$relation,
                                      panel.args.common = foo$panel.args.common,
                                      panel.args = foo$panel.args,
                                      aspect = aspect,
                                      npackets = npackets,
                                      x.axs = foo$x.scales$axs,
                                      y.axs = foo$y.scales$axs,
                                      count = quote(count)),
          lattice:::cond.orders(foo))
    foo[names(more.comp)] <- more.comp

    # prepanel will set up fitted.args properly.
    # now modify distribution.fit to use the fitted values
    # in the panel function, thereby eliminating the need
    # to refit the distribution parameters
    for (packet.number in seq_len(npackets)) {
        foo$panel.args[[packet.number]]$fitted.args <-
            fitted.args[[packet.number]]
    }
    # clear distribution.fit in the common args
    # (the panel function now receives pre-fitted parameters instead, and
    # the frame reference must not leak into the returned trellis object)
    foo$panel.args.common$distribution.fit <- NULL
    foo$panel.args.common$storeframe <- NULL

    # Build a simple key automatically when grouping is in effect and
    # auto.key asks for one.
    if (is.null(foo$legend) && !is.null(groups) &&
        (is.list(auto.key) || (is.logical(auto.key) && auto.key)))
    {
        foo$legend <-
            list(list(fun = "drawSimpleKey",
                      args = lattice:::updateList(list(text = levels(as.factor(groups)),
                                                       points = TRUE,
                                                       rectangles = FALSE,
                                                       lines = FALSE),
                                                  if (is.list(auto.key)) auto.key else list())))
        foo$legend[[1]]$x <- foo$legend[[1]]$args$x
        foo$legend[[1]]$y <- foo$legend[[1]]$args$y
        foo$legend[[1]]$corner <- foo$legend[[1]]$args$corner
        names(foo$legend) <-
            if (any(c("x", "y", "corner") %in% names(foo$legend[[1]]$args)))
                "inside"
            else
                "top"
        if (!is.null(foo$legend[[1]]$args$space))
            names(foo$legend) <- foo$legend[[1]]$args$space
    }
    class(foo) <- "trellis"
    foo
}
|
33ac0d98a30247749515292a6ed675d421951462 | 91e1df32aa1436f3361311d35c1cb622de80d897 | /tests/testthat/test-tt_read_data.R | 8ce71dda321ef346ef25168e5e5205bc1d5c4514 | [
"MIT"
] | permissive | BESTDATASCIENCE/manosaladataR | deb3d53fbaeb66308092c16b23aa0165bf77e9de | 5ce0bc7cd906aa34c16d8d79dc185ee3cfb8c86b | refs/heads/master | 2021-03-15T14:29:06.868402 | 2020-03-13T13:01:15 | 2020-03-13T13:01:15 | 246,857,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,799 | r | test-tt_read_data.R | context("test-tt_read_data")
test_that("tt_read_data only works for numeric,integer, or character entries", {
  tt_gh_data <- tt_load_gh("2019-01-15")

  # Read the same dataset once per supported index type. (A duplicated
  # second round of identical reads was removed: each call hits the
  # GitHub API, so the repeats only doubled network traffic.)
  numericRead <- tt_read_data(tt_gh_data, 1)
  integerRead <- tt_read_data(tt_gh_data, 1L)
  characterRead <- tt_read_data(tt_gh_data, "agencies.csv")

  # Reference: download the raw csv straight from GitHub.
  url <- paste0(
    gsub("tree", "blob", file.path(attr(tt_gh_data, ".url"), "agencies.csv")),
    "?raw=true"
  )
  readURL <- read_csv(url)

  expect_equal(numericRead, readURL)
  expect_equal(integerRead, readURL)
  expect_equal(characterRead, readURL)

  # fails when not expected class
  expect_error(
    {
      tt_read_data(tt_gh_data, factor("agencies.csv"))
    },
    "No method for entry of class:"
  )
})
test_that("tt_read_data informs when selection is out of range/not available", {
  tt_gh_data <- tt_load_gh("2019-01-15")
  # A file name that does not exist in the week's listing.
  expect_error(
    {
      tt_read_data(tt_gh_data, "wrong_entry.csv")
    },
    "That is not an available file"
  )
  # Numeric and integer indices past the end of the listing.
  expect_error(
    {
      tt_read_data(tt_gh_data, 45)
    },
    "That is not an available index"
  )
  expect_error(
    {
      tt_read_data(tt_gh_data, 45L)
    },
    "That is not an available index"
  )
})
test_that("tt_read_data can load RDS files just as easily as text files",{
  tt_gh_data <- tt_load_gh("2019-01-01")
  # Regardless of on-disk format, the result should still be a tibble.
  expect_is(
    tt_read_data(tt_gh_data, 1),
    c("tbl_df","tbl","data.frame")
  )
})
test_that("read_rda will not arbitrarily assign the object to the current environment",{
  new_dataset<-read_rda("testfiles/test.rda")
  # The object saved inside test.rda ('testdf') must not leak into scope;
  # read_rda should only return the value.
  expect_false(exists("testdf"))
  expect_equal(data.frame(x=c(1,2,3),y=c("A","B","C")),
               new_dataset)
})
|
ff8ab91e8b92e88caaf16e3ea6dde786eb97e6ee | e35ef3815b5b4afd1944b25f31e019ccbe84744b | /8 - Count Data/03 - SCRIPT - Modelos para Dados de Contagem.R | 27d7c8d2efb863803fea2a611778244bd4313c44 | [] | no_license | juanalencarvagas/RLang | 48741ddd13b65b7ef66c167688e53bbe7486b1e0 | 35d443d2e045274011862e32f569d543cb3d6a26 | refs/heads/main | 2023-08-23T00:49:17.893365 | 2021-11-03T16:51:32 | 2021-11-03T16:51:32 | 423,021,170 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,094 | r | 03 - SCRIPT - Modelos para Dados de Contagem.R | ################################################################################
#              INSTALLATION AND LOADING OF REQUIRED PACKAGES                   #
################################################################################
# Packages used
pacotes <- c("plotly","tidyverse","knitr","kableExtra","reshape2","ggrepel",
             "fastDummies","lmtest","splines","jtools","questionr","MASS",
             "pscl","overdisp","magick","cowplot","beepr")
# Install any packages that are missing, then attach them all.
# NOTE(review): install.packages() already receives the full vector of
# missing packages, so the surrounding for/break() wrapper runs only once.
if(sum(as.numeric(!pacotes %in% installed.packages())) != 0){
  instalador <- pacotes[!pacotes %in% installed.packages()]
  for(i in 1:length(instalador)) {
    install.packages(instalador, dependencies = T)
    break()}
  sapply(pacotes, require, character = T)
} else {
  sapply(pacotes, require, character = T)
}
################################################################################
# A DISTRIBUIÇÃO POISSON - PARTE CONCEITUAL #
################################################################################
# Poisson pmf with lambda = 1, continuous in m (so stat_function can
# sample non-integer grid points when drawing the curve).
poisson_lambda1 <- function(m){
  lambda <- 1
  # Evaluate exp(-lambda) * lambda^m / factorial(m) in log space:
  # factorial(m) overflows to Inf for m > 170, which degrades the direct
  # ratio; lgamma(m + 1) keeps the result finite and accurate.
  exp(-lambda + m * log(lambda) - lgamma(m + 1))
}
# Poisson pmf with lambda = 4, continuous in m (so stat_function can
# sample non-integer grid points when drawing the curve).
poisson_lambda4 <- function(m){
  lambda <- 4
  # Log-space evaluation of exp(-lambda) * lambda^m / factorial(m);
  # avoids the factorial overflow (Inf for m > 170) of the direct form.
  exp(-lambda + m * log(lambda) - lgamma(m + 1))
}
# Poisson pmf with lambda = 10, continuous in m (so stat_function can
# sample non-integer grid points when drawing the curve).
poisson_lambda10 <- function(m){
  lambda <- 10
  # Log-space evaluation of exp(-lambda) * lambda^m / factorial(m).
  # The direct form yields Inf/Inf = NaN once both lambda^m and
  # factorial(m) overflow (e.g. m = 400); lgamma keeps it well-defined.
  exp(-lambda + m * log(lambda) - lgamma(m + 1))
}
# Plot the three Poisson pmf curves defined above over m in [0, 20].
data.frame(m = 0:20) %>%
  ggplot(aes(x = m)) +
  stat_function(fun = poisson_lambda1, size = 1.5,
                aes(color = "01")) +
  stat_function(fun = poisson_lambda4, size = 1.5,
                aes(color = "04")) +
  stat_function(fun = poisson_lambda10, size = 1.5,
                aes(color = "10")) +
  scale_color_viridis_d("Valores de" ~ lambda ~ "") +
  labs(y = "Probabilidades", x = "m") +
  theme_bw()
##############################################################################
#                       REGRESSION FOR COUNT DATA                            #
#                   LOADING THE corruption DATA SET                          #
##############################################################################
#Fisman, R.; Miguel, E. Corruption, Norms, and Legal Enforcement:
#Evidence from Diplomatic Parking Tickets.
#Journal of Political Economy, v. 15, n. 6, p. 1020-1048, 2007.
#https://www.journals.uchicago.edu/doi/abs/10.1086/527495
load(file = "corruption.RData")
##############################################################################
#                  INSPECTING THE corruption DATA SET                        #
##############################################################################
# View the data set as a styled table
corruption %>%
  kable() %>%
  kable_styling(bootstrap_options = "striped",
                full_width = F,
                font_size = 19)
glimpse(corruption) # overview of the observations and variable types
# Univariate descriptive statistics
summary(corruption)
# Frequency table of the dependent variable (freq() from package questionr)
freq(corruption$violations) %>%
  kable()%>%
  kable_styling(bootstrap_options = "striped",
                full_width = T,
                font_size = 19)
# Histogram of the dependent variable
ggplotly(
  corruption %>%
    ggplot(aes(x = violations,
               fill = ..count..)) +
    geom_histogram(bins = round(2 * nrow(corruption) ^ (1 / 3)),
                   color = "black") +
    scale_fill_gradient("Contagem",
                        low = "#440154FF",
                        high = "#FDE725FF") +
    labs(x = "Quantidade de violações de trânsito",
         y = "Frequência") +
    theme_bw()
)
# Preliminary check for equality between the mean and the variance of the
# dependent variable 'violations' (the Poisson equidispersion assumption)
corruption %>%
  summarise(Média = mean(violations),
            Variância = var(violations)) %>%
  kable() %>%
  kable_styling(bootstrap_options = "striped",
                full_width = T,
                font_size = 30)
#Comportamento das variáveis 'corruption' e 'violations' antes e depois do
#início da vigência da lei
corruption %>%
mutate(lnviolations = log(violations),
lnviolations = ifelse(lnviolations == -Inf,
yes = 0,
no = lnviolations)) %>%
ggplot(aes(x = corruption, y = lnviolations)) +
geom_point(color = "black") +
geom_smooth(aes(color = "Fitted Values"),
method = "lm",
formula = y ~ splines::bs(x),
se = FALSE, size = 2) +
geom_text_repel(aes(label = code), # pacote ggrepel
size = 2,
color = "black",
max.overlaps = 100) +
labs(y = "Violações de Trânsito em NY (logs)",
x = "Índice de Corrupção dos Países") +
scale_color_manual("Label:",
values = "gold") +
facet_wrap(~post) +
theme_bw()
################################################################################
# ESTIMAÇÃO DO MODELO POISSON #
################################################################################
#Estimação do modelo
modelo_poisson <- glm(formula = violations ~ staff + post + corruption,
data = corruption,
family = "poisson")
#Parâmetros do modelo_poisson
summary(modelo_poisson)
#Extração do valor de Log-Likelihood (LL)
logLik(modelo_poisson)
#Outra forma de visualização dos parâmetros - função summ do pacote jtools
summ(modelo_poisson, digits = 4, confint = T, ci.width = 0.95)
export_summs(modelo_poisson, scale = F, digits = 4)
#LR Test - função lrtest do pacote lmtest
#(likelihood ratio test para comparação de LL's entre modelos)
lrtest(modelo_poisson) #no caso, comparação com modelo nulo (somente com intercepto)
#Todas as variáveis preditoras se mostraram estatisticamente diferentes de zero,
#considerando-se um nível de significância de 5%, ceteris paribus. Porém, já se
#pode afirmar que a estimação Poisson é a mais adequada?
################################################################################
# TESTE DE SUPERDISPERSÃO DE CAMERON E TRIVEDI (1990) #
################################################################################
#CAMERON, A. C.; TRIVEDI, P. K. Regression-based tests for overdispersion in
#the Poisson model. Journal of Econometrics, v. 46, n. 3, p. 347-364, 1990.
#1º Passo: estimar um modelo Poisson;
#2º Passo: criar uma nova variável (Y*) utilizando os fitted values do modelo
#Poisson estimado anteriormente;
#3º Passo: estimar um modelo auxiliar OLS, com a variável Y* como variável
#dependente, os fitted values do modelo Poisson como única variável preditora e
#sem o intercepto;
#4º Passo: Observar a significância do parâmetro beta.
#Adicionando os fitted values do modelo Poisson (lambda_poisson) à base de dados:
corruption$lambda_poisson <- modelo_poisson$fitted.values
#Creating the new variable Y* -- computed with with() instead of the
#original attach()/detach() pair, which can silently mask objects in the
#global environment and leaves the search path dirty if an error occurs:
corruption$ystar <- with(corruption,
                         (((violations - lambda_poisson) ^ 2)
                          - violations) / lambda_poisson)
#Estimando o modelo auxiliar OLS, sem o intercepto:
modelo_auxiliar <- lm(formula = ystar ~ 0 + lambda_poisson,
                      data = corruption)
#Observando os parâmetros do modelo_auxiliar
summary(modelo_auxiliar)
#Caso o p-value do parâmetro do lambda_poisson seja maior que 0.05,
#verifica-se a existência de equidispersão nos dados.
#Caso contrário, diagnostica-se a existência de superdispersão nos dados, fato
#que favorecerá a estimação de um modelo binomial negativo.
#Uma abordagem mais direta para a detecção da superdispersão pelo Teste de
#Cameron e Trivedi (1990) é por meio da utilização do algoritmo overdisp().
#Função overdisp do pacote overdisp
overdisp(x = corruption,
dependent.position = 3,
predictor.position = 4:6)
#Apenas para fins didáticos, caso considerássemos a estimação Poisson como a
#mais adequada, qual seria a quantidade média esperada de violações de trânsito
#para um país cujo corpo diplomático fosse composto por 23 membros, considerando
#o período anterior à vigência da lei e cujo índice de corrupção seja
#igual a 0.5?
predict(object = modelo_poisson,
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Qual seria a quantidade média esperada de violações de trânsito para o mesmo
#país, porém agora considerando a vigência da lei?
predict(object = modelo_poisson,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
################################################################################
# A DISTRIBUIÇÃO BINOMIAL NEGATIVA - PARTE CONCEITUAL #
################################################################################
#Criando uma função da distribuição binomial negativa, com theta=2 e delta=2
#theta: parâmetro de forma da distribuição Poisson-Gama (binomial negativa)
#delta: parâmetro de taxa de decaimento da distribuição Poisson-Gama
# Poisson-Gamma (negative binomial) density with shape theta = 2 and
# decay rate delta = 2: f(m) = delta^theta * m^(theta-1) * e^(-delta*m) / (theta-1)!
bneg_theta2_delta2 <- function(m) {
  shape <- 2  # theta
  decay <- 2  # delta
  nucleo <- decay^shape * m^(shape - 1) * exp(-m * decay)
  nucleo / factorial(shape - 1)
}
#Criando uma função da distribuição binomial negativa, com theta=3 e delta=1
# Poisson-Gamma (negative binomial) density with shape theta = 3 and
# decay rate delta = 1.
bneg_theta3_delta1 <- function(m) {
  shape <- 3  # theta
  decay <- 1  # delta
  (decay^shape * m^(shape - 1) * exp(-m * decay)) / factorial(shape - 1)
}
#Criando uma função da distribuição binomial negativa, com theta=3 e delta=0,5
# Poisson-Gamma (negative binomial) density with shape theta = 3 and
# decay rate delta = 0.5.
bneg_theta3_delta05 <- function(m) {
  shape <- 3    # theta
  decay <- 0.5  # delta
  densidade <- decay^shape * m^(shape - 1) * exp(-m * decay)
  densidade / factorial(shape - 1)
}
#Plotagem das funções estabelecidas anteriormente
data.frame(m = 1:20) %>%
ggplot(aes(x = m)) +
stat_function(fun = bneg_theta2_delta2,
aes(color = "Theta igual a 2 e Delta igual a 2"),
size = 1.5) +
stat_function(fun = bneg_theta3_delta1,
aes(color = "Theta igual a 3 e Delta igual a 1"),
size = 1.5) +
stat_function(fun = bneg_theta3_delta05,
aes(color = "Theta igual a 3 e Delta igual a 0,5"),
size = 1.5) +
scale_color_viridis_d("Valores de " ~ theta ~ "e " ~ delta ~ "") +
labs(y = "Probabilidades", x = "m") +
theme_bw()
################################################################################
# ESTIMAÇÃO DO MODELO BINOMIAL NEGATIVO #
################################################################################
#Estimação do modelo binomial negativo pela função glm.nb do pacote MASS
#Modelo Binomial Negativo do Tipo 2 (NB2)
modelo_bneg <- glm.nb(formula = violations ~ staff + post + corruption,
data = corruption)
#Parâmetros do modelo_bneg
summary(modelo_bneg)
#Parâmetro de forma da distribuição binomial negativa
1 / modelo_bneg$theta #phi
modelo_bneg$theta
#Estatística z de Wald do parâmetro theta para verificação da
#significância estatística
modelo_bneg$theta / modelo_bneg$SE.theta #maior que 1.96
#Extração do valor de Log-Likelihood (LL)
logLik(modelo_bneg)
#Parâmetros do modelo_bneg
summ(modelo_bneg, digits = 4, confint = T, ci.width = 0.95)
export_summs(modelo_bneg, scale = F, digits = 4)
#Comparando os modelos Poisson e Binomial Negativo
#modelo_poisson: linha 144 deste script!
export_summs(modelo_poisson, modelo_bneg, scale = F, digits = 4,
model.names = c("POISSON","BNEG"))
data.frame(LL_Poisson = round(logLik(modelo_poisson), 1),
LL_Bneg = round(logLik(modelo_bneg), 1)) %>%
kable() %>%
kable_styling(bootstrap_options = "striped", position = "center",
full_width = F,
font_size = 30)
#Likelihood-ratio test
lrtest(modelo_poisson, modelo_bneg)
#Gráfico para a comparação dos LL dos modelos Poisson e Binomial Negativo
my_plot <-
data.frame(Poisson = logLik(modelo_poisson),
BNeg = logLik(modelo_bneg)) %>%
melt() %>%
ggplot(aes(x = variable, y = value)) +
geom_bar(aes(fill = factor(variable)),
stat = "identity",
color = "black") +
geom_text(aes(label = round(value, digits = 3)),
color = "black",
size = 4,
vjust = -0.5,
angle = 90) +
scale_fill_manual("Legenda:", values = c("#440154FF", "orange")) +
coord_flip() +
labs(x = "Estimação",
y = "Log-Likelihood") +
theme_cowplot()
my_plot
#Com JPEG
ggdraw() +
draw_image("https://cdn.pixabay.com/photo/2016/08/21/18/48/emoticon-1610518_960_720.png",
x = -0.12, y = 0.23, scale = .43) +
draw_plot(my_plot)
#COMPARAÇÕES ENTRE AS PREVISÕES:
#Qual seria a quantidade média esperada de violações de trânsito para um país
#cujo corpo diplomático seja composto por 23 membros, considerando o período
#anterior à vigência da lei e cujo índice de corrupção seja igual 0.5?
#Modelo Poisson:
predict(object = modelo_poisson, #linha 144 deste script
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Modelo Binomial Negativo:
predict(object = modelo_bneg,
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Qual seria a quantidade média esperada de violações de trânsito para o mesmo
#país, porém agora considerando a vigência da lei?
#Modelo Poisson:
predict(object = modelo_poisson,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Modelo Binomial Negativo:
predict(object = modelo_bneg,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Adicionando os fitted values dos modelos estimados até o momento, para fins de
#comparação:
corruption %>%
mutate(fitted_poisson = modelo_poisson$fitted.values,
fitted_bneg = modelo_bneg$fitted.values) %>%
dplyr::select(country, code, violations, fitted_poisson,
fitted_bneg) %>%
kable() %>%
kable_styling(bootstrap_options = "striped",
full_width = F,
font_size = 19)
#Fitted values dos modelos POISSON e BINOMIAL NEGATIVO, considerando,
#para fins didáticos, apenas a variável preditora 'staff':
corruption %>%
ggplot() +
geom_point(aes(x = staff, y = violations), alpha = 0.5, size = 2) +
geom_smooth(aes(x = staff, y = modelo_poisson$fitted.values,
color = "POISSON"), se = F, size = 1.5) +
geom_smooth(aes(x = staff, y = modelo_bneg$fitted.values,
color = "BNEG"), se = F, size = 1.5) +
scale_color_manual("Estimação:",
values = c("orange", "#440154FF")) +
labs(x = "Number of Diplomats (staff)",
y = "Unpaid Parking Violations (violations)") +
theme(panel.background = element_rect("white"),
panel.grid = element_line("grey95"),
panel.border = element_rect(NA),
legend.position = "bottom")
################################################################################
# ESTIMAÇÕES MUITO PRÓXIMAS PARA POISSON E BNEG SEM SUPERDISPERSÃO! #
################################################################################
#Para fins didáticos, vamos gerar novo dataset 'corruption2', com quantidades
#de violações de trânsito iguais, no máximo, a 3. Este procedimento poderá,
#eventualmente, eliminar o fenômeno da superdispersão nos dados da variável
#dependente e, consequentemente, tornar as estimações dos modelos POISSON e
#BINOMIAL NEGATIVO praticamente iguais.
#Gerando novo dataset 'corruption2' com violations <= 3
corruption2 <- corruption[which(corruption$violations <= 3),1:6]
#Histograma da variável dependente 'violations' no dataset 'corruption2'
ggplotly(
corruption2 %>%
ggplot(aes(x = violations,
fill = ..count..)) +
geom_histogram(bins = 4,
color = "black") +
scale_fill_gradient("Contagem",
low = "#440154FF",
high = "#FDE725FF") +
labs(x = "Quantidade de violações de trânsito",
y = "Frequência") +
theme_bw()
)
#Diagnóstico preliminar para observação de eventual igualdade entre a média e
#a variância da variável dependente 'violations' no dataset 'corruption2'
corruption2 %>%
summarise(Média = mean(violations),
Variância = var(violations)) %>%
kable() %>%
kable_styling(bootstrap_options = "striped",
full_width = T,
font_size = 30)
#Estimação do modelo_poisson2
modelo_poisson2 <- glm(formula = violations ~ staff + post + corruption,
data = corruption2,
family = "poisson")
#Parâmetros do modelo_poisson2
summary(modelo_poisson2)
#Teste de superdispersão no dataset 'corruption2'
overdisp(x = corruption2,
dependent.position = 3,
predictor.position = 4:6)
#Estimação do modelo_bneg2
modelo_bneg2 <- glm.nb(formula = violations ~ staff + post + corruption,
data = corruption2)
#Parâmetros do modelo_bneg2
summary(modelo_bneg2)
#Significância estatística do parâmetro de forma da distribuição
#binomial negativa para o modelo_bneg2
modelo_bneg2$theta / modelo_bneg2$SE.theta #menor que 1.96
#Comparando os parâmetros e os valores de LL de modelo_poisson2 e modelo_bneg2
export_summs(modelo_poisson2, modelo_bneg2, scale = F, digits = 4,
model.names = c("POISSON2","BNEG2"))
data.frame(LL_Poisson2 = round(logLik(modelo_poisson2), 1),
LL_Bneg2 = round(logLik(modelo_bneg2), 1)) %>%
kable() %>%
kable_styling(bootstrap_options = "striped", position = "center",
full_width = F,
font_size = 30)
#Likelihood-ratio test para a comparação entre modelo_poisson2 e modelo_bneg2
lrtest(modelo_poisson2, modelo_bneg2)
################################################################################
# A DISTRIBUIÇÃO ZERO-INFLATED POISSON (ZIP) - PARTE CONCEITUAL #
################################################################################
#LAMBERT, D. Zero-inflated Poisson regression, with an application to defects
#in manufacturing. Technometrics, v. 34, n. 1, p. 1-14, 1992.
#Exemplo de uma função da distribuição ZI Poisson, com lambda = 1 e plogit = 0,7
# Zero-inflated Poisson pmf with lambda = 1 and structural-zero
# probability plogit = 0.7. Vectorized in m via ifelse(): the zero mass
# mixes the logit point mass with the Poisson mass at zero; counts > 0
# get the deflated Poisson mass only.
zip_lambda1_plogit07 <- function(m) {
  lambda <- 1
  p_zero <- 0.7
  massa_poisson <- (exp(-lambda) * lambda^m) / factorial(m)
  ifelse(m == 0,
         p_zero + (1 - p_zero) * massa_poisson,
         (1 - p_zero) * massa_poisson)
}
#Comparando as distribuições Poisson, BNeg e ZIP
data.frame(m = 0:20) %>%
ggplot(aes(x = m)) +
stat_function(fun = poisson_lambda1, size = 0.7,
aes(color = "Poisson - Lambda igual a 1")) +
stat_function(fun = poisson_lambda4, size = 0.7,
aes(color = "Poisson - Lambda igual a 4")) +
stat_function(fun = poisson_lambda10, size = 0.7,
aes(color = "Poisson - Lambda igual a 10")) +
stat_function(fun = bneg_theta2_delta2, size = 0.7,
aes(color = "BNeg - Theta igual a 2 e Delta igual a 2")) +
stat_function(fun = bneg_theta3_delta1, size = 0.7,
aes(color = "BNeg - Theta igual a 3 e Delta igual a 1")) +
stat_function(fun = bneg_theta3_delta05, size = 0.7,
aes(color = "BNeg - Theta igual a 3 e Delta igual a 0,5")) +
stat_function(fun = zip_lambda1_plogit07, size = 1.5,
aes(color = "ZIP - Lambda igual a 1 e plogit igual a 0,7")) +
scale_color_viridis_d("Distribuição:") +
labs(y = "Probabilidade", x = "m") +
theme_bw()
################################################################################
# ESTIMAÇÃO DO MODELO ZERO-INFLATED POISSON (ZIP) #
################################################################################
#VOLTANDO AO DATASET 'corruption'
#Estimação do modelo ZIP pela função zeroinfl do pacote pscl
modelo_zip <- zeroinfl(formula = violations ~ corruption + post + staff
| corruption,
data = corruption,
dist = "poisson")
#Parâmetros e LL do modelo_zip
summary(modelo_zip)
logLik(modelo_zip)
#Teste de Vuong:
#VUONG, Q. H. Likelihood ratio tests for model selection and non-nested
#hypotheses. Econometrica, v. 57, n. 2, p. 307-333, 1989.
vuong(m1 = modelo_poisson, #linha 144 deste script
m2 = modelo_zip)
#Comparando os LL dos modelos Poisson e ZIP
data.frame(LL_Poisson = round(logLik(modelo_poisson), 1),
LL_ZIP = round(logLik(modelo_zip), 1)) %>%
kable() %>%
kable_styling(bootstrap_options = "striped", position = "center",
full_width = F,
font_size = 30)
#Likelihood-ratio test
lrtest(modelo_poisson, modelo_zip)
#Bar chart comparing the log-likelihoods of the Poisson, ZIP and BNeg models
data.frame(Poisson = logLik(modelo_poisson),
           ZIP = logLik(modelo_zip),
           BNeg = logLik(modelo_bneg)) %>%
  melt() %>%
  ggplot(aes(x = variable, y = value)) +
  geom_bar(aes(fill = factor(variable)),
           stat = "identity",
           color = "black") +
  # fix: the argument was misspelled 'digts', which is not a prefix of
  # 'digits', so it was swallowed by ... and silently ignored; labels
  # now show 3 significant digits as intended (cf. my_plot above)
  geom_text(aes(label = format(value, digits = 3)),
            color = "black",
            size = 4,
            vjust = -0.5,
            angle = 90) +
  scale_fill_manual("Legenda:", values = c("#440154FF", "#453781FF", "orange")) +
  coord_flip() +
  labs(x = "Estimação",
       y = "Log-Likelihood") +
  theme_bw()
#COMPARAÇÕES ENTRE AS PREVISÕES:
#Supondo que considerássemos a estimação ZIP como a mais adequada, qual seria a
#quantidade média esperada de violações de trânsito para um país cujo corpo
#diplomático seja composto por 23 membros, considerando o período anterior à
#vigência da lei e cujo índice de corrupção seja igual a 0.5?
#Modelo Poisson:
predict(object = modelo_poisson, #linha 144 deste script
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Modelo Binomial Negativo:
predict(object = modelo_bneg, #linha 275 deste script
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Modelo ZIP:
predict(object = modelo_zip,
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Qual seria a quantidade média esperada de violações de trânsito para o mesmo
#país ao se considerar o início da vigência da lei?
#Modelo Poisson:
predict(object = modelo_poisson,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Modelo Binomial Negativo:
predict(object = modelo_bneg,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Modelo ZIP:
predict(object = modelo_zip,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
################################################################################
# A DISTRIBUIÇÃO ZERO-INFLATED BINOMIAL NEGATIVA (ZINB) - PARTE CONCEITUAL #
################################################################################
#Exemplo de uma função da distribuição ZI Binomial Negativa, com theta = 2,
#delta = 2, plogit = 0,7 e lambda_bneg = 2
# Zero-inflated negative binomial (ZINB) pmf with theta = 2, delta = 2,
# structural-zero probability plogit = 0.7 and lambda_bneg = 2.
# Vectorized in m via ifelse(): at zero, the logit point mass is mixed
# with the NB mass at zero; positive counts get the deflated
# Poisson-Gamma density.
zinb_theta2_delta2_plogit07_lambda2 <- function(m) {
  shape <- 2        # theta
  decay <- 2        # delta
  p_zero <- 0.7     # plogit
  lambda_bneg <- 2
  massa_zero <- p_zero +
    (1 - p_zero) * ((1 / (1 + lambda_bneg / shape))^shape)
  massa_contagem <- (1 - p_zero) *
    (decay^shape * m^(shape - 1) * exp(-m * decay)) / factorial(shape - 1)
  ifelse(m == 0, massa_zero, massa_contagem)
}
#Comparando as distribuições Poisson, BNeg, ZIP e ZINB
data.frame(m = 0:20) %>%
ggplot(aes(x = m)) +
stat_function(fun = poisson_lambda1, size = 0.7,
aes(color = "Poisson - Lambda igual a 1")) +
stat_function(fun = poisson_lambda4, size = 0.7,
aes(color = "Poisson - Lambda igual a 4")) +
stat_function(fun = poisson_lambda10, size = 0.7,
aes(color = "Poisson - Lambda igual a 10")) +
stat_function(fun = bneg_theta2_delta2, size = 0.7,
aes(color = "BNeg - Theta igual a 2 e Delta igual a 2")) +
stat_function(fun = bneg_theta3_delta1, size = 0.7,
aes(color = "BNeg - Theta igual a 3 e Delta igual a 1")) +
stat_function(fun = bneg_theta3_delta05, size = 0.7,
aes(color = "BNeg - Theta igual a 3 e Delta igual a 0,5")) +
stat_function(fun = zip_lambda1_plogit07, size = 0.7,
aes(color = "ZI Poisson - Lambda igual a 1 e plogit igual a 0,7")) +
stat_function(fun = zinb_theta2_delta2_plogit07_lambda2, size = 1.5,
aes(color = "ZINB - Theta igual a 2, Delta igual a 2 e plogit igual a 0,7")) +
scale_color_viridis_d("Distribuição:") +
labs(y = "Probabilidade", x = "m") +
theme_bw()
################################################################################
# ESTIMAÇÃO DO MODELO ZERO-INFLATED BINOMIAL NEGATIVO (ZINB) #
################################################################################
#Estimação do modelo ZINB pela função zeroinfl do pacote pscl
modelo_zinb <- zeroinfl(formula = violations ~ corruption + post + staff
| corruption,
data = corruption,
dist = "negbin")
#Parâmetros e LL do modelo_zinb
summary(modelo_zinb)
logLik(modelo_zinb)
modelo_zinb$theta
1/modelo_zinb$theta #phi
#Teste de Vuong (1989)
vuong(m1 = modelo_bneg, #linha 275 deste script
m2 = modelo_zinb)
#Comparando os LL dos modelos Bneg e ZINB
data.frame(LL_Bneg = round(logLik(modelo_bneg), 2),
LL_ZINB = round(logLik(modelo_zinb), 2)) %>%
kable() %>%
kable_styling(bootstrap_options = "striped", position = "center",
full_width = F,
font_size = 30)
#Likelihood-ratio test
lrtest(modelo_bneg, modelo_zinb)
#Bar chart comparing the log-likelihoods of all four estimations
my_plot2 <-
  data.frame(Poisson = logLik(modelo_poisson),
             ZIP = logLik(modelo_zip),
             Bneg = logLik(modelo_bneg),
             ZINB = logLik(modelo_zinb)) %>%
  melt() %>%
  ggplot(aes(x = variable, y = value)) +
  geom_bar(aes(fill = factor(variable)),
           stat = "identity",
           color = "black") +
  # fix: the argument was misspelled 'digts', which is not a prefix of
  # 'digits', so it was swallowed by ... and silently ignored; labels
  # now show 3 significant digits as intended
  geom_text(aes(label = format(value, digits = 3)),
            color = "black",
            size = 3.5,
            vjust = -0.5,
            angle = 90) +
  scale_fill_manual("Legenda:", values = c("#440154FF", "#453781FF",
                                           "orange", "#FDE725FF")) +
  coord_flip() +
  labs(x = "Estimação",
       y = "Log-Likelihood") +
  theme_cowplot()
my_plot2
#Com JPEG
ggdraw() +
draw_image("https://i.pinimg.com/originals/4a/ac/99/4aac9978c444c55cd462fd92c8ac400e.png",
x = -0.07, y = 0.244, scale = .43) +
draw_plot(my_plot2)
beep("mario")
#COMPARAÇÕES ENTRE AS PREVISÕES:
#Supondo que considerássemos a estimação ZINB como a mais adequada, qual seria a
#quantidade média esperada de violações de trânsito para um país cujo corpo
#diplomático seja composto por 23 membros, considerando o período anterior à
#vigência da lei e cujo índice de corrupção seja igual a 0.5?
#Modelo Poisson:
predict(object = modelo_poisson, #linha 144 deste script
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Modelo Binomial Negativo:
predict(object = modelo_bneg, #linha 275 deste script
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Modelo ZIP:
predict(object = modelo_zip, #linha 447 deste script
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Modelo ZINB:
predict(object = modelo_zinb,
newdata = data.frame(staff = 23,
post = "no",
corruption = 0.5),
type = "response")
#Qual seria a quantidade média esperada de violações de trânsito para o mesmo
#país, porém agora considerando a vigência da lei?
#Modelo Poisson:
predict(object = modelo_poisson,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Modelo Binomial Negativo:
predict(object = modelo_bneg,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Modelo ZIP:
predict(object = modelo_zip,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Modelo ZINB:
predict(object = modelo_zinb,
newdata = data.frame(staff = 23,
post = "yes",
corruption = 0.5),
type = "response")
#Adicionando os fitted values dos modelos estimados para fins de comparação
corruption %>%
mutate(fitted_poisson = modelo_poisson$fitted.values,
fitted_bneg = modelo_bneg$fitted.values,
fitted_zip = modelo_zip$fitted.values,
fitted_zinb = modelo_zinb$fitted.values) %>%
dplyr::select(country, code, violations, fitted_poisson,
fitted_bneg, fitted_zip, fitted_zinb) %>%
kable() %>%
kable_styling(bootstrap_options = "striped",
full_width = T,
font_size = 13)
#Fitted values dos modelos POISSON, BNEG, ZIP e ZINB, considerando, para fins
#didáticos, a variável dependente 'violations' em função apenas da variável
#preditora 'staff'
ggplotly(
corruption %>%
ggplot() +
geom_point(aes(x = staff, y = violations), alpha = 0.5, size = 2) +
geom_smooth(aes(x = staff, y = modelo_poisson$fitted.values,
color = "POISSON"), se = F) +
geom_smooth(aes(x = staff, y = modelo_bneg$fitted.values,
color = "BNEG"), se = F) +
geom_smooth(aes(x = staff, y = modelo_zip$fitted.values,
color = "ZIP"), se = F) +
geom_smooth(aes(x = staff, y = modelo_zinb$fitted.values,
color = "ZINB"), se = F) +
scale_color_manual("Estimação:",
values = c("orange", "#440154FF", "#FDE725FF", "#453781FF")) +
labs(x = "Number of Diplomats (staff)",
y = "Unpaid Parking Violations (violations)") +
theme(panel.background = element_rect("white"),
panel.grid = element_line("grey95"),
panel.border = element_rect(NA),
legend.position = "bottom")
)
####################################### FIM ####################################
|
e9f8e547aa21d42e23f450a7ad728d5c3f0287bd | 0f644a0675c39d23f6348df4a526c7877f4c8b92 | /man/square_lookup.Rd | 8901351fa6f9aaff249ae80cf3f25a916345f105 | [
"MIT"
] | permissive | MatthewJWhittle/osgridref | 30cab8c2b013b6ab3d66f956b15953f148f04189 | cbdee65b45097f10d29a22cf8e7989e5b3ae5556 | refs/heads/master | 2022-11-14T18:37:11.910337 | 2020-07-10T10:38:05 | 2020-07-10T10:38:05 | 268,344,191 | 0 | 0 | null | 2020-06-01T07:05:27 | 2020-05-31T18:57:03 | R | UTF-8 | R | false | true | 564 | rd | square_lookup.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/square_lookup.R
\docType{data}
\name{square_lookup}
\alias{square_lookup}
\title{Square coordinates of the British National Grid}
\format{
A data frame with 104 rows and 3 variables:
\describe{
\item{x}{xmin of grid square}
\item{y}{ymin of grid square}
\item{square_letters}{the two letter code for each grid square}
}
}
\usage{
square_lookup
}
\description{
A dataset containing the letter codes, xmin and ymin coordinates of squares on the British National Grid
}
\keyword{datasets}
|
7150fec729f87060bf2480a72f6854313851ae23 | 4ff5c316a1b88fc5856bb0ad2bb78060ffc90597 | /man/cv_entries.Rd | cecf4cb3785b55812cd27edccfd7677a9d4022f3 | [] | no_license | erex/vitae-etaremune | 874225212c8ba529f36884e68cc27d44bdba9a7c | 1cee6f6367e6b0fd5117b0cd20a190416130bcbb | refs/heads/master | 2020-04-17T08:03:18.600693 | 2019-01-18T11:46:57 | 2019-01-18T11:46:57 | 166,395,823 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,258 | rd | cv_entries.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brief.R, R/detailed.R
\name{brief_entries}
\alias{brief_entries}
\alias{cv_entries}
\alias{detailed_entries}
\title{CV entries}
\usage{
brief_entries(data, what, when, with, .protect = TRUE)
detailed_entries(data, what, when, with, where, why, .protect = TRUE)
}
\arguments{
\item{data}{A \code{data.frame} or \code{tibble}.}
\item{what}{The primary value of the entry (such as workplace title or degree).}
\item{when}{The time of the entry (such as the period spent in the role).}
\item{with}{The company or organisation.}
\item{.protect}{When TRUE, inputs to the previous arguments will be protected
from being parsed as LaTeX code.}
\item{where}{The location of the entry.}
\item{why}{Any additional information, to be included as dot points. Each
entry for why is provided in long form (where the what, when, with, and where
is duplicated)}
}
\description{
This function accepts a data object (such as a tibble) and formats the output
into a suitable format for the template used. The inputs can also involve
further calculations, which will be done using the provided data.
}
\details{
All non-data inputs are optional, and will result in an empty space if omitted.
}
|
782a7b62abcd0706925030c246f6969558591d85 | bae91e2a131a521f72b953682c0e2c0255d8905e | /plots_files/table_ics.R | ecb76e765c6eba8848d1440d5f2dbe93e695805e | [] | no_license | McKers/hidiTS_backup | fac9785997aca0f03f103b37f85ec15c39d0321d | b0c844cbddd43f9525abd5803cbb18c11ff8b750 | refs/heads/main | 2023-03-06T20:38:32.469603 | 2021-02-25T19:24:38 | 2021-02-25T19:24:38 | 342,270,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,267 | r | table_ics.R | #Latex table MSES
# Build a LaTeX table comparing information-criterion hit rates for the
# small- vs medium-lambda simulation runs.
library(xtable)
# (n, T) grid to report.
ns <- c(5, 10, 30, 50, 100, 300)
Ts <- c(7, 15, 30, 50, 100, 300)
# Medium-lambda run: keep one row per unique (n, T), restricted to the grid.
load("simulated_data/raspberry_paper_medium_lambda_ext.RData")
sim_data_medium <- simulated_data[!duplicated(simulated_data[c("n", "T")]), ]
data_medium <- sim_data_medium[sim_data_medium$T %in% Ts & sim_data_medium$n %in% ns, ]
ics_medium <- data_medium[, c("pca_bai_right", "pca_bic_right", "pca_bic_T_right", "pca_bic_nT_right")]
names(ics_medium) <- c("BNIC_med", "BIC_n_med", "BIC_T_med", "BIC_nT_med")
# Small-lambda run: same selection, keeping n and T as leading columns.
load("simulated_data/raspberry_paper_small_lambda_ext.RData")
sim_data_small <- simulated_data[!duplicated(simulated_data[c("n", "T")]), ]
data_small <- sim_data_small[sim_data_small$T %in% Ts & sim_data_small$n %in% ns, ]
ics_small <- data_small[, c("n", "T", "pca_bai_right", "pca_bic_right", "pca_bic_T_right", "pca_bic_nT_right")]
names(ics_small) <- c("n", "T", "BNIC_small", "BIC_n_small", "BIC_T_small", "BIC_nT_small")
# Combine both runs side by side, display n and T as integers, emit LaTeX.
data_final <- cbind(ics_small, ics_medium)
data_final["n"] <- as.integer(data_final[, "n"])
data_final["T"] <- as.integer(data_final[, "T"])
print(xtable(data_final, type = "latex", digits = c(0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2)), include.rownames = FALSE)
|
494729b33391ca92aef5b53f373d555cbdb396e1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/simPH/examples/coxsimPoly.Rd.R | b70a43321c54233b8831dd6b3e8bb105e994c248 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,035 | r | coxsimPoly.Rd.R | library(simPH)
# Auto-extracted example for simPH::coxsimPoly (do not hand-edit markers).
### Name: coxsimPoly
### Title: Simulate quantities of interest for a range of values for a
###   polynomial nonlinear effect from Cox Proportional Hazards models
### Aliases: coxsimPoly
### ** Examples

# Load Carpenter (2002) data
data("CarpenterFdaData")

# Load survival package
library(survival)

# Run basic model (natreg enters as a cubic polynomial via I(natreg^2/3))
M1 <- coxph(Surv(acttime, censor) ~ prevgenx + lethal + deathrt1 +
            acutediz + hosp01 + hhosleng + mandiz01 + femdiz01 +
            peddiz01 + orphdum + natreg + I(natreg^2) +
            I(natreg^3) + vandavg3 + wpnoavg3 +
            condavg3 + orderent + stafcder, data = CarpenterFdaData)

# Simulate simpoly First Difference
Sim1 <- coxsimPoly(M1, b = "natreg", qi = "First Difference",
                pow = 3, Xj = seq(1, 150, by = 5), nsim = 100)

## Not run: 
##D # Simulate simpoly Hazard Ratio with spin probability interval
##D Sim2 <- coxsimPoly(M1, b = "natreg", qi = "Hazard Ratio", 
##D 	pow = 3, Xj = seq(1, 150, by = 5), spin = TRUE)
## End(Not run)
|
83bcd4430fcd3c0d6a515eb0ac1893d3b0a87d25 | 5e85df6e3edead3eca4a2a4730f1705d1228c23d | /unsorted_code/funzioni erpR altre/average.mean_ver2.R | 1a548a243be1620adf1a5128706d1dca9b4feb72 | [
"MIT"
] | permissive | giorgioarcara/R-code-Misc | 125ff2a20531b2fbbc9536554042003b4e121766 | decb68d1120e43df8fed29859062b6a8bc752d1d | refs/heads/master | 2022-11-06T14:44:02.618731 | 2022-10-26T07:34:09 | 2022-10-26T07:34:09 | 100,048,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 887 | r | average.mean_ver2.R | ### INCOMPLETA!!!
average.mean <-
  # Collapse each subject's ERP matrix (timepoints x electrodes) to
  # per-electrode means within a time window, one output row per subject.
  # (Translated from the original Italian note: this version takes object
  # names in `obj` instead of a base name plus numbers; it was left here
  # because keeping per-subject consistency got messy.)
  #
  # obj:        character vector of object names to fetch from `env`.
  # win.ini/win.end: window bounds in msec, converted via msectopoints().
  # Subject.n:  if TRUE, add a numeric subject-index column.
  # env:        environment where the subject objects live.
  # startmsec/endmsec: epoch limits forwarded to msectopoints().
  # Returns a data.frame: per-electrode window means + id columns.
  function(obj, win.ini, win.end, Subject.n=FALSE, env=.GlobalEnv, startmsec=-200, endmsec=1200)
  {
    rows <- vector("list", length(obj))
    for (i in seq_along(obj))
    {
      # Fetch the subject's data once (the original evaluated it twice).
      subj.data <- eval(parse(text=obj[i]), envir=env)
      Subject_name <- comment(subj.data)
      npoints <- dim(subj.data)[1]
      from <- round(msectopoints(win.ini, npoints, startmsec, endmsec))
      to <- round(msectopoints(win.end, npoints, startmsec, endmsec))
      win.mean <- data.frame(t(colMeans(subj.data[from:to, ])))
      if (isTRUE(Subject.n))
      {
        win.mean$Subject.n <- i
      }
      win.mean$Subject_name <- Subject_name
      win.mean$Subject <- obj[i]
      rows[[i]] <- win.mean
    }
    # Bind once at the end instead of growing with rbind() inside the loop.
    datall <- do.call(rbind, rows)
    rownames(datall) <- seq_len(dim(datall)[1])
    return(datall)
  }
|
03f63e713ec71bbb322968f838c1c43c9985f287 | 9159536fcc484215c0e89a16e6d177e67a940db8 | /3st Semester/Statistical Consulting1/통계의뢰1,2/2/[통계상담] 작업 문서/ing.R | 9963f507628e0cc6f1746382e4149d7312fa4713 | [] | no_license | ehdrb5011992/Statistic | 6410cdff87e3a1672d1ac0accb58fa1207ea899f | 9475d33552d92eb78905919703924247c6acf4d3 | refs/heads/master | 2021-07-25T07:23:45.566270 | 2020-09-28T14:30:16 | 2020-09-28T14:30:16 | 220,902,621 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 13,754 | r | ing.R | # https://every-day-life.tistory.com/29 참ㄱ
library(tidyverse)
library(survival)
library(survminer)
library(randomForestSRC)
library(clinfun)
# 1. Light data preprocessing ########################################
x= read.csv('C:\\Users\\82104\\Desktop\\600.csv',header=T)
head(x)
name <- colnames(x)
colnames(x) <- c("a","b","c","d","e","f","g","h","i","j",
                 "k","l","m","n","o","p","q","r","s","t","u","v","w")
# Variable legend (translated from Korean):
# a age / b sex (1: male, 2: female) / c marital status (1: married, 2: single) /
# d religion (1: yes, 2: no) / e degree (1: 3-year program or less, 2: 4-year or more) /
# f department (1: internal medicine, 2: surgery) / g turnover plan (1: yes, 2: no) /
# h health status (1: good, 2: bad) / i work region (1: capital area, 2: provincial) /
# j empathy_total / k empathy_communication / l empathy_sensitivity / m empathy_insight /
# n turnover flag (unused) / o turnover/retirement status (0: censored, 1: event) /
# p clinical career in days (unused) / q final clinical career in days (unused) /
# r final clinical career in months (the survival time) / s final clinical career in years (unused) /
# t ward career in days (unused) / u career group (1: <=3 yrs, 2: 3-6 yrs, 3: >6 yrs) /
# v first questionnaire response per year (unused) / w final questionnaire response per year (unused)
# Categorical variables -> factors.
x$b <- as.factor(x$b)
x$c <- as.factor(x$c)
x$d <- as.factor(x$d)
x$e <- as.factor(x$e)
x$f <- as.factor(x$f)
x$g <- as.factor(x$g)
x$h <- as.factor(x$h)
x$i <- as.factor(x$i)
x$u <- as.factor(x$u)
# Split age into 2 groups: 1 = twenties (a < 30), 2 = thirty or older.
# Vectorized replacement for the original row-by-row loop (same result).
x$age <- as.factor(ifelse(x$a < 30, 1, 2))
# Distributions of the four empathy scores (all roughly normal).
with(x,hist(k)) # communication
with(x,hist(l)) # sensitivity
with(x,hist(m)) # inspiration
with(x,hist(j)) # total
# Recode each empathy score into 3 ordered groups:
# 1 = low (< 3), 2 = middle ([3, 4)), 3 = high (>= 4).
# (Original note: this categorization did not seem to help the models.)
# cut() with right = FALSE reproduces the original "< 3" / "< 4" / else
# branches exactly and replaces the per-row loop.
bin_empathy <- function(v) {
  cut(v, breaks = c(-Inf, 3, 4, Inf), labels = c(1, 2, 3), right = FALSE)
}
x$trt_k <- bin_empathy(x$k)
x$trt_l <- bin_empathy(x$l)
x$trt_m <- bin_empathy(x$m)
# BUG FIX: the original loop's middle branch for trt_j tested x$k[i] < 4
# (copy-paste error); trt_j is now computed from j throughout.
x$trt_j <- bin_empathy(x$j)
# Analysis subsets by career group (variable u).
# 1. all subjects
x1 <- x
# 2. career group 1 (up to 3 years)
x2 <- x[x$u == 1,]
# 3. career group 2 (3 to 6 years)
x3 <- x[x$u == 2,]
# 4. career group 3 (more than 6 years)
x4 <- x[x$u == 3,]
################################################################
# Cox PH interpretation: with a 2-level factor, level 2 is exp(coef)
# times as hazardous as level 1.
# (e.g. variable c: 1 = married, 2 = single -> singles are
#  exp(0.36514) = 1.44 times as hazardous as married subjects.)
# To interpret against another baseline, relevel first, e.g.
# x$c <- relevel(x$c, ref = "patchOnly"), then refit.
fit.coxph <- coxph(Surv(r,o)~a+c+e+g+h+k+l+m, data=x,ties='breslow')
summary(fit.coxph)

with(x,hist(r))
with(x,table(o,r))
with(x,hist(a)) # distribution of age

# Martingale residuals (how to handle a continuous covariate):
# plot null-model residuals against age to judge where to categorize it.
# Here a split into twenties vs non-twenties looks reasonable.
m1 <- coxph(Surv(r,o)~1,data = x)
x$resid <- residuals(m1, type = "martingale")

x %>% ggplot(aes(a,resid))+
  geom_point()+
  geom_smooth()+
  theme_classic()

# Refit using the age-group factor instead (did not improve the model).
fit.coxph <- coxph(Surv(r,o)~age+c+e+g+h+k+l+m, data=x,ties='breslow')

# Fits tried with the categorized scores (trt_k, trt_l, trt_m) applied
# to the x1..x4 subsets.
# x1 (all subjects)
fit.coxph <- coxph(Surv(r,o)~a+c+e+g+h+k+l+m, data=x1,ties='breslow') # age/trt combinations were all worse
summary(fit.coxph)
# x2 (<= 3 years)
fit.coxph <- coxph(Surv(r,o)~g+k+l+m, data=x2,ties='breslow') # age/trt combinations were all worse
summary(fit.coxph)
# x3 (3-6 years)
fit.coxph <- coxph(Surv(r,o)~b+g+k+l+m, data=x3,ties='breslow') # age/trt combinations were even worse
summary(fit.coxph)
# x4 (> 6 years)
# NOTE(review): labeled x4 but fits data=x3 - likely copy-paste; confirm.
fit.coxph <- coxph(Surv(r,o)~g+h+k+l+m, data=x3,ties='breslow') # age/trt combinations were even worse
summary(fit.coxph)
###################### Proportional-hazards checks ######################
# 1. Visual check via Kaplan-Meier curves by group.
m1 <- survfit(Surv(r,o)~b,data=x)
plot(m1, col=c("red","blue"))

# 2. Schoenfeld residual statistic & plot (tests slope = 0 over time).
# Must be checked variable by variable:
# a,b,c,d,e,f,g,h,i (age, sex, marriage, religion, degree, department,
# turnover plan, health, region) - and separately for each of the three
# career groups, for any variable that fails the PH assumption.
m2 <- coxph(Surv(r,o)~c,data = x)
summary(m2)
cox.zph(m2)
plot(cox.zph(m2))
abline(h=0, col="red", lty=2)
abline(h=0.7055,col="blue")

# 3. Add a time-dependent covariate term.
m5 <- coxph(Surv(r,o)~c+tt(c),data=x) # tt(c) is significant, so keep the term
m5
# This approach is less reliable than the Schoenfeld-residual test; to be
# precise one should also try transformed time scales (time, log(time),
# rank(time)) in the time-transform function.
# (No option was given to tt above, so rank(time) is the default.)
################################################################
################# Correcting for non-proportional hazards #################
# 1. Step function (for a continuous covariate) - split follow-up time.
m2 <- coxph(Surv(r,o)~a,data = x)
summary(m2)
cox.zph(m2)
plot(cox.zph(m2))
abline(h=0, col="red", lty=2)
abline(h=-0.1053,col="blue")

# Split each record at 90 and 100 months; tgroup indexes the episode.
sub_x <- survSplit(Surv(r,o)~.,data=x,cut=c(90,100),episode = "tgroup")
head(x)
head(sub_x)
table(sub_x$tgroup)

m1 <- coxph(Surv(r,o)~a,data=x)
m1
cox.zph(m1)

# Separate age effect per time interval (counting-process form).
m2 <- coxph(Surv(time = tstart,time2 = r,event = o)~a:strata(tgroup),data=sub_x)
m2
cox.zph(m2)

# 2. Time-dependent covariate (continuous variable): a*t and a*log(t).
m1 <- coxph(Surv(r,o)~a+tt(a),data=x,tt=function(x,t, ...) x*t)
summary(m1)
m2 <- coxph(Surv(r,o)~a+tt(a),data=x,tt=function(x,t, ...) x*log(t))

# 3. Stratified Cox PH model (likely the model to use here; categorical var).
fit.coxph <- coxph(Surv(r,o) ~ a+strata(c)+e+g+h+k+l+m, data=x)
summary(fit.coxph)

# Equivalent per-stratum fits (married vs single).
fit.coxph1 <- coxph(Surv(r, o) ~ a+e+g+h+k+l+m, data=x[x$c== 1,])
summary(fit.coxph1)
fit.coxph2 <- coxph(Surv(r, o) ~ a+e+g+h+k+l+m, data=x[x$c== 2,])
summary(fit.coxph2)

m2 <- coxph(Surv(r,o)~a+strata(c),data = x)
cox.zph(m2)
plot(cox.zph(m2))
################################################################################
################################################################################
################################################################################
# Main analysis
?plot.coxph
# 1. All subjects
# Stratify on c (marriage), with interactions considered.
# Covariates were chosen by univariate screening (p < 0.2 in a univariate
# Cox model); among them only c (marriage) violated the PH assumption per
# cox.zph, so it is stratified (it is categorical).
# The marriage strata appear to interact with the other covariates, so the
# model includes interaction terms.
# cf) Age is continuous and changes over time, but is treated as a fixed
# covariate, so it is left out of the PH correction.
# Univariate screening commands (repeated per variable):
# fit.coxph <- coxph(Surv(r, o) ~ c, data = x1) ... fit.coxph <- coxph(Surv(r, o) ~ i, data = x1)
# summary(fit.coxph) ; cox.zph(fit.coxph)

fit.coxph <- coxph(Surv(r, o) ~ c, data = x1,ties='breslow')
summary(fit.coxph)
cox.zph(fit.coxph)
plot(cox.zph(fit.coxph) , ylab = 'Beta(t) for Marriage', xlab='Time (month)' , main = 'Mariage: Schoenfeld Individual Test (p= 0.001)')
abline(h=0, col="red", lty=2)
abline(h=0.7055,col="blue")
?plot

# Below: simple stratification, no interactions.
fit.coxph0 <- coxph(Surv(r, o) ~ a+strata(c)+e+g+h+k+l+m, data=x1)
summary(fit.coxph0)

### Likelihood-ratio test: interaction model vs plain stratified model.
strata.mariage <- coxph(Surv(r, o) ~ a+strata(c)+e+g+h+k+l+m, data=x1)
strata.mariage
interaction.mariage <- coxph(Surv(r, o) ~ (a+e+g+h+k+l+m)*c - c + strata(c), data=x1)
interaction.mariage
anova(interaction.mariage, strata.mariage) # significant, so keep the interaction model

# Below: interaction considered (these fits are the ones to report).
# Note: because of the interaction, the coefficient estimates carry bias.
fit.coxph1 <- coxph(Surv(r, o) ~ a+e+g+h+k+l+m, data=x1[x1$c== 1,])
summary(fit.coxph1)
fit.coxph2 <- coxph(Surv(r, o) ~ a+e+g+h+k+l+m, data=x1[x1$c== 2,])
summary(fit.coxph2)
# Both fits above need to be interpreted.

# 2^3 grid over e/g/h with other covariates at their means.
# NOTE(review): e/g/h are numeric here although the model treats them as
# factors; this frame is superseded by the as.factor() version built below.
new_df <- with(x1[x1$c== 1,],
               data.frame(a = rep(mean(a, na.rm = TRUE), 8),
                          e = c(1,1,1,1,0,0,0,0)+1,
                          g = c(1,1,0,0,1,1,0,0)+1,
                          h = c(1,0,1,0,1,0,1,0)+1,
                          k = rep(mean(k, na.rm = TRUE), 8),
                          l = rep(mean(l, na.rm = TRUE), 8),
                          m = rep(mean(m, na.rm = TRUE), 8)
               )
)
new_df
####### Predicted survival curves from the Cox model at new covariate values #######
#### survival graph for fit.coxph1 (married stratum)
# One curve per empathy-insight (m) score: 1 (strata=1), 3 (strata=2), 5 (strata=3).
# Other covariates held fixed: age = mean, empathy_com = mean, empathy_sen = mean,
# e degree = 4-year-plus, g turnover plan = no, h health = bad.
# example
new_df <- with(x1[x1$c== 1,],
               data.frame(a = rep(mean(a, na.rm = TRUE), 3),
                          e = as.factor(c(1,1,1)+1),
                          g = as.factor(c(1,1,1)+1),
                          h = as.factor(c(1,1,1)+1),
                          k = rep(mean(k, na.rm = TRUE), 3),
                          l = rep(mean(l, na.rm = TRUE), 3),
                          m = c(1,3,5)
               )
)
new_df
# Survival curves with new data
library(survminer)
fit <- survfit(fit.coxph1, newdata = new_df)
ggsurvplot(fit, conf.int = F,
           censor = FALSE, surv.median.line = "hv",data=x1[x1$c== 1,],legend.title="",
           legend.labs=c("Com_ins_1","Com_ins_3","Com_ins_5"),linetype=c(1,2,3))

#### survival graph for fit.coxph2 (single stratum)
# example
# One curve per empathy-sensitivity (l) score: 1, 3, 5.
# Other covariates fixed: age = mean, empathy_com = mean, empathy_ins = mean,
# e degree = 4-year-plus, g turnover plan = no, h health = bad.
# NOTE(review): m is set to mean(l) rather than mean(m) - likely a
# copy-paste slip; confirm intended value.
new_df <- with(x1[x1$c== 2,],
               data.frame(a = rep(mean(a, na.rm = TRUE), 3),
                          e = as.factor(c(1,1,1)+1),
                          g = as.factor(c(1,1,1)+1),
                          h = as.factor(c(1,1,1)+1),
                          k = rep(mean(k, na.rm = TRUE), 3),
                          l = c(1,3,5),
                          m = rep(mean(l, na.rm = TRUE), 3)
               )
)
new_df
# Survival curves with new data
library(survminer)
fit <- survfit(fit.coxph2, newdata = new_df)
ggsurvplot(fit, conf.int = F,
           censor = FALSE, surv.median.line = "hv",data=x1[x1$c== 2,],legend.title="",
           legend.labs=c("Com_sen_1","Com_sen_3","Com_sen_5"),linetype=c(1,2,3))
#################################################################################
# 2. Career group 1 (up to 3 years) ******
# Covariates chosen by univariate screening (p < 0.2 in univariate survival).
# Variable g satisfies the PH assumption, but the model fit is poor.
# fit.coxph <- coxph(Surv(r, o) ~ a, data = x2, ties='breslow') ... fit.coxph <- coxph(Surv(r, o) ~ i, data = x2, ties='breslow')
# summary(fit.coxph) ; cox.zph(fit.coxph)

# PH check (variable g)
fit.coxph <- coxph(Surv(r, o) ~ g, data = x2) ;summary(fit.coxph) ; cox.zph(fit.coxph)
plot(cox.zph(fit.coxph))
abline(h=0, col="red", lty=2)
abline(h=-0.5819,col="blue")

fit.coxph0 <- coxph(Surv(r, o) ~ g+k+l+m, data=x2)
summary(fit.coxph0)

# 3. Career group 2 (more than 3, up to 6 years)
# Covariates chosen by univariate screening (p < 0.2).
# All PH checks pass for the Cox model, so it can be used as-is.
# fit.coxph <- coxph(Surv(r, o) ~ a, data = x3, ties='breslow') ... fit.coxph <- coxph(Surv(r, o) ~ i, data = x3, ties='breslow')
# summary(fit.coxph) ; cox.zph(fit.coxph)

# PH check (variables b and g)
fit.coxph <- coxph(Surv(r, o) ~ b, data = x3) ;summary(fit.coxph) ; cox.zph(fit.coxph)
fit.coxph <- coxph(Surv(r, o) ~ g, data = x3) ;summary(fit.coxph) ; cox.zph(fit.coxph)

# Model fits well; k, l, m confirmed significant.
fit.coxph0 <- coxph(Surv(r, o) ~ b+g+k+l+m, data=x3,ties='breslow')
summary(fit.coxph0)

# 4. Career group 3 (more than 6 years) ******
# Covariates chosen by univariate screening (p < 0.2).
# fit.coxph <- coxph(Surv(r, o) ~ a, data = x4, ties='breslow') ... fit.coxph <- coxph(Surv(r, o) ~ i, data = x4, ties='breslow')
# summary(fit.coxph) ; cox.zph(fit.coxph)
coxph(Surv(r, o) ~ i, data = x4, ties='breslow')

fit.coxph <- coxph(Surv(r, o) ~ g, data = x4) ;summary(fit.coxph) ; cox.zph(fit.coxph)

fit.coxph0 <- coxph(Surv(r, o) ~ g+k+l+m, data=x4)
summary(fit.coxph0)
|
4df8bf8078f31305061c9399060f2e4952e8aa6d | e00befe0f92d42dd1f97e9304973f4b22da03af5 | /BCS_tQQplots/BCS_tQQplots.R | 96f7a0ee297ab0e47f84b23a110606b7b1b13dbf | [] | no_license | QuantLet/BCS | a706ffdc3cf8777b5443b2c66ff601c3bc517ee0 | 4a5d9fc2c058e5e02534ccb37898d9e9cf2edd9e | refs/heads/master | 2023-04-03T23:59:31.647499 | 2023-03-27T22:14:39 | 2023-03-27T22:14:39 | 51,316,067 | 4 | 10 | null | null | null | null | UTF-8 | R | false | false | 393 | r | BCS_tQQplots.R | par(mfrow = c(1, 2))
# Draw a normal QQ-plot (red points) with the line of perfect fit for a
# random t-distributed sample of size 100 with the given degrees of freedom.
draw_t_qq <- function(df) {
  sample_t <- rt(100, df = df)
  qqnorm(sample_t, col = "red", pch = 20, asp = 1)
  qqline(sample_t)
}
# Left: heavy-tailed t with 3 df; right: t with 100 df (close to normal).
draw_t_qq(3)
draw_t_qq(100)
|
2232f69000b13e9febafd37415ff808d3a129090 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962288-test.R | 9a49fa2b2f650c90efd4ff2f990e0393ab2f277f | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 502 | r | 1609962288-test.R | testlist <- list(x = c(673872499L, 1952735075L, 1869443186L, 1702064991L, 1651471726L, 1680418915L, 1869509492L, 543780468L, 704632055L, -24673L, -1616928865L, -1612720385L, -1L, -218103809L, -702277039L, 1358954496L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
# Auto-generated fuzzer/valgrind reproduction: replay the captured
# `testlist` input against the package-internal dist_mat_absolute().
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
20642f84586f077949c6e3887b6ac00c43532081 | 706a46c04c3b0f90a5c89137d5f6f353a627f5f5 | /man/xlex.Rd | a7cb70a1f8dd74de1d8cc4f76b9ce42a4cfcd4eb | [] | no_license | cran/modgetxl | 5d227bab3a5507539a367278f6d9864decd59d20 | a54bf7ece3b29a956ad93af905951aadcc7a2680 | refs/heads/master | 2022-11-18T17:31:49.747642 | 2020-07-09T03:50:03 | 2020-07-09T03:50:03 | 276,696,728 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 763 | rd | xlex.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xlex.R
\name{xlex}
\alias{xlex}
\title{getxl Examples}
\description{
use shiny module getxl/UI for interactive input of Excel files
}
\examples{
library(shiny)
library(modgetxl)
app<- shinyApp(
ui= uiOutput('xltest'),
server= function(input, output) {
sink(file=stderr())
options(shiny.maxRequestSize=1*1024^2) # 1MB
output$xltest<- renderUI({
getxlUI('server')
})
xl<- callModule(getxl, 'server')
# excel sheets info and data are available once user uploads excel file in UI
# returned xl has reactiveValues of two variables as below
observeEvent(xl$sheets,{
print(xl$sheets)
print(head(xl$sheetdata[[1]]))
})
}
)
\dontrun{
runApp(app)
}
}
|
6b23e1dcd96c914a9b455f62620bd909dc7c4776 | 41ccd221d0e284df19df4e7d79b320b46a85534d | /plot1.R | a7ff8d43ccb9fb024cc09ffabc94938f3d13ee42 | [] | no_license | kottawartushar/EDA_Week1_Project_Assignment | 818c5e1d91a3ad833214237da91ef7ac9109d19c | f39457e0410cdc43948d3742baf89700eaf687ad | refs/heads/master | 2022-11-13T04:32:34.945065 | 2020-06-27T14:38:47 | 2020-06-27T14:38:47 | 275,377,607 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 330 | r | plot1.R | read_file <- read.table("household_power_consumption.txt", header = TRUE,
sep = ";", stringsAsFactors = FALSE, na.strings = "?")
##head(read_file)
# Keep only the two target days (Date is still a character column here).
subsetDate_data <- subset(read_file, Date %in% c("1/2/2007", "2/2/2007"))
subsetDate_data$Date <- as.Date(subsetDate_data$Date, format = "%d/%m/%Y")
# BUG FIX: hist() was called with no arguments, which errors at runtime.
# Plot the distribution of global active power over the two selected days
# (column name per the household power consumption file - confirm schema).
hist(subsetDate_data$Global_active_power, col = "red",
     main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
|
9ba6a49b7a7262038e7d77313f77e855eaf30b8d | a6905d66cb5948f3487068a758a828c9b4fb3588 | /allCountries/analysis_visualization/helpers.R | 75d3079307c9b5102d2bf3df1af3b127ca2a4b4d | [] | no_license | rahulAgrBej/seafoodGDELT | 2d82fe7a50bf62e2f12975b43c9135732dab006c | 02dd08efa9f9b3d4547072234bb740491fb191e0 | refs/heads/master | 2023-07-15T01:00:23.057648 | 2021-08-20T11:32:31 | 2021-08-20T11:32:31 | 266,158,045 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,826 | r | helpers.R | library(tidyverse)
library(ggplot2)
library(dplyr)
# Identify "shock" points in a time series.
# dat:    numeric time series; thresh: Cook's D cutoff for flagging a shock
#         (NOTE: the default here is 0.25, although the original comment
#         mentioned 0.35 - confirm which is intended).
# Method: fit a lowess trend, regress residual(t) on residual(t-1), and flag
# observations whose Cook's distance in that fit meets the threshold.
# Returns a data.frame (one row per observation) with columns
# cooks.d, residual, shock.event; row 1 has NA for cooks.d/shock.event
# because the lagged regression starts at t = 2.
shock.id <- function(dat, thresh=0.25){
  n <- length(dat)
  outt <- array(dim = c(n, 3))
  x <- seq_len(n)                         # safe for empty input, unlike 1:n
  ll <- lowess(x, dat, f = (2/3))         # lowess trend (span 2/3)
  rr <- as.numeric(dat[order(x)] - ll$y)  # residuals off the lowess curve
  rrp1 <- rr[2:n]                         # residuals at time t
  rrm1 <- rr[1:(n - 1)]                   # residuals at time t-1
  ll2 <- lm(rrp1 ~ rrm1)                  # linear fit of lagged residuals
  cd <- as.numeric(cooks.distance(ll2))   # Cook's distance per point
  outt[2:n, 1] <- cd
  outt[, 2] <- rr
  outt[2:n, 3] <- as.integer(cd >= thresh)  # 1 = shock, 0 = not
  outt <- as.data.frame(outt)
  colnames(outt) <- c("cooks.d", "residual", "shock.event")
  return(outt)
}
# Read one year of export records and keep only rows whose CTY_NAME
# matches `country`; numeric fields are coerced and id columns dropped.
records.getCountryExports <- function(fp, country) {
  trades <- read_csv(fp) %>%
    filter(str_detect(CTY_NAME, country))
  # Ensure the month/year/value fields are numeric.
  for (col in c("MONTH", "YEAR", "ALL_VAL_MO")) {
    trades[[col]] <- as.numeric(trades[[col]])
  }
  # Drop identifier columns that are not needed downstream.
  subset(trades, select = -c(CTY_CODE, SUMMARY_LVL, COMM_LVL, E_COMMODITY))
}
# Clean one year of trade data down to a specific country's IMPORTS
# (the original comment said "exports"). fp: CSV of import records;
# country: pattern matched against CTY_NAME.
records.getCountryImports <- function(fp, country) {
  countryTrades <- read_csv(fp)
  countryTrades <- countryTrades %>%
    filter(
      str_detect(CTY_NAME, country)
    )
  # Coerce the month/year/value fields to numeric.
  countryTrades$MONTH <- as.numeric(countryTrades$MONTH)
  countryTrades$YEAR <- as.numeric(countryTrades$YEAR)
  countryTrades$GEN_VAL_MO <- as.numeric(countryTrades$GEN_VAL_MO)
  # Drop identifier columns not needed downstream.
  countryTrades <- subset(countryTrades, select=-c(CTY_CODE, SUMMARY_LVL, COMM_LVL, I_COMMODITY))
  return(countryTrades)
}
# Get all export records (2017-2020) for a specific country.
# Concatenates the four yearly files and normalizes the column names
# (the value column ALL_VAL_MO becomes COUNTS).
records.getFullCountryExports <- function(country) {
  fp17 <- 'data/trades/countries/exportCountries2017.csv'
  fp18 <- 'data/trades/countries/exportCountries2018.csv'
  fp19 <- 'data/trades/countries/exportCountries2019.csv'
  fp20 <- 'data/trades/countries/exportCountries2020.csv'
  countryExport17 <- records.getCountryExports(fp17, country)
  countryExport18 <- records.getCountryExports(fp18, country)
  countryExport19 <- records.getCountryExports(fp19, country)
  countryExport20 <- records.getCountryExports(fp20, country)
  countryExportTotal <- rbind(countryExport17, countryExport18, countryExport19, countryExport20)
  colnames(countryExportTotal) <- c('CTY_NAME', 'COUNTS', 'MONTH', 'YEAR')
  return(countryExportTotal)
}
# Get all IMPORT records (2017-2020) for a specific country
# (the original comment said "export"). Concatenates the four yearly
# files and normalizes the column names (GEN_VAL_MO becomes COUNTS).
records.getFullCountryImports <- function(country) {
  fp17 <- 'data/trades/countries/importCountries2017.csv'
  fp18 <- 'data/trades/countries/importCountries2018.csv'
  fp19 <- 'data/trades/countries/importCountries2019.csv'
  fp20 <- 'data/trades/countries/importCountries2020.csv'
  countryImport17 <- records.getCountryImports(fp17, country)
  countryImport18 <- records.getCountryImports(fp18, country)
  countryImport19 <- records.getCountryImports(fp19, country)
  countryImport20 <- records.getCountryImports(fp20, country)
  countryImportTotal <- rbind(countryImport17, countryImport18, countryImport19, countryImport20)
  colnames(countryImportTotal) <- c('CTY_NAME', 'COUNTS', 'MONTH', 'YEAR')
  return(countryImportTotal)
}
# Monthly counts of US<->country news articles for a specific year.
# fp: summary-table CSV path; country: country code matched against the
# country1/country2 columns; year: year label for the output frame.
# Returns a 12-row data.frame (MONTH, COUNTS, YEAR); months with no
# matching articles keep COUNTS = 0.
records.getCountryNewsCounts <- function(fp, country, year) {
  countryArticles <- read_csv(fp)
  countryArticles <- countryArticles %>%
    filter(
      (str_detect(country1, 'US') & str_detect(country2, country)) |
        (str_detect(country1, country) & str_detect(country2, 'US'))
    ) %>% group_by(month) %>% tally()
  # Preallocate the 12-month frame (the original grew it with rbind in a loop).
  monthCounts <- data.frame(MONTH = as.numeric(1:12), COUNTS = 0, YEAR = as.numeric(year))
  # seq_len() also fixes the 1:nrow() iteration bug when no articles matched;
  # $-extraction avoids coercing a 1x1 tibble, unlike the original [row, col].
  for (newRow in seq_len(nrow(countryArticles))) {
    idx <- as.integer(countryArticles$month[newRow])
    freq <- as.integer(countryArticles$n[newRow])
    monthCounts[idx, 2] <- freq
  }
  return(monthCounts)
}
# Gets monthly news article counts for 2017-2020 for a specific country.
# Stacks the per-year 12-row frames into a single 48-row data.frame.
records.getFullCountryNewsCounts <- function(country) {
  fp17 <- 'data/summary_table_2017.csv'
  fp18 <- 'data/summary_table_2018.csv'
  fp19 <- 'data/summary_table_2019.csv'
  fp20 <- 'data/summary_table_2020.csv'
  countryNewsCounts17 <- records.getCountryNewsCounts(fp17, country, 2017)
  countryNewsCounts18 <- records.getCountryNewsCounts(fp18, country, 2018)
  countryNewsCounts19 <- records.getCountryNewsCounts(fp19, country, 2019)
  countryNewsCounts20 <- records.getCountryNewsCounts(fp20, country, 2020)
  countryNewsCounts <- rbind(countryNewsCounts17, countryNewsCounts18, countryNewsCounts19, countryNewsCounts20)
  return(countryNewsCounts)
}
# Attach a KIND label and per-month shock flags to a 48-month count series.
# data: data.frame with a COUNTS column (one row per month across 4 years).
# kind: series label such as 'EXPORTS', 'IMPORTS' or 'NEWS'.
# Returns columns MONTH (running index 1..48), COUNTS, KIND, SHOCK (0/1/NA).
completeData <- function(data, kind) {
  # Tag every row with its series kind (replaces the rep/cbind dance).
  data$KIND <- kind
  # BUG FIX: the original read data$COUNT, which only resolved to COUNTS
  # via data.frame partial matching; use the real column name.
  data$SHOCK <- shock.id(data$COUNTS)$shock.event
  # Running month index across 2017-2020 (assumes exactly 48 rows).
  data$MONTH <- seq_len(48)
  subset(data, select = c('MONTH', 'COUNTS', 'KIND', 'SHOCK'))
}
# Faceted plot of monthly news/import/export counts for one country, with
# vertical lines at the shock months detected in each series.
# countryName: name used by the trade records; countryCode: news-data code.
tradeNewsPlots <- function(countryName, countryCode) {
  # Build the three 48-month series, each tagged with its KIND.
  dataExport <- completeData(records.getFullCountryExports(countryName), 'EXPORTS')
  dataImport <- completeData(records.getFullCountryImports(countryName), 'IMPORTS')
  dataNews <- completeData(records.getFullCountryNewsCounts(countryCode), 'NEWS')
  dataComplete <- rbind(dataNews, dataImport, dataExport)

  # Month positions (1..48) of the shocks within each series.
  exportShocks <- which(subset(dataComplete, KIND == 'EXPORTS')$SHOCK == 1)
  importShocks <- which(subset(dataComplete, KIND == 'IMPORTS')$SHOCK == 1)
  newsShocks <- which(subset(dataComplete, KIND == 'NEWS')$SHOCK == 1)

  # BUG FIX: the original geom_line calls used subset(dataComplete, KIND = '...');
  # with `=` the condition is passed through `...` and silently ignored, so
  # every layer plotted ALL rows. `==` restores the intended filtering.
  p <- ggplot() +
    ggtitle(countryName) +
    facet_grid(rows = vars(KIND), scales = 'free_y') +
    scale_x_continuous(breaks = seq(1, 48, by = 1)) +
    geom_line(data = subset(dataComplete, KIND == 'NEWS'), aes(MONTH, COUNTS), color = 'red') +
    geom_line(data = subset(dataComplete, KIND == 'EXPORTS'), aes(MONTH, COUNTS), color = 'blue') +
    geom_line(data = subset(dataComplete, KIND == 'IMPORTS'), aes(MONTH, COUNTS), color = 'black') +
    geom_vline(xintercept = newsShocks, color = 'blue') +
    geom_vline(xintercept = exportShocks, color = 'green') +
    geom_vline(xintercept = importShocks, color = 'red', linetype = 'dotted')
  return(p)
}
# Monthly import counts for `countryName` (2017-2020) with shock flags.
# Returns a data.frame with MONTH (running index 1..48), COUNT, SHOCK (0/1).
shocksImports <- function(countryName) {
  imports <- records.getFullCountryImports(countryName)
  shocks <- shock.id(imports$COUNTS)
  monthIdx <- seq(1,48)  # assumes four full years of monthly data
  final <- cbind(monthIdx, imports$COUNTS, shocks$shock.event)
  colnames(final) <- c('MONTH', 'COUNT', 'SHOCK')
  final <- data.frame(final)
  return(final)
}
# Monthly export counts for `countryName` (2017-2020) with shock flags.
# Returns a data.frame with MONTH (running index 1..48), COUNT, SHOCK (0/1).
shocksExports <- function(countryName) {
  exports <- records.getFullCountryExports(countryName)
  shocks <- shock.id(exports$COUNTS)
  monthIdx <- seq(1,48)  # assumes four full years of monthly data
  final <- cbind(monthIdx, exports$COUNTS, shocks$shock.event)
  colnames(final) <- c('MONTH', 'COUNT', 'SHOCK')
  final <- data.frame(final)
  return(final)
}
# Line plot of a shock series (MONTH vs COUNT) with vertical red lines
# marking the months flagged as shocks. Returns the ggplot object.
shockPlots <- function(data, title) {
  shock_months <- which(data$SHOCK == 1)
  ggplot() +
    ggtitle(title) +
    geom_line(data = data, aes(MONTH, COUNT)) +
    geom_vline(xintercept = shock_months, color = 'red')
}
|
ff02637a211e84b00de8376b6892607ee768a966 | 25801cec4425eb9cfa37136451af3c6a73ff843b | /src/DAPC.R | 9b6d8d98b0367f87e1a8c332e3816fc437d77dfe | [] | no_license | rameshbalan/Tribolium_GWAS | 43e8aefcf9c43a9deb55ca80fc73dc95e4a92d8a | b84943f6ae6d1a1600f18fd118183cc24f148fd6 | refs/heads/master | 2020-04-29T12:52:43.049910 | 2019-03-19T01:19:27 | 2019-03-19T01:19:27 | 176,152,644 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,036 | r | DAPC.R | # All the necessary packages
library(adegenet)
library(vcfR)
library(poppr)
library(ape)
library(RColorBrewer)
library(igraph)
library(ggplot2)

# Input file
Ordered_All_Pop_file <- read.vcfR("Ordered_All_Samples.vcf")

# "pop.txt" file has two columns separated by tab. First column is the name of the sample and the second column is the population to which it belongs.
pop.data <- read.table("pop.txt", sep = "\t", header = TRUE)

# Check that all the samples in the VCF file are present in the "pop.txt" file.
all(colnames(Ordered_All_Pop_file@gt)[-1] == pop.data$sample)

# Convert into a genlight object, the format that the downstream functions expect.
All_Pop_gl <- vcfR2genlight(Ordered_All_Pop_file)

# Adding Ploidy
ploidy(All_Pop_gl) <- 2

# Adding Population names
pop(All_Pop_gl) <- pop.data$pop

# Checking if everything looks alright
print(All_Pop_gl)

# #UPGMA Based Distance Tree
# tree <- aboot(All_Pop_gl, tree = "upgma", distance = bitwise.dist, sample = 100, showtree = F, cutoff = 50, quiet = T)
# One color per population, reused by all plots below.
cols <- brewer.pal(n = nPop(All_Pop_gl), name = "Dark2")
# plot.phylo(tree, cex = 0.8, font = 2, adj = 0, tip.color = cols[pop(All_Pop_gl)])
# nodelabels(tree$node.label, adj = c(1.3, -0.5), frame = "n", cex = 0.8,font = 3, xpd = TRUE)
# legend('topleft', legend = c("01","02","11","12","13","18","20","24"), fill = cols, border = FALSE, bty = "n", cex = 0.6)
# axis(side = 1)
# title(xlab = "Genetic distance (proportion of loci that are different)")
#
# #Another useful independent analysis to visualize population structure is a minimum spanning network (MSN). MSN clusters multilocus genotypes (MLG) by genetic distances between them
# #MSN Plot
# Pop_dist <- bitwise.dist(All_Pop_gl)
# Pop_msn <- poppr.msn(All_Pop_gl, Pop_dist, showplot = FALSE, include.ties = T)
# node.size <- rep(2, times = nInd(All_Pop_gl))
# names(node.size) <- indNames(All_Pop_gl)
# set.seed(9)
# plot_poppr_msn(All_Pop_gl, Pop_msn , palette = brewer.pal(n = nPop(All_Pop_gl), name = "Dark2"), gadj = 70)

# PCA Analysis: scree plot of eigenvalues (percent variance explained).
Pop_pca <- glPca(x = All_Pop_gl, nf = 7)
barplot(100*Pop_pca$eig/sum(Pop_pca$eig), col = heat.colors(50), main="PCA Eigenvalues")
title(ylab="Percent of variance\nexplained", line = 2)
title(xlab="Eigenvalues", line = 1)

# PCA Plot: PC1 vs PC2 with 95% ellipses per population.
pop.pca.scores <- as.data.frame(Pop_pca$scores)
pop.pca.scores$pop <- pop(All_Pop_gl)

set.seed(9)
p <- ggplot(pop.pca.scores, aes(x=PC1, y=PC2, colour=pop))
p <- p + geom_point(size=2)
p <- p + stat_ellipse(level = 0.95, size = 1)
p <- p + scale_color_manual(values = cols)
p <- p + geom_hline(yintercept = 0)
p <- p + geom_vline(xintercept = 0)
p <- p + theme_bw()
p

# DAPC Analysis (two fits: fixed n.da = 3, and n.da = number of pops - 1)
pop_dapc <- dapc(All_Pop_gl, n.pca = 4, n.da = 3)
pop.dapc <- dapc(All_Pop_gl, var.contrib = TRUE, scale = FALSE, n.pca = 4, n.da = nPop(All_Pop_gl) - 1)

# A scatter plot
scatter(pop_dapc, col = cols, cex = 2, legend = TRUE, clabel = F, posi.leg = "bottomleft", scree.pca = TRUE,
        posi.pca = "topleft", cleg = 0.75)

# Structure Plot (membership probabilities per individual)
compoplot(pop_dapc,col = cols, posi = 'top')
|
b9904df737be80b81aebdd2413630d87bf2ae019 | 6ed0753528e9d9cc12ff9195b2e6c65409daec3e | /Scripts/quiz3.R | 7c8eaf94a01dd575a9d377475c4d48b0d205622e | [] | no_license | p234a137/JohnHopkinsR | fbf581cd1a83a0519429da6b42d0200d0fb9f0c6 | 6da57f7c3433175303091964dea5fa9a708a21d4 | refs/heads/master | 2021-01-23T21:38:29.644906 | 2014-12-23T04:28:03 | 2014-12-23T04:28:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 297 | r | quiz3.R | library(datasets)
# Summary statistics on the built-in iris and mtcars datasets.
data(iris)
?iris
# mean(iris$Sepal.Length)
# Column means of the four numeric measurements.
apply(iris[, 1:4], 2, mean)
# Mean sepal length within each species.
with(iris, tapply(Sepal.Length, Species, mean))

library(datasets)
data(mtcars)
# Mean mpg by number of cylinders.
with(mtcars, tapply(mpg, cyl, mean))
# or equivalent
tapply(mtcars$mpg, mtcars$cyl, mean)
# Mean horsepower by number of cylinders.
with(mtcars, tapply(hp, cyl, mean))
|
8888f77d41fb1e2001b05f2a96e62a7389d9092c | ade25271ba3b3e47edf7cf9d9b13f30b753b1350 | /Parcial03/Notas de clase/1. Anualidades tiempo continuo-R/Ejemplo.simula.geo.lineal.r | aa41219f664f62b95d131bd46c14542936c31d21 | [] | no_license | Universidad-Nacional-Academica/Actuaria | 93659d85b2ea79015616d6eebb4cf531c1d17092 | ca01a86c9fe6ecd8fb160a3975c31534b511ffd2 | refs/heads/main | 2023-07-17T01:33:32.476569 | 2021-08-24T19:37:58 | 2021-08-24T19:37:58 | 358,053,044 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,548 | r | Ejemplo.simula.geo.lineal.r |
#--- Gompertz-Makeham parameters (rows: sex x period)
M = matrix(0,4,3)
# women, 1980-89:
a = 4.450e-03
b = 2.814e-05
C = 1.101e+00
M[1,]=c(a,b,C)
# men, 1980-89:
a = 4.918e-03
b = 1.559e-05
C = 1.107e+00
M[2,]=c(a,b,C)
# women, 2000-08:
a = 4.444e-03
b = 5.017e-07
C = 1.143e+00
M[3,]=c(a,b,C)
# men, 2000-08:
a = 7.121e-03
b = 2.504e-06
C = 1.126e+00
M[4,]=c(a,b,C)
colnames(M)=c("a","b","c")
rownames(M)=c("m80-89","h80-89","m00-08","h00-08")
(M)
#-----------
require(eha)
#--- parameters re-expressed for the eha/rmutil Makeham parameterization,
#--- starting age x = 50, using row 4 (men, 2000-08)
x = 50
a2 = M[4,1]
sigma = 1/log(M[4,3])
a1 = M[4,2]*exp(x/sigma)
#------------ rates and benefit
i = 0.06               # interest rate
v = (1+i)^(-1)         # annual discount factor
iq = 0.025             # annual benefit growth rate
pago = 12              # base payment
bt = function(t){
  pago*(1+iq)^(floor(t))}
fn <- function(t){bt(t)*v^t}
#---------- simulate the present value of the geometric benefit
N = 1000
Bt = double(N)
require(rmutil)
for(j in 1:N){
  Tx = rmakeham(1, shape = c(a1, a2), scale = sigma)
  Bt[j]=int(fn, 0, Tx)
}
hist(Bt,100)
points(mean(Bt),0,pch=20,col='red')
#------------------- arithmetic (increasing) component
# NOTE(review): `q` is never defined in this script; as written R would
# pick up base::q (the quit function) and these expressions fail.
# Presumably q = payments per period - define it before running, TODO confirm.
ht = function(t){
  (1+floor(t*q))/q}
hn <- function(t){ht(t)*v^t}
Ht = double(N)
require(rmutil)
for(j in 1:N){
  Tx = rmakeham(1, shape = c(a1, a2), scale = sigma)
  Ht[j]=int(hn, 0, Tx)
}
#------------------- level annuity factor
dn <- function(t){v^t}
Dt = double(N)
require(rmutil)
for(j in 1:N){
  Tx = rmakeham(1, shape = c(a1, a2), scale = sigma)
  Dt[j]=int(dn, 0, Tx)
}
#-------------- linearly increasing benefit, combined from the pieces above
rho = 0.05
Lt = (pago - rho)*Dt+rho*q*Ht
hist(Lt,100)
points(mean(Lt),0,pch=20,col='red')
# Overlay the geometric (blue) and linear (red) present-value distributions.
hist(Bt,100,xlim=c(0,400),col='skyblue',border=F)
hist(Lt,100,add=T,col=scales::alpha('red',.5),border=F)
|
4a1e5abfa74444ae7723e91665edf91f94fd0fd9 | 6c4191c8ddb41cd51333fbee225254bf244befc5 | /Okun-s Law/okun.r | cf283156d93eedac51e61b132514004d1b0da9dd | [] | no_license | py0717/Data-Analysis | a6d8b9b3645596ce606cb348ad2a8f4396caf10a | 713b614497293ba01242f6cda1419c823063196a | refs/heads/master | 2021-09-14T12:34:24.820685 | 2018-05-14T00:24:37 | 2018-05-14T00:24:37 | 113,804,677 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,746 | r | okun.r | ######################
# Testing Okun's Law #
######################
# Okun's Law (Differences Version) states that a 1 point increase in unemployment rate is associated with a 3 point decrease in
# real GDP (some texts suggest a 2 point decrease). We will test Okun's Law using annual data from Korea and Brazil. The data
# was obtained from the Federal Reserve Bank of St Louis (https://fred.stlouisfed.org/)
#########
# Korea #
#########
# setup
getwd()
korea.ue <- read.csv("Korea UE Change.csv")
korea.gdp <- read.csv("Korea Real GDP Change.csv")
# data clean
korea <- merge(korea.ue, korea.gdp)
colnames(korea) <- c("DATE", "UE.Change", "GDP.Change")
head(korea)
# exploratory
attach(korea)
plot(korea)
summary(korea)
boxplot(UE.Change, main="Change in Korea's Annual Unemployment Rate")
boxplot(GDP.Change, main="% Change in Korea's Annual Real GDP")
plot(UE.Change, GDP.Change, xlab="Change in Unemployment Rate", ylab="% Change in Real GDP", main="Korea (Annual)")
abline(h=0, v=0, col="black", lty=2)
# linear regression
# delta(GDP) = alpha + beta*delta(UE)
korea.model <- lm(GDP.Change ~ UE.Change)
summary(korea.model) # beta-hat is significant
abline(korea.model, col="red")
detach(korea)
# diagnostic plots
plot(korea.model)
# confidence interval test with 0.05 alpha -> null hypothesis: beta = -3; alternative hypothesis: beta != -3
confint(korea.model, "UE.Change", level=0.95) # fail to reject since -3 is in the CI range
##########
# Brazil #
##########
# setup
brazil.ue <- read.csv("Brazil UE Change.csv")
brazil.gdp <- read.csv("Brazil Real GDP Change.csv")
# data clean
brazil <- merge(brazil.ue, brazil.gdp)
colnames(brazil) <- c("DATE", "UE.Change", "GDP.Change")
head(brazil)
#exploratory
attach(brazil)
plot(brazil)
summary(brazil)
boxplot(UE.Change, main="Change in Brazil's Annual Unemployment Rate")
boxplot(GDP.Change, main="% Change in Brazil's Annual Real GDP")
plot(UE.Change, GDP.Change, xlab="Change in Unemployment Rate", ylab="% Change in Real GDP", main="Brazil (Annual)")
abline(h=0, v=0, col="black", lty=2)
# linear regression
# delta(GDP) = alpha + beta*delta(UE)
brazil.model <-lm(GDP.Change ~ UE.Change)
summary(brazil.model) # beta-hat is significant
abline(brazil.model, col="blue")
detach(brazil)
# diagnostic plots
plot(brazil.model)
# confidence interval test with 0.05 alpha -> null hypothesis: beta = -3; alternative hypothesis: beta != -3
confint(brazil.model, "UE.Change", level=0.95) # reject null since -3 is not in CI range -> beta does not equal -3
# okun's law does not hold in brazil
|
1fdc4111993a42a47ae05e312e13f0f86055140a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fluspect/examples/define.bands.Rd.R | b3123181438cbfe5680eab2b563f6f8368bc16b9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 142 | r | define.bands.Rd.R | library(fluspect)
### Name: define.bands
### Title: define.bands
### Aliases: define.bands
### ** Examples
# Extracted example from the fluspect package: build the default
# spectral band definition object.
spectral <- define.bands()
|
160b944f4da216d8ab781122dd5bab9533579cba | 5e6e88595a6547150ca64fa5a191c2ca7447138a | /tests/testthat/test_discrete_variance.R | 0b1088c55d0efaa06d2e8d5494b43b31b05ae8aa | [] | no_license | cran/BLModel | 2eff602cdc2c284967484239a9929252b1de0881 | 3b6b8747a4b8218df28e69a794a90be914d25c1f | refs/heads/master | 2020-12-31T00:18:03.669269 | 2017-03-29T05:38:36 | 2017-03-29T05:38:36 | 86,547,453 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 111 | r | test_discrete_variance.R | test_that("Input",{
expect_error(.discrete_variance())
expect_error(.discrete_variance(returns_coef))
}) |
8e114b68803ba9e3940888b36dc3b24c48ca9f32 | 074434c8c49b7deaf165a8345dab83e12e17f30d | /cachematrix.R | 7c6423f440ee354bf8a97e8104d9714d2850e2bc | [] | no_license | kmyoshida/ProgrammingAssignment2 | 17c2a8fa712e16d224fb9f6ba9ad08cb782ba98f | 2cf102186745453df1b11106c6067711c9c7872e | refs/heads/master | 2021-01-18T12:16:53.060031 | 2015-03-19T16:20:49 | 2015-03-19T16:20:49 | 32,326,222 | 0 | 0 | null | 2015-03-16T13:21:20 | 2015-03-16T13:21:20 | null | UTF-8 | R | false | false | 1,728 | r | cachematrix.R | ## makeCacheMatrix, cacheSolve
## Pair of functions to handle special "matrix" object that can cache its
## inverse
makeCacheMatrix <- function(m.x = matrix()) {
  # Creates a special "matrix" object that can cache its inverse.
  #
  # Args:
  #   m.x: a matrix whose inverse is to be computed and cached.
  #
  # Returns:
  #   a list of four closures (set, get, setinv, getinv) that share the
  #   matrix m.x and its cached inverse m.inv through their enclosing
  #   environment.
  #
  # Fix: removed leftover debug output (print("makeCacheMatrix") and
  # print(objects())) that polluted the console on every construction.
  m.inv <- NULL
  set <- function(y) {
    # Replace the stored matrix and invalidate the cached inverse.
    m.x <<- y
    m.inv <<- NULL
  }
  get <- function() m.x
  setinv <- function(inv) m.inv <<- inv
  getinv <- function() m.inv
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
cacheSolve <- function(x, ...) {
  # Computes the inverse of the special "matrix" returned by
  # makeCacheMatrix. If the inverse has already been calculated (and the
  # matrix has not changed), the cached value is returned instead of
  # being recomputed.
  #
  # Args:
  #   x:   a CacheMatrix object (list of closures from makeCacheMatrix).
  #   ...: further arguments forwarded to solve().
  #
  # Returns:
  #   the inverse matrix of x.
  cached <- x$getinv()
  if (!is.null(cached)) {
    # Cache hit: reuse the previously computed inverse.
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
##
## Test data:
##       [,1] [,2]
## [1,]    2   -1
## [2,]   -1    1
##
## Inverse:
##       [,1] [,2]
## [1,]    1    1
## [2,]    1    2
##
# Ad-hoc smoke test: print the matrix and its true inverse, then build
# the caching wrapper around it.
m <- matrix(c(2,-1,-1,1), c(2,2))
print(m)
print(solve(m))
cm <- makeCacheMatrix(m)
a3723b0e74c115deec15ef04bf9d34d9eb2300e9 | 9a19c5f4f1cad1e506d6b76066584e4923c0a825 | /R/cleaningscript.R | 6a31358b3135cc5586d1d6aeed92f427b90520a3 | [] | no_license | despresj/stt863 | 62b52a9d6e277fea50e37c5f1683339573a31369 | bc6ef8377054c9fc186093daf0c987e91e6cab06 | refs/heads/main | 2023-03-03T14:41:55.317472 | 2021-01-25T21:11:07 | 2021-01-25T21:11:07 | 309,518,062 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,821 | r | cleaningscript.R | library(tidyverse)
# Read and Clean ----------------------------------------------------------
# Collect all Gapminder-style CSV indicator files (one wide table each).
files <- list.files(path = here::here("raw_data"), pattern = ".csv")
# Reshape one wide indicator table (country x year columns) to long form.
# The assignment's value is returned invisibly, which map() captures.
cleaning <- function(df){
  df <- pivot_longer(df, cols = -country, names_to = "year")
}
# Read every file, name each element after its file, reshape to long,
# stack with the indicator name in `id`, then spread indicators to columns.
data <- files %>%
  map(function(x) read_csv(paste0("raw_data/", x))) %>%
  setNames(gsub("\\.csv$", "", files)) %>%
  map(cleaning) %>%
  bind_rows(.id = "id") %>%
  pivot_wider(names_from = id)
# Filtering ---------------------------------------------------------------
countries <- readRDS(here::here("data", "countries.RDS"))
# NOTE(review): `year` comes from column names, so it may be character
# here; the range comparison below would then be lexicographic -- confirm.
# Also note group_by() persists through the rest of the chain (no
# ungroup()), so the median imputation is per-country as intended.
data <- data %>%
  filter(year > 2000, year < 2019, country %in% countries) %>%
  group_by(country) %>%
  mutate_at(vars(-country),list(~ifelse(is.na(.), median(., na.rm = TRUE), .))) %>%
# Iceland spent 0 so I needed to manually recode that
  mutate(military_spending_pct_of_gdp = replace_na(military_spending_pct_of_gdp, 0)) %>%
  mutate(murder_per_mil_people = replace(murder_per_mil_people, country == "Mexico", 29.07),
         murder_per_mil_people = replace(murder_per_mil_people, country == "Chile", 4.4),
         murder_per_mil_people = replace(murder_per_mil_people, country == "Colombia", 25.34)) %>%
# https://en.wikipedia.org/wiki/List_of_countries_by_intentional_homicide_rate
# these murder rates were not included in the gapminder but i didnt want to
# lose 3 important countries in LA
  relocate(polrights_fh) %>% mutate(polrights_fh = (8 - polrights_fh),
  military_spending_pct_of_gdp = military_spending_pct_of_gdp * 100)%>%
  mutate(corruption_perception_index_cpi = (corruption_perception_index_cpi - 100) * -1)
# writing -----------------------------------------------------------------
write_csv(data, here::here("data", "df.csv"))
|
19041b6334b4b97bc8d650573f8bbcd595df56f0 | b0167cf780b2b1b3f150c123aed5d4dbef2e7ff2 | /src/R/panels/console.R | faf84eb573b01fa1adaf2f24bc081650e9fab48b | [] | no_license | Wu-Li/QuoiR | 3f7cc4f25ba6bdfeb8441350b1052c58f58542bc | d0d56a15e490a48017d4b2bdf2795e821bc34e2b | refs/heads/master | 2021-06-19T12:06:56.323331 | 2017-07-10T22:22:46 | 2017-07-10T22:22:46 | 26,001,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,855 | r | console.R | #Console Panel
# Render the console output stream: one <p> per recorded line, styled by
# its type ('style' entries are injected as raw CSS instead).
output$console <- renderUI({
  if(is.null(console$.out)) {return(div())}
  results <- console$.out$results
  types <- console$.out$types
  hovers <- console$.out$hovers
  widths <- console$.out$widths
  div(
    #withMathJax(
    # Build one tag per recorded line; the four vectors are parallel.
    mapply(
      function(result,type,hover,width) {
        tag <- 'p'
        if (type=='style') { tags$style(HTML(result)) }
        else { tags$p(result,class=type,title=hover,style=width) }
      },results,types,hovers,widths,SIMPLIFY=F),
    # Notify the client-side console widget that content changed.
    tags$script('Q.panels.console.trigger("change");')
    #)
  )
})
#JS
# Mirror JavaScript results sent from the client into the console stream.
observe({
  if(is.null(input$js)){ return(NULL) }
  console$.js <- input$js
  toConsole(input$js,'out','javascript')
})
# Surface client-side JavaScript errors in the console stream.
observe({
  if(is.null(input$jsError)){ return(NULL) }
  toConsole(input$jsError,'error','javascript error')
})
toConsole <- function(values,type,hover=type){
  # Append one or more lines to the reactive console output stream.
  #
  # Args:
  #   values: character vector of lines to display.
  #   type:   CSS class applied to every appended line (e.g. 'out', 'error').
  #   hover:  tooltip text for every appended line; defaults to the type.
  #
  # Side effect: updates the parallel vectors in console$.out.
  # Returns NULL, as before.
  #
  # Improvement: the original grew each vector element-by-element inside
  # an lapply() with <<-; this appends all entries at once (same result).
  mw <- paste0('width:',8*as.integer(options('width')),'px;')
  out <- isolate(console$.out)
  n <- length(values)
  console$.out$results <- c(out$results, values)
  console$.out$types <- c(out$types, rep(type, n))
  console$.out$hovers <- c(out$hovers, rep(hover, n))
  console$.out$widths <- c(out$widths, rep(mw, n))
  return(NULL)
}
#Evaluate
#evaluate <- function(x,...) UseMethod('evaluate')
evaluate <- function(entry) {
  # Evaluate a console entry inside the `console` environment and route
  # the result to the appropriate panel (console, plot, CSS injection...).
  #
  # Fix: the warning handler called `.try(...)`, which is undefined and
  # would itself error; the success branch uses `try(...)`, so the
  # handler now matches it. Also T -> TRUE for flag arguments.
  prompt$panel <- 'console'
  if (entry == 'clear') { return(clear()) }
  # Bare known command names are treated as zero-argument calls.
  if (entry %in% prompt$commands) { entry <- paste0(entry,'()') }
  maps <- isolate(lapply(names(active$views),function(n) paste0(n,'()')))
  if (entry %in% maps) { entry <- paste0('run.map("',substr(entry, 1, nchar(entry)-2),'")') }
  # Rewrite "$(" into "select(" so jQuery-style selectors parse as R.
  entry <- gsub("\\$(?=\\()", "select", entry, perl=TRUE)
  tryCatch(
    {
      values <- isolate(eval(parse(text=entry), console ))
      if ('R' %in% class(values)) {
        # A bundle of R entries: evaluate each one recursively.
        values <- as.list(values)
        rapply(values,evaluate)
      } else if('CSS' %in% class(values)) {
        lapply(values,evaluate.CSS)
      } else if('ggplot' %in% class(values)){
        # Plots are routed to the plot panel instead of the console.
        prompt$panel <- 'plot'
        prompt$plot <- values
      } else {
        # Plain value: print it, with its class as the hover text.
        toConsole(capture.output(values),'out',try(paste0(class(eval(values)),collapse=' ')))
        ans <<- values
      }
    },
    warning = function(w){
      w <- sub('simpleWarning in eval(expr, envir, enclos)','warning',w,fixed=TRUE)
      toConsole(w,'warning')
      # Re-evaluate with warnings suppressed so the value still shows.
      isolate(values <- suppressWarnings( eval(parse(text=entry), console ) ))
      toConsole(capture.output(values),'out',try(paste0(class(eval(values)),collapse=' ')))
      ans <<- values
    },
    error = function(e) {
      # Strip R's internal call context from the message before display.
      e <- sub(' in eval(expr, envir, enclos)','',e,fixed=TRUE)
      e <- sub(' in parse(text = entry)','',e,fixed=TRUE)
      toConsole(e,'error')
    }
  )
  updateTabsetPanel(session, "panels", selected = prompt$panel)
}
# NOTE(review): this method ignores `entry` and references `values`,
# which is not defined in its scope -- likely a copy-paste bug from the
# branch inside evaluate(); confirm it is ever dispatched.
evaluate.R <- function(entry) rapply(values,evaluate)
# Route a help request to the help panel.
evaluate.help <- function(entry) {
  prompt$help <- entry
}
# Forward a JS entry to the client for browser-side evaluation.
evaluate.JS <- function(entry) {
  updateJS(session,'js',entry)
}
# Flatten a nested CSS specification (named list of selectors -> named
# list of attributes/values) into "selector {attr:value;}" rules and
# push each rule to the console as a 'style' entry.
evaluate.CSS <- function(entry) {
  mapply(function(sel,val){
    mapply(function(a,v){
      if(length(names(v))==0){
        # Leaf: a plain attribute/value pair becomes one CSS rule.
        toConsole(values=paste0(sel,' {',a,':',v,';}'),type='style')
      } else {
        # Nested block: treat `a` as a descendant selector and recurse
        # with the combined "parent child" selector.
        child <- NULL
        child[[paste0(sel,' ',a)]] <- val[[a]]
        evaluate.CSS(child)
      }
    },names(val),val)
  },names(entry),entry)
}
3b3374fe0aa49eb4593bea592bed4f7d6c597d07 | 2ff934a8435c4a0bdfb108d2fcf1d36ede49ebfc | /s1-download-data.R | 8cb485d79fc317b044f43481b4c5ff7d4f6a288b | [] | no_license | andre-wojtowicz/uci-ml-to-r | 9de3a379a3b9e06a52b750c17e3c2583ed457d73 | f7debcd1545901815278f204a7cf9a8193b18285 | refs/heads/master | 2021-01-19T04:05:34.084404 | 2016-08-19T19:51:21 | 2016-08-19T19:51:21 | 63,508,847 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,395 | r | s1-download-data.R | # ---- download-data ----
# Step 1: download every dataset listed in each dataset's config.yaml.
# Relies on globals defined by init.R (DATASETS.DIR, LOGGER.*, etc.).
source("init.R")
source("utils.R")
setup.logger(LOGGER.OUTPUT.S1.FILE, LOGGER.OVERWRITE.EXISTING.FILES)
flog.info("Step 1: download dataset collection")
for (dir.name in dir(DATASETS.DIR))
{
    flog.info(paste("Dataset:", dir.name))
    # Per-dataset destination directory from the name template.
    dest.dir = gsub(DATASET.NAME.PATTERN, dir.name, DATASET.ORIGINAL.DIR)
    config.yaml.file = file.path(DATASETS.DIR, dir.name, DATASET.CONFIG.FILE)
    urls.list = yaml.load_file(config.yaml.file)$urls
    if (!dir.exists(dest.dir))
    {
        dir.create(dest.dir)
    }
    for (url in urls.list)
    {
        flog.info(paste("URL:", url))
        dest.file = URLdecode(basename(url))
        dest.file.path = file.path(dest.dir, dest.file)
        # NOTE(review): `|` works here on scalars but `||` is the
        # idiomatic short-circuit operator for an if() condition.
        if (!file.exists(dest.file.path) | OVERWRITE.OUTPUT.FILES)
        {
            # Download as binary; log and re-raise on failure.
            tryCatch(
                raw.content <-
                    getBinaryURL(url, .opts = curlOptions(ssl.verifypeer =
                                                              SSL.VERIFY.PEER)),
                error = function(e){flog.error(e); stop(e)}
            )
            writeBin(raw.content, dest.file.path)
        } else {
            flog.warn(paste("Target file", basename(dest.file.path),
                            "already exists, skipping"))
        }
    }
    flog.info(paste(rep("*", 25), collapse = ""))
}
|
9d8c6981adb955b2e4d5a173bc35381195f1d22b | f4cbd423d21ca7dcbcc8095d8676ff245a06b14b | /scripts_sql.R | e9a25bf262a44920f1cb3092883133c7605315e9 | [] | no_license | DaoHai/R-data-carpentry | e84f1fb71e2e9a8d157d9da72767e7d0e1da3002 | b715e1df4794036c3f7e13ea00d30a028ef9f284 | refs/heads/master | 2021-01-10T18:02:19.962181 | 2015-10-30T16:08:06 | 2015-10-30T16:08:06 | 45,193,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,738 | r | scripts_sql.R | install.packages("RSQLite")
library(RSQLite)
# database connection to an existing SQLite file
mydb <- "dbavecR.sqlite"
conn <- dbConnect(drv = SQLite(), dbname= mydb)
# Scripted queries: inspect the schema, then run simple selects.
dbGetQuery(conn, "SELECT type, tbl_name FROM sqlite_master")
dbListTables(conn)
dbListFields(conn, "surveys")
dbGetQuery(conn, "SELECT count(*) FROM surveys")
q <- 'SELECT DISTINCT year, species_id FROM surveys'
result <- dbGetQuery(conn, q)
head(result)
q <- 'SELECT COUNT(*), ROUND(SUM(WEIGHT)) FROM surveys'
result <- dbGetQuery(conn, q)
result
# Subquery with table aliases: count records per plot_type and genus.
q <- "SELECT d.plot_type , c.genus, count(*)
FROM
(SELECT a.genus, b.plot_id
FROM species a
JOIN surveys b
ON a.species_id = b.species_id) c
JOIN plots d
ON c.plot_id = d.plot_id
GROUP BY d.plot_type,c.genus"
result <- dbGetQuery(conn,q)
head(result)
# Build a query from R values: rodent counts for every second year.
# NOTE(review): the query string is assembled by pasting values; the
# values come from the database itself here, but parameterized queries
# (dbGetQuery params) would be safer if inputs ever come from users.
yearRange <- dbGetQuery(conn,"SELECT min(year),max(year) FROM surveys")
years <- seq(yearRange[,1],yearRange[,2],by=2)
q <- paste("
SELECT a.year,b.taxa,count(*) as count
FROM surveys a
JOIN species b
ON a.species_id = b.species_id
AND b.taxa = 'Rodent'
AND a.year in (",
paste(years,collapse=",")
,")
GROUP BY a.year, b.taxa",
sep = "" )
rCount <- dbGetQuery(conn,q)
head(rCount)
#---------Create the database from R---------------------
species <- read.csv("species.csv")
surveys <- read.csv("surveys.csv")
plots <- read.csv("plots.csv")
# Open up a connection; SQLite creates the file if it does not exist.
myDB <- "portalR.db"
myConn <- dbConnect(drv = SQLite(), dbname= myDB)
#dbListTables(myConn)
# Add a table to the database and verify it is readable.
dbWriteTable(myConn,"species",species)
dbListTables(myConn)
dbGetQuery(myConn,"SELECT * from species limit 10")
|
a2b46e038e15d9fc92063bb9c454980d2e54e0e1 | b12f39ce7b0740510ea2499cad444a772ede2169 | /hypothesis_test.R | 5473ee10721650b60402d020a2f8a2100a5f9bd2 | [] | no_license | edsonfajilagot/pupmas-dp | 5940cb5ff9a3e0194e5d349d06d517a78b51a324 | 4b8041bb5efd63ace8744290303a01641952b69a | refs/heads/master | 2022-11-11T06:18:30.266457 | 2020-07-04T08:13:11 | 2020-07-04T08:13:11 | 271,491,252 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 243 | r | hypothesis_test.R |
# One-sample lower-tailed z-test: H0 mu = 68000 vs H1 mu < 68000.
# Improvements: consistent `<-` assignment throughout (the original mixed
# `=` and `<-`) and sqrt(n) instead of a hard-coded sqrt(30) duplicating
# the defined sample size. All resulting values are unchanged.
alpha <- 0.01   # significance level of the z-test
M <- 68000      # hypothesized population mean
Mbar <- 66900   # observed sample mean
Sdev <- 5500    # (assumed known) population standard deviation
n <- 30         # sample size
# Test statistic: z = (xbar - mu0) / (sigma / sqrt(n)).
z <- (Mbar - M)/(Sdev/sqrt(n))
z.alpha <- qnorm(alpha)   # lower-tail critical value
pval <- pnorm(z)          # p-value: P(Z <= z) under H0
# One-sample lower-tailed t-test with df = 13.
alpha <- 0.05
tc <- -2.2436             # calculated t-statistic
t.alpha <- qt(alpha, df=13)   # lower-tail critical value
pval <- pt(tc, df=13)     # p-value
|
e4fb9a3ed7dce2c3903a2c337283dc0d036fbe55 | 189e33f9fe11b60e361e9b196045f15c21147bf1 | /inst/Shiny/LINES_posterior_viewer/server.R | 3d71eca387e2f435337df3cf488f0424bdb00025 | [] | no_license | cran/EurosarcBayes | a47ba619e45d98cec11ff342d6b11297fa00fd87 | ebc493cfc482c10bbf490dbcc96c326d2da599b4 | refs/heads/master | 2021-01-10T13:17:31.911364 | 2017-11-15T17:09:23 | 2017-11-15T17:09:23 | 48,079,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,538 | r | server.R | library(shiny)
# Grid of probabilities on [0, 1] used for all Beta density curves.
x=0:1000/1000
cex.main=1.5
cex.axis=1.5
# Define server logic for the Beta prior/posterior viewer
shinyServer(function(input, output, session){
  ############################################################################################
  # reset app on close
  session$onSessionEnded(function() {
    stopApp()
  })
  # Prior Beta densities for response (r0) and toxicity (t0).
  prior.data=reactive({
    r0=dbeta(x,input$rpriora,input$rpriorb)
    t0=dbeta(x,input$tpriora,input$tpriorb)
    return(list(r0=r0,t0=t0))
  })
  # Likelihood ("data") densities plus posterior Beta parameters and
  # densities; Beta-binomial conjugacy means posterior = prior + data.
  post.data=reactive({
    rdata=dbeta(x,input$rdataa,input$rdatab)
    tdata=dbeta(x,input$tdataa,input$tdatab)
    rposteriora=input$rpriora+input$rdataa
    rposteriorb=input$rpriorb+input$rdatab
    tposteriora=input$tpriora+input$tdataa
    tposteriorb=input$tpriorb+input$tdatab
    r1=dbeta(x,rposteriora,rposteriorb)
    t1=dbeta(x,tposteriora,tposteriorb)
    return(list(rdata=rdata,tdata=tdata,r1=r1,t1=t1,rposteriora=rposteriora,rposteriorb=rposteriorb,tposteriora=tposteriora,tposteriorb=tposteriorb))
  })
  # Side-by-side prior (faded) and data (solid) curves for both endpoints.
  output$prior.graph <- renderPlot({
    par(mfrow=c(1,2))
    # set graph limits; guard against infinite/huge Beta densities
    prior.r=max(prior.data()$r0)
    data.r=max(post.data()$rdata)
    prior.t=max(prior.data()$t0)
    data.t=max(post.data()$tdata)
    maxr=max(3,ifelse(is.na(prior.r)==F & prior.r<10^6,prior.r,3),ifelse(is.na(data.r)==F & data.r<10^6,data.r,3))
    maxt=max(3,ifelse(is.na(prior.t)==F & prior.t<10^6,prior.t,3),ifelse(is.na(data.t)==F & data.t<10^6,data.t,3))
    ylimr=c(0,maxr+0.6)
    ylimt=c(0,maxt+0.6)
    #####################################################################################################
    plot(x,prior.data()$r0,type="l",col=adjustcolor("blue",alpha.f=0.3),xaxs="i",yaxs="i",ylim=ylimr,
         xlab="Probability of response", ylab="Density",cex.lab=cex.axis)
    lines(x,post.data()$rdata,col="blue")
    title(main="Response",cex.main=cex.main)
    plot(x,prior.data()$t0,type="l",col=adjustcolor("red",alpha.f=0.3),xaxs="i",yaxs="i",ylim=ylimt,
         xlab="Probability of toxicity", ylab="Density",cex.lab=cex.axis)
    lines(x,post.data()$tdata,col="red")
    title(main="Toxicity",cex.main=cex.main)
  },width = "auto", height = "auto")
  # Posterior curves with the decision region shaded and its posterior
  # probability annotated, depending on the selected endpoint rule.
  output$posterior.graph <- renderPlot({
    par(mfrow=c(1,2))
    plot(x,post.data()$r1,type="l",xaxs="i",yaxs="i",col=1,ylim=c(0,ceiling(max(post.data()$r1)+0.6)),
         xlab="Probability of response", ylab="Density",cex.lab=cex.axis)
    title(main="Response",cex.main=cex.main)
    abline(v=c(input$rlower,input$rupper),h=0)
    if(input$resp.endpoint=="Futility"){
      # Shade P(R < rupper): probability the response rate is too low.
      polygon(c(0,x[1+0:(input$rupper*1000)],x[1+(input$rupper*1000)]),c(0,post.data()$r1[1+0:(input$rupper*1000)],0),col=adjustcolor("red",alpha.f=0.5),border="red")
      text(input$rupper+0.2,max(post.data()$r1)+0.3,paste0("P(R<",input$rupper,") = ",round(pbeta(input$rupper,post.data()$rposteriora,post.data()$rposteriorb),3)),cex=2,col="red")
    } else if(input$resp.endpoint=="Efficacy") {
      # Shade P(R > rlower): probability the response rate is high enough.
      polygon(c(x[1+(input$rlower*1000)],x[1+(input$rlower*1000):1000],1),c(0,post.data()$r1[1+(input$rlower*1000):1000],0),col=adjustcolor("blue",alpha.f=0.5),border="blue")
      text(input$rupper+0.2,max(post.data()$r1)+0.3,paste0("P(R>",input$rlower,") = ",round(1-pbeta(input$rlower,post.data()$rposteriora,post.data()$rposteriorb),3)),cex=2,col="blue")
    }
    plot(x,post.data()$t1,type="l",xaxs="i",yaxs="i",col=1,ylim=c(0,ceiling(max(post.data()$t1)+0.6)),
         xlab="Probability of toxicity", ylab="Density",cex.lab=cex.axis)
    title(main="Toxicity",cex.main=cex.main)
    abline(v=c(input$tlower,input$tupper),h=0)
    if(input$tox.endpoint=="Toxicity"){
      # Shade P(T > tlower): probability of unacceptable toxicity.
      polygon(c(x[1+(input$tlower*1000)],x[1+(input$tlower*1000):1000],1),c(0,post.data()$t1[1+(input$tlower*1000):1000],0),col=adjustcolor("red",alpha.f=0.5),border="red")
      text(input$tupper+0.2,max(post.data()$t1)+0.3,paste0("P(T>",input$tlower,") = ",round(1-pbeta(input$tlower,post.data()$tposteriora,post.data()$tposteriorb),3)),cex=2,col="red")
    } else if(input$tox.endpoint=="No Toxicity") {
      # Shade P(T < tupper): probability toxicity is acceptably low.
      polygon(c(0,x[1+0:(input$tupper*1000)],x[1+(input$tupper*1000)]),c(0,post.data()$t1[1+0:(input$tupper*1000)],0),col=adjustcolor("blue",alpha.f=0.5),border="blue")
      text(input$tupper+0.2,max(post.data()$t1)+0.3,paste0("P(T<",input$tupper,") = ",round(pbeta(input$tupper,post.data()$tposteriora,post.data()$tposteriorb),3)),cex=2,col="blue")
    }
  },width = "auto", height = "auto")
})
"\u2119"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.