blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec0d0a4ccdcbbfe6fcdec5e7f77ec506446ad117
|
cab6be4f5004f4c9106e77623dfc85aec4fbeeec
|
/ballerDepHeterogenScripts/Sunny_matching_script.R
|
6a18d533a4aec13fb8b970164ee1f6942cdd9de3
|
[] |
no_license
|
PennBBL/ballerDepHeterogenScripts
|
fab351c54bb5263e1aac4d133a8f92b66df4b8f4
|
90d112fd734e41ae93ec4bd3a591885c53915359
|
refs/heads/master
| 2021-06-04T11:55:28.632889
| 2020-02-16T03:09:25
| 2020-02-16T03:09:25
| 112,241,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,119
|
r
|
Sunny_matching_script.R
|
library(MASS)
library(Matching)
library(tidyr)
library(readr)
library(effsize)
library(dplyr)
###Loading Data
# Read the olfaction export, group rows by diagnostic group, and sort by
# group then subject ID (bblid) so later per-group operations line up.
alldata<-read.csv("OlfactionFromStata_2017_06-19.csv") %>%
group_by(group) %>%
arrange(group,bblid)
# Interactive sanity checks on the loaded table.
View(alldata)
str(alldata)
colnames(alldata)
#Cleaning up variables
alldata$anyspectrum<-as.factor(alldata$anyspectrum)
###Calculating and Z-transforming CNB Categories
# Creating normalized summary variables
# Each domain score is the row-wise mean of three CNB accuracy z-scores,
# re-standardized with scale() so every summary has mean 0 / sd 1.
# Executive function: letter n-back, PCET, PCPT accuracy.
zacexe <- as.vector(
scale(
rowMeans(
data.frame(alldata$zaclnb, alldata$zacpcet, alldata$zacpcpt), na.rm = TRUE)
))
# Memory: face memory, word memory, visual object learning accuracy.
zacmem <- as.vector(
scale(
rowMeans(
data.frame(alldata$zaccpf, alldata$zaccpw, alldata$zacvolt), na.rm = TRUE)
))
# Complex cognition: verbal reasoning, matrix reasoning, line orientation.
zaccog <- as.vector(
scale(
rowMeans(
data.frame(alldata$zacpvrt, alldata$zacpmat, alldata$zacplot), na.rm = TRUE)
))
# Social cognition: age differentiation, emotion recognition/differentiation.
zacsoc <- as.vector(
scale(
rowMeans(
data.frame(alldata$zacadt, alldata$zacer, alldata$zacedf), na.rm = TRUE)
))
# Overall cognition: mean of the four domain scores, re-standardized.
zacall <- as.vector(
scale(
rowMeans(
data.frame(zacexe, zacmem, zaccog, zacsoc), na.rm = TRUE)
))
#Merging with existing data
alldata$zacexe = zacexe
alldata$zacmem = zacmem
alldata$zaccog = zaccog
alldata$zacsoc = zacsoc
alldata$zacall = zacall
# NOTE(review): zacall was already added by the assignment above; this
# data.frame() call creates a duplicate (zacall.1) that the next line
# deletes again, so the pair of lines is redundant.
alldata<-data.frame(alldata,zacall)
alldata$zacall.1 <- NULL
#Checking
colnames(alldata)
summary(alldata$zacall)
sd(alldata$zacall, na.rm = TRUE)
###Selecting Important Variables for Analysis
# Keep demographics, olfactory measures (id, disc, lyral, cit), the CNB
# summary z-scores, and clinical/genotype variables in a working frame.
dat <- select(alldata, bblid, group, sex, race, age, edu, avgedu, hand, smoker, anysmoke,
id, disc, lyral, cit,
zacexe, zacmem, zaccog, zacsoc, zacall, wratstd, gafc, comtgenotype,
positive, negative, sipstot, anyspectrum, psyscale)
View(dat)
table(dat$group)
###Matching 22q and ND
#Creating 0/1 vs. T/F variable for groups
# Tr is TRUE for 22q subjects, FALSE otherwise; Match() uses it as the
# treatment indicator.
dat<-mutate(dat, Tr=as.logical(dat$group=="22q"))
#Alternative method: recode the group labels to 1/0.
# BUG FIX: the original wrote `new[as.matrix(new)="22q"]`, using `=` inside
# `[ ]`, which R parses as argument naming (a syntax error with a call on
# the left-hand side). Element-wise comparison requires `==`.
new <- dat$group
new[new == "22q"] <- 1
new[new == "NC/LR"] <- 0
data.frame(dat, new)
#Match: pair each 22q subject with one control on sex and age,
# 1:1 without replacement and without ties.
matchstats<-Match(Tr=dat$Tr, X=as.matrix(cbind(dat$sex,dat$age)), replace=FALSE, ties=FALSE)
str(matchstats)
#Stiching together matched rows: treated indices then their matched controls.
matchq22<-dat[matchstats$index.treated,]
matchcontrol<-dat[matchstats$index.control,]
match<-rbind(matchq22, matchcontrol)
match<-group_by(match, group)
str(match)
View(match)
View(matchq22)
View(matchcontrol)
# NOTE(review): these save binary RData despite the .R extension.
save(match, file="/Users/ericwpetersen/Desktop/Olfaction R Files/match.R")
save(matchq22, file="/Users/ericwpetersen/Desktop/Olfaction R Files/matchq22.R")
save(matchcontrol, file="/Users/ericwpetersen/Desktop/Olfaction R Files/matchcontrol.R")
###Matched Sample Description
#Demographics
# Columns 2:5 of the selected frame are group, sex, race, age.
summary(match[,2:5])
summary(matchq22[,2:5])
summary(matchcontrol[,2:5])
t.test(matchq22$age, y=matchcontrol$age)
###Unmatched Sample Description ...
q22<-filter(dat, group=="22q")
control<-filter(dat, group=="NC/LR")
save(dat, file="/Users/ericwpetersen/Desktop/Olfaction R Files/dat.R")
save(q22, file="/Users/ericwpetersen/Desktop/Olfaction R Files/q22.R")
save(control, file="/Users/ericwpetersen/Desktop/Olfaction R Files/control.R")
#Demographics
table(q22$psyscale)
table(dat$group, dat$smoker)
summary(q22[,2:5])
summary(control[,2:5])
# Exact tests for group differences in categorical demographics.
table(dat$sex, dat$group)
fisher.test(table(dat$sex, dat$group))
table(dat$race, dat$group)
fisher.test(table(dat$race, dat$group))
t.test(x=q22$age, y=control$age)
fisher.test(table(dat$group, dat$smoker))
#Olfactory measures
# Distribution checks per group for each raw olfactory measure.
hist(q22$id)
hist(q22$disc)
hist(q22$age)
hist(q22$cit)
hist(q22$lyral)
hist(control$id)
hist(control$disc)
hist(control$age)
hist(control$cit)
hist(control$lyral)
# Visual and rank-based checks for age effects on each measure.
plot(q22$age, q22$id)
plot(q22$age, q22$disc)
plot(q22$age, q22$cit)
plot(q22$age, q22$lyral)
plot(control$age, control$id)
plot(control$age, control$disc)
plot(control$age, control$cit)
plot(control$age, control$lyral)
cor.test(q22$age, y=q22$id, method="spearman")
cor.test(q22$age, y=q22$disc, method="spearman")
cor.test(q22$age, y=q22$cit, method="spearman")
cor.test(q22$age, y=q22$lyral, method="spearman")
cor.test(control$age, y=control$id, method="spearman")
cor.test(control$age, y=control$disc, method="spearman")
cor.test(control$age, y=control$cit, method="spearman")
cor.test(control$age, y=control$lyral, method="spearman")
###Calculating residuals for olfactory measures and age
#Updating data frame with z-transformed age variables
# Manual z-score of age, plus squared/cubed terms for a cubic age model.
zage<-(dat$age - mean(dat$age))/sd(dat$age)
hist(zage)
zagesquared <- zage^2
zagecubed <- zage^3
dat<-data.frame(dat, zage, zagesquared, zagecubed)
View(dat)
#Calculating residuals
# Each olfactory measure is regressed on the cubic age polynomial and the
# scaled residuals are kept as the age-corrected score.
# BUG FIX: the idz model now uses na.action=na.exclude like its three
# siblings, so residuals() is padded back to nrow(dat) and stays aligned
# with dat for the plot and the data.frame() merge when id has NAs.
idz<-scale(
residuals(
lm(id ~ zage + zagesquared + zagecubed, data=dat, na.action=na.exclude)
))
plot(dat$id, idz)
discz<-scale(
residuals(
lm(disc ~ zage + zagesquared + zagecubed, data=dat, na.action=na.exclude)
))
plot(dat$disc, discz)
citz<-scale(
residuals(
lm(cit ~ zage + zagesquared + zagecubed, data=dat, na.action = na.exclude)
))
# Sign flip — presumably so higher citz means better sensitivity (lower
# detection threshold); TODO confirm threshold scoring direction.
citz<- 0-citz
plot(dat$cit, citz)
lyralz<-scale(
residuals(
lm(lyral ~ zage + zagesquared + zagecubed, data=dat, na.action = na.exclude)
))
lyralz<- 0-lyralz
plot(dat$lyral, lyralz)
#Integration into dataframe
# Drop any stale citz/lyralz columns before re-adding the fresh ones.
dat<-data.frame(dat,idz, discz)
dat$citz<-NULL
dat$lyralz<-NULL
dat<-data.frame(dat,citz, lyralz)
View(dat)
q22<-filter(dat, group=="22q")
control<-filter(dat, group=="NC/LR")
save(dat, file="/Users/ericwpetersen/Desktop/Olfaction R Files/dat.R")
save(q22, file="/Users/ericwpetersen/Desktop/Olfaction R Files/q22.R")
save(control, file="/Users/ericwpetersen/Desktop/Olfaction R Files/control.R")
### Comparing olfaction measures between groups
#Characterizing data
# Distribution checks of the age-corrected z-scores per group.
hist(q22$idz)
hist(control$idz)
hist(q22$discz)
hist(control$discz)
hist(q22$citz)
hist(control$citz)
hist(q22$lyralz)
hist(control$lyralz)
#Comparisons with ttests
# For each measure: Welch t-test, Cohen's d effect size, and a boxplot.
t.test(x=q22$idz, y=control$idz)
cohen.d(q22$idz, f=control$idz, na.rm=TRUE)
boxplot(q22$idz, control$idz, names = c("22q", "Control"), main="ID")
t.test(x=q22$discz, y=control$discz)
cohen.d(q22$discz, f=control$discz, na.rm = TRUE)
boxplot(q22$discz, control$discz, names = c("22q", "Control"), main="DISC")
t.test(x=q22$citz, y=control$citz)
cohen.d(q22$citz, f=control$citz, na.rm = TRUE)
boxplot(q22$citz, control$citz, names = c("22q", "Control"), main="Citralva")
t.test(x=q22$lyralz, y=control$lyralz)
cohen.d(q22$lyralz, f=control$lyralz, na.rm = TRUE)
boxplot(q22$lyralz, control$lyralz, names = c("22q", "Control"), main="Lyral")
###Sex effect
# Within-group sex differences for each age-corrected measure.
t.test(q22$idz~q22$sex)
t.test(control$idz~control$sex)
t.test(q22$discz~q22$sex)
t.test(control$discz~control$sex)
t.test(q22$citz~q22$sex)
t.test(control$citz~control$sex)
t.test(q22$lyralz~q22$sex)
t.test(control$lyralz~control$sex)
###Relation to cognition
# Grid of linear models: each olfactory z-score regressed on sex plus one
# CNB domain (zacall/zacexe/zacmem/zaccog/zacsoc), fit separately within
# the 22q and control groups.
#ID vs. cognitive domains (Sig: cog in controls, mem in 22q)
summary(lm(idz ~ sex + zacall, data=q22, na.action=na.exclude))
summary(lm(idz ~ sex + zacall, data=control, na.action=na.exclude))
summary(lm(idz ~ sex + zacexe, data=q22, na.action=na.exclude))
summary(lm(idz ~ sex + zacexe, data=control, na.action=na.exclude))
summary(lm(idz ~ sex + zacmem, data=q22, na.action=na.exclude))
summary(lm(idz ~ sex + zacmem, data=control, na.action=na.exclude))
summary(lm(idz ~ sex + zaccog, data=q22, na.action=na.exclude))
summary(lm(idz ~ sex + zaccog, data=control, na.action=na.exclude))
summary(lm(idz ~ sex + zacsoc, data=q22, na.action=na.exclude))
summary(lm(idz ~ sex + zacsoc, data=control, na.action=na.exclude))
#DISC vs. cognitive domains (NONE sig)
summary(lm(discz ~ sex + zacall, data=q22, na.action=na.exclude))
summary(lm(discz ~ sex + zacall, data=control, na.action=na.exclude))
summary(lm(discz ~ sex + zacexe, data=q22, na.action=na.exclude))
summary(lm(discz ~ sex + zacexe, data=control, na.action=na.exclude))
summary(lm(discz ~ sex + zacmem, data=q22, na.action=na.exclude))
summary(lm(discz ~ sex + zacmem, data=control, na.action=na.exclude))
summary(lm(discz ~ sex + zaccog, data=q22, na.action=na.exclude))
summary(lm(discz ~ sex + zaccog, data=control, na.action=na.exclude))
summary(lm(discz ~ sex + zacsoc, data=q22, na.action=na.exclude))
summary(lm(discz ~ sex + zacsoc, data=control, na.action=na.exclude))
#Citralva threshold vs. cognitive domains (NONE sig)
summary(lm(citz ~ sex + zacall, data=q22, na.action=na.exclude))
summary(lm(citz ~ sex + zacall, data=control, na.action=na.exclude))
summary(lm(citz ~ sex + zacexe, data=q22, na.action=na.exclude))
summary(lm(citz ~ sex + zacexe, data=control, na.action=na.exclude))
summary(lm(citz ~ sex + zacmem, data=q22, na.action=na.exclude))
summary(lm(citz ~ sex + zacmem, data=control, na.action=na.exclude))
summary(lm(citz ~ sex + zaccog, data=q22, na.action=na.exclude))
summary(lm(citz ~ sex + zaccog, data=control, na.action=na.exclude))
summary(lm(citz ~ sex + zacsoc, data=q22, na.action=na.exclude))
summary(lm(citz ~ sex + zacsoc, data=control, na.action=na.exclude))
#Lyral threshold vs. cognitive domains (Sig: overall in 22q, soc in 22q)
summary(lm(lyralz ~ sex + zacall, data=q22, na.action=na.exclude))
summary(lm(lyralz ~ sex + zacall, data=control, na.action=na.exclude))
summary(lm(lyralz ~ sex + zacexe, data=q22, na.action=na.exclude))
summary(lm(lyralz ~ sex + zacexe, data=control, na.action=na.exclude))
summary(lm(lyralz ~ sex + zacmem, data=q22, na.action=na.exclude))
summary(lm(lyralz ~ sex + zacmem, data=control, na.action=na.exclude))
summary(lm(lyralz ~ sex + zaccog, data=q22, na.action=na.exclude))
summary(lm(lyralz ~ sex + zaccog, data=control, na.action=na.exclude))
summary(lm(lyralz ~ sex + zacsoc, data=q22, na.action=na.exclude))
summary(lm(lyralz ~ sex + zacsoc, data=control, na.action=na.exclude))
#Group effect controlling for cognition
# Same models on the pooled data with a group term, with and without the
# overall cognition covariate.
summary(lm(idz ~ sex + group, data=dat, na.action=na.exclude))
summary(lm(idz ~ sex + group + zacall, data=dat, na.action=na.exclude))
summary(lm(discz ~ sex + group, data=dat, na.action=na.exclude))
summary(lm(discz ~ sex + group + zacall, data=dat, na.action=na.exclude))
summary(lm(citz ~ sex + group, data=dat, na.action=na.exclude))
summary(lm(citz ~ sex + group + zacall, data=dat, na.action=na.exclude))
summary(lm(lyralz ~ sex + group, data=dat, na.action=na.exclude))
summary(lm(lyralz ~ sex + group + zacall, data=dat, na.action=na.exclude))
#Group effect controlling for cognition (not controlling for sex - same results)
summary(lm(idz ~ group, data=dat, na.action=na.exclude))
summary(lm(idz ~ group + zacall, data=dat, na.action=na.exclude))
summary(lm(discz ~ group, data=dat, na.action=na.exclude))
summary(lm(discz ~ group + zacall, data=dat, na.action=na.exclude))
summary(lm(citz ~ group, data=dat, na.action=na.exclude))
summary(lm(citz ~ group + zacall, data=dat, na.action=na.exclude))
summary(lm(lyralz ~ group, data=dat, na.action=na.exclude))
summary(lm(lyralz ~ group + zacall, data=dat, na.action=na.exclude))
###Association with COMT genotype
# Restrict genotype to Met/Val, treating empty strings as NA.
comtrevised <- factor(x=q22$comtgenotype, levels = c("Met", "Val"), exclude = "")
q22<-data.frame(q22,comtrevised)
save(q22, file="/Users/ericwpetersen/Desktop/Olfaction R Files/q22.R")
t.test(q22$idz~q22$comtrevised)
t.test(q22$discz~q22$comtrevised)
t.test(q22$citz~q22$comtrevised)
t.test(q22$lyralz~q22$comtrevised)
###Relationship to SIPS and negative symptoms
#SIPS total (disc significantly related to total sips)
summary(lm(idz ~ sipstot + group, data=dat, na.action = na.exclude))
summary(lm(discz ~ sipstot + group, data=dat, na.action = na.exclude))
summary(lm(citz ~ sipstot + group, data=dat, na.action = na.exclude))
summary(lm(lyralz ~ sipstot + group, data=dat, na.action = na.exclude))
#Negative symptoms (citralva threshold and disc significantly related to neg s/s, opposite directions)
summary(lm(idz ~ negative + group, data=dat, na.action = na.exclude))
summary(lm(discz ~ negative + group, data=dat, na.action = na.exclude))
summary(lm(citz ~ negative + group, data=dat, na.action = na.exclude))
summary(lm(lyralz ~ negative + group, data=dat, na.action = na.exclude))
###Midline defects
# Join midline-defect status onto the 22q frame by subject ID.
midline<-read.csv("JustMidline_2017_07_11.csv")
q22<-merge(q22, midline, by = "bblid")
q22$midlinedefect <- as.factor(q22$midlinedefect)
t.test(q22$idz ~ q22$midlinedefect)
boxplot(q22$idz ~ q22$midlinedefect, xlab="Midline Defects", ylab="ID")
t.test(q22$discz ~ q22$midlinedefect)
t.test(q22$citz ~ q22$midlinedefect)
t.test(q22$lyralz ~ q22$midlinedefect)
###Olfactory sulcus depth
sulci<-read.csv("Olfactory_Sulci_2017_07-11.csv")
sulci$psychosis<-as.factor(sulci$psychosis)
str(sulci)
View(sulci)
# base::match() still resolves here even though a data frame named `match`
# was created earlier (R skips non-function objects in call position),
# but the shadowing is confusing — consider renaming that data frame.
match(sulci$bblid, q22$bblid)
#4 matches for 22q
match(sulci$bblid, control$bblid)
#18 matches for controls
anova(lm(left_sulcus_length_DF ~ group, data=sulci))
boxplot(sulci$left_sulcus_length_DF ~ sulci$group)
anova(lm(right_sulcus_length_DF ~ group, data=sulci))
boxplot(sulci$right_sulcus_length_DF ~ sulci$group)
anova(lm(mean_sulcus_length_DF ~ group, data=sulci))
boxplot(sulci$mean_sulcus_length_DF ~ sulci$group)
###END
save(dat, file="/Users/ericwpetersen/Desktop/Olfaction R Files/dat.R")
save(q22, file="/Users/ericwpetersen/Desktop/Olfaction R Files/q22.R")
save(control, file="/Users/ericwpetersen/Desktop/Olfaction R Files/control.R")
save(sulci, file="/Users/ericwpetersen/Desktop/Olfaction R Files/sulci.R")
|
9c71f0fb29de6138b94ebabfdc69e6e140d7bd6d
|
895bdd32974b8b6f1a61fec16b3c555325e7782e
|
/Greenhouse 2017 2018/PLANT_UTILIZATION.R
|
e300c683829deab3d2c7fa17887bdef62f157495
|
[] |
no_license
|
kelseyefisher/larvalmonarchmovementandcompetition
|
9060641aff73f7c595aee2d5113934b8336fd727
|
b57117ecd42b1ef62710f1b3106dc27a319b8c86
|
refs/heads/main
| 2023-02-13T05:30:45.902043
| 2021-01-09T16:22:58
| 2021-01-09T16:22:58
| 328,189,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,139
|
r
|
PLANT_UTILIZATION.R
|
# Working directory for all input CSVs below.
# NOTE(review): an absolute user path makes this non-portable; prefer a
# project-relative path.
setwd("C:/Users/kefisher/Box Sync/Publications/Monarch Larval Biomass Consumption - GH 2017 & 2018/Analysis_022019")
##Leaves in first 24 hours
firstobs<-read.csv("012119_GH2018_FirstObservations_ForAnalysis.csv")
head(firstobs)
#average by number of plants
library(plyr)
ddply(firstobs,~NumPlants,summarise,mean=mean(Leaves),sd=sd(Leaves))
#Graph for manuscript
library(lattice)
library(Rmisc)
# Mean/sd/se/ci of leaves fed upon, grouped by plant density.
foleaves<- summarySE(firstobs, measurevar="Leaves", groupvars=c("NumPlants"))
foleaves
table(firstobs$Leaves)
with(firstobs, table(Trial, NumPlants))
#################################################
### Biomass Consumed by Instar at Abandonment ###
#################################################
Bio<-read.csv("012219_GH18_BiomassByInstar_NatalStem.csv", header=TRUE)
# Convert design variables to factors before modelling.
Bio$Trial=factor(Bio$Trial)
Bio$Block=factor(Bio$Block)
Bio$NumPlants=factor(Bio$NumPlants)
Bio$Year=factor(Bio$Year)
# Create Year-Block variable with 12 levels
Bio$YearBlock <- as.numeric(as.factor(paste(Bio$Year, Bio$Block, sep = "-")))
Bio$YearBlock <- as.factor(Bio$YearBlock)
Bio$MoveInstar <- as.factor(Bio$MoveInstar)
Bio$Biomass <- as.numeric(Bio$Biomass)
# Gamma GLM with inverse link (the original comment said Poisson, but the
# family used is Gamma — appropriate for positive continuous biomass).
biomass <- glm(Biomass ~ YearBlock + Trial + NumPlants + MoveInstar + Trial:NumPlants + NumPlants:MoveInstar, data=Bio, family = Gamma(link = "inverse"))
#not useful, just make sure there isn't a bunch of NA
summary(biomass)
# Goodness of fit test with Resids
Resids <- residuals(biomass, type = "pearson")
Resids
#Change "type" to "deviance" for deviance resids (get the same results)
plot(fitted(biomass), Resids,
xlab = "estimated mean",
ylab = "residual",
cex.lab = 1.4,
pch = 16, col = 4)
abline(h = 0, lty = 2) #Resids look okay
hist(Resids, prob = T)
lines(density(Resids), col='orange') #Should look like a standard normal - does
#Using deviance residuals
#If the p-value is less than 0.05, you have a problem with model fit
pchisq(deviance(biomass), df.residual(biomass), lower.tail = F)
#Test for overdispersion
summary(biomass)$deviance/summary(biomass)$df.residual
#0.82 underdispersed by a bit - that's okay (inference is conservative)
library(emmeans)
# Test for significance of NumPlants
dtm.emm <- emmeans(biomass, c("NumPlants", "Trial", "YearBlock", "MoveInstar"), type='response')
joint_tests(dtm.emm)
# Test for significance of NumPlants
dtm.emm2 <- emmeans(biomass, c("MoveInstar"), type='response')
joint_tests(dtm.emm2)
pairs(dtm.emm2)
dtm.emm2
CLD(dtm.emm2)
#average by number of plants
library(plyr)
# Mean and sd of biomass consumed, by instar at abandonment.
ddply(Bio,~MoveInstar,summarise,mean=mean(Biomass),sd=sd(Biomass))
#Graph for manuscript
library(lattice)
library(Rmisc)
SEB<- summarySE(Bio, measurevar="Biomass", groupvars=c("MoveInstar"))
# Manually attach the significance labels (from the CLD above) per instar.
SEB$Sig<- NA
SEB$Sig<- c("*","*","**","***")
SEB
library(ggplot2)
# Bar chart of biomass by instar with sd error bars and significance stars.
ggplot(SEB, aes(x=MoveInstar, y=Biomass, width=.6))+
geom_bar(stat="identity", color="black",
position=position_dodge())+
xlab("Instar At Natal Plant Abandonment") +
ylab("Milkweed Biomass (mg dry biomass)")+
theme_bw()+
geom_errorbar(aes(ymin=Biomass-sd, ymax=Biomass+sd), width=.2,
position = position_dodge(.9))+
geom_text(aes(label=Sig, y=Biomass+(sd)),vjust=-1.5, size=8)+
scale_y_continuous(expand=c(0,0), limits=c(0,1525))+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))+
theme(axis.text.x = element_text(size=16))+
theme(axis.text.x=element_text(colour="black"))+
theme(axis.text.y = element_text(size=16))+
theme(axis.text.y=element_text(colour="black"))+
theme(axis.title = element_text(size = 15))
#stats for manuscript
Bio2=Bio$Biomass
mean(Bio2)
sd(Bio2)
max(Bio2)
min(Bio2)
#################################################
### Leaves Consumed by Instar at Abandonment ###
#################################################
Leaf<-read.csv("012219_GH18_LeavesByInstar_NatalStem.csv", header=TRUE)
# Convert design variables to factors before modelling.
Leaf$Trial=factor(Leaf$Trial)
Leaf$Block=factor(Leaf$Block)
Leaf$NumPlants=factor(Leaf$NumPlants)
Leaf$Year=factor(Leaf$Year)
# Create Year-Block variable with 12 levels
Leaf$YearBlock <- as.numeric(as.factor(paste(Leaf$Year, Leaf$Block, sep = "-")))
Leaf$YearBlock <- as.factor(Leaf$YearBlock)
Leaf$MoveInstar <- as.factor(Leaf$MoveInstar)
# Gamma GLM with inverse link (the original comment said Poisson, but the
# family used is Gamma), mirroring the biomass model above.
leaf <- glm(Leaves ~ YearBlock + Trial + NumPlants + MoveInstar + Trial:NumPlants + NumPlants:MoveInstar, data=Leaf, family = Gamma(link = "inverse"))
#not useful, just make sure there isn't a bunch of NA
summary(leaf)
# Goodness of fit test with Resids
Resids <- residuals(leaf, type = "pearson")
Resids
#Change "type" to "deviance" for deviance resids (get the same results)
plot(fitted(leaf), Resids,
xlab = "estimated mean",
ylab = "residual",
cex.lab = 1.4,
pch = 16, col = 4)
abline(h = 0, lty = 2) #Resids look okay
hist(Resids, prob = T)
lines(density(Resids), col='orange') #Should look like a standard normal - does
#Using deviance residuals
#If the p-value is less than 0.05, you have a problem with model fit
pchisq(deviance(leaf), df.residual(leaf), lower.tail = F)
# Test for significance of NumPlants
dtm.emm <- emmeans(leaf, c("NumPlants", "Trial", "YearBlock", "MoveInstar"), type='response')
joint_tests(dtm.emm)
# Test for significance of NumPlants
dtm.emm2 <- emmeans(leaf, c("MoveInstar"), type='response')
joint_tests(dtm.emm2)
pairs(dtm.emm2)
dtm.emm2
CLD(dtm.emm2)
#average leaves (by instar at abandonment)
library(plyr)
# BUG FIX: this data frame holds leaf counts, not biomass — the original
# summarised mean(Biomass)/sd(Biomass), a column from the Bio data set.
# Summarise the Leaves column, matching the summarySE call below.
ddply(Leaf,~MoveInstar,summarise,mean=mean(Leaves),sd=sd(Leaves))
head(Leaf)
#Graph for manuscript
library(lattice)
library(Rmisc)
SEL<- summarySE(Leaf, measurevar="Leaves", groupvars=c("MoveInstar"))
head(SEL)
# Manually attach compact letter display labels (from the CLD above).
SEL$Sig<- NA
SEL$Sig<- c("ab","a","b","c")
SEL
library(ggplot2)
# Bar chart of leaves with feeding by instar, sd error bars, CLD letters.
ggplot(SEL, aes(x=MoveInstar, y=Leaves, width=.6))+
geom_bar(stat="identity", color="black",
position=position_dodge())+
xlab("Instar At Natal Plant Abandonment") +
ylab("Leaves with Feeding")+
theme_bw()+
geom_errorbar(aes(ymin=Leaves-sd, ymax=Leaves+sd), width=.2,
position = position_dodge(.9))+
geom_text(aes(label=Sig, y=Leaves+(sd)),vjust=-1.5, size=8)+
scale_y_continuous(expand=c(0,0), limits=c(0,21))+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))+
theme(axis.text.x = element_text(size=16))+
theme(axis.text.x=element_text(colour="black"))+
theme(axis.text.y = element_text(size=16))+
theme(axis.text.y=element_text(colour="black"))+
theme(axis.title = element_text(size = 15))
#stats for manuscript
# Descriptives of leaves with feeding across all larvae.
leaf2=Leaf$Leaves
mean(leaf2)
sd(leaf2)
max(leaf2)
min(leaf2)
# Statistical mode: the most frequent value of a vector.
# Ties are resolved in favour of the value appearing first, because
# which.max() returns the index of the first maximum.
getmode <- function(Neo) {
  distinct_vals <- unique(Neo)
  counts <- tabulate(match(Neo, distinct_vals))
  distinct_vals[which.max(counts)]
}
# BUG FIX: `Neo` is only the formal parameter of getmode() and is undefined
# at the top level, so the original call errored. Report the modal number
# of leaves with feeding, matching the summary stats computed just above.
result<-getmode(leaf2)
print(result)
#Full Development Biomass Consumption
Full<-read.csv('012219_GH18_Biomass_FullDevelopment.csv')
head(Full)
Full$Trial=factor(Full$Trial)
Full$Block=factor(Full$Block)
Full$TotalBiomass=as.numeric(Full$TotalBiomass)
#Total Biomass
# Descriptives of total biomass consumed over full development.
biomass2 = Full$TotalBiomass
length(biomass2)
mean(biomass2)
sd(biomass2)
max(biomass2)
min(biomass2)
#Leaves with feeding
leaves2=Full$TotalLeaves
length(leaves2)
mean(leaves2)
sd(leaves2)
max(leaves2)
min(leaves2)
#Biomass from natal
bionatal=Full$NatalBiomass
length(bionatal)
mean(bionatal)
sd(bionatal)
max(bionatal)
min(bionatal)
#Biomass from Subsequent
biosub=Full$SubsequentBiomass
length(biosub)
mean(biosub)
sd(biosub)
max(biosub)
min(biosub)
#is natal and subsequent different biomass?
# Long-format file: one row per plant type (natal/subsequent) per larva.
fullt<- read.csv("012219_GH18_Biomass_FullDevelopment_ttest.csv")
t.test(Biomass ~ Plant, fullt)
t.test(Leaves ~ Plant, fullt)
head(fullt)
library(lattice)
library(Rmisc)
SET<- summarySE(fullt, measurevar="Biomass", groupvars=c("Plant"))
SET$Sig<- NA
SET$Sig<- c("a","b")
SET
##Number of plants with feeding 2018 4 plants
head(Full)
# BUG FIX: colMeans() requires an all-numeric input and errors once Trial
# and Block have been converted to factors above; restrict it to the
# numeric columns.
colMeans(Full[vapply(Full, is.numeric, logical(1))])
plants = Full$PlantsWithFeeding
length(plants)
mean(plants)
sd(plants)
max(plants)
min(plants)
#Number of Plants Visited
GH1718<-read.csv("011819_GH2017&2018_FullDevelopment_ForAnalysis.csv", header=TRUE)
head(GH1718)
library(plyr)
ddply(GH1718,~NumPlants,summarise,mean=mean(NumPlantsVisited),sd=sd(NumPlantsVisited))
library(lattice)
library(Rmisc)
# Summaries with and without splitting by year.
numplantsv<- summarySE(GH1718, measurevar="NumPlantsVisited", groupvars=c("NumPlants","Year"))
numplantsv
numplantsv<- summarySE(GH1718, measurevar="NumPlantsVisited", groupvars=c("NumPlants"))
numplantsv
# Re-read and re-factor the design variables.
GH1718<- read.csv("011819_GH2017&2018_FullDevelopment_ForAnalysis.csv")
GH1718$Trial=factor(GH1718$Trial)
GH1718$Block=factor(GH1718$Block)
GH1718$NumPlants=factor(GH1718$NumPlants)
GH1718$Year=factor(GH1718$Year)
GH1718$YearBlock <- as.numeric(as.factor(paste(GH1718$Year, GH1718$Block, sep = "-")))
GH1718$YearBlock <- as.factor(GH1718$YearBlock)
#Reduce data frame
# Keep only trials 1-4.
GH1718.complete <- GH1718[which(GH1718$Trial == 1 | GH1718$Trial == 2 | GH1718$Trial == 3 | GH1718$Trial == 4),]
nrow(GH1718)
nrow(GH1718.complete)
numplantsv<- summarySE(GH1718.complete, measurevar="NumPlantsVisited", groupvars=c("NumPlants"))
numplantsv
library(ggplot2)
# Bar chart of plants visited vs. plants available, with sd error bars.
ggplot(numplantsv, aes(x=NumPlants, y=NumPlantsVisited, width=.6))+
geom_bar(stat="identity", color="black",
position=position_dodge())+
xlab("Number of Plants") +
ylab("Number of plants Visited")+
theme_bw()+
geom_errorbar(aes(ymin=NumPlantsVisited-sd, ymax=NumPlantsVisited+sd), width=.2,
position = position_dodge(.9))+
scale_y_continuous(expand=c(0,0), limits=c(0,4.1))+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))+
theme(axis.text.x = element_text(size=20))+
theme(axis.text.x=element_text(colour="black"))+
theme(axis.text.y = element_text(size=20))+
theme(axis.text.y=element_text(colour="black"))+
theme(axis.title = element_text(size = 22))
library(ggplot2)
library(emmeans)
### Proportion Analyses
#Observed on Plant Material
GH1718<- read.csv("011819_GH2017&2018_FullDevelopment_ForAnalysis.csv")
GH1718$Trial=factor(GH1718$Trial)
GH1718$Block=factor(GH1718$Block)
GH1718$NumPlants=factor(GH1718$NumPlants)
GH1718$Year=factor(GH1718$Year)
GH1718$YearBlock <- as.numeric(as.factor(paste(GH1718$Year, GH1718$Block, sep = "-")))
GH1718$YearBlock <- as.factor(GH1718$YearBlock)
#Reduce data frame
# Trials 1-4 only; written out and re-read as the 2017-only subset.
GH1718.complete <- GH1718[which(GH1718$Trial == 1 | GH1718$Trial == 2 | GH1718$Trial == 3 | GH1718$Trial == 4),]
write.csv(GH1718.complete, "GH17_ONLY.csv")
GH17<- read.csv("GH17_ONLY.csv", header = TRUE)
nrow(GH17)
GH17$Trial=factor(GH17$Trial)
GH17$Block=factor(GH17$Block)
GH17$NumPlants=factor(GH17$NumPlants)
GH17$Year=factor(GH17$Year)
GH17$YearBlock <- as.numeric(as.factor(paste(GH17$Year, GH17$Block, sep = "-")))
GH17$YearBlock <- as.factor(GH17$YearBlock)
table(GH17$RelPlant)
with(GH17, table(Trial, NumPlants))
#This is the correct model. Your response needs to be c(succ,fail). Becuase the model is overdispersed, you should use a family = quasibinomial rather than a simple binomial.
relplant2 <- glm(cbind(ObsOnPlant, TimesObs-ObsOnPlant) ~ Trial + NumPlants, data=GH17, family = quasibinomial, na.action = "na.omit")
summary(relplant2)
relplant2.emm <- emmeans(relplant2, c('NumPlants', 'Trial'))
relplant2.emm
joint_tests(relplant2.emm)
relplant2.emm2 <- emmeans(relplant2, 'NumPlants', type = "response")
relplant2.emm2
pairs(relplant2.emm2)
# NOTE(review): the pairs() call below duplicates the one above — likely an
# accidental repeat; it only reprints the same comparisons.
pairs(relplant2.emm2)
relplant2.emm2
CLD(relplant2.emm2)
#Graph for manuscript
library(lattice)
library(Rmisc)
SErp<- summarySE(GH17, measurevar="RelPlant", groupvars=c("NumPlants"))
SErp
# Manually attach compact letter display labels per plant density.
SErp$Sig<- NA
SErp$Sig<- c("a","ab","b")
SErp
library(ggplot2)
# Bar chart of proportion of observations on plant material by density.
ggplot(SErp, aes(x=NumPlants, y=RelPlant, width=.6))+
geom_bar(stat="identity", color="black",
position=position_dodge())+
xlab("Number of Plants") +
ylab("Proportion of Time on Plant Material")+
theme_bw()+
geom_errorbar(aes(ymin=RelPlant-sd, ymax=RelPlant+sd), width=.2,
position = position_dodge(.9))+
geom_text(aes(label=Sig, y=RelPlant+(sd)),vjust=-1.5, size=8)+
scale_y_continuous(expand=c(0,0), limits=c(0,1.125))+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))+
theme(axis.text.x = element_text(size=16))+
theme(axis.text.x=element_text(colour="black"))+
theme(axis.text.y = element_text(size=16))+
theme(axis.text.y=element_text(colour="black"))+
theme(axis.title = element_text(size = 15))
#stats for manuscript
relp=GH1718.complete$RelPlant
length(relp)
mean(relp)
sd(relp)
max(relp)
min(relp)
|
6dd414b4e1b127ff95dc244537ddabe8891b7895
|
d7f68113ba841857d68f2ac452bcda91fe373cf0
|
/Insight/Scripts/Reimbursement.R
|
ae816ce2dcb9f1b1945767fcee891ae15c4649b1
|
[] |
no_license
|
ramyead/Insight-Project-kNOw-Care
|
bc731398d3a49ac803af08d70ca58beb9a38d09d
|
654c0203bc51036f671a14c17e259c8fa4f17b47
|
refs/heads/master
| 2021-01-17T11:58:33.172780
| 2017-06-26T00:45:01
| 2017-06-26T00:45:01
| 95,390,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 812
|
r
|
Reimbursement.R
|
pacman::p_load(stringr, ggplot2, car, effects, lme4, lmerTest, dplyr, reshape2, tidyr, sjPlot, nlme)
# Read the CMS "Payment and Value of Care" flat file and keep the
# identifying columns plus the payment measure fields.
reimbursement = read.csv("Data/Hospital_Revised_Flatfiles/Payment and Value of Care - Hospital.csv") %>%
select(Hospital.Name = Hospital.name, State, Payment.measure.name, Payment.measure.ID, Payment)
# Strip "$" and "," so Payment parses as a number (non-numeric entries
# such as "Not Available" become NA).
reimbursement2= select(reimbursement, Hospital.Name, State, Payment.measure.ID, Payment) %>%
mutate(Payment = as.numeric(gsub('[$,]', '', reimbursement$Payment)))
# Unique row id so spread() does not collapse duplicate hospital rows.
# seq_len() is safe for a zero-row frame, unlike 1:nrow().
reimbursement2$row <- seq_len(nrow(reimbursement2))
# One column per payment measure; FALSE (spelled out, not the reassignable
# shorthand F) fills hospital/measure combinations with no observation.
reimbursement3 = spread(reimbursement2,Payment.measure.ID, Payment, fill = FALSE) %>% select(-row) %>%
group_by(Hospital.Name, State)
# Collapse the per-row frame back to one row per hospital/state by summing
# each measure column.
reimbursement.final = group_by(reimbursement3, Hospital.Name, State) %>%
summarise_all(sum)
# save(reimbursement.final, file = "Data/reimbursement.rda")
|
fc23f686958698f56b867e1256794b0534be48ef
|
4bd57b8501d4326ecc06c1d1ea499935e1668d95
|
/MASH-dev/JohnHenry/Pf_Analysis/MT_TE.R
|
419abed1af2d4f19bb6b5bdb3d6b43bd9631723d
|
[] |
no_license
|
aucarter/MASH-Main
|
0a97eac24df1f7e6c4e01ceb4778088b2f00c194
|
d4ea6e89a9f00aa6327bed4762cba66298bb6027
|
refs/heads/master
| 2020-12-07T09:05:52.814249
| 2019-12-12T19:53:24
| 2019-12-12T19:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,857
|
r
|
MT_TE.R
|
library(readxl)
library(fitdistrplus)
library(zoib)
library(gamlss)
# Load malaria transmission spreadsheets (long format: stacked individuals).
Mosquito_Transmission <- read_excel("~/Malaria Data Files/Mosquito_Transmission.xlsx")
MT_Days <- read_excel("~/Malaria Data Files/MT_Days.xlsx")
Gt_Col <- read_excel("~/Malaria Data Files/Gt_Col.xlsx")
#MT_GT_NP <- read_excel("~/Malaria Data Files/MT_GT_NP.xlsx")
#G = as.matrix(MT_GT_NP)
Days = as.matrix(MT_Days)
## which rows denote beginning of individual
# Day counters restart at 1 for each individual, so Days==1 marks starts.
begin = which(Days==1)
## which rows denote end of individual
end = which(Days==1)-1
end = c(end,length(Days))
end = end[2:length(end)]
# "." placeholders in the spreadsheets become NaN, then columns go numeric.
Mosq = as.matrix(Mosquito_Transmission)
Mosq[which(Mosq==".")]=NaN
Mosq = as.numeric(Mosq)
Gt = as.matrix(Gt_Col)
Gt[which(Gt==".")]=NaN
Gt = as.numeric(Gt)
# NOTE(review): MT_PT_NP is never read in this script (only MT_GT_NP is,
# and that read is commented out above) — this line errors unless
# MT_PT_NP already exists in the workspace. Verify the intended source.
Pt = as.matrix(MT_PT_NP)
# Reshape the stacked series into a 1400-day x 334-individual matrix,
# one column per individual, padded with NaN.
TE = matrix(NaN,nrow=1400,ncol=334)
MGt = TE
for(i in 1:334){
TE[1:(end[i]-begin[i]+1),i] = Mosq[begin[i]:end[i]]
MGt[1:(end[i]-begin[i]+1),i] = Gt[begin[i]:end[i]]
}
### compare average gametocyte levels to average proportion of infected mosquitoes
# Per-day means across individuals.
TEmu = rowMeans(TE,na.rm=T)
# NOTE(review): `G` is only defined in the commented-out load above; this
# line errors unless that block is restored (or was meant to use MGt).
GTmu = rowMeans(G,na.rm=T)
PTmu = rowMeans(Pt,na.rm=T)
plot(TEmu,type="l",xlim=c(0,200))
# Row-wise sample variance of a matrix, ignoring missing values.
# BUG FIX: the original divided every row's sum of squares by nrow(m)-1,
# but the sample variance of a row must use that row's own number of
# observations minus one; it also zero-filled NAs before computing the row
# means, which biased the means toward zero. NAs now contribute neither to
# the mean nor to the sum of squared deviations.
rowVar = function(m){
  n_obs = rowSums(!is.na(m))
  dev = m - rowMeans(m,na.rm=T)
  dev[which(is.na(dev))] = 0
  temp = rowSums(dev^2)/(n_obs-1)
  return(temp)
}
# Per-day variances across individuals for each series.
TEvar = rowVar(TE)
GTvar = rowVar(MGt)
PTvar = rowVar(Pt)
# Mean infection profile: transmission efficiency (rescaled from percent)
# overlaid with log10 gametocyte and parasite densities.
plot(TEmu/100,type="l",ylim=c(0,5),xlim=c(0,365),xlab="days",ylab="log10 Parasite Density per microliter")
#lines(sqrt(TEvar/10^4)+TEmu/100,lty=2)
#lines(pmax(-sqrt(TEvar/10^4)+TEmu/100,0),lty=2)
lines(log10(GTmu),lty=2,col="red")
lines(log10(PTmu))
# Reference line at a density of 10 parasites per microliter.
abline(h=log10(10))
title(main="Transmission Efficiency in Mean Infection Profile")
###
ccf(PTmu,GTmu,type="correlation",lag.max=20)
TEmu[which(is.na(TEmu))]=0
Mprime = Mosq[which(is.na(Mosq)==F)]
Gtprime = as.numeric(Gt[which(is.na(Mosq)==F)])
##log10 gametocyte count vs % of mosquitoes infected from bitting from them
plot(log10(Gtprime),Mprime/100,ylab="Proportion of Feeding Mosquitoes Infected",xlab="log10 Gametocyte Density per cmm")
title(main="Transmission Efficiency as a Function of Gametocyte Density")
## ---- Transmission efficiency binned by gametocyte density --------------
## The original script repeated the same select/fit/plot sequence seven
## times (beta1..beta7, logGT1..logGT7, beta1fit..beta7fit); the loop below
## performs the identical sequence once per bin.
logGt_all = log10(Gtprime)

# Bin membership on the log10 gametocyte scale; the first bin is open
# below (<= 1) and the last open above (> 3.5), matching the original cuts.
in_bin = list(
  logGt_all <= 1,
  logGt_all > 1   & logGt_all <= 1.5,
  logGt_all > 1.5 & logGt_all <= 2,
  logGt_all > 2   & logGt_all <= 2.5,
  logGt_all > 2.5 & logGt_all <= 3,
  logGt_all > 3   & logGt_all <= 3.5,
  logGt_all > 3.5
)
n_bins = length(in_bin)

# Helper retained from the original script: infected proportions for a
# unit-wide window starting at xmin (kept for interactive use).
betaRange = function(xmin){
  Mprime[which(log10(Gtprime) > xmin & log10(Gtprime) <= xmin+1)]/100
}

x = seq(0,1,.01)

betas     = vector("list", n_bins)  # infected proportions per bin
betaMeans = numeric(n_bins)         # means of the fitted Beta distributions
betaVars  = numeric(n_bins)         # variances of the fitted Beta distributions
logGT     = numeric(n_bins)         # mean log10 gametocyte density per bin

for (k in seq_len(n_bins)) {
  sel = which(in_bin[[k]])         # which() drops NA comparisons, as before
  bk  = Mprime[sel]/100            # proportion of mosquitoes infected per feed
  lgk = pmax(logGt_all[sel], 0)    # densities floored at 0, as before
  betas[[k]] = bk
  # Mean density taken on the raw scale over strictly positive entries.
  logGT[k] = log10(mean(10^lgk[lgk > 0]))
  # Fit a Beta distribution by maximum goodness-of-fit and overlay its
  # density on a histogram of the observed proportions.
  fitk    = fitdist(bk, distr = "beta", method = "mge")
  shape_a = fitk$estimate[1]
  shape_b = fitk$estimate[2]
  hist(bk, breaks = 50, freq = FALSE)
  lines(x, dbeta(x, shape_a, shape_b))
  betaMeans[k] = shape_a/(shape_a + shape_b)
  betaVars[k]  = shape_a*shape_b/((shape_a + shape_b)^2*(shape_a + shape_b + 1))
}

plot(logGT,betaMeans,ylim=c(0,1),xlim=c(.5,4.25),xlab="Log10 Gametocyte Density per Microliter",ylab="Proportion of Mosquitoes Infected",main="Transmission Efficiency")
# Sigmoid fit of mean TE against mean bin density; the p1/p2/p3 constants
# below are hard-coded estimates from a previous run of this nls call.
sigfit = nls(betaMeans~p1*exp(p2*(logGT))/(p3+exp(p2*(logGT))),start=list(p1=.6,p2=.5,p3=2))
p1=.689
p2= 2.145
p3 = 144.351
# (A Gompertz alternative, g1*exp(-g2*exp(-g3*x)), was explored in an
# earlier version and abandoned; only the sigmoid is kept.)
# Saturating sigmoid: p1 is the asymptote, p2 the steepness, and
# log(p3)/p2 the midpoint.
sigmoidTE = function(x,p1,p2,p3){
  p1*exp(p2*x)/(p3+exp(p2*x))
}
x = seq(.8,4.1,.01)
plot(x,sigmoidTE(x,p1,p2,p3))
title(main="Mean log10 Gametocyte Density vs Mean Transmission Efficiency")
midpoint = log(p3)/p2
saturation = p1
slope = p2
x = seq(-1,8,.01)
plot(x,sigmoidTE(x,p1,p2,p3),type="l",ylim=c(0,1),xlab="log10 Gametocyte Density per Microliter")
abline(v = midpoint)
abline(h=saturation)
abline(h=0)
# How the fitted Beta spread varies with gametocyte density.
plot(logGT,betaVars,type="l")
plot(logGT,betaMeans/betaVars,type="l")
plot(logGT,betaMeans/sqrt(betaVars),type="l")
############################ zero inflation fit
# Fraction of feeds per bin with no mosquitoes infected (pz) and with all
# mosquitoes infected (po).
pz = vapply(betas, function(bk) sum(bk == 0)/length(bk), numeric(1))
po = vapply(betas, function(bk) sum(bk == 1)/length(bk), numeric(1))
s = 1:7
plot(s,pz,type="l",ylim=c(0,1))
lines(s,po)
#################### sliding window for measuring TE from Gt ####################
betazfit = function(a){
  ## a should be between .5 and 4; selects feeds whose log10 gametocyte
  ## density lies in the half-open window (a, a + .5] and fits a Beta
  ## distribution (maximum goodness-of-fit) to the infected proportions.
  ## Returns the two fitted shape parameters.
  logGt = log10(Gtprime)
  window = which(logGt > a & logGt <= a + .5)
  props = Mprime[window] / 100
  fit = fitdist(props, distr = "beta", method = "mge")
  c(fit$estimate[1], fit$estimate[2])
}
# Sliding-window estimates: window start z, fitted Beta parameters per
# window (params, 2 x length(z)) and the implied mean proportion (muz).
z = seq(.5,4,.1)
muz = 0*z
params = matrix(0,nrow=2,ncol=length(z))
for(i in 1:length(z)){
params[,i] = betazfit(z[i])
muz[i] = params[1,i]/(params[1,i]+params[2,i])
}
# Sigmoid fit of mean TE against window position; the p1/p2/p3 constants
# below are hard-coded estimates from a previous run of this nls call.
sigfit = nls(muz~p1*exp(p2*(z))/(p3+exp(p2*(z))),start=list(p1=.5,p2=.5,p3=10))
p1 = .6753
p2 = 2.1801
p3 = 79.8746
# Saturating sigmoid (redefinition of the earlier sigmoidTE, same formula):
# p1 is the asymptote, p2 the steepness, log(p3)/p2 the midpoint.
sigmoidTE = function(x,p1,p2,p3){
p1*exp(p2*x)/(p3+exp(p2*x))
}
w = seq(0,5,.01)
plot(z,muz,xlim=c(0,4.5),ylim=c(0,.9),ylab="Proportion of Mosquitoes Infected", xlab="log10 Gametocyte Density per Microliter", main="Transmission Efficiency")
lines(w,sigmoidTE(w,p1,p2,p3))
abline(h = p1)
# Residual diagnostics for the sigmoid fit.
resid = muz - sigmoidTE(z,p1,p2,p3)
hist(resid,breaks = 10)
qqnorm(resid)
lines(seq(-2,2,.01),.02*seq(-2,2,.01))
plot(resid)
plot(z,params[1,],col="red",type="l",ylim=c(0,3),xlab="log10 Gametocyte Density per microL",ylab="Parameter Value",main="Beta Parameterization given Gametocytemia")
lines(z,params[2,],col="blue")
legend(3,2.6,legend=c("alpha","beta"),col=c("red","blue"),lty=c(1,1))
# Mean-variance relationship of the fitted Beta distributions.
# NOTE(review): this `var` assignment shadows stats::var below this point.
a = params[1,]
b = params[2,]
mu = a/(a+b)
var = a*b/((a+b)^2*(a+b+1))
plot(mu,var,xlab="Mean of Fitted Beta", ylab="Variance of Fitted Beta",main="Mean-Variance Relationship for Fitted Beta Distributions",xlim=c(0,1),ylim=c(0,.2))
mu2 = mu^2
parfit = lm(var~mu+mu2)
# Quadratic approximation to the mean-variance curve, clipped at 0.
parab = function(mu){
pmax(parfit$coefficients[1]+parfit$coefficients[2]*mu+parfit$coefficients[3]*mu^2,0)
}
lines(seq(0,1,.01),parab(seq(0,1,.01)))
## tried fitting beta-like function ( y = ax^b(1-x)^c ), seems more (locally) parabolic
## (and mean of beta will ALWAYS be below .7)
#muc = 1-mu
#lvar = log(var)
#lmu = log(mu)
#lmuc = log(1-mu)
#mvfit = lm(lvar~lmu+lmuc)
#c1 = mvfit$coefficients[1]
#c2 = mvfit$coefficients[2]
#c3 = mvfit$coefficients[3]
#mvfitcurve = function(mu){
#  exp(c1)*mu^c2*(1-mu)^c3
#}
#r = seq(0,1,.01)
#lines(r,mvfitcurve(r))
# Apply the fitted sigmoid to the mean gametocyte time course to obtain a
# predicted transmission-efficiency profile over the infection.
plot(log10(GTmu),xlim=c(0,300))
lines(TEmu/100)
plot(TEmu/100,type="l",xlim=c(0,300),xlab="Days",ylab="Fraction of Mosquitoes Infected",main="Transmission Efficiency Time Course")
lines(sigmoidTE(log10(GTmu),p1,p2,p3),type="l",col="red")
plot(sigmoidTE(log10(GTmu),p1,p2,p3),type="l",xlim=c(0,300),ylim=c(0,1))
plot(cumsum(sigmoidTE(log10(GTmu),p1,p2,p3)),type="l",xlim=c(0,300),ylim=c(0,50))
# Exponentially discounted cumulative transmission potential.
# NOTE(review): `lambda` is never defined in this script -- it must be set
# before this loop will run.
TEweight = rep(0,300)
for(i in 1:300){TEweight[i]=sigmoidTE(log10(GTmu),p1,p2,p3)[i]*exp(-lambda*i)}
plot(cumsum(TEweight))
|
641bf4c68b058c9ce29a60ae16adf731b824b844
|
2ab25ee2419091aec254ff0a1919e252f2d176fa
|
/assignment4.R
|
1481a298cc0cfd0abfb9f607ccb1bbcf24ab5a6c
|
[] |
no_license
|
GeorgeAaronG/WallStreetBets-Reddit-Analysis
|
f3d14eac512590f04421bd117d299ea7806ad12a
|
7ae9c3fa507e0f5242d5dbe8ee2ee853162a3e8b
|
refs/heads/master
| 2020-09-11T17:50:16.148655
| 2019-11-20T05:12:22
| 2019-11-20T05:12:22
| 222,143,523
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,029
|
r
|
assignment4.R
|
# George Garcia | 11.16.19
#
# "assignment4.r": Collects Reddit forum posts from the 'WallStreetBets' community to create
# user network graphs with 'dplyr' and identify entities such as persons, organizations,
# locations, dates, monies, and percentages with 'openNLP'.
# To prevent java.lang.OutOfMemoryError, set Java heap size
options(java.parameters = "-Xmx8g")
# Load packages
library(RedditExtractoR)
library(dplyr)
library(NLP)
library(openNLP)
library(openNLPmodels.en)
#####
# 1
# Search Reddit for threads that contain the word "stocks" and return 1 page (25 results)
stocksURL <- reddit_urls(search_terms = "stocks", subreddit = "WallStreetBets", page_threshold = 1)
#####
# 2
# Filter results by URL with the smallest amount of comments, and then use its URL to get replies.
# NOTE(review): %$% is magrittr's exposition pipe and is not re-exported by
# dplyr -- add library(magrittr) for this line to work reliably.
minThreadDF <- stocksURL %>% filter(num_comments == min(stocksURL$num_comments)) %$% URL %>% reddit_content
# Extract the user network of replies, excluding the author of original post, and shows aggregated results
stocksNetwork <- minThreadDF %>% user_network(include_author=FALSE, agg=TRUE)
# Interactive plot of the user network, with messages
stocksNetwork$plot
#####
# 3
# Keep only post ID and post text
Reddit <- data.frame(minThreadDF$id, minThreadDF$comment)
colnames(Reddit) <- c('Post', 'Comment')
View(Reddit)
# Set up annotators for person, organization, location, date, money and percentage
person_annotator = Maxent_Entity_Annotator(kind = 'person')
organization_annotator = Maxent_Entity_Annotator(kind = 'organization')
location_annotator = Maxent_Entity_Annotator(kind = 'location')
date_annotator = Maxent_Entity_Annotator(kind = 'date')
money_annotator = Maxent_Entity_Annotator(kind = 'money')
percentage_annotator = Maxent_Entity_Annotator(kind = 'percentage')
# Create empty data frame to hold extracted entities with 4 columns of data: Post, Type, Entity, and Position
RedditEntities = data.frame(Post=numeric(), Type=character(), Entity=character(), Position=numeric(), stringsAsFactors=FALSE)
# The loop below, for each row of Reddit:
#   ensures the post is a string, tokenizes it, annotates the tokens,
#   extracts each portion of the post tagged as an entity, and
#   appends it to the RedditEntities dataframe.
# Append every annotated span of one entity type to the running entities
# data frame. This replaces six copy-pasted blocks in the original; each
# row carries the post id, the type label, the extracted substring, and
# its start position (rbind growth kept for fidelity; inputs are small).
append_entities <- function(entities, post, type_label, text, spans) {
  n_spans <- nrow(as.data.frame(spans))
  if (n_spans > 0) {
    full_text <- paste(text, collapse = ' ')
    # seq_len() avoids the original's 1:nrow() iteration over c(1, 0)
    # when no spans were found.
    for (i in seq_len(n_spans)) {
      entities <- rbind(entities,
                        cbind(post, type_label,
                              # extract the entity text from its start to
                              # its finish position
                              substr(full_text, spans$start[i], spans$end[i]),
                              spans$start[i]))
    }
  }
  entities
}

# One Maxent annotator per entity type; the names double as the 'Type'
# labels and, lower-cased, as the 'kind' recorded in annotation features.
entity_annotators <- list(
  Person       = person_annotator,
  Organization = organization_annotator,
  Location     = location_annotator,
  Date         = date_annotator,
  Money        = money_annotator,
  Percentage   = percentage_annotator
)

for (post in seq_len(nrow(Reddit)))  # repeat for each row in dataframe
{
  RedditText = as.String(Reddit[post, 2])  # retrieve text
  RedditTokens = annotate(RedditText,
                          list(Maxent_Sent_Token_Annotator(),    # Sentence token
                               Maxent_Word_Token_Annotator()))   # Word token
  for (type_label in names(entity_annotators)) {
    # Run the entity annotator on top of the sentence/word tokens, then
    # keep only the annotations of the requested kind.
    ann <- annotate(RedditText, list(entity_annotators[[type_label]]), RedditTokens)
    wanted <- sprintf('list(kind = "%s")', tolower(type_label))
    spans <- subset(ann, ann$features == wanted)
    RedditEntities <- append_entities(RedditEntities, post, type_label, RedditText, spans)
  }
}
# rename columns
colnames(RedditEntities)=c('Post', 'Type', 'Entity', 'Position')
View(RedditEntities)
# merge entity tags with posts
# NOTE(review): "Extrated" is a typo for "Extracted"; kept as-is because
# other scripts may reference this variable name.
RedditExtratedEntities=merge(RedditEntities, Reddit, by.x='Post', by.y='Post')
View(RedditExtratedEntities)
|
eee2adb0ad019e60b3cca78a8d48d88c9058c184
|
2aa473e524c173313ebbfc757b6a91f7c7b24f00
|
/man/mungepiece.Rd
|
0f03663284fa85b637a1fd9e4eacba6c0461b1cd
|
[
"MIT"
] |
permissive
|
syberia/mungebits2
|
64052c3756828cef6bc1139106d4929ba02c8e75
|
1b4f0cd2a360856769ccb2b11dfc42f36fa5d84c
|
refs/heads/master
| 2020-04-06T04:16:54.292474
| 2017-09-19T21:27:27
| 2017-09-19T21:27:27
| 29,579,712
| 1
| 2
| null | 2017-09-19T21:27:28
| 2015-01-21T08:32:15
|
R
|
UTF-8
|
R
| false
| true
| 299
|
rd
|
mungepiece.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mungepiece.R
\docType{class}
\name{mungepiece}
\alias{mungepiece}
\title{Mungepiece.}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
mungepiece
}
\description{
Mungepiece.
}
\keyword{datasets}
|
95e6f9749049a89f5f5a10f5871f176bf460baa3
|
cddaaa6370390e142ef8c05cfe6fb9dae79685d0
|
/funcs-common.r
|
c464719dd6faf1175fef8ba72a5d8b0e3d50b3f7
|
[
"MIT"
] |
permissive
|
matthewgthomas/ami-summarise
|
9d51c9fb0e265eb85d308c3ca72d1d040129e218
|
f5c522c622c4df235b2a0fda9abc20df955a2b99
|
refs/heads/master
| 2020-06-26T19:48:29.079484
| 2015-04-22T21:07:50
| 2015-04-22T21:07:50
| 34,285,216
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,715
|
r
|
funcs-common.r
|
##
## Process Content Mine output XML files into node/edge lists that can be visualised in network graphs.
##
## This file contains the main input, processing and output functions.
##
## By: Matthew Gwynfryn Thomas
##
## {------- email --------}
## {-- twitter --}
## mgt@matthewgthomas.co.uk
## {------ web -------}
##
##
## Copyright (c) 2015 Matthew Gwynfryn Thomas
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
#install.packages("plotrix", "RJSONIO", "XML")
#########################################################
## Create list of nodes for the network viz.
## This function takes the JSON file produced by `getpapers` and outputs a dataframe of articles.
##
## Params:
## - data.dir : the Content Mine directory containing the JSON file (and everything else)
## - filename : the JSON file containing details of all scraped/processed papers
##
## Returns: dataframe containing details of the articles in the form of a node list, with these columns:
## - id   : article's ID, corresponding to subdirectories in data.dir (one subdir for every article)
## - name : article's title
## - url  : article's DOI
## - type : 2=article; 1=keyword/species etc. (type 1 will come into play a bit later)
## - size : the node's radius -- set to the same value for all articles (will be modified later)
##
load_papers = function(data.dir, filename="all_results.json") {
  library(RJSONIO)
  # load JSON file containing results in this directory
  papers = fromJSON(file.path(data.dir, filename))
  # Pull one field out of a paper record, mapping missing fields to "".
  grab = function(p, key) {
    if (is.null(p[[key]])) "" else as.character(p[[key]])
  }
  # Build the node list column-wise. The original grew the frame row by
  # row and crashed on an empty result set because of 1:length(papers);
  # rep() keeps the constant columns the right length even when empty.
  nodes = data.frame(
    id   = vapply(papers, grab, character(1), key = "pmcid"),
    name = vapply(papers, grab, character(1), key = "title"),
    url  = vapply(papers, grab, character(1), key = "DOI"),
    type = rep(2, length(papers)),   # 2 = article node
    size = rep(50, length(papers)),  # default radius; rescaled later
    stringsAsFactors = FALSE)
  row.names(nodes) = NULL             # get rid of row names
  nodes
}
#########################################################
## Load XML results files created by AMI
##
## Params:
## - data.dir : base of the Content Mine directory to process
## - xml.files : list of .xml files to load
## - article_ids : vector of article ID numbers (found in nodes$id created by load_papers()) -- these must correspond to sub-directories in data.dir
##
## Returns: dataframe containing all results from everything in the xml.files list
##
get_results = function(data.dir, xml.files, article_ids) {
  library(XML)
  # open all .xml files in data directory and save them as separate
  # variables; assign()/get() with default envir operate in this
  # function's frame, so the parsed trees live only for this call
  for (i in 1:length(xml.files)) assign(strsplit(xml.files[i], "/")[[1]][1], # strsplit gets the publication ID, which is the first part of the directory name
                                        xmlRoot(xmlTreeParse(file.path(data.dir, xml.files[i]), useInternalNode = TRUE)))
  # function to extract nodes and attributes from xml
  # code from: http://stackoverflow.com/a/16805244/951174
  dumFun <- function(x) {
    xname <- xmlName(x)
    xattrs <- xmlAttrs(x)
    c(sapply(xmlChildren(x), xmlValue), name = xname, xattrs)
  }
  xml.path = "//*/result" # this should be the same for all AMI output .xml files
  # place to hold all the results from all articles
  results = data.frame()
  # loop all xml data, storing everything in the results data frame
  # (rbind growth is quadratic; acceptable here for modest corpora)
  for (xml.data.name in article_ids) {
    tmp_xml = try(get(xml.data.name), silent=T) # grab the data; try() deliberately swallows lookups for missing articles
    if (class(tmp_xml)[1] != "try-error") { # ignore errors -- they're most likely empty or missing .xml files
      # convert the results nodes to a dataframe
      tmp_df = as.data.frame(t(xpathSApply(tmp_xml, xml.path, dumFun)), stringsAsFactors = FALSE)
      if (ncol(tmp_df) > 0) { # check the data aren't missing/malformed
        tmp_df$article = xml.data.name # add the article's name
        # put results in collected dataframe
        results = rbind(results, tmp_df)
      }
    }
  }
  return(results)
}
#########################################################
## Combine the keyword/species frequency table with the article node list
## into the final node list for the network viz.
##
## Params:
## - nodes.freq : keywords/species/etc. dataframe output by calc_word_freq() or calc_species()
##                (two columns: identifier and frequency/size)
## - nodes.articles : nodes dataframe containing all articles (output from load_papers())
##
## Returns: dataframe containing all keywords/species/etc. and all articles in a node list
##
finalise_nodes = function(nodes.freq, nodes.articles) {
  library(plotrix)
  # set this variable to change the size of article nodes relative to keyword/species nodes
  # = 1 means that article nodes will be the same size as the smallest keyword/species
  relative_size_smallest_node = 1 # larger numbers mean smaller article nodes relative to smallest keyword/species
  # reshape the keyword/species table into node-list columns
  names(nodes.freq) = c("id", "size")
  nodes.freq$name = nodes.freq$id
  nodes.freq$url = ""  # no url for keywords
  nodes.freq$type = 1  # for keywords
  # the main summariser functions all output wildly different ranges of node sizes -- rescale them
  nodes.freq$size = rescale(nodes.freq$size, range(10, 100))
  nodes.freq = nodes.freq[, c(1, 3, 4, 5, 2)]  # reorder columns to id, name, url, type, size
  # BUG FIX: the original bound the *global* variable `nodes` here instead
  # of the nodes.articles argument, so the function only worked when a
  # global `nodes` happened to exist.
  nodes.out = rbind(nodes.freq, nodes.articles)  # prepend keywords to articles
  # scale article node sizes relative to the smallest keyword/species (but min. size = 1)
  min.words = max(min(nodes.freq$size / relative_size_smallest_node), 1)
  # set article node sizes to this minimum
  nodes.out$size = ifelse(nodes.out$type == 2, min.words, nodes.out$size)
  return(nodes.out)
}
#######################################
## Output to JSON
##
## Writes the node and edge lists as a single JSON document of the form
## {nodes: [...], links: [...]} to output.dir/filename.
##
## Params:
## - nodes, edges : data frames; one JSON object is emitted per row
## - filename     : output file name
## - output.dir   : directory to write into (default: current directory)
output_json = function(nodes, edges, filename, output.dir = ".") {
  library(RJSONIO)
  # One list element per data-frame row, names stripped so toJSON emits
  # JSON arrays.
  # BUG FIX: the original called plyr::alply() without ever loading plyr;
  # base lapply over row indices produces the same list of one-row frames.
  row_list <- function(df) {
    unname(lapply(seq_len(nrow(df)), function(i) df[i, , drop = FALSE]))
  }
  # put nodes and links together for json output
  json_out <- list(
    nodes = row_list(nodes),
    links = row_list(edges)
  )
  # cat(file =) replaces the original sink()/cat()/sink() sequence, which
  # left the sink open if serialisation failed mid-way.
  cat(toJSON(json_out), file = file.path(output.dir, filename))
  print(paste("Written", filename))
}
|
d059f2606e1b6b5922c13a36db20dbd43456881a
|
0e3d395211cc8e6c9c2e5739ca3b6d3111683530
|
/man/baSimuError.Rd
|
58800a602bf7b02ce71621922599fecdee459160
|
[] |
no_license
|
olssol/cava
|
759a63d24a652d49faf4bc0c9c43d7aa3e434ac4
|
29a52ccf9beb6b3e32598e4369a7b3df34c668b0
|
refs/heads/master
| 2023-09-04T01:43:31.902245
| 2023-08-22T17:59:52
| 2023-08-22T17:59:52
| 240,382,223
| 0
| 2
| null | 2023-08-22T17:59:53
| 2020-02-13T22:44:17
|
R
|
UTF-8
|
R
| false
| true
| 825
|
rd
|
baSimuError.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/basim_simu.R
\name{baSimuError}
\alias{baSimuError}
\title{Simulate random error}
\usage{
baSimuError(
n,
error.type = c("normal", "skewed"),
sig = 1,
mu = 0,
skew.mu = NULL,
skew.phi = NULL,
skew.noise = 0.001
)
}
\arguments{
\item{n}{sample size}
\item{error.type}{normal or skewed}
\item{sig}{standard deviation}
\item{mu}{mean}
\item{skew.mu}{if skewed, by how much}
\item{skew.phi}{if skewed, by how much}
\item{skew.noise}{if skewed, contains how much noise}
}
\value{
check detail for exact computation
}
\description{
Simulate random error
}
\details{
Skewed distribution: Parameterization: mu, phi; RNBINOM uses (n, p)
with: phi = n, mu = n(1-p)/p; Mean: mu = n(1-p)/p; Variance:
mu(1+mu/phi) = n(1-p)/p^2
}
|
01490d9754c3b85cee87dc1a47347b1da8864f87
|
ed4eaf6ab9dc7bc7d952589ddbf1cb0191e35bff
|
/R/kayvan.R
|
5b5a782811e3cf38f6addf46c41182ee1285a08b
|
[] |
no_license
|
cran/ggm
|
6c9cb51c79aebe7f91800afa543b538ae42f3c1a
|
809e5625d5e8f1c6f2bb3eda5c02ba18e3dddcec
|
refs/heads/master
| 2021-01-15T11:29:01.365096
| 2020-02-16T13:00:02
| 2020-02-16T13:00:02
| 17,696,375
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 75,875
|
r
|
kayvan.R
|
#### Functions by Kayvan Sadeghi 2011-2012
## May 2012 Changed the return values of some functions to TRUE FALSE
# Hard dependencies are loaded with library(), which fails loudly if a
# package is missing; require() only returns FALSE and lets the code limp
# on until a later, more confusing error.
library(graph)
library(igraph)
######################################################################
######################################################################
# setdiff-like helper: drop every element of `a` that also occurs in `r`,
# preserving the order (and any remaining duplicates) of `a`.
rem <- function(a, r) {
  a[!(a %in% r)]
}
#############################################################################
#############################################################################
#SPl<-function(a,alpha){
# a<-sort(a)
# alpha<-sort(alpha)
# r<-c()
# if (length(alpha)>0){
# for(i in 1:length(a)){
# for(j in 1:length(alpha)){
# if(a[i]==alpha[j]){
# r<-c(r,i)
# break}}}}
# return(r)
#}
###############################################################################
# Positions, within sort(a), of the elements that belong to b.
SPl <- function(a, b) {
  which(is.element(sort(a), b))
}
##############################################################################
# Sorted unique values of `a`. The zero-length guard preserves the
# original's behaviour of returning NA (a[1]) on empty input.
RR <- function(a) {
  if (length(a) == 0) {
    return(a[1])
  }
  unique(sort(a))
}
#################################################################################
#################################################################################
#' Graph to adjacency matrix
#'
#' \code{grMAT} generates the associated adjacency matrix to a given graph.
#'
#'
#' @param agr A graph that can be a \code{graphNEL} or an \code{\link{igraph}}
#' object or a vector of length \eqn{3e}, where \eqn{e} is the number of edges
#' of the graph, that is a sequence of triples (type, node1label, node2label).
#' The type of edge can be \code{"a"} (arrows from node1 to node2), \code{"b"}
#' (arcs), and \code{"l"} (lines).
#' @return A matrix that consists 4 different integers as an \eqn{ij}-element:
#' 0 for a missing edge between \eqn{i} and \eqn{j}, 1 for an arrow from
#' \eqn{i} to \eqn{j}, 10 for a full line between \eqn{i} and \eqn{j}, and 100
#' for a bi-directed arrow between \eqn{i} and \eqn{j}. These numbers are added
#' to be associated with multiple edges of different types. The matrix is
#' symmetric w.r.t full lines and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @keywords graphs adjacency matrix mixed graph vector
#' @examples
#'
#' ## Generating the adjacency matrix from a vector
#' exvec <-c ('b',1,2,'b',1,14,'a',9,8,'l',9,11,'a',10,8,
#'            'a',11,2,'a',11,10,'a',12,1,'b',12,14,'a',13,10,'a',13,12)
#' grMAT(exvec)
#'
#'
`grMAT` <- function(agr)
{
    if (inherits(agr, "graphNEL")) {
        agr <- igraph.from.graphNEL(agr)
    }
    if (inherits(agr, "igraph")) {
        return(get.adjacency(agr, sparse = FALSE))
    }
    if (is.character(agr)) {
        if (length(agr) %% 3 != 0) {
            stop("'The character object' is not in a valid form")
        }
        type.pos <- seq(1, length(agr), 3)
        etype <- agr[type.pos]   # edge types ("a", "l" or "b")
        nodes <- agr[-type.pos]  # flat sequence of node-label pairs
        # Encode each edge type as the integer used in the adjacency
        # matrix: arrow = 1, line = 10, bi-directed arc = 100.
        code <- numeric(length(etype))
        for (i in seq_along(etype)) {
            # The original tested this with a mix of && and &, and reported
            # "'The numeric object' is not in a valid form" for what is a
            # character input; both are fixed here.
            if (!(etype[i] %in% c("a", "l", "b"))) {
                stop("invalid edge type: each triple must start with 'a', 'l' or 'b'")
            }
            code[i] <- switch(etype[i], a = 1, l = 10, b = 100)
        }
        labels <- RR(nodes)  # sorted unique node labels
        ma <- length(labels)
        mat <- matrix(0, ma, ma)
        rownames(mat) <- labels
        colnames(mat) <- labels
        for (i in seq(1, length(nodes), 2)) {
            u <- SPl(labels, nodes[i])
            v <- SPl(labels, nodes[i + 1])
            w <- code[(i + 1) / 2]
            # Add the edge only if an edge of the same type is not already
            # present between u and v (guards against duplicated triples).
            if ((w == 1 && mat[u, v] %% 10 != 1) ||
                (w == 10 && mat[u, v] %% 100 < 10) ||
                (w == 100 && mat[u, v] < 100)) {
                mat[u, v] <- mat[u, v] + w
                # lines and bi-directed arcs are symmetric
                if (w == 10 || w == 100) {
                    mat[v, u] <- mat[v, u] + w
                }
            }
        }
        return(mat)
    }
    # The original fell through to return(mat) here and failed with the
    # cryptic "object 'mat' not found".
    stop("'agr' must be a graphNEL or igraph object, or a character vector of edge triples")
}
#' Ribbonless graph
#'
#' \code{RG} generates and plots ribbonless graphs (a modification of MC graph
#' to use m-separation) after marginalization and conditioning.
#'
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @param M A subset of the node set of \code{a} that is going to be
#' marginalized over
#' @param C Another disjoint subset of the node set of \code{a} that is going
#' to be conditioned on.
#' @param showmat A logical value. \code{TRUE} (by default) to print the
#' generated matrix.
#' @param plot A logical value, \code{FALSE} (by default). \code{TRUE} to plot
#' the generated graph.
#' @param plotfun Function to plot the graph when \code{plot == TRUE}. Can be
#' \code{plotGraph} (the default) or \code{drawGraph}.
#' @param \dots Further arguments passed to \code{plotfun}.
#' @return A matrix that consists 4 different integers as an \eqn{ij}-element:
#' 0 for a missing edge between \eqn{i} and \eqn{j}, 1 for an arrow from
#' \eqn{i} to \eqn{j}, 10 for a full line between \eqn{i} and \eqn{j}, and 100
#' for a bi-directed arrow between \eqn{i} and \eqn{j}. These numbers are added
#' to be associated with multiple edges of different types. The matrix is
#' symmetric w.r.t full lines and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{AG}},, \code{\link{MRG}}, \code{\link{SG}}
#' @references Koster, J.T.A. (2002). Marginalizing and conditioning in
#' graphical models. \emph{Bernoulli}, 8(6), 817-840.
#'
#' Sadeghi, K. (2011). Stable classes of graphs containing directed acyclic
#' graphs. \emph{Submitted}.
#' @keywords graphs directed acyclic graph marginalisation and conditioning MC
#' graph ribbonless graph
#' @examples
#'
#' ex <- matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ##The adjacency matrix of a DAG
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
#' 0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0),16,16, byrow = TRUE)
#'
#' M<-c(3,5,6,15,16)
#' C<-c(4,7)
#' RG(ex,M,C,plot=TRUE)
#'
`RG` <- function (amat,M=c(),C=c(),showmat=TRUE,plot=FALSE, plotfun = plotGraph, ...)
{
  ## Coerce the supported graph representations (igraph, graphNEL, or an
  ## edge-triple character vector) to the integer adjacency-matrix coding
  ## used throughout this file: 1 = arrow i -> j, 10 = line i - j,
  ## 100 = bi-directed arc i <-> j; codes are summed for multiple edges.
  if(class(amat)[1] == "igraph" || class(amat)[1] == "graphNEL" || class(amat)[1] == "character") {
    amat<-grMAT(amat)}
  if(is(amat,"matrix")){
    if(nrow(amat)==ncol(amat)){
      ## Supply default node labels 1..n when none are present.
      if(length(rownames(amat))!=ncol(amat)){
        rownames(amat)<-1:ncol(amat)
        colnames(amat)<-1:ncol(amat)}
    }
    else {
      stop("'object' is not in a valid adjacency matrix form")}}
  if(!is(amat,"matrix")) {
    stop("'object' is not in a valid form")}
  ## S: closure of the conditioning set C under "has an arrow into a member
  ## of S", i.e. C together with all nodes from which C is reachable by
  ## arrows (the anterior set of C).
  S<-C
  St<-c()
  while(identical(S,St)==FALSE)
  {
    St<-S
    for(j in S){
      for(i in rownames(amat)){
        if(amat[i,j]%%10 == 1){
          S<-c(i,S[S!=i])}
      }
    }
  }
  ## Apply the ten edge-generating rules below until no rule adds a new
  ## edge (fixed point).  amatt is initialised to 2*amat only so that the
  ## first comparison fails and the loop body runs at least once.
  amatr<-amat
  amatt<-2*amat
  while(identical(amatr,amatt)==FALSE)
  {
    amatt<-amatr
    ############################################################1
    ## Rule 1: for a marginalised node kk arrow-adjacent to kkk (direction
    ## between kk and kkk symmetrised in amat31), every other node with an
    ## arrow into kk gets an arrow into kkk.
    amat21 <- amatr
    for (kk in M) {
      for (kkk in 1:ncol(amatr)) {
        amat31<-amatr
        if (amatr[kkk,kk]%%10==1|amatr[kk,kkk]%%10==1) {
          amat31[kk,kkk]<-amatr[kkk,kk]
          amat31[kkk,kk]<-amatr[kk,kkk]}
        idx <- which(amat31[, kk]%%10 == 1)
        lenidx <- length(idx)
        if ((lenidx > 1&amat31[kkk,kk]%%10==1)) {
          for (ii in 1:(lenidx )) {
            #for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], kkk]%%10==0&idx[ii]!=kkk){
              amat21[idx[ii], kkk] <- TRUE
            }}
        }
      }
    }
    amatr<-amat21
    ################################################################2
    ## Rule 2: for kk in the conditioning closure S, each node with an
    ## arrow into kk gets an arrow into each node arc-adjacent to kk.
    amat22 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]%%10 == 1)
      idy <- which(amatr[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%10==0 & idx[ii]!=idy[jj]){
              amat22[idx[ii], idy[jj]] <- 1
            }}
        }
      }
    }
    amatr<-amat22+amatr
    #################################################################3
    ## Rule 3: for kk in M, each line-neighbour of kk gets an arrow into
    ## each node kk points at (children located via the transpose amat33).
    amat33<- t(amatr)
    amat23 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat33[, kk]%%10 == 1)
      idy <- which(amat33[, kk]%%100> 9)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idy[jj], idx[ii]]%%10==0 & idx[ii]!=idy[jj]){
              amat23[idy[jj], idx[ii]] <- 1
            }}
        }
      }
    }
    amatr<-amat23+amatr
    ##################################################################4
    ## Rule 4: for kk in M, each line-neighbour of kk gets an arrow into
    ## each node arc-adjacent to kk.
    amat24 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%100>9)
      idy <- which(amatr[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%10==0 & idx[ii]!=idy[jj]){
              amat24[idx[ii], idy[jj]] <- 1
            }}
        }
      }
    }
    amatr<-amat24+amatr
    ####################################################################5
    ## Rule 5: for kk in M, every pair of nodes kk points at becomes
    ## arc-adjacent (code 100, symmetrised via t(amat25)).
    amat35<- t(amatr)
    amat25 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat35[, kk]%%10 == 1)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]<100){
              amat25[idx[ii], idx[jj]] <- 100
            }}
        }
      }
    }
    amatr<-amat25+t(amat25)+amatr
    ######################################################################6
    ## Rule 6: for kk in M, each node kk points at becomes arc-adjacent to
    ## each node already arc-adjacent to kk.
    amat36<- t(amatr)
    amat26 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat36[, kk]%%10 == 1)
      idy <- which(amat36[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idy[jj], idx[ii]]<100 & idx[ii]!=idy[jj]){
              amat26[idy[jj], idx[ii]] <- 100
            }}
        }
      }
    }
    amatr<-amat26+t(amat26)+amatr
    #################################################################7
    ## Rule 7: for kk in S, every pair of nodes arc-adjacent to kk becomes
    ## arc-adjacent.
    amat27 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]>99)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]<100){
              amat27[idx[ii], idx[jj]] <- 100
            }}
        }
      }
    }
    amatr<-amat27+t(amat27)+amatr
    ################################################################8
    ## Rule 8: for kk in S, every pair of nodes with an arrow into kk is
    ## joined by a line (code 10, symmetrised).
    amat28 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]%%10 == 1)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]%%100<10){
              amat28[idx[ii], idx[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat28+t(amat28)+amatr
    #################################################################9
    ## Rule 9: for kk in M, each node with an arrow into kk is joined by a
    ## line to each line-neighbour of kk.
    amat29 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%10 == 1)
      idy <- which(amatr[, kk]%%100> 9)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%100<10 & idx[ii]!=idy[jj]){
              amat29[idx[ii], idy[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat29+t(amat29)+amatr
    ##################################################################10
    ## Rule 10: for kk in M, every pair of line-neighbours of kk is joined
    ## by a line.
    amat20 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%100>9)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]%%100<10){
              amat20[idx[ii], idx[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat20+t(amat20)+amatr
  }
  ## Drop the marginalised (M) and conditioned (C) rows/columns: the
  ## ribbonless graph lives on the remaining nodes only.
  Mn<-c()
  Cn<-c()
  for(i in M){
    Mn<-c(Mn,which(rownames(amat)==i))}
  for(i in C){
    Cn<-c(Cn,which(rownames(amat)==i))}
  if(length(Mn)>0&length(Cn)>0){
    fr<-amatr[-c(Mn,Cn),-c(Mn,Cn)]}
  if(length(Mn)>0&length(Cn)==0){
    fr<-amatr[-c(Mn),-c(Mn)]}
  if(length(Mn)==0&length(Cn)>0){
    fr<-amatr[-c(Cn),-c(Cn)]}
  if(length(Mn)==0&length(Cn)==0){
    fr<-amatr}
  if(plot==TRUE){
    plotfun(fr,...)}
  ## Return the adjacency matrix, invisibly when showmat is FALSE.
  if(showmat==FALSE){
    invisible(fr)}
  else{return(fr)}
}
##############################################################################
##############################################################################
#' summary graph
#'
#' \code{SG} generates and plots summary graphs after marginalization and
#' conditioning.
#'
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @param M A subset of the node set of \code{a} that is going to be
#' marginalised over
#' @param C Another disjoint subset of the node set of \code{a} that is going
#' to be conditioned on.
#' @param showmat A logical value. \code{TRUE} (by default) to print the
#' generated matrix.
#' @param plot A logical value, \code{FALSE} (by default). \code{TRUE} to plot
#' the generated graph.
#' @param plotfun Function to plot the graph when \code{plot == TRUE}. Can be
#' \code{plotGraph} (the default) or \code{drawGraph}.
#' @param \dots Further arguments passed to \code{plotfun}.
#' @return A matrix that consists 4 different integers as an \eqn{ij}-element:
#' 0 for a missing edge between \eqn{i} and \eqn{j}, 1 for an arrow from
#' \eqn{i} to \eqn{j}, 10 for a full line between \eqn{i} and \eqn{j}, and 100
#' for a bi-directed arrow between \eqn{i} and \eqn{j}. These numbers are added
#' to be associated with multiple edges of different types. The matrix is
#' symmetric w.r.t full lines and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{AG}}, \code{\link{MSG}}, \code{\link{RG}}
#' @references Sadeghi, K. (2011). Stable classes of graphs containing directed
#' acyclic graphs. \emph{Submitted}.
#'
#' Wermuth, N. (2011). Probability distributions with summary graph structure.
#' \emph{Bernoulli}, 17(3),845-879.
#' @keywords graphs directed acyclic graph marginalization and conditioning
#' summary graph
#' @examples
#'
#' ex <- matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ##The adjacency matrix of a DAG
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
#' 0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0),16,16, byrow = TRUE)
#' M <- c(3,5,6,15,16)
#' C <- c(4,7)
#' SG(ex, M, C, plot = TRUE)
#' SG(ex, M, C, plot = TRUE, plotfun = drawGraph, adjust = FALSE)
#'
`SG`<-function (amat,M=c(),C=c(),showmat=TRUE, plot=FALSE, plotfun = plotGraph, ...)
{
  ## Coerce supported graph representations to the integer adjacency-matrix
  ## coding (1 = arrow, 10 = line, 100 = arc; codes summed for multi-edges).
  if(class(amat)[1] == "igraph" || class(amat)[1] == "graphNEL" || class(amat)[1] == "character") {
    amat<-grMAT(amat)}
  if(is(amat,"matrix")){
    if(nrow(amat)==ncol(amat)){
      ## Supply default node labels 1..n when none are present.
      if(length(rownames(amat))!=ncol(amat)){
        rownames(amat)<-1:ncol(amat)
        colnames(amat)<-1:ncol(amat)}
    }
    else {
      stop("'object' is not in a valid adjacency matrix form")}}
  if(!is(amat,"matrix")) {
    stop("'object' is not in a valid form")}
  ## S: anterior closure of the conditioning set C (C plus all nodes with a
  ## directed path of arrows into C), as in RG above.
  S<-C
  St<-c()
  while(identical(S,St)==FALSE)
  {
    St<-S
    for(j in S){
      for(i in rownames(amat)){
        if(amat[i,j]%%10 == 1){
          S<-c(i,S[S!=i])}
      }
    }
  }
  ## Fixed-point application of the same ten edge-generating rules used in
  ## RG above (rules 1-10 numbered by the divider comments).  amatt starts
  ## at 2*amat only so the first loop test fails and the body runs once.
  amatr<-amat
  amatt<-2*amat
  while(identical(amatr,amatt)==FALSE)
  {
    amatt<-amatr
    ############################################################1
    amat21 <- amatr
    for (kk in M) {
      for (kkk in 1:ncol(amatr)) {
        amat31<-amatr
        if (amatr[kkk,kk]%%10==1|amatr[kk,kkk]%%10==1) {
          amat31[kk,kkk]<-amatr[kkk,kk]
          amat31[kkk,kk]<-amatr[kk,kkk]}
        idx <- which(amat31[, kk]%%10 == 1)
        lenidx <- length(idx)
        if ((lenidx > 1&amat31[kkk,kk]%%10==1)) {
          for (ii in 1:(lenidx )) {
            #for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], kkk]%%10==0&idx[ii]!=kkk){
              amat21[idx[ii], kkk] <- TRUE
            }}
        }
      }
    }
    amatr<-amat21
    ################################################################2
    amat22 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]%%10 == 1)
      idy <- which(amatr[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%10==0 & idx[ii]!=idy[jj]){
              amat22[idx[ii], idy[jj]] <- 1
            }}
        }
      }
    }
    amatr<-amat22+amatr
    #################################################################3
    amat33<- t(amatr)
    amat23 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat33[, kk]%%10 == 1)
      idy <- which(amat33[, kk]%%100> 9)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idy[jj], idx[ii]]%%10==0 & idx[ii]!=idy[jj]){
              amat23[idy[jj], idx[ii]] <- 1
            }}
        }
      }
    }
    amatr<-amat23+amatr
    ##################################################################4
    amat24 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%100>9)
      idy <- which(amatr[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%10==0 & idx[ii]!=idy[jj]){
              amat24[idx[ii], idy[jj]] <- 1
            }}
        }
      }
    }
    amatr<-amat24+amatr
    ####################################################################5
    amat35<- t(amatr)
    amat25 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat35[, kk]%%10 == 1)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]<100){
              amat25[idx[ii], idx[jj]] <- 100
            }}
        }
      }
    }
    amatr<-amat25+t(amat25)+amatr
    ######################################################################6
    amat36<- t(amatr)
    amat26 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat36[, kk]%%10 == 1)
      idy <- which(amat36[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idy[jj], idx[ii]]<100 & idx[ii]!=idy[jj]){
              amat26[idy[jj], idx[ii]] <- 100
            }}
        }
      }
    }
    amatr<-amat26+t(amat26)+amatr
    #################################################################7
    amat27 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]>99)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]<100){
              amat27[idx[ii], idx[jj]] <- 100
            }}
        }
      }
    }
    amatr<-amat27+t(amat27)+amatr
    ################################################################8
    amat28 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]%%10 == 1)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]%%100<10){
              amat28[idx[ii], idx[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat28+t(amat28)+amatr
    #################################################################9
    amat29 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%10 == 1)
      idy <- which(amatr[, kk]%%100> 9)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%100<10 & idx[ii]!=idy[jj]){
              amat29[idx[ii], idy[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat29+t(amat29)+amatr
    ##################################################################10
    amat20 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%100>9)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]%%100<10){
              amat20[idx[ii], idx[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat20+t(amat20)+amatr
  }
  ## Summary-graph post-processing, pass 1: wherever a line is present in a
  ## multi-edge (code %% 100 > 9) keep only the line (set to 10), and turn
  ## every pure arc at that node's column (code exactly 100) into an arrow
  ## out of j (amatr[j,k] <- 1, amatr[k,j] <- 0).
  for(i in 1:ncol(amatr)) {
    for(j in 1:ncol(amatr)) {
      if(amatr[i,j]%%100>9){
        amatr[i,j]<-10
        for(k in 1:ncol(amatr)){
          if(amatr[k,j]==100){
            amatr[j,k]<-1
            amatr[k,j]<-0
          }}
      }
    }}
  ## Pass 2: recompute the anterior closure SS of S in the generated graph
  ## and replace every arrow between two members of SS by a line.
  SS<-S
  SSt<-c()
  while(identical(SS,SSt)==FALSE)
  {
    SSt<-SS
    for(j in SS){
      for(i in rownames(amat)) {
        if(amatr[i,j]%%10 == 1){
          SS<-c(i,SS[SS!=i])}
      }
    }
  }
  for(i in SS){
    for(j in SS) {
      if(amatr[i,j]%%10==1){
        amatr[i,j]<-10
        amatr[j,i]<-10}}}
  ## Drop the marginalised (M) and conditioned (C) rows/columns.
  Mn<-c()
  Cn<-c()
  for(i in M){
    Mn<-c(Mn,which(rownames(amat)==i))}
  for(i in C){
    Cn<-c(Cn,which(rownames(amat)==i))}
  if(length(Mn)>0&length(Cn)>0){
    fr<-amatr[-c(Mn,Cn),-c(Mn,Cn)]}
  if(length(Mn)>0&length(Cn)==0){
    fr<-amatr[-c(Mn),-c(Mn)]}
  if(length(Mn)==0&length(Cn)>0){
    fr<-amatr[-c(Cn),-c(Cn)]}
  if(length(Mn)==0&length(Cn)==0){
    fr<-amatr}
  if(plot==TRUE){
    plotfun(fr,...)}
  ## Return the adjacency matrix, invisibly when showmat is FALSE.
  if(showmat==FALSE){
    invisible(fr)}
  else{return(fr)}
}
##############################################################################
##############################################################################
#' Ancestral graph
#'
#' \code{AG} generates and plots ancestral graphs after marginalization and
#' conditioning.
#'
#'
#' @param amat An adjacency matrix, or a graph that can be of class
#' \code{graphNEL-class} or an \code{\link{igraph}} object, or a vector of
#' length \eqn{3e}, where \eqn{e} is the number of edges of the graph, that is
#' a sequence of triples (type, node1label, node2label). The type of edge can
#' be \code{"a"} (arrows from node1 to node2), \code{"b"} (arcs), and
#' \code{"l"} (lines).
#' @param M A subset of the node set of \code{a} that is going to be
#' marginalized over
#' @param C Another disjoint subset of the node set of \code{a} that is going
#' to be conditioned on.
#' @param showmat A logical value. \code{TRUE} (by default) to print the
#' generated matrix.
#' @param plot A logical value, \code{FALSE} (by default). \code{TRUE} to plot
#' the generated graph.
#' @param plotfun Function to plot the graph when \code{plot == TRUE}. Can be
#' \code{plotGraph} (the default) or \code{drawGraph}.
#' @param \dots Further arguments passed to \code{plotfun}.
#' @return A matrix that is the adjacency matrix of the generated graph. It
#' consists of 4 different integers as an \eqn{ij}-element: 0 for a missing
#' edge between \eqn{i} and \eqn{j}, 1 for an arrow from \eqn{i} to \eqn{j}, 10
#' for a full line between \eqn{i} and \eqn{j}, and 100 for a bi-directed arrow
#' between \eqn{i} and \eqn{j}. These numbers are added to be associated with
#' multiple edges of different types. The matrix is symmetric w.r.t full lines
#' and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MAG}}, \code{\link{RG}}, \code{\link{SG}}
#' @references Richardson, T.S. and Spirtes, P. (2002). Ancestral graph Markov
#' models. \emph{Annals of Statistics}, 30(4), 962-1030.
#'
#' Sadeghi, K. (2011). Stable classes of graphs containing directed acyclic
#' graphs. \emph{Submitted}.
#' @keywords graphs ancestral graph directed acyclic graph marginalization and
#' conditioning
#' @examples
#'
#' ##The adjacency matrix of a DAG
#' ex<-matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
#' 0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0),16,16,byrow=TRUE)
#' M <- c(3,5,6,15,16)
#' C <- c(4,7)
#' AG(ex, M, C, plot = TRUE)
#'
`AG`<-function (amat,M=c(),C=c(),showmat=TRUE,plot=FALSE, plotfun = plotGraph, ...)
{
  ## Coerce supported graph representations to the integer adjacency-matrix
  ## coding (1 = arrow, 10 = line, 100 = arc; codes summed for multi-edges).
  if(class(amat)[1] == "igraph" || class(amat)[1] == "graphNEL" || class(amat)[1] == "character") {
    amat<-grMAT(amat)}
  if(is(amat,"matrix")){
    if(nrow(amat)==ncol(amat)){
      ## Supply default node labels 1..n when none are present.
      if(length(rownames(amat))!=ncol(amat)){
        rownames(amat)<-1:ncol(amat)
        colnames(amat)<-1:ncol(amat)}
    }
    else {
      stop("'object' is not in a valid adjacency matrix form")}}
  if(!is(amat,"matrix")) {
    stop("'object' is not in a valid form")}
  ## S: anterior closure of the conditioning set C, as in RG above.
  S<-C
  St<-c()
  while(identical(S,St)==FALSE)
  {
    St<-S
    for(j in S){
      for(i in rownames(amat)){
        if(amat[i,j]%%10 == 1){
          S<-c(i,S[S!=i])}
      }
    }
  }
  ## Fixed-point application of the same ten edge-generating rules used in
  ## RG above (rules 1-10 numbered by the divider comments).
  amatr<-amat
  amatt<-2*amat
  while(identical(amatr,amatt)==FALSE)
  {
    amatt<-amatr
    ############################################################1
    amat21 <- amatr
    for (kk in M) {
      for (kkk in 1:ncol(amatr)) {
        amat31<-amatr
        if (amatr[kkk,kk]%%10==1|amatr[kk,kkk]%%10==1) {
          amat31[kk,kkk]<-amatr[kkk,kk]
          amat31[kkk,kk]<-amatr[kk,kkk]}
        idx <- which(amat31[, kk]%%10 == 1)
        lenidx <- length(idx)
        if ((lenidx > 1&amat31[kkk,kk]%%10==1)) {
          for (ii in 1:(lenidx )) {
            #for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], kkk]%%10==0&idx[ii]!=kkk){
              amat21[idx[ii], kkk] <- TRUE
            }}
        }
      }
    }
    amatr<-amat21
    ################################################################2
    amat22 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]%%10 == 1)
      idy <- which(amatr[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%10==0 & idx[ii]!=idy[jj]){
              amat22[idx[ii], idy[jj]] <- 1
            }}
        }
      }
    }
    amatr<-amat22+amatr
    #################################################################3
    amat33<- t(amatr)
    amat23 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat33[, kk]%%10 == 1)
      idy <- which(amat33[, kk]%%100> 9)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idy[jj], idx[ii]]%%10==0 & idx[ii]!=idy[jj]){
              amat23[idy[jj], idx[ii]] <- 1
            }}
        }
      }
    }
    amatr<-amat23+amatr
    ##################################################################4
    amat24 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%100>9)
      idy <- which(amatr[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%10==0 & idx[ii]!=idy[jj]){
              amat24[idx[ii], idy[jj]] <- 1
            }}
        }
      }
    }
    amatr<-amat24+amatr
    ####################################################################5
    amat35<- t(amatr)
    amat25 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat35[, kk]%%10 == 1)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]<100){
              amat25[idx[ii], idx[jj]] <- 100
            }}
        }
      }
    }
    amatr<-amat25+t(amat25)+amatr
    ######################################################################6
    amat36<- t(amatr)
    amat26 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amat36[, kk]%%10 == 1)
      idy <- which(amat36[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idy[jj], idx[ii]]<100 & idx[ii]!=idy[jj]){
              amat26[idy[jj], idx[ii]] <- 100
            }}
        }
      }
    }
    amatr<-amat26+t(amat26)+amatr
    #################################################################7
    amat27 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]>99)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]<100){
              amat27[idx[ii], idx[jj]] <- 100
            }}
        }
      }
    }
    amatr<-amat27+t(amat27)+amatr
    ################################################################8
    amat28 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in S) {
      idx <- which(amatr[, kk]%%10 == 1)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]%%100<10){
              amat28[idx[ii], idx[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat28+t(amat28)+amatr
    #################################################################9
    amat29 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%10 == 1)
      idy <- which(amatr[, kk]%%100> 9)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%100<10 & idx[ii]!=idy[jj]){
              amat29[idx[ii], idy[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat29+t(amat29)+amatr
    ##################################################################10
    amat20 <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in M) {
      idx <- which(amatr[, kk]%%100>9)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]%%100<10){
              amat20[idx[ii], idx[jj]] <- 10
            }}
        }
      }
    }
    amatr<-amat20+t(amat20)+amatr
  }
  ## Post-processing, pass 1 (as in SG): keep only the line in multi-edges
  ## containing a line, and redirect pure arcs at such nodes into arrows.
  for(i in 1:ncol(amatr)) {
    for(j in 1:ncol(amatr)) {
      if(amatr[i,j]%%100>9){
        amatr[i,j]<-10
        for(k in 1:ncol(amatr)){
          if(amatr[k,j]==100){
            amatr[j,k]<-1
            amatr[k,j]<-0
          }}
      }
    }}
  ## Pass 2: anterior closure SS of S in the generated graph; arrows
  ## between members of SS become lines.
  SS<-S
  SSt<-c()
  while(identical(SS,SSt)==FALSE)
  {
    SSt<-SS
    for(j in SS){
      for(i in rownames(amat)) {
        if(amatr[i,j]%%10 == 1){
          SS<-c(i,SS[SS!=i])}
      }
    }
  }
  for(i in SS){
    for(j in SS) {
      if(amatr[i,j]%%10==1){
        amatr[i,j]<-10
        amatr[j,i]<-10}}}
  ## Ancestral-graph closure: amatn starts as amatr with extra arrows added
  ## through arrow-adjacent pairs (same mechanics as rule 1, but over all
  ## nodes, not just M); it is then grown together with amatr below.
  amatn <- amatr
  for (kk in 1:ncol(amatr)) {
    for (kkk in 1:ncol(amatr)) {
      amat3n<-amatr
      if (amatr[kkk,kk]%%10==1|amatr[kk,kkk]%%10==1) {
        amat3n[kk,kkk]<-amatr[kkk,kk]
        amat3n[kkk,kk]<-amatr[kk,kkk]}
      idx <- which(amat3n[, kk]%%10 == 1)
      lenidx <- length(idx)
      if ((lenidx > 1&amat3n[kkk,kk]%%10==1)) {
        for (ii in 1:(lenidx )) {
          #for (jj in (ii + 1):lenidx) {
          if(amatr[idx[ii], kkk]%%10==0&idx[ii]!=kkk){
            amatn[idx[ii], kkk] <- TRUE
          }}
      }
    }
  }
  ## Second fixed-point loop: add arcs between arc-neighbours of a node
  ## when that node points at one of them (amat27n), and arrows from
  ## arrow-parents to arc-neighbours the node points at (amat22n); amatn
  ## and amatr are kept in step.  amatt<-2*amat again forces a first pass.
  amatt<-2*amat
  while(identical(amatr,amatt)==FALSE)
  {
    amatt<-amatr
    amat27n <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in 1:ncol(amatr)) {
      idx <- which(amatn[, kk]>99)
      lenidx <- length(idx)
      if ((lenidx > 1)) {
        for (ii in 1:(lenidx - 1)) {
          for (jj in (ii + 1):lenidx) {
            if(amatr[idx[ii], idx[jj]]<100 && (amatn[kk,idx[ii]]%%10==1 || amatn[kk,idx[jj]]%%10==1)){
              amat27n[idx[ii], idx[jj]] <- 100
            }}
        }
      }
    }
    amatn<-amat27n+t(amat27n)+amatn
    amatr<-amat27n+t(amat27n)+amatr
    amat22n <- matrix(rep(0,length(amat)),dim(amat))
    for (kk in 1:ncol(amatr)) {
      idx <- which(amatn[, kk]%%10 == 1)
      idy <- which(amatn[, kk]> 99)
      lenidx <- length(idx)
      lenidy <- length(idy)
      if ((lenidx > 0 & lenidy >0)) {
        for (ii in 1:(lenidx)) {
          for (jj in 1:lenidy) {
            if(amatr[idx[ii], idy[jj]]%%10==0 & idx[ii]!=idy[jj]& amatn[kk,idy[jj]]%%10==1){
              amat22n[idx[ii], idy[jj]] <- 1
            }}
        }
      }
    }
    amatn<-amat22n+amatn
    amatr<-amat22n+amatr
  }
  ## An arrow combined with an arc (code 101) collapses to a plain arrow.
  for(i in 1:ncol(amatr)) {
    for(j in 1:ncol(amatr)) {
      if(amatr[i,j]==101){
        amatr[i,j]<-1
        amatr[j,i]<-0}}}
  ## Drop the marginalised (M) and conditioned (C) rows/columns.
  Mn<-c()
  Cn<-c()
  for(i in M){
    Mn<-c(Mn,which(rownames(amat)==i))}
  for(i in C){
    Cn<-c(Cn,which(rownames(amat)==i))}
  if(length(Mn)>0&length(Cn)>0){
    fr<-amatr[-c(Mn,Cn),-c(Mn,Cn)]}
  if(length(Mn)>0&length(Cn)==0){
    fr<-amatr[-c(Mn),-c(Mn)]}
  if(length(Mn)==0&length(Cn)>0){
    fr<-amatr[-c(Cn),-c(Cn)]}
  if(length(Mn)==0&length(Cn)==0){
    fr<-amatr}
  if(plot==TRUE){
    plotfun(fr, ...)}
  ## Return the adjacency matrix, invisibly when showmat is FALSE.
  if(showmat==FALSE){
    invisible(fr)}
  else{return(fr)}
}
##############################################################################
##############################################################################
#' Maximisation for graphs
#'
#' \code{Max} generates a maximal graph that induces the same independence
#' model from a non-maximal graph.
#'
#' \code{Max} looks for non-adjacent pairs of nodes that are connected by
#' primitive inducing paths, and connects such pairs by an appropriate edge.
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @return A matrix that consists 4 different integers as an \eqn{ij}-element:
#' 0 for a missing edge between \eqn{i} and \eqn{j}, 1 for an arrow from
#' \eqn{i} to \eqn{j}, 10 for a full line between \eqn{i} and \eqn{j}, and 100
#' for a bi-directed arrow between \eqn{i} and \eqn{j}. These numbers are added
#' to be associated with multiple edges of different types. The matrix is
#' symmetric w.r.t full lines and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MAG}}, \code{\link{MRG}}, \code{\link{msep}},
#' \code{\link{MSG}}
#' @references Richardson, T.S. and Spirtes, P. (2002). Ancestral graph Markov
#' models. \emph{Annals of Statistics}, 30(4), 962-1030.
#'
#' Sadeghi, K. and Lauritzen, S.L. (2011). Markov properties for loopless mixed
#' graphs. \emph{Submitted}. \url{http://arxiv.org/abs/1109.5909}.
#' @keywords graphs loopless mixed graph m-separation maximality
#' @examples
#'
#' H <- matrix(c( 0,100, 1, 0,
#' 100, 0,100, 0,
#' 0,100, 0,100,
#' 0, 1,100, 0), 4, 4)
#' Max(H)
#'
Max<-function(amat)
{
  ## Coerce supported graph representations to the integer adjacency-matrix
  ## coding (1 = arrow i -> j, 10 = line, 100 = arc; codes summed).
  if(class(amat)[1] == "igraph" || class(amat)[1] == "graphNEL" || class(amat)[1] == "character") {
    amat<-grMAT(amat)}
  if(is(amat,"matrix")){
    if(nrow(amat)==ncol(amat)){
      ## Supply default node labels 1..n when none are present.
      if(length(rownames(amat))!=ncol(amat)){
        rownames(amat)<-1:ncol(amat)
        colnames(amat)<-1:ncol(amat)}
    }
    else {
      stop("'object' is not in a valid adjacency matrix form")}}
  if(!is(amat,"matrix")) {
    stop("'object' is not in a valid form")}
  na<-ncol(amat)
  ## at: all ordered off-diagonal pairs with no edge in either direction
  ## (adding diag(na) excludes the diagonal from the zero test).
  at<-which(amat+t(amat)+diag(na)==0,arr.ind=TRUE)
  if(dim(at)[1]>0){
    for(i in 1:dim(at)[1]){
      ## S: closure of the pair under "has an arrow into a member of S",
      ## i.e. the pair together with all its ancestors.
      S<-at[i,]
      St<-c()
      while(identical(S,St)==FALSE){
        St<-S
        for(j in S){
          for(k in 1:na){
            if(amat[k,j]%%10 == 1){
              S<-c(k,S[S!=k])}
          }
        }}
      ## one/two: the two endpoints of the non-adjacent pair.  The sets
      ## below collect nodes of Sr (S minus the endpoints) reached from
      ## each endpoint by an arrow out of it or by an arc.
      one<-at[i,1]
      two<-at[i,2]
      onearrow<-c()
      onearc<-c()
      twoarrow<-c()
      twoarc<-c()
      Sr<-S
      Sr<-Sr[Sr!=at[i,1]]
      Sr<-Sr[Sr!=at[i,2]]
      if(length(Sr)>0){
        for(j in Sr){
          if(amat[one,j]%%10==1){
            onearrow<-c(onearrow,j)}
          if(amat[one,j]>99){
            onearc<-c(onearc,j)}}
        ## When the frontiers of the two endpoints meet (same node, or two
        ## arc-adjacent nodes), connect the pair: arrow or arc depending on
        ## which frontier types met — this is the edge that makes the pair
        ## adjacent, keeping the induced independence model unchanged.
        for(j in Sr){
          if(amat[two,j]%%10==1){
            for(k in onearc){
              if(j==k || amat[j,k]>99){
                amat[at[i,2],at[i,1]]<-1}}
            twoarrow<-c(twoarrow,j)}
          if(amat[two,j]>99){
            for(k in onearrow){
              if(j==k || amat[j,k]>99){
                amat[at[i,1],at[i,2]]<-1}}
            for(k in onearc){
              if(j==k || amat[j,k]>99){
                amat[at[i,1],at[i,2]]<-100
                amat[at[i,2],at[i,1]]<-100}}
            twoarc<-c(twoarc,j)}}}
      ## Propagate both frontiers through arcs inside Sr (extending the
      ## candidate inducing paths) until they stop growing, applying the
      ## same meeting tests at each step.
      if(length(c(onearc,onearrow,twoarc,twoarrow))>0){
        for(j in c(onearc,onearrow,twoarc,twoarrow)){
          Sr<-Sr[Sr!=j]}
        onearcn<-c()
        twoarcn<-c()
        onearrown<-c()
        twoarrown<-c()
        while(length(Sr)>0){
          for(l in onearc){
            for(j in Sr){
              if(amat[l,j]>99){
                onearcn<-c(onearcn,j)}}}
          for(l in onearrow){
            for(j in Sr){
              if(amat[l,j]>99){
                onearrown<-c(onearrown,j)}}}
          for(l in twoarc){
            for(j in Sr){
              if(amat[l,j]>99){
                for(k in onearrow){
                  if(j==k || amat[j,k]>99){
                    amat[at[i,1],at[i,2]]<-1}}
                for(k in onearc){
                  if(j==k || amat[j,k]>99){
                    amat[at[i,1],at[i,2]]<-100
                    amat[at[i,2],at[i,1]]<-100}}
                twoarcn<-c(twoarcn,j)}}}
          for(l in twoarrow){
            for(j in Sr){
              if(amat[l,j]>99){
                for(k in onearc){
                  if(j==k || amat[j,k]>99){
                    amat[at[i,1],at[i,2]]<-100
                    amat[at[i,2],at[i,1]]<-100}}
                twoarrown<-c(twoarrown,j)}}}
          ## Stop when no frontier grew this round.
          if(length(c(onearcn,onearrown,twoarcn,twoarrown))==0){
            break}
          for(j in c(onearcn,onearrown,twoarcn,twoarrown)){
            Sr<-Sr[Sr!=j]}
          onearc<-onearcn
          twoarc<-twoarcn
          onearrow<-onearrown
          twoarrow<-twoarrown
          onearcn<-c()
          twoarcn<-c()
          onearrown<-c()
          twoarrown<-c()}}}}
  ## The (now maximal) graph with the newly-added edges.
  return(amat)
}
#####################################################################################################
######################################################################################################
#' The m-separation criterion
#'
#' \code{msep} determines whether two set of nodes are m-separated by a third
#' set of nodes.
#'
#'
#' @param a An adjacency matrix, or a graph that can be a \code{graphNEL} or an
#' \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e} is
#' the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @param alpha A subset of the node set of \code{a}
#' @param beta Another disjoint subset of the node set of \code{a}
#' @param C A third disjoint subset of the node set of \code{a}
#' @return A logical value. \code{TRUE} if \code{alpha} and \code{beta} are
#' m-separated given \code{C}. \code{FALSE} otherwise.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{dSep}}, \code{\link{MarkEqMag}}
#' @references Richardson, T.S. and Spirtes, P. (2002) Ancestral graph Markov
#' models. \emph{Annals of Statistics}, 30(4), 962-1030.
#'
#' Sadeghi, K. and Lauritzen, S.L. (2011). Markov properties for loopless mixed
#' graphs. \emph{Submitted}, 2011. URL \url{http://arxiv.org/abs/1109.5909}.
#' @keywords graphs d-separation m-separation mixed graph
#' @examples
#'
#' H <-matrix(c(0,0,0,0,
#' 1,0,0,1,
#' 0,1,0,0,
#' 0,0,0,0),4,4)
#' msep(H,1,4, 2)
#' msep(H,1,4, c())
#'
msep <- function(a, alpha, beta, C = c()) {
  ## Test whether node sets `alpha` and `beta` are m-separated given `C`
  ## in the loopless mixed graph `a`.
  ##
  ## a     : adjacency matrix (1 = arrow, 10 = line, 100 = arc), or an
  ##         igraph / graphNEL object, or an edge-triple character vector.
  ## alpha : a subset of the node set of `a`.
  ## beta  : another, disjoint subset of the node set of `a`.
  ## C     : a third disjoint subset (the conditioning set).
  ## Returns TRUE iff alpha and beta are m-separated given C.
  #
  # Coerce the supported graph representations to the adjacency-matrix
  # coding.  inherits() is used instead of comparing class(a)[1]; the two
  # agree here (a character *matrix* has class c("matrix","array") and so
  # matches neither test, falling through to the matrix branch as before).
  if (inherits(a, "igraph") || inherits(a, "graphNEL") || inherits(a, "character")) {
    a <- grMAT(a)
  }
  if (is(a, "matrix")) {
    if (nrow(a) == ncol(a)) {
      # Supply default node labels 1..n when none are present.
      if (length(rownames(a)) != ncol(a)) {
        rownames(a) <- 1:ncol(a)
        colnames(a) <- 1:ncol(a)
      }
    } else {
      stop("'object' is not in a valid adjacency matrix form")
    }
  }
  if (!is(a, "matrix")) {
    stop("'object' is not in a valid form")
  }
  # Marginalise over every node outside alpha, beta and C, condition on C,
  # and maximise the resulting ribbonless graph.  By maximality, alpha and
  # beta are m-separated given C iff no edge joins them in this graph.
  M <- rem(rownames(a), c(alpha, beta, C))
  ar <- Max(RG(a, M, C))
  # any() replaces the original max(... != 0) truth test: identical result
  # on non-empty selections, and no warning on empty ones.
  if (any(ar[as.character(beta), as.character(alpha)] +
            ar[as.character(alpha), as.character(beta)] != 0)) {
    return(FALSE)
  }
  TRUE
}
############################################################################
############################################################################
#' Maximal ribbonless graph
#'
#' \code{MRG} generates and plots maximal ribbonless graphs (a modification of
#' MC graph to use m-separation) after marginalisation and conditioning.
#'
#' This function uses the functions \code{\link{RG}} and \code{\link{Max}}.
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @param M A subset of the node set of \code{a} that is going to be
#' marginalized over
#' @param C Another disjoint subset of the node set of \code{a} that is going
#' to be conditioned on.
#' @param showmat A logical value. \code{TRUE} (by default) to print the
#' generated matrix.
#' @param plot A logical value, \code{FALSE} (by default). \code{TRUE} to plot
#' the generated graph.
#' @param plotfun Function to plot the graph when \code{plot == TRUE}. Can be
#' \code{plotGraph} (the default) or \code{drawGraph}.
#' @param \dots Further arguments passed to \code{plotfun}.
#' @return A matrix that consists 4 different integers as an \eqn{ij}-element:
#' 0 for a missing edge between \eqn{i} and \eqn{j}, 1 for an arrow from
#' \eqn{i} to \eqn{j}, 10 for a full line between \eqn{i} and \eqn{j}, and 100
#' for a bi-directed arrow between \eqn{i} and \eqn{j}. These numbers are added
#' to be associated with multiple edges of different types. The matrix is
#' symmetric w.r.t full lines and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MAG}}, \code{\link{Max}}, \code{\link{MSG}},
#' \code{\link{RG}}
#' @references Koster, J.T.A. (2002). Marginalizing and conditioning in
#' graphical models. \emph{Bernoulli}, 8(6), 817-840.
#'
#' Richardson, T.S. and Spirtes, P. (2002). Ancestral graph Markov models.
#' \emph{Annals of Statistics}, 30(4), 962-1030.
#'
#' Sadeghi, K. (2011). Stable classes of graphs containing directed acyclic
#' graphs. \emph{Submitted}.
#'
#' Sadeghi, K. and Lauritzen, S.L. (2011). Markov properties for loopless mixed
#' graphs. \emph{Submitted}. URL \url{http://arxiv.org/abs/1109.5909}.
#' @keywords graphs directed acyclic graph marginalisation and conditioning
#' maximality of graphs MC graph ribbonless graph
#' @examples
#'
#' ex <- matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ##The adjacency matrix of a DAG
#'                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'                1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'                0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
#'                0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
#'                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'                0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,
#'                0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
#'                0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,0,
#'                1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'                0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
#'                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'                1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
#'                0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0),16,16, byrow = TRUE)
#' M <- c(3,5,6,15,16)
#' C <- c(4,7)
#' MRG(ex, M, C, plot = TRUE)
#' ###################################################
#' H <- matrix(c( 0, 100,   1, 0,
#'              100,   0, 100, 0,
#'                0, 100,   0, 100,
#'                0,   1, 100, 0), 4,4)
#' Max(H)
#'
`MRG` <- function (amat,M=c(),C=c(),showmat=TRUE,plot=FALSE, plotfun = plotGraph, ...)
{
  ## Generate the ribbonless graph after marginalisation over M and
  ## conditioning on C, then close it to its maximal form.
  ## Fix: forward the caller-supplied `plotfun` (the original hard-coded
  ## `plotfun = plotGraph`, silently ignoring the argument).
  return(Max(RG(amat, M, C, showmat, plot, plotfun = plotfun, ...)))
}
##########################################################################
##########################################################################
#' Maximal summary graph
#'
#' \code{MSG} generates and plots maximal summary graphs after marginalization
#' and conditioning.
#'
#' This function uses the functions \code{\link{SG}} and \code{\link{Max}}.
#'
#' @param amat An adjacency matrix of a MAG, or a graph that can be a
#' \code{graphNEL} or an \code{\link{igraph}} object or a vector of length
#' \eqn{3e}, where \eqn{e} is the number of edges of the graph, that is a
#' sequence of triples (type, node1label, node2label). The type of edge can be
#' \code{"a"} (arrows from node1 to node2), \code{"b"} (arcs), and \code{"l"}
#' (lines).
#' @param M A subset of the node set of \code{a} that is going to be
#' marginalized over
#' @param C Another disjoint subset of the node set of \code{a} that is going
#' to be conditioned on.
#' @param showmat A logical value. \code{TRUE} (by default) to print the
#' generated matrix.
#' @param plot A logical value, \code{FALSE} (by default). \code{TRUE} to plot
#' the generated graph.
#' @param plotfun Function to plot the graph when \code{plot == TRUE}. Can be
#' \code{plotGraph} (the default) or \code{drawGraph}.
#' @param \dots Further arguments passed to \code{plotfun}.
#' @return A matrix that consists 4 different integers as an \eqn{ij}-element:
#' 0 for a missing edge between \eqn{i} and \eqn{j}, 1 for an arrow from
#' \eqn{i} to \eqn{j}, 10 for a full line between \eqn{i} and \eqn{j}, and 100
#' for a bi-directed arrow between \eqn{i} and \eqn{j}. These numbers are added
#' to be associated with multiple edges of different types. The matrix is
#' symmetric w.r.t full lines and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MAG}}, \code{\link{Max}}, \code{\link{MRG}},
#' \code{\link{SG}}
#' @references Richardson, T.S. and Spirtes, P. (2002). Ancestral graph Markov
#' models. \emph{Annals of Statistics}, 30(4), 962-1030.
#'
#' Sadeghi, K. (2011). Stable classes of graphs containing directed acyclic
#' graphs. \emph{Submitted}.
#'
#' Sadeghi, K. and Lauritzen, S.L. (2011). Markov properties for loopless mixed
#' graphs. \emph{Submitted}. URL \url{http://arxiv.org/abs/1109.5909}.
#'
#' Wermuth, N. (2011). Probability distributions with summary graph structure.
#' \emph{Bernoulli}, 17(3), 845-879.
#' @keywords graphs directed acyclic graph marginalisation and conditioning
#' maximality of graphs summary graph
#' @examples
#'
#' ex<-matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  ##The adjacency matrix of a DAG
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
#'              0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,0,
#'              1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0), 16, 16, byrow=TRUE)
#' M <- c(3,5,6,15,16)
#' C <- c(4,7)
#' MSG(ex,M,C,plot=TRUE)
#' ###################################################
#' H<-matrix(c(0,100,1,0,100,0,100,0,0,100,0,100,0,1,100,0),4,4)
#' Max(H)
#'
`MSG` <- function (amat,M=c(),C=c(),showmat=TRUE,plot=FALSE, plotfun = plotGraph, ...)
{
  ## Generate the summary graph after marginalisation over M and conditioning
  ## on C, then close it to its maximal form.
  ## Fix: forward the caller-supplied `plotfun` (the original hard-coded
  ## `plotfun = plotGraph`, silently ignoring the argument).
  return(Max(SG(amat, M, C, showmat, plot, plotfun = plotfun, ...)))
}
############################################################################
###########################################################################
#' Maximal ancestral graph
#'
#' \code{MAG} generates and plots maximal ancestral graphs after
#' marginalisation and conditioning.
#'
#' This function uses the functions \code{\link{AG}} and \code{\link{Max}}.
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @param M A subset of the node set of \code{a} that is going to be
#' marginalized over
#' @param C Another disjoint subset of the node set of \code{a} that is going
#' to be conditioned on.
#' @param showmat A logical value. \code{TRUE} (by default) to print the
#' generated matrix.
#' @param plot A logical value, \code{FALSE} (by default). \code{TRUE} to plot
#' the generated graph.
#' @param plotfun Function to plot the graph when \code{plot == TRUE}. Can be
#' \code{plotGraph} (the default) or \code{drawGraph}.
#' @param \dots Further arguments passed to \code{plotfun}.
#' @return A matrix that consists 4 different integers as an \eqn{ij}-element:
#' 0 for a missing edge between \eqn{i} and \eqn{j}, 1 for an arrow from
#' \eqn{i} to \eqn{j}, 10 for a full line between \eqn{i} and \eqn{j}, and 100
#' for a bi-directed arrow between \eqn{i} and \eqn{j}. These numbers are added
#' to be associated with multiple edges of different types. The matrix is
#' symmetric w.r.t full lines and bi-directed arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{AG}}, \code{\link{Max}}, \code{\link{MRG}},
#' \code{\link{MSG}}
#' @references Richardson, T. S. and Spirtes, P. (2002). Ancestral graph Markov
#' models. \emph{Annals of Statistics}, 30(4), 962-1030.
#'
#' Sadeghi, K. (2011). Stable classes of graphs containing directed acyclic
#' graphs. \emph{Submitted}.
#'
#' Sadeghi, K.  and Lauritzen, S.L. (2011). Markov properties for loopless
#' mixed graphs. \emph{Submitted}. URL \url{http://arxiv.org/abs/1109.5909}.
#' @keywords ancestral graph directed acyclic graph marginalization and
#' conditioning maximality of graphs
#' @examples
#'
#' ex<-matrix(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  ##The adjacency matrix of a DAG
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
#'              0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,0,
#'              1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#'              1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
#'              0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0), 16, 16, byrow = TRUE)
#' M <- c(3,5,6,15,16)
#' C <- c(4,7)
#' MAG(ex, M, C, plot=TRUE)
#' ###################################################
#' H <- matrix(c(0,100,1,0,100,0,100,0,0,100,0,100,0,1,100,0),4,4)
#' Max(H)
#'
`MAG`<-function (amat,M=c(),C=c(),showmat=TRUE,plot=FALSE, plotfun = plotGraph, ...)
{
  ## Generate the ancestral graph after marginalisation over M and
  ## conditioning on C, then close it to its maximal form.
  ## Fix: forward the caller-supplied `plotfun` (the original hard-coded
  ## `plotfun = plotGraph`, silently ignoring the argument).
  return(Max(AG(amat, M, C, showmat, plot, plotfun = plotfun, ...)))
}
############################################################################
############################################################################
#Plot<-function(a)
#{
# if(class(a)[1] == "igraph" || class(a)[1] == "graphNEL" || class(a)[1] == "character") {
# a<-grMAT(a)}
# if(is(a,"matrix")){
# if(nrow(a)==ncol(a)){
# if(length(rownames(a))!=ncol(a)){
# rownames(a)<-1:ncol(a)
# colnames(a)<-1:ncol(a)}
# l1<-c()
# l2<-c()
# for (i in 1:nrow(a)){
# for (j in i:nrow(a)){
# if (a[i,j]==1){
# l1<-c(l1,i,j)
# l2<-c(l2,2)}
# if (a[j,i]%%10==1){
# l1<-c(l1,j,i)
# l2<-c(l2,2)}
# if (a[i,j]==10){
# l1<-c(l1,i,j)
# l2<-c(l2,0)}
# if (a[i,j]==11){
# l1<-c(l1,i,j,i,j)
# l2<-c(l2,2,0)}
# if (a[i,j]==100){
# l1<-c(l1,i,j)
# l2<-c(l2,3)}
# if (a[i,j]==101){
# l1<-c(l1,i,j,i,j)
# l2<-c(l2,2,3)}
# if (a[i,j]==110){
# l1<-c(l1,i,j,i,j)
# l2<-c(l2,0,3)}
# if (a[i,j]==111){
# l1<-c(l1,i,j,i,j,i,j)
# l2<-c(l2,2,0,3)}
# }
# }
# }
# else {
# stop("'object' is not in a valid adjacency matrix form")
# }
# if(length(l1)>0){
# l1<-l1-1
# agr<-graph(l1,n=nrow(a),directed=TRUE)}
# if(length(l1)==0){
# agr<-graph.empty(n=nrow(a), directed=TRUE)
# return(plot(agr,vertex.label=rownames(a)))}
# return( tkplot(agr, layout=layout.kamada.kawai, edge.curved=FALSE:TRUE, vertex.label=rownames(a),edge.arrow.mode=l2))
# }
# else {
# stop("'object' is not in a valid format")}
#}
############################################################################
############################################################################
#' Markov equivalence for regression chain graphs.
#'
#' \code{MarkEqRcg} determines whether two RCGs (or subclasses of RCGs) are
#' Markov equivalent.
#'
#' The function checks whether the two graphs have the same skeleton and
#' unshielded colliders.
#'
#' @param amat An adjacency matrix of an RCG or a graph that can be a
#' \code{graphNEL} or an \code{\link{igraph}} object or a vector of length
#' \eqn{3e}, where \eqn{e} is the number of edges of the graph, that is a
#' sequence of triples (type, node1label, node2label). The type of edge can be
#' \code{"a"} (arrows from node1 to node2), \code{"b"} (arcs), and \code{"l"}
#' (lines).
#' @param bmat The same as \code{amat}.
#' @return A logical value: \code{TRUE} if the two graphs are Markov
#' equivalent, \code{FALSE} otherwise.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MarkEqMag}}, \code{\link{msep}}
#' @references Wermuth, N. and Sadeghi, K. (2011). Sequences of regressions and
#' their independences. \emph{TEST}, To appear.
#' \url{http://arxiv.org/abs/1103.2523}.
#' @keywords graphs bidirected graph directed acyclic graph Markov equivalence
#' regression chain graph undirected graph multivariate
#' @examples
#'
#' H1<-matrix(c(0,100,0,0,0,100,0,100,0,0,0,100,0,0,0,1,0,0,0,100,0,0,1,100,0),5,5)
#' H2<-matrix(c(0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,100,0,0,1,100,0),5,5)
#' H3<-matrix(c(0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,0),5,5)
#' #MarkEqRcg(H1,H2)
#' #MarkEqRcg(H1,H3)
#' #MarkEqRcg(H2,H3)
#'
MarkEqRcg <- function(amat, bmat) {
  ## Coerce an accepted graph representation (igraph, graphNEL or
  ## edge-triple vector) to an adjacency matrix carrying node names.
  to_adjacency <- function(g) {
    if (class(g)[1] %in% c("igraph", "graphNEL", "character")) {
      g <- grMAT(g)
    }
    if (length(rownames(g)) != ncol(g) | length(colnames(g)) != ncol(g)) {
      rownames(g) <- 1:ncol(g)
      colnames(g) <- 1:ncol(g)
    }
    g
  }
  amat <- to_adjacency(amat)
  bmat <- to_adjacency(bmat)
  ## Align the second matrix to the first one's node ordering.
  bmat <- bmat[rownames(amat), colnames(amat), drop = FALSE]
  p <- ncol(amat)
  if (p != ncol(bmat)) {
    return(FALSE)
  }
  ## Non-adjacent node pairs (each unordered pair appears in both orders):
  ## the complement of the skeleton. The skeletons must coincide.
  miss_a <- which(amat + t(amat) + diag(p) == 0, arr.ind = TRUE)
  miss_b <- which(bmat + t(bmat) + diag(p) == 0, arr.ind = TRUE)
  if (!identical(miss_a, miss_b)) {
    return(FALSE)
  }
  ## For each non-adjacent pair, the nodes receiving an arrowhead from both
  ## endpoints (unshielded colliders) must be the same in both graphs.
  if (nrow(miss_a) != 0) {
    for (k in seq_len(nrow(miss_a))) {
      cols_a <- c()
      cols_b <- c()
      for (v in seq_len(p)) {
        if ((amat[miss_a[k, 1], v] %% 10 == 1 || amat[miss_a[k, 1], v] > 99) &&
            (amat[miss_a[k, 2], v] %% 10 == 1 || amat[miss_a[k, 2], v] > 99)) {
          cols_a <- c(cols_a, v)
        }
        if ((bmat[miss_b[k, 1], v] %% 10 == 1 || bmat[miss_b[k, 1], v] > 99) &&
            (bmat[miss_b[k, 2], v] %% 10 == 1 || bmat[miss_b[k, 2], v] > 99)) {
          cols_b <- c(cols_b, v)
        }
      }
      if (!identical(cols_a, cols_b)) {
        return(FALSE)
      }
    }
  }
  return(TRUE)
}
########################################################################################
########################################################################################
#' Markov equivalence of maximal ancestral graphs
#'
#' \code{MarkEqMag} determines whether two MAGs are Markov equivalent.
#'
#' The function checks whether the two graphs have the same skeleton and
#' the same colliders with order (Ali, Richardson and Spirtes, 2009).
#'
#' @param amat An adjacency matrix of a MAG, or a graph that can be a
#' \code{graphNEL} or an \code{\link{igraph}} object or a vector of length
#' \eqn{3e}, where \eqn{e} is the number of edges of the graph, that is a
#' sequence of triples (type, node1label, node2label). The type of edge can be
#' \code{"a"} (arrows from node1 to node2), \code{"b"} (arcs), and \code{"l"}
#' (lines).
#' @param bmat The same as \code{amat}.
#' @return A logical value: \code{TRUE} if the two graphs are Markov
#' equivalent, \code{FALSE} otherwise.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MarkEqRcg}}, \code{\link{msep}}
#' @references Ali, R.A., Richardson, T.S. and Spirtes, P. (2009) Markov
#' equivalence for ancestral graphs. \emph{Annals of Statistics},
#' 37(5B),2808-2837.
#' @keywords graphs Markov equivalence maximal ancestral graphs multivariate
#' @examples
#'
#' H1<-matrix(  c(0,100,  0,  0,
#'              100,  0,100,  0,
#'                0,100,  0,100,
#'                0,  1,100,  0), 4, 4)
#' H2<-matrix(c(0,0,0,0,1,0,100,0,0,100,0,100,0,1,100,0),4,4)
#' H3<-matrix(c(0,0,0,0,1,0,0,0,0,1,0,100,0,1,100,0),4,4)
#' MarkEqMag(H1,H2)
#' MarkEqMag(H1,H3)
#' MarkEqMag(H2,H3)
#'
`MarkEqMag` <- function(amat,bmat)
{
# Coerce each argument from igraph / graphNEL / edge-triple form to the
# mixed-graph adjacency-matrix encoding, and ensure node names are present.
if(class(amat)[1] == "igraph"){
amat<-grMAT(amat)}
if(class(amat)[1] == "graphNEL"){
amat<-grMAT(amat)}
if(class(amat)[1] == "character"){
amat<-grMAT(amat)}
if( length(rownames(amat))!=ncol(amat) | length(colnames(amat))!=ncol(amat)){
rownames(amat)<-1:ncol(amat)
colnames(amat)<-1:ncol(amat)}
if(class(bmat)[1] == "igraph"){
bmat<-grMAT(bmat)}
if(class(bmat)[1] == "graphNEL"){
bmat<-grMAT(bmat)}
if(class(bmat)[1] == "character"){
bmat<-grMAT(bmat)}
if( length(rownames(bmat))!=ncol(bmat) | length(colnames(bmat))!=ncol(bmat)){
rownames(bmat)<-1:ncol(bmat)
colnames(bmat)<-1:ncol(bmat)}
# Align bmat to amat's node ordering, then require equal size.
bmat<-bmat[rownames(amat),colnames(amat),drop=FALSE]
na<-ncol(amat)
nb<-ncol(bmat)
if(na != nb){
return(FALSE)}
# Skeleton check: at/bt list the non-adjacent (zero in both directions)
# off-diagonal positions; the two graphs must miss exactly the same edges.
at<-which(amat+t(amat)+diag(na)==0,arr.ind=TRUE)
bt<-which(bmat+t(bmat)+diag(na)==0,arr.ind=TRUE)
if(identical(at,bt)==FALSE){
return(FALSE)}
# Unshielded colliders: for every non-adjacent pair, collect triples
# (endpoint1, middle, endpoint2) where the middle node receives an
# arrowhead (arrow %%10==1 or arc >99) from both endpoints.
# These must agree between the two graphs.
a<-c()
b<-c()
if(length(at)>0){
for(i in 1:dim(at)[1]){
for(j in 1:na){
if((amat[at[i,1],j]%%10==1 || amat[at[i,1],j]>99) &&(amat[at[i,2],j]%%10==1 || amat[at[i,2],j]>99) ){
a<-rbind(a,c(at[i,1],j,at[i,2]))}
if((bmat[bt[i,1],j]%%10==1 || bmat[bt[i,1],j]>99) &&(bmat[bt[i,2],j]%%10==1 || bmat[bt[i,2],j]>99) ){
b<-rbind(b,c(bt[i,1],j,bt[i,2]))}}}
if(identical(a,b)==FALSE){
return(FALSE)}}
# Candidate shielded colliders: ar/br are the arrow positions; ap/bp
# collect triples whose middle node is joined to both endpoints by arcs.
# These are candidates for "colliders with order" (see the reference).
ar<-which(amat%%10==1,arr.ind=TRUE)
br<-which(bmat%%10==1,arr.ind=TRUE)
ap<-c()
bp<-c()
if(length(ar)>0){
for(i in 1:dim(ar)[1]){
for(j in 1:na){
if((amat[ar[i,1],j]>99) &&(amat[ar[i,2],j]>99)){
ap<-rbind(ap,c(ar[i,1],j,ar[i,2]))}}}}
if(length(br)>0){
for(i in 1:dim(br)[1]){
for(j in 1:nb){
if((bmat[br[i,1],j]>99) &&(bmat[br[i,2],j]>99)){
bp<-rbind(bp,c(br[i,1],j,br[i,2]))}}}}
# Propagation for amat: repeatedly promote candidate triples in ap to
# colliders with order (appending them to `a`) when they are reachable
# from an already-recorded collider along the appropriate paths
# (presumably discriminating paths -- see Ali et al. 2009).
# The loop reruns until no triple is removed from `aptt` in a pass.
# NOTE(review): the stacked `break` statements below are unreachable after
# the first `break` (R executes only the first); preserved verbatim.
if(length(ap)>0){
aptt<-ap
apt<-c(ap,1)
Qonen<-c()
Qtwon<-c()
while(length(apt)-length(aptt)>0){
apt<-aptt
for(i in (1:dim(ap)[1])){
Qone<-ap[i,1]
Qtwo<-ap[i,2]
while(length(Qone)>0){
for(l in (1:length(Qone))){
J<-which(((amat+t(amat)+diag(na))[ap[i,3],]==0) & ((amat[Qone[l],]>99) | (amat[,Qone[l]]%%100==1)))
for(j in J){
for(k in 1:dim(a)[1]){
if(min(a[k,]==c(j,Qone[l],Qtwo[l]))==1){
a<-rbind(a,ap[i,])
aptt<-aptt[(1:dim(aptt)[1])[-i],]
break
break
break
break}}}
Q<-which((amat[,ap[i,3]]%%10==1) & (amat[Qone[l],]>99))
for(q in Q){
for(k in 1:dim(a)[1]){
if(min(a[k,]==c(q,Qone[l],Qtwo[l]))==1){
Qtwon<-c(Qtwon,Qone[l])
Qonen<-c(Qonen,q)}}}}
Qtwo<-Qtwon
Qone<-Qonen
Qonen<-c()
Qtwon<-c()}}}}
# Same propagation, applied independently to bmat / bp / b.
if(length(bp)>0){
bptt<-bp
bpt<-c(bp,1)
Qbonen<-c()
Qbtwon<-c()
while(length(bpt)-length(bptt)>0){
bpt<-bptt
for(i in (1:dim(bp)[1])){
Qbone<-bp[i,1]
Qbtwo<-bp[i,2]
while(length(Qbone)>0){
for(l in (1:length(Qbone))){
J<-which(((bmat+t(bmat)+diag(nb))[bp[i,3],]==0) & ((bmat[Qbone[l],]>99) | (bmat[,Qbone[l]]%%100==1)))
for(j in J){
for(k in 1:dim(b)[1]){
if(min(b[k,]==c(j,Qbone[l],Qbtwo[l]))==1){
b<-rbind(b,bp[i,])
bptt<-bptt[(1:dim(bptt)[1])[-i],]
break
break
break
break}}}
Qb<-which((bmat[,bp[i,3]]%%10==1) & (bmat[Qbone[l],]>99))
for(q in Qb){
for(k in 1:dim(b)[1]){
if(min(b[k,]==c(q,Qbone[l],Qbtwo[l]))==1){
Qbtwon<-c(Qbtwon,Qbone[l])
Qbonen<-c(Qbonen,q)}}}}
Qbtwo<-Qbtwon
Qbone<-Qbonen
Qbonen<-c()
Qbtwon<-c()}}}}
# Final comparison: the collider-with-order triples of the two graphs
# must have the same count and every row of `a` must occur in `b`.
if(length(a)!=length(b)){
return(FALSE)}
f<-c()
# NOTE(review): `f` accumulates match flags across ALL rows of `a` without
# being reset per row, so `max(f)==0` can only fail before the first match
# is found; preserved as in the original.
if((length(a)>0) && (length(b)>0)){
for(i in 1:dim(a)[1]){
for(j in 1:dim(b)[1]){
f<-c(f,min(a[i,]==b[j,]))}
if(max(f)==0){
return(FALSE)}}}
return(TRUE)
}
##########################################################################################
###########################################################################################
#' Representational Markov equivalence to undirected graphs.
#'
#' \code{RepMarUG} determines whether a given maximal ancestral graph can be
#' Markov equivalent to an undirected graph, and if that is the case, it finds
#' an undirected graph that is Markov equivalent to the given graph.
#'
#' \code{RepMarUG} looks for presence of an unshielded collider V-configuration
#' in graph.
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @return A list with two components: \code{verify} and \code{amat}.
#' \code{verify} is a logical value, \code{TRUE} if there is a representational
#' Markov equivalence and \code{FALSE} otherwise. \code{amat} is either
#' \code{NA} if \code{verify == FALSE} or the adjacency matrix of the generated
#' graph, if \code{verify == TRUE}. In this case it consists of 4 different
#' integers as an \eqn{ij}-element: 0 for a missing edge between \eqn{i} and
#' \eqn{j}, 1 for an arrow from \eqn{i} to \eqn{j}, 10 for a full line between
#' \eqn{i} and \eqn{j}, and 100 for a bi-directed arrow between \eqn{i} and
#' \eqn{j}. These numbers are added to be associated with multiple edges of
#' different types. The matrix is symmetric w.r.t full lines and bi-directed
#' arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MarkEqMag}}, \code{\link{MarkEqRcg}},
#' \code{\link{RepMarBG}}, \code{\link{RepMarDAG}}
#' @references Sadeghi, K. (2011). Markov equivalences for subclasses of
#' loopless mixed graphs. \emph{Submitted}, 2011.
#' @keywords graphs bidirected graph Markov equivalence maximal ancestral graph
#' representational Markov equivalence
#' @examples
#'
#' H<-matrix(c(0,10,0,0,10,0,0,0,0,1,0,100,0,0,100,0),4,4)
#' RepMarUG(H)
#'
RepMarUG <- function(amat) {
  ## Coerce igraph / graphNEL / edge-triple input to an adjacency matrix
  ## and make sure the node names are set.
  if (class(amat)[1] %in% c("igraph", "graphNEL", "character")) {
    amat <- grMAT(amat)
  }
  if (length(rownames(amat)) != ncol(amat) | length(colnames(amat)) != ncol(amat)) {
    rownames(amat) <- 1:ncol(amat)
    colnames(amat) <- 1:ncol(amat)
  }
  p <- ncol(amat)
  ## Non-adjacent node pairs (each unordered pair listed in both orders).
  gaps <- which(amat + t(amat) + diag(p) == 0, arr.ind = TRUE)
  ## An unshielded collider (both non-adjacent endpoints point an arrowhead
  ## at a common node) has no undirected counterpart: reject.
  if (nrow(gaps) != 0) {
    for (k in seq_len(nrow(gaps))) {
      for (v in seq_len(p)) {
        head_from_1 <- amat[gaps[k, 1], v] %% 10 == 1 || amat[gaps[k, 1], v] > 99
        head_from_2 <- amat[gaps[k, 2], v] %% 10 == 1 || amat[gaps[k, 2], v] > 99
        if (head_from_1 && head_from_2) {
          return(list(verify = FALSE, amat = NA))
        }
      }
    }
  }
  ## No unshielded collider: replace every arrow and arc by a full line to
  ## obtain the Markov-equivalent undirected graph.
  for (i in seq_len(p)) {
    for (j in seq_len(p)) {
      if (amat[i, j] == 100) {
        amat[i, j] <- 10
      }
      if (amat[i, j] == 1) {
        amat[i, j] <- 10
        amat[j, i] <- 10
      }
    }
  }
  list(verify = TRUE, amat = amat)
}
########################################################################################
#########################################################################################
#' Representational Markov equivalence to bidirected graphs.
#'
#' \code{RepMarBG} determines whether a given maximal ancestral graph can be
#' Markov equivalent to a bidirected graph, and if that is the case, it finds a
#' bidirected graph that is Markov equivalent to the given graph.
#'
#' \code{RepMarBG} looks for presence of an unshielded non-collider
#' V-configuration in graph.
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @return A list with two components: \code{verify} and \code{amat}.
#' \code{verify} is a logical value, \code{TRUE} if there is a representational
#' Markov equivalence and \code{FALSE} otherwise. \code{amat} is either
#' \code{NA} if \code{verify == FALSE} or the adjacency matrix of the generated
#' graph, if \code{verify == TRUE}. In this case it consists of 4 different
#' integers as an \eqn{ij}-element: 0 for a missing edge between \eqn{i} and
#' \eqn{j}, 1 for an arrow from \eqn{i} to \eqn{j}, 10 for a full line between
#' \eqn{i} and \eqn{j}, and 100 for a bi-directed arrow between \eqn{i} and
#' \eqn{j}. These numbers are added to be associated with multiple edges of
#' different types. The matrix is symmetric w.r.t full lines and bi-directed
#' arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MarkEqMag}}, \code{\link{MarkEqRcg}},
#' \code{\link{RepMarDAG}}, \code{\link{RepMarUG}}
#' @references Sadeghi, K. (2011). Markov equivalences for subclasses of
#' loopless mixed graphs. \emph{Submitted}, 2011.
#' @keywords graphs bidirected graph Markov equivalence maximal ancestral graph
#' representational Markov equivalence
#' @examples
#'
#' H<-matrix(c(0,10,0,0,10,0,0,0,0,1,0,100,0,0,100,0),4,4)
#' RepMarBG(H)
#'
RepMarBG <- function(amat) {
  ## Coerce igraph / graphNEL / edge-triple input to an adjacency matrix
  ## and make sure the node names are set.
  if (class(amat)[1] %in% c("igraph", "graphNEL", "character")) {
    amat <- grMAT(amat)
  }
  if (length(rownames(amat)) != ncol(amat) | length(colnames(amat)) != ncol(amat)) {
    rownames(amat) <- 1:ncol(amat)
    colnames(amat) <- 1:ncol(amat)
  }
  p <- ncol(amat)
  ## Non-adjacent node pairs (each unordered pair listed in both orders).
  gaps <- which(amat + t(amat) + diag(p) == 0, arr.ind = TRUE)
  ## Reject when either endpoint of a non-adjacent pair has a full line
  ## (%% 100 > 9) or an incoming arrow -- i.e. an edge end without an
  ## arrowhead at that endpoint, which a bidirected graph cannot mimic.
  if (nrow(gaps) != 0) {
    for (k in seq_len(nrow(gaps))) {
      e1 <- gaps[k, 1]
      e2 <- gaps[k, 2]
      for (v in seq_len(p)) {
        if (amat[e1, v] %% 100 > 9 || amat[v, e1] %% 10 == 1 ||
            amat[e2, v] %% 100 > 9 || amat[v, e2] %% 10 == 1) {
          return(list(verify = FALSE, amat = NA))
        }
      }
    }
  }
  ## Otherwise replace every full line and arrow by a bi-directed arc to
  ## obtain the Markov-equivalent bidirected graph.
  for (i in seq_len(p)) {
    for (j in seq_len(p)) {
      if (amat[i, j] == 10) {
        amat[i, j] <- 100
      }
      if (amat[i, j] == 1) {
        amat[i, j] <- 100
        amat[j, i] <- 100
      }
    }
  }
  list(verify = TRUE, amat = amat)
}
########################################################################################
########################################################################################
#' Representational Markov equivalence to directed acyclic graphs.
#'
#' \code{RepMarDAG} determines whether a given maximal ancestral graph can be
#' Markov equivalent to a directed acyclic graph, and if that is the case, it
#' finds a directed acyclic graph that is Markov equivalent to the given graph.
#'
#' \code{RepMarDAG} first looks whether the subgraph induced by full lines is
#' chordal and whether there is a minimal collider path or cycle of length 4 in
#' graph.
#'
#' @param amat An adjacency matrix, or a graph that can be a \code{graphNEL} or
#' an \code{\link{igraph}} object or a vector of length \eqn{3e}, where \eqn{e}
#' is the number of edges of the graph, that is a sequence of triples (type,
#' node1label, node2label). The type of edge can be \code{"a"} (arrows from
#' node1 to node2), \code{"b"} (arcs), and \code{"l"} (lines).
#' @return A list with two components: \code{verify} and \code{amat}.
#' \code{verify} is a logical value, \code{TRUE} if there is a representational
#' Markov equivalence and \code{FALSE} otherwise. \code{amat} is either
#' \code{NA} if \code{verify == FALSE} or the adjacency matrix of the generated
#' graph, if \code{verify == TRUE}. In this case it consists of 4 different
#' integers as an \eqn{ij}-element: 0 for a missing edge between \eqn{i} and
#' \eqn{j}, 1 for an arrow from \eqn{i} to \eqn{j}, 10 for a full line between
#' \eqn{i} and \eqn{j}, and 100 for a bi-directed arrow between \eqn{i} and
#' \eqn{j}. These numbers are added to be associated with multiple edges of
#' different types. The matrix is symmetric w.r.t full lines and bi-directed
#' arrows.
#' @author Kayvan Sadeghi
#' @seealso \code{\link{MarkEqMag}}, \code{\link{MarkEqRcg}},
#' \code{\link{RepMarBG}}, \code{\link{RepMarUG}}
#' @references Sadeghi, K. (2011). Markov equivalences for subclasses of
#' loopless mixed graphs. \emph{Submitted}, 2011.
#' @keywords graphs bidirected graph Markov equivalence maximal ancestral graph
#' representational Markov equivalence
#' @examples
#'
#' H<-matrix(c(0,10,0,0,10,0,0,0,0,1,0,100,0,0,100,0),4,4)
#' RepMarDAG(H)
#'
RepMarDAG <- function(amat)
{
  ## Coerce igraph / graphNEL / edge-triple input to an adjacency matrix
  ## and make sure the node names are set.
  if (class(amat)[1] == "igraph") {
    amat <- grMAT(amat)
  }
  if (class(amat)[1] == "graphNEL") {
    amat <- grMAT(amat)
  }
  if (class(amat)[1] == "character") {
    amat <- grMAT(amat)
  }
  if (length(rownames(amat)) != ncol(amat) | length(colnames(amat)) != ncol(amat)) {
    rownames(amat) <- 1:ncol(amat)
    colnames(amat) <- 1:ncol(amat)
  }
  na <- ncol(amat)
  ## Node sets by incident edge type: full lines (%%100>9), arcs (>99) and
  ## arrows (%%10==1).
  full <- sort(unique(which(amat %% 100 > 9, arr.ind = TRUE)[, 1]))
  arc <- sort(unique(which(amat > 99, arr.ind = TRUE)[, 1]))
  arrow <- sort(unique(as.vector(which(amat %% 10 == 1, arr.ind = TRUE))))
  ## Chordality check of the full-line subgraph via a maximum-cardinality
  ## search: repeatedly pick the unvisited node with most visited full-line
  ## neighbours and require those neighbours to be mutually adjacent.
  S <- full[full != full[1]]
  Ma <- full[1]
  while (length(S) > 0) {
    deg <- c()
    for (i in S) {
      deg <- c(deg, length(which(amat[i, Ma] %% 100 > 9, arr.ind = TRUE)))
    }
    s <- S[which(deg == max(deg))[1]]
    ns <- which(amat[s, Ma] %% 100 > 9, arr.ind = TRUE)
    if (min(amat[ns, ns] + diag(length(ns))) == 0) {
      ## Fix: the original returned bare FALSE here, inconsistent with the
      ## documented list(verify, amat) return used everywhere else.
      return(list(verify = FALSE, amat = NA))
    }
    Ma <- c(Ma, s)
    S <- S[S != s]
  }
  ## Look for a minimal collider path/cycle of length four: a non-adjacent
  ## pair whose endpoints reach, through arrowhead-receiving middle nodes,
  ## an arc-connected configuration that a DAG cannot represent.
  at <- which(amat + t(amat) == 0, arr.ind = TRUE)
  ai <- c()
  if (length(at[, 1]) != 0) {
    for (i in (1:length(at[, 1]))) {
      for (j in 1:na) {
        if ((amat[at[i, 1], j] %% 10 == 1 || amat[at[i, 1], j] > 99) &&
            (amat[at[i, 2], j] %% 10 != 1) && (amat[at[i, 2], j] < 100)) {
          ai <- c(ai, j)
        }
      }
      if (length(ai) == 0) {
        break
      }
      for (j in ((1:na)[-ai])) {
        if ((max(amat[ai, j] > 99) == 1) &&
            (amat[at[i, 2], j] %% 10 == 1 || amat[at[i, 2], j] > 99) &&
            (amat[at[i, 1], j] %% 10 != 1) && (amat[at[i, 1], j] < 100)) {
          return(list(verify = FALSE, amat = NA))
        }
      }
      ai <- c()
    }
  }
  ## Orient the full lines of the (chordal) subgraph following the
  ## elimination ordering Ma.
  for (i in Ma) {
    v <- which(amat[i, ] %% 100 > 9)
    for (j in v) {
      amat[i, j] <- 1
      amat[j, i] <- 0
    }
  }
  ## Turn arcs of unshielded colliders into the corresponding arrow pairs.
  at <- which(amat + t(amat) + diag(na) == 0, arr.ind = TRUE)
  if (length(at[, 1]) != 0) {
    for (i in (1:length(at[, 1]))) {
      for (j in 1:na) {
        if ((amat[at[i, 1], j] %% 10 == 1 || amat[at[i, 1], j] > 99) &&
            (amat[at[i, 2], j] %% 10 == 1 || amat[at[i, 2], j] > 99)) {
          amat[at[i, 1], j] <- 1
          amat[at[i, 2], j] <- 1
        }
      }
    }
  }
  ## Build an ordering O of the arrow/arc nodes: repeatedly take a node
  ## with no outgoing arrow among the remaining ones.  Note: if no such
  ## node is found in a pass, the last visited node is dropped anyway
  ## (behaviour preserved from the original).
  O <- c()
  Oc <- arrow
  while (length(Oc) > 0) {
    for (i in Oc) {
      if (max(amat[i, Oc] %% 10) == 0) {
        O <- c(O, i)
        break
      }
    }
    Oc <- Oc[Oc != i]
  }
  for (i in arc) {
    if (length(which(arrow == i)) == 0) {
      O <- c(O, i)
    }
  }
  ## Orient each remaining arc according to the positions of its endpoints
  ## in the ordering O.
  aarc <- which(amat > 99, arr.ind = TRUE)
  if (length(aarc)[1] > 0) {
    for (i in 1:(length(aarc[, 1]))) {
      if (which(O == aarc[i, 1]) > which(O == aarc[i, 2])) {
        amat[aarc[i, 1], aarc[i, 2]] <- 1
      }
      else {
        amat[aarc[i, 1], aarc[i, 2]] <- 0
      }
    }
  }
  return(list(verify = TRUE, amat = amat))
}
##################################################################################
|
0c6e377187d488679c425e0426f69b7e36afc740
|
aa5a0d32a413a832e2cf6a68d2003185246ee3ae
|
/man/genIndex.Rd
|
cb85bd1b62aeb7594aa6c91b7b8e180a94ef32f6
|
[] |
no_license
|
lvclark/polysat
|
62fb4ddcad15db6886d1b1d38e04c6892998dd9f
|
ab6f69af3310e102846faa1c8ea4242eae3e87d8
|
refs/heads/master
| 2022-09-19T04:23:37.375464
| 2022-08-23T12:52:08
| 2022-08-23T12:52:08
| 39,743,630
| 10
| 7
| null | 2018-09-10T15:24:09
| 2015-07-26T21:58:14
|
R
|
UTF-8
|
R
| false
| false
| 1,793
|
rd
|
genIndex.Rd
|
\name{genIndex}
\alias{genIndex}
\alias{genIndex,genambig-method}
\alias{genIndex,array-method}
\title{
Find All Unique Genotypes for a Locus
}
\description{
This function will return all unique genotypes for a given locus (ignoring allele
order, but taking copy number into account) and return those genotypes as well
as an index indicating which genotype(s) each individual has. This is a generic function
with methods for \code{"\linkS4class{genambig}"} objects and for arrays. The array
method is primarily intended for internal use with \code{\link{meandistance.matrix2}},
processing the output of \code{\link{genotypeProbs}}.
}
\usage{
genIndex(object, locus)
}
\arguments{
\item{object}{
Typically, a \code{"genambig"} object. A two-dimensional list (array) can also be
used here, where samples are in the first dimension and loci in the second dimension
and each element of the list is output from \code{genotypeProbs}.
}
\item{locus}{
A character string or integer indicating which locus to process.
}
}
\value{
A list with two elements:
\item{uniquegen }{A list, where each element in the list is a vector indicating a
unique genotype that was found.}
\item{genindex }{For \code{"genambig"} objects, an integer vector, with one value per sample.
This is the index of that sample's genotype in \code{uniquegen}. For arrays, a list with
one element per sample. Each element is a vector of indices of that sample's possible
genotypes in \code{uniquegen}, in the same order as in the \code{genotypeProbs} output.}
}
\author{
Lindsay V. Clark
}
\seealso{
\code{\link{meandistance.matrix}} uses the \code{"genambig"} method internally.
\code{\link{.unal1loc}}, \code{\link{assignClones}}
}
\examples{
data(simgen)
genIndex(simgen, 1)
}
\keyword{ methods }
\keyword{ manip }
|
2267d68f37a372658c3afce5793ff1606a1eab8c
|
05af69e8ba746112a1682d183950d24a592ddae7
|
/man/forecast_NWP.Rd
|
839f48c339ed9220d8ce8f86153e509eaa7401f1
|
[
"MIT"
] |
permissive
|
mhdella/solarbenchmarks
|
ccbceea99c33baf8210883821b2c5262090dc15c
|
00c13a4b7729e82e2796282b4a690b7244ff2da5
|
refs/heads/master
| 2022-07-25T03:30:45.608815
| 2020-05-18T18:49:45
| 2020-05-18T18:49:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,113
|
rd
|
forecast_NWP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecast_methods.R
\name{forecast_NWP}
\alias{forecast_NWP}
\title{Do raw numerical weather prediction ensemble forecast}
\usage{
forecast_NWP(nwp, percentiles, sun_up)
}
\arguments{
\item{nwp}{A [day x issue time x lead time x member] matrix of NWP ensemble
forecasts}
\item{percentiles}{A vector of the percentiles corresponding to the desired
forecast quantiles}
\item{sun_up}{A [day x hour] matrix of logicals, indicating whether the sun
is up}
}
\value{
a matrix of quantile forecasts at each valid time in the input data
}
\description{
Generates a probabilistic forecast from an NWP ensemble by applying an
empirical CDF.
}
\details{
Valid at the resolution of the NWP ensemble (e.g., hourly)
}
\seealso{
Other forecast functions: \code{\link{forecast_CH_PeEn}},
\code{\link{forecast_Gaussian_hourly}},
\code{\link{forecast_Gaussian_intrahour}},
\code{\link{forecast_PeEn_hourly}},
\code{\link{forecast_PeEn_intrahour}},
\code{\link{forecast_climatology}},
\code{\link{forecast_mcm}}
}
\concept{forecast functions}
|
be96b02081b5146be96ad4b7e013388dadb107c6
|
1675291d2a606bd1de7fcd677aea9bf112fd0300
|
/R/github.R
|
e626735254dfa51279fc651dd1d2c50c8818de36
|
[] |
no_license
|
rtobar/remotes
|
b78931ef435e7e90efb40d0c339e1efa55d457e6
|
faaae2da7d3371c7be9d0e4aaff8f9fa585d6beb
|
refs/heads/master
| 2021-06-20T07:19:21.535220
| 2017-08-03T03:51:55
| 2017-08-03T03:51:55
| 99,062,373
| 1
| 0
| null | 2017-08-02T02:18:33
| 2017-08-02T02:18:33
| null |
UTF-8
|
R
| false
| false
| 763
|
r
|
github.R
|
github_GET <- function(path, ..., pat = github_pat()) {
  # GET a GitHub REST API endpoint and parse the JSON response.
  # `path` is relative to https://api.github.com/ ; `pat` is a personal
  # access token (NULL gives an unauthenticated request); `...` is accepted
  # for call-site flexibility but currently unused.
  # NOTE(review): `download()` and `fromJSONFile()` are package-internal
  # helpers defined elsewhere in this package.
  url <- paste0("https://api.github.com/", path)
  tmp <- tempfile()
  on.exit(unlink(tmp), add = TRUE)  # fix: remove the temp file (was leaked)
  download(tmp, url, auth_token = pat)
  fromJSONFile(tmp)
}
github_commit <- function(username, repo, ref = "master") {
  # Fetch commit metadata for `ref` in the repository `username/repo`
  # from the GitHub REST API and return the parsed JSON.
  url <- file.path("https://api.github.com",
    "repos", username, repo, "commits", ref)
  tmp <- tempfile()
  on.exit(unlink(tmp), add = TRUE)  # fix: remove the temp file (was leaked)
  download(tmp, url, auth_token = github_pat())
  fromJSONFile(tmp)
}
#' Retrieve Github personal access token.
#'
#' Reads the personal access token from the \code{GITHUB_PAT} environment
#' variable. Returns \code{NULL} when the variable is unset (or empty),
#' otherwise announces via a message that the token is being used and
#' returns it.
#'
#' @keywords internal
#' @noRd
github_pat <- function() {
  token <- Sys.getenv("GITHUB_PAT")
  if (identical(token, "")) {
    return(NULL)
  }
  message("Using github PAT from envvar GITHUB_PAT")
  token
}
|
2a62b921fc7f4f5e9c7d55f0790b72bbb4aab764
|
842d1b35c31962b75ad8f3d61481570bc53904e9
|
/knn_zoo.R
|
c889547ede349a142afb64d0dbe8c4261a0bab20
|
[] |
no_license
|
MGPraveen07/K-NN
|
5d76a307b2a8fd2559ef523a59135a4b4600ade0
|
586e91400c2593973774a0576e7a81b7d10d1fe8
|
refs/heads/master
| 2020-12-09T06:22:58.321384
| 2020-01-11T11:31:38
| 2020-01-11T11:31:38
| 233,221,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,206
|
r
|
knn_zoo.R
|
# Load and inspect the zoo data set (path is machine-specific).
library(readr)
Zoo <- read_csv("C:/Users/Admin/Desktop/Assignments/k_nn/Zoo.csv")
# Bug fix: view() is not exported by readr, so it is undefined with only
# readr attached; utils::View() (capital V) is always available.
View(Zoo)
str(Zoo)
summary(Zoo)
# Recode the numeric class label (1-7) as a descriptive factor
# (the original comment here wrongly said "normalize the data")
Zoo$type <- factor(Zoo$type, levels = c("1","2","3","4","5","6","7"), labels = c("Animal 1","Animal 2","Animal 3","Animal 4","Animal 5","Animal 6","Animal 7"))
Zoo$type
#Create a function to normalize the data
# Min-max scale a numeric vector onto [0, 1].
norm <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Normalize the 16 feature columns so no single scale dominates the
# Euclidean distances used by kNN.
Zoo_n <- as.data.frame(lapply(Zoo[2:17], norm))
set.seed(1234)
# Random ~70/30 split into train (1) and test (2)
indexes <- sample(2, nrow(Zoo), replace = TRUE, prob = c(0.7, 0.3))
indexes
# Bug fix: the original indexed the raw Zoo columns here, so Zoo_n was
# computed but never used (the alternative section below uses Zoo_n
# correctly). Train/test now draw from the normalized frame.
Zoo_train <- Zoo_n[indexes == 1, ]
Zoo_test <- Zoo_n[indexes == 2, ]
Zoo_train_labels <- Zoo[indexes == 1, 18]
Zoo_test_labels <- Zoo[indexes == 2, 18]
Zoo_train_labels <- Zoo_train_labels[["type"]]
Zoo_test_labels <- Zoo_test_labels[["type"]]
library(class)
Zoo_test_pred <- knn(train = Zoo_train, test = Zoo_test, cl = Zoo_train_labels, k = 14)
table(Zoo_test_labels, Zoo_test_pred)
# Bug fix: view() -> View() (view is not defined with only readr attached)
View(Zoo)
library(gmodels)
CrossTable(x = Zoo_test_labels, y = Zoo_test_pred, prop.chisq = FALSE)
# Accuracy = correctly classified / total
CM <- table(Zoo_test_labels, Zoo_test_pred)
accuracy <- sum(diag(CM)) / sum(CM)
accuracy
############# or
# Alternative workflow: same preparation, followed by a fixed positional
# train/test split instead of a random one.
library(readr)
Zoo <- read_csv("C:/Users/Admin/Desktop/Assignments/k_nn/Zoo.csv")
# Bug fix: view() -> View() (view is not defined with only readr attached)
View(Zoo)
str(Zoo)
summary(Zoo)
# Recode the numeric class label (1-7) as a descriptive factor
# (the original comment here wrongly said "normalize the data")
Zoo$type <- factor(Zoo$type, levels = c("1","2","3","4","5","6","7"), labels = c("Animal 1","Animal 2","Animal 3","Animal 4","Animal 5","Animal 6","Animal 7"))
Zoo$type
#Create a function to normalize the data
# Rescale x linearly so its minimum maps to 0 and its maximum to 1.
norm <- function(x) {
  lo <- min(x)
  (x - lo) / (max(x) - lo)
}
# Normalize the 16 feature columns before computing kNN distances.
Zoo_n <- as.data.frame(lapply(Zoo[2:17], norm))
# Fixed positional split: rows 1-70 train, 71-101 test.
# NOTE(review): this assumes the data set has exactly 101 rows and is not
# ordered by class -- confirm against the source file before reuse.
Zoo_train <- Zoo_n[1:70,]
Zoo_test <- Zoo_n[71:101,]
Zoo_train_labels <- Zoo[1:70,18]
Zoo_test_labels <- Zoo[71:101,18]
Zoo_train_labels <- Zoo_train_labels[["type"]]
Zoo_test_labels <- Zoo_test_labels[["type"]]
library(class)
Zoo_test_pred <- knn(train = Zoo_train, test = Zoo_test, cl = Zoo_train_labels, k = 8)
table(Zoo_test_labels, Zoo_test_pred)
# Bug fix: view() -> View() (view is not defined with only readr attached)
View(Zoo)
library(gmodels)
CrossTable(x = Zoo_test_labels, y = Zoo_test_pred, prop.chisq = FALSE)
# Accuracy = correctly classified / total
CM <- table(Zoo_test_labels, Zoo_test_pred)
accuracy <- sum(diag(CM)) / sum(CM)
accuracy
|
fd508f1289458ac0e15e0233151a7cb56c92c3a9
|
7124b867ea78c31a56cae27bb491ed89ae5baf01
|
/plot3.r
|
57ed44629fc98f4c7ecfe4839351a6f38a931dab
|
[] |
no_license
|
cnik343/exploratoryDataAnalysis-Assgn02
|
a7b7228f4f60a6daaa037bf216d294d2240edba0
|
172c8090196a9a5da9490529ed6a7abf61dbc1a1
|
refs/heads/master
| 2021-01-18T20:10:30.079631
| 2016-06-22T19:29:16
| 2016-06-22T19:29:16
| 61,240,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,892
|
r
|
plot3.r
|
# Exploratory Data Analysis - Assignment 02
# -----------------------------------------------------------------------------
# Assignment
# The overall goal of this assignment is to explore the National Emissions
# Inventory database and see what it say about fine particulate matter pollution
# in the United states over the 10-year period 1999–2008. You may use any R
# package you want to support your analysis. You must address a number of
# questions and tasks in your exploratory analysis. For each question/task you
# will need to make a single plot. Unless specified, you can use any plotting
# system in R to make your plot.
#
# Question 3 : plot3.R -> plot3.png
#
# Of the four types of sources indicated by the type (point, nonpoint, onroad,
# nonroad) variable, which of these four sources have seen decreases in emissions
# from 1999–2008 for Baltimore City? Which have seen increases in emissions from
# 1999–2008? Use the ggplot2 plotting system to make a plot answer this question.
# -----------------------------------------------------------------------------
# Setup the working environment...
library(plyr)
library(ggplot2)
library(reshape2)
# Remove everything from the workspace and set the working directory...
# NOTE(review): rm(list = ls()) and a hard-coded setwd() make this script
# non-portable and destructive to an interactive session; kept as-is since
# relative paths below depend on this working directory.
rm(list = ls())
setwd('W://code//R-Stats//Coursera//04 - ExploratoryDataAnalysis Assgn02')
# Define the data directory and files and download the data if necessary...
dataDir <- "./data"
dataZip <- paste(dataDir, "exdata%2Fdata%2FNEI_data.zip", sep="/")
dataNEI <- paste(dataDir, "summarySCC_PM25.rds", sep="/")
dataSCC <- paste(dataDir, "Source_Classification_Code.rds", sep="/")
# Download and Unzip the data if necessary (skipped when both RDS files
# already exist locally)...
if(!file.exists(dataNEI) || !file.exists(dataSCC)){
  if(!file.exists(dataZip)){
    url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
    download.file(url,destfile = dataZip)
  }
  unzip(dataZip, exdir=dataDir)
}
# Load the data - The first line will likely take a few seconds. Be patient!
NEI <- readRDS(dataNEI)
SCC <- readRDS(dataSCC)
# Make Plot3 on screen...
# Use the subset function to restrict data to Baltimore City, then use a
# combination of melt and ddply from the reshape2 and plyr packages to summarise
# total emissions as a function of both year and type. The plot was generated
# using the ggplot package.
# fips "24510" identifies Baltimore City in this data set.
sub.NEI <- subset(NEI, fips == "24510")
melt.sub.NEI <- melt(sub.NEI, id.vars=c("type", "year"), measure.vars="Emissions")
sum.data.NEI <- ddply(melt.sub.NEI, .(type, year), summarise, sum=sum(value))
g <- ggplot(sum.data.NEI, aes(x=factor(year), y=sum))
g + geom_bar(stat="identity") +
  facet_wrap(~type) +
  xlab("Year") +
  ylab("Baltimore PM25 Emissions (tons)")
# Copy Plots3 to PNG file (copies whatever is on the active device)...
dev.copy(png, file="plot3.png")
dev.off()
|
756229b151c052bdc1c8995af82ee0db5a87bedf
|
2eadca37495f2fb1a1c9c594bf67bb50ce9bf029
|
/man/harrypotter.Rd
|
d6d9b1c2104c2b9620fd8848412a1f3816a03254
|
[] |
no_license
|
bradleyboehmke/harrypotter
|
34e52612bc262fa6581c4c969d92c6520f85c3bf
|
51f714619350e6685534c8a9c892ff1870e56558
|
refs/heads/master
| 2022-11-01T18:02:51.868320
| 2016-12-30T13:56:50
| 2016-12-30T13:56:50
| 77,556,514
| 71
| 18
| null | 2022-10-26T13:45:15
| 2016-12-28T19:34:28
|
R
|
UTF-8
|
R
| false
| true
| 368
|
rd
|
harrypotter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/books.R
\docType{package}
\name{harrypotter}
\alias{harrypotter}
\alias{harrypotter-package}
\title{J.K. Rowling's Harry Potter Series (Books 1-7)}
\description{
This package contains the complete text of the first seven Harry Potter books,
formatted to be convenient for text analysis.
}
|
0b8dbefd8439046fb82a6ffbea4351814b6f1e02
|
75cf6a9fd035883b64ca2309382e0178cf370b43
|
/Empirical/r/doc-R/Social/ergm/divers/spark.r
|
25d4634b4d9ffe033881c8ff6ccb5de65c5b6837
|
[] |
no_license
|
ygtfrdes/Program
|
171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d
|
1c1e30230f0df50733b160ca73510c41d777edb9
|
refs/heads/master
| 2022-10-08T13:13:17.861152
| 2019-11-06T04:53:27
| 2019-11-06T04:53:27
| 219,560,170
| 1
| 2
| null | 2022-09-30T19:51:17
| 2019-11-04T17:39:52
|
HTML
|
UTF-8
|
R
| false
| false
| 2,983
|
r
|
spark.r
|
# sparklyr walkthrough: connect to a local Spark, demo dplyr translation,
# MLlib, file formats, distributed R, extensions, and Livy.
library(sparklyr)
library(dplyr)
sc <- spark_connect(master = "local")
# Using dplyr #############################################################################"
# Copy demo data frames into Spark as remote tables.
iris_tbl <- copy_to(sc, iris)
flights_tbl <- copy_to(sc, nycflights13::flights, "flights")
batting_tbl <- copy_to(sc, Lahman::Batting, "batting")
src_tbls(sc)
# filter by departure delay and print the first few records
flights_tbl %>% filter(dep_delay == 2)
# Summarise per tail number in Spark, then collect() the result into R.
delay <- flights_tbl %>%
  group_by(tailnum) %>%
  summarise(count = n(), dist = mean(distance), delay = mean(arr_delay)) %>%
  filter(count > 20, dist < 2000, !is.na(delay)) %>%
  collect
# plot delays
library(ggplot2)
ggplot(delay, aes(dist, delay)) +
  geom_point(aes(size = count), alpha = 1/2) +
  geom_smooth() +
  scale_size_area(max_size = 2)
# Machine Learning #####################################################################################"
# copy mtcars into spark
mtcars_tbl <- copy_to(sc, mtcars)
# transform our data set, and then partition into 'training', 'test'
partitions <- mtcars_tbl %>%
  filter(hp >= 100) %>%
  mutate(cyl8 = cyl == 8) %>%
  sdf_partition(training = 0.5, test = 0.5, seed = 1099)
# fit a linear model to the training dataset
fit <- partitions$training %>%
  ml_linear_regression(response = "mpg", features = c("wt", "cyl"))
fit
summary(fit)
# Reading and Writing Data #####################################################################""
temp_csv <- tempfile(fileext = ".csv")
temp_parquet <- tempfile(fileext = ".parquet")
temp_json <- tempfile(fileext = ".json")
spark_write_csv(iris_tbl, temp_csv)
iris_csv_tbl <- spark_read_csv(sc, "iris_csv", temp_csv)
spark_write_parquet(iris_tbl, temp_parquet)
iris_parquet_tbl <- spark_read_parquet(sc, "iris_parquet", temp_parquet)
spark_write_json(iris_tbl, temp_json)
iris_json_tbl <- spark_read_json(sc, "iris_json", temp_json)
src_tbls(sc)
# Distributed R ################################################################################"""
# Apply an R function to each partition of the Spark table.
spark_apply(iris_tbl, function(data) {
  data[1:4] + rgamma(1,2)
})
spark_apply(
  iris_tbl,
  function(e) broom::tidy(lm(Petal_Width ~ Petal_Length, e)),
  names = c("term", "estimate", "std.error", "statistic", "p.value"),
  group_by = "Species"
)
# Extensions #####################################################################################"
# write a CSV
# Bug fix: the original assigned the result of tempfile() to a variable
# named `tempfile`, shadowing base::tempfile() for the rest of the session;
# renamed to temp_flights_csv.
temp_flights_csv <- tempfile(fileext = ".csv")
write.csv(nycflights13::flights, temp_flights_csv, row.names = FALSE, na = "")
# define an R interface to Spark line counting
count_lines <- function(sc, path) {
  spark_context(sc) %>%
    invoke("textFile", path, 1L) %>%
    invoke("count")
}
# call spark to count the lines of the CSV
count_lines(sc, temp_flights_csv)
# Livy ###################################################################################################
# livy_install()
livy_service_start()
sc <- spark_connect(master = "http://localhost:8998", method = "livy")
copy_to(sc, iris)
spark_disconnect(sc)
livy_service_stop()
|
b3d7dca3a2dae570045b22712f6d1f3fe207d6c0
|
0878fefd2602360c2b08f77b36d7fc713c055e62
|
/man/traitextract.Rd
|
09a58a97f92a031aa1d44447f4e755a521f422e6
|
[] |
no_license
|
Koalha/sidtraits
|
8f340051b4c5eda915b7095e4953a25f49308061
|
24594f0c57d5e93806ea5c60753886f849e37e38
|
refs/heads/master
| 2021-01-19T08:48:55.461754
| 2015-02-10T11:08:05
| 2015-02-10T11:08:05
| 19,442,693
| 1
| 0
| null | 2015-02-10T11:08:05
| 2014-05-05T03:25:30
|
R
|
UTF-8
|
R
| false
| false
| 435
|
rd
|
traitextract.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{traitextract}
\alias{traitextract}
\title{Extract seed traits from SID}
\usage{
traitextract(queryresult)
}
\arguments{
\item{queryresult}{A single row result from the function sidspecurl or sidspecurls}
}
\description{
This function checks a sidspecurl result and returns traits, if the queried
species has them listed in the database. Intended for use with multiextract.
}
|
bc210d53d12497966cd57aa5392edd97e01e99b4
|
c8639992711286c4f3313345049a4fceceb1f072
|
/LPmerge_concensus_map.R
|
5bbc42a3c672ab4a250cd30e8ae3402aee3732e8
|
[] |
no_license
|
rabbit-tooth/R
|
aacf954a13ad016009aa38a962f9b36e1587d1c2
|
0a52e3da74efd6ce1b0ccb651311cfc105a0524b
|
refs/heads/master
| 2021-01-19T16:37:19.159223
| 2019-04-26T14:26:27
| 2019-04-26T14:26:27
| 101,012,953
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 855
|
r
|
LPmerge_concensus_map.R
|
#Author by 2017/9/1 R --vanilla --slave --args chr < LPmerge_concensusmap.R
library("LPmerge")
# NOTE(review): args[5] assumes the exact invocation shown above
# (R --vanilla --slave --args chr); commandArgs(trailingOnly = TRUE)[1]
# would be position-independent -- confirm before changing.
args <- commandArgs()
chr <- args[5]
# Read one population's map for a chromosome from "<chromosome><population>"
# (a comma-separated file with a header row) and return it as a data frame.
# Fixes: T/F literals replaced with TRUE/FALSE; the pointless trailing
# `map <- ...` assignment (flagged by the old "#no return" comment) is
# dropped -- the data frame is now returned directly and visibly.
getMap <- function(chromosome, population){
  inp <- paste0(chromosome, population)
  read.delim(inp, header = TRUE, sep = ",", comment.char = "", fill = TRUE,
             stringsAsFactors = FALSE)
}
# Read the four population-specific maps for this chromosome
mapI <- getMap(chr,"_ak.csv")
mapII <- getMap(chr,"_xia.csv")
mapIII <- getMap(chr,"_jing.csv")
mapIV <- getMap(chr,"_zhang.csv")
maps <- list(I=mapI,II=mapII,III=mapIII,IV=mapIV)
# Merge the four linkage maps into a single consensus map
ans <- LPmerge(maps)
ans <- as.data.frame(ans)
# NOTE(review): the column names below ("marker.1", "consensus.1", "I.1", ...)
# depend on LPmerge()'s return structure after as.data.frame() -- verify
# against the LPmerge documentation before modifying.
ans <- data.frame(marker=ans[,"marker.1"],position=ans[,"consensus.1"],ak=ans[,"I.1"],xia=ans[,"II.1"],jing=ans[,"III.1"],zhang=ans[,"IV.1"])
# Full consensus table with per-population positions
write.csv(ans, file=paste0(chr,"_c.csv"), row.names=F, quote=F)
# Two-column (marker, position) version of the consensus map
ans <- ans[,c(1,2)]
write.csv(ans, file=paste0(chr,".csv"), row.names=F, quote=F)
|
67902da13360164e82bb62ee2b8b3a7f89e6ff2e
|
1c3cb8bed8c0aa9837e3f1e6bf0cd3a9e23faef1
|
/R/3SelectRunFromFolder.R
|
63de8802cbccdb415ebf949635cd38cc9394c454
|
[] |
no_license
|
Peccary-PMX/PeccaResult
|
2c2b08665bdc7f59c02e2ee64f6ca09cda395efd
|
726deec236ac1a6b9ee5b10056f9c1cb8d3ed0c4
|
refs/heads/main
| 2023-04-15T08:21:11.764143
| 2022-01-29T00:16:51
| 2022-01-29T00:16:51
| 453,237,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,873
|
r
|
3SelectRunFromFolder.R
|
#' @export
# S4 generic select_run(object, value): implemented below for class "folder".
# value = 0 (the default) means "most recent run".
setGeneric("select_run",
           function(object, value = 0){standardGeneric("select_run")}
)
#' From a folder give the run
#' Author: Thibaud Derippe
#' Arguments: object, the dossier object
#' Arguments: value, the run you want to create from
#' Output: a run object
#' @export
# object <- createFolder("file:///D:/Peccary/Exemple_demo")(folder = T)
#
# value = 15
# dossier() %>%
# select(-files)
# dossier(15)
# results(test(1:2))
# object <- test(1:2)
# dossier
# object <- createFolder("file:///C:/Users/titi7/lixoft/monolix/monolix2019R2/demos/2.models_for_continuous_outcomes/2.1.residual_error_model/warfarinPK_project")
# object <- object(folder = T); value = 1
# select_run method for "folder": return the cached run at index `value`
# when one exists; otherwise resolve the run's result file on disk and
# build a fresh run object via createRun(). value = 0 selects the most
# recently dated run from the folder's summary table.
setMethod(f = "select_run",
          signature = "folder",
          definition = function(object, value = 0){
            # Reuse a previously created run if one is cached at this index;
            # try() absorbs out-of-bounds / invalid-index errors.
            test <- try(object@RunsStorage[[value]], silent = TRUE)
            # Fix: inherits() replaces class(test) == "run" (robust when an
            # object carries more than one class).
            if (inherits(test, "run")) {
              return(test)
            } else {
              if (value == 0) {
                # Default: pick the most recent run by date.
                temp <- object@summary %>%
                  arrange(desc(Date)) %>%
                  slice(1)
                file <- paste0(object@racine, "/", temp$files)
              } else {
                # Resolve the requested run number to its file and append the
                # software-specific result-file extension.
                # (A leftover debug print("la") was removed here.)
                file <- paste0(object@racine, "/", subset(object@summary, number == value)$files)
                software <- object@summary %>%
                  filter(number == value) %>%
                  pull(software)
                if (software == "NLMIXR") file <- paste0(file, ".nlmixr")
                if (software == "NONMEM") file <- paste0(file, ".res")
                if (software == "ADAPT") file <- paste0(file, ".run")
              }
              run_output <- createRun(file)
              return(run_output)
            }
          })
# select_run(object, 1)
|
65466a4a3b7f1fb12952c55b66deb538c91a17f3
|
0812d024c1cbe7a0429ad4de5767a6046db8acad
|
/ALY6015_FinalProject_GroupEpsilon.R
|
83f8bed990ee50bf7d2a2f33e27e04a26d466eca
|
[] |
no_license
|
binarynex/NEUProjects
|
2ba9f7281783fb662d8cab48ac611db2b4c6f6a8
|
49056a504ff5ffd25a09393868ea55d6bdbdf774
|
refs/heads/main
| 2023-05-26T21:15:33.779712
| 2021-06-04T19:05:37
| 2021-06-04T19:05:37
| 347,789,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,834
|
r
|
ALY6015_FinalProject_GroupEpsilon.R
|
###-------------------------------------------------------------------
### Group EPSILON:
### Nicole Castonguay, Ryan Goebel, Tim Przybylowicz, Brad Viles
###
### ALY6015.23546
### Final Project
###-------------------------------------------------------------------
###----------------------------------------------------------------------------
### Dependency Installation/Loading & Saving Existing Graphical Parameters
###----------------------------------------------------------------------------
# Install and load required packages
# NOTE(review): install.packages() runs unconditionally on every execution;
# guarding each with if (!requireNamespace(...)) would avoid re-installing.
install.packages("corrplot") #used for creating correlograms
install.packages("glmnet") #glmnet used to implement LASSO
install.packages("dplyr") # for dataset wrangling
install.packages("gginference") # for hypothesis test plots
install.packages("randomForest") # used for random forest analysis
install.packages("vip") # used for variable importance plots
install.packages("ggplot2") # used for visuals
library("corrplot")
library("glmnet")
library("gginference")
library("dplyr")
library("randomForest")
library("vip")
library("ggplot2")
# Save old graphics parameters prior to start, so par(opar) can restore them
opar <- par(no.readonly = TRUE) #Define Original Parameters
###----------------------------------------------------------------------------
### Data Input & Clean-up
### ---------------------------------------------------------------------------
# Bikedataset.csv located at: https://archive.ics.uci.edu/ml/machine-learning-databases/00560/
# Open "SeoulBikeData.csv" dataset
path <- file.choose() # Set file path on local computer (interactive chooser)
bikes <- read.csv(path, stringsAsFactors = FALSE, # import the data
header=TRUE, check.names = TRUE # check and adjust special characters in header
)
# Examine initial dataset structure
str(bikes)
# Simplify column/variable names
colnames(bikes) <- c("calendar", "count", "hour", "temp", "humidity", "wind",
"visibility", "dewpoint", "solarrad", "rain", "snow",
"season", "holiday", "functioning")
# Change date format (source file stores dates as dd/mm/yyyy)
bikes$calendar <- as.Date(bikes$calendar, format="%d/%m/%Y")
# Introduce variable that identifies days of the week
bikes$day <- as.factor(weekdays(bikes$calendar, abbreviate=TRUE))
# Create order of day and season
bikes$day <- ordered(bikes$day, levels=c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"))
bikes$season <- ordered(bikes$season, levels=c("Spring", "Summer", "Autumn", "Winter"))
# Convert day and season to factors
# NOTE(review): overwriting class to "factor" drops the "ordered" class set
# just above while keeping the level order -- presumably intentional (avoids
# polynomial contrasts in lm/glmnet); confirm.
class(bikes$day) <- "factor"
class(bikes$season) <- "factor"
# Change holiday, functioning, and hour to Factors
bikes$holiday <- as.factor(bikes$holiday)
bikes$functioning <- as.factor(bikes$functioning)
bikes$hour <- as.factor(bikes$hour)
# Unit conversions (metric source data -> US customary units)
bikes$temp <- (bikes$temp * 9/5) + 32 # temperature degrees C to F
bikes$wind <- bikes$wind * 2.23693629 # wind speed m/s to mph
bikes$visibility <- (bikes$visibility * 10) / 1609.344 # visibility from 10m to miles
bikes$dewpoint <- (bikes$dewpoint * 9/5) + 32 # dew point temperature from degrees C to F
bikes$solarrad <- bikes$solarrad * 0.277778 # solar radiation from MJ/m^2 to kWh/m^2
bikes$rain <- bikes$rain / 25.4 # rainfall millimeters to inches
bikes$snow <- bikes$snow / 2.54 # snow centimeters to inches
# Create separate dataframe for functioning variable (plot purposes only, see Data Viz section)
#nofunct <- bikes[c("calendar", "hour", "count", "functioning")] ## Not used in current version of paper
# Remove observations where functioning == No (no count data available for these hours)
bikes <- bikes[bikes$functioning=="Yes",]
# Remove calendar and functioning columns (irrelevant info)
bikes <- subset(bikes, select = -c(calendar, functioning))
# Reorder variables: response first, then categorical, then numeric
bikes <- bikes[c("count", "season", "day", "hour", "holiday",
"temp", "humidity", "wind", "visibility", "dewpoint",
"solarrad", "rain", "snow")]
# Examine revised/cleaned dataset structure
#str(bikes)
# Examine data summary
summary(bikes)
###----------------------------------------------------------------------------
### Preliminary Data Visualization
### ---------------------------------------------------------------------------
# Scatterplots of numerical variables (columns 6:13 of bikes) against count,
# each with an OLS best-fit line overlaid
par(mfrow=c(2,4)) # make 2x4 grid of plots
for (n in c(6:13)){
  plot(bikes[ ,n], bikes$count,
       xlab=colnames(bikes)[n],
       ylab="",
       col="cadetblue2",
       pch=1,
       cex.lab=2, cex.axis=2
  )
  abline(lm(bikes$count~bikes[,n]), col="lightsalmon3", lwd=2) # regression best fit line
}
par(oma = c(0, 0, 1.5, 0)) # define outer margin area
par(mfrow=c(1,1)) # return grid to 1 x 1
mtext("Bike Counts per Hour vs. Each Numerical Variable", outer=TRUE, cex=1.5) # title for grid of plots
par(opar) # restore graphical parameters
# Histogram of counts
hist(bikes$count, breaks = seq(0,3600,100), col="cadetblue2", border=FALSE,
     ylim=c(0,1200), xlab="Bikes per Hour",
     main="Histogram of Bike Rentals per Hour")
# Boxplots of Categorical Variables:
par(mfrow=c(2,2)) # make 2x2 grid of plots
## Counts vs Season (winter highlighted)
boxplot(bikes$count~bikes$season,
        xlab="",
        ylab="Bike Count", ylim=c(0,5000),
        col=c("cadetblue2","cadetblue2","cadetblue2","lightsalmon3"),
        main="Season"
)
abline(median(bikes$count),0, col="gray", lty="dotted", lwd=3) # line at median bike count
## Counts vs Day (weekend highlighted)
boxplot(bikes$count~bikes$day,
        xlab="",
        ylab="Bike Count", ylim=c(0,5000),
        col=c("cadetblue2","cadetblue2","cadetblue2","cadetblue2","cadetblue2",
              "lightsalmon3", "lightsalmon3"),
        main="Day of the Week"
)
abline(median(bikes$count),0, col="gray", lty="dotted", lwd=3) # line at median bike count
legend("topright", legend=c("Weekday", "Weekend"),
       fill =c("cadetblue2", "lightsalmon3"), box.lty=0 )
## Counts vs Hour (commuting hours 7-9 and 17-19 highlighted)
# NOTE(review): 25 colors are supplied for 24 hour levels; the extra entry
# is ignored by boxplot -- harmless but worth confirming intent.
boxplot(bikes$count~bikes$hour,
        xlab="",
        ylab="Bike Count", ylim=c(0,5000),
        col=c("cadetblue2","cadetblue2","cadetblue2","cadetblue2","cadetblue2",
              "cadetblue2","cadetblue2","lightsalmon3","lightsalmon3","lightsalmon3",
              "cadetblue2","cadetblue2","cadetblue2","cadetblue2","cadetblue2",
              "cadetblue2","cadetblue2","lightsalmon3","lightsalmon3","lightsalmon3",
              "cadetblue2","cadetblue2","cadetblue2","cadetblue2","cadetblue2"),
        main="Time of Day (0 = Midnight)"
)
abline(median(bikes$count),0, col="gray", lty="dotted", lwd=3) # line at median bike count
legend("topleft", legend=c("Non-Commuting Hours", "Commuting Hours"),
       fill =c("cadetblue2", "lightsalmon3"), box.lty=0)
## Counts vs Holiday
boxplot(bikes$count~bikes$holiday,
        xlab="",
        ylab="Bike Count", ylim=c(0,5000),
        col=c("cadetblue2", "lightsalmon3"),
        main="Holiday or Not"
)
abline(median(bikes$count),0, col="gray", lty="dotted", lwd=3) # line at median bike count
#Make legend to define Median line on the categorical variable box plots
# (overlays an invisible full-device plot so one legend serves all four panels)
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
plot(0,0, type="n", bty="n", xaxt="n", yaxt="n", xlab="", ylab="") # blank plot to overlay
legend("center", legend="Median", col="gray", lty="dotted", lwd=3,
       xpd=TRUE, title="Legend")
par(opar) # restore graphical parameters
# Plot non-functioning hours ### not used in current version of paper
#nofunct <- transform(nofunct, count = ifelse(functioning=="Yes", 0, 1))
#nofunct <- nofunct %>% group_by(calendar) %>% summarise(total = sum(count))
#plot(nofunct[nofunct$total > 0,], ylim=c(0,30), pch=19, col="lightsalmon3",
#     main="Daily Non-Functioning Hours", ylab="", yaxs="i",
#     xlim=c(min(nofunct$calendar), max(nofunct$calendar)),
#     xlab="Calendar: Dec 1, 2017 to Nov 30, 2018"
#)
#segments(x0=nofunct$calendar, y0=nofunct$total, y1=0, col="gray80")
###-------------------------------------------------------------------
### Hypothesis Testing
###-------------------------------------------------------------------
#Null hypothesis: Mean # of riders on weekdays = Mean # of riders on weekends
#Alternative hypothesis: Mean # of riders on weekdays ≠ Mean # of riders on weekends
hypoData <- bikes #copying the data for the hypothesis test
hypoData$weekend = ifelse(hypoData$day %in% c("Sat", "Sun"), "weekend", "weekday") #creating new field to check if weekend
#taking a sample of 500 weekdays and 500 weekends
set.seed(17)
hypoSample <- hypoData %>% group_by(weekend) %>% sample_n(500)
#testing for equal variances
# NOTE(review): the F test runs on the full hypoData while the t test below
# runs on the 1000-row sample -- presumably deliberate, but confirm.
ftest <- var.test(count ~ weekend, data = hypoData)
ftest # p-value < 0.05, reject null hyp. accept alt hyp. vars not equal.
#testing for normality (Shapiro-Wilk on the sampled counts)
shapiro.test(hypoSample$count)
#splitting into 2 groups
WD = hypoSample$count[hypoSample$weekend == 'weekday']
WE = hypoSample$count[hypoSample$weekend == 'weekend']
#performing t test (Welch's, since variances were found unequal above)
myttest = t.test(WD, WE,
alternative = "two.sided",
paired = FALSE,
mu = 0, #assuming there is no difference in means
var.equal = FALSE, #unequal variances
conf.level = 0.95) #using 95% confidence
#plotting our results with gginference package
ggttest(myttest, colaccept="cadetblue2", colreject="seagreen2", colstat="navyblue")
# display t test results
myttest
###-------------------------------------------------------------------
### Regression Analysis
###-------------------------------------------------------------------
# Copy bikes to variable for regression analysis
regData <- bikes[-1] # remove counts response variable
#Choose random indexes for assigning data to Training and Testing (80/20)
set.seed(123) # set.seed for repeatable results
nRecords <- dim(regData)[1] # count the number of records in bikes
testIndexes <- sample(nRecords, round(nRecords/5)) #randomly assign the indexes for testing
bikesTrain <- regData[-testIndexes,] #training dataframe, bikes dataset minus testIndexes
bikesTest <- regData[testIndexes,] #test dataframe, bikes dataset using testIndexes
yR <- bikes[,"count"] # define the dependent variable for regression analysis
xR <- model.matrix(yR~.,data=regData)[,-1] # define the independent variables in matrix form
#Note: The default model.matrix() creates an (Intercept) col, the [,-1] removes this col
#For more information on model.matrix():
#https://stat.ethz.ch/R-manual/R-patched/library/stats/html/model.matrix.html
#Apply the random indexes to the dependent and independent variables
xR.train <- xR[-testIndexes,] # independent var training matrix, xR matrix minus testIndexes
xR.test <- xR[testIndexes,] # independent var test matrix, xR matrix using testIndexes
yR.train <- yR[-testIndexes] # dependent var training matrix, yR matrix minus testIndexes
yR.test <- yR[testIndexes] # dependent var test matrix, yR matrix using testIndexes
#Perform OLS and compute MSE to serve as Baseline:
mod.OLS <- lm(yR.train ~. ,data=bikesTrain) # linear model definition
pred.OLS <- predict(mod.OLS,bikesTest) # feed test dataframe into the linear model
#This pred object contains 'estimated' bike counts for each set of variables in the bikesTest
#OLS Model Performance Parameters
MSE.OLS <- round(mean((yR.test - pred.OLS)^2),2) # calculates the mean squared errors for OLS
RMSE.OLS <- round(sqrt(MSE.OLS),2) # calculates the root mean squared error for OLS
#mae.ols <- round(mean((abs(yR.test - pred.OLS))),2) # mean absolute error
#RSS.OLS <- round(sum((yR.test - pred.OLS)^2),2) # calculates the sum squared errors
#Note these are all proportionately related to one another, can choose one over the others
# OLS coefficients
coef.OLS <- round(coef(summary(mod.OLS)),2) # rounded coefficients table for report purposes
summary(mod.OLS) #to see significance levels
#Correlation Matrix for numerical variables
corMatrix <- cor(bikes[,c("temp","humidity","wind","visibility","dewpoint","solarrad","rain","snow")])
corrplot(corMatrix, method="square",order="hclust",addrect = 2, # plot the matrix
title="Correlation Matrix of Independent Variables",mar=c(0,0,1,0))
par(opar) # restore graphical parameters
#Perform RIDGE Regression and compute MSE:
set.seed(123) # set.seed for repeatable results
cv.ridge <- cv.glmnet(xR.train, yR.train, alpha = 0) # cross-validation on training data to get lambdas
mod.ridge <- glmnet(xR.train, yR.train, alpha = 0, lambda = cv.ridge$lambda.1se) # model creation using lambda 1se
pred.ridge <- predict(mod.ridge, s=cv.ridge$lambda.1se, newx=xR.test) # prediction model on the test data
#Create cross-validation plot to visualize values of lambda.min and lambda.1se
plot(cv.ridge)
title("Cross-Validation Plot to Determine Lambda.min and Lambda.1se", line = 3) # add title for context with spacing 3
#RIDGE Model Performance Parameters
MSE.ridge <- round(mean((yR.test - pred.ridge)^2),2) # calculates the mean squared errors for RIDGE
RMSE.ridge <- round(sqrt(MSE.ridge),2) # calculates the root mean squared error for RIDGE
#mae.ridge <- round(mean((yR.test - abs(pred.ridge))),2) # mean absolute error
#RSS.ridge <- round(sum((yR.test - pred.ridge)^2),2) # calculates the sum squared errors
#Note these are all proportionately related to one another, can choose one over the others
coef.ridge <- round(as.matrix(coef(cv.ridge,s=cv.ridge$lambda.1se)),2) # rounded coefficients table
#Perform LASSO Regression and compute MSE:
set.seed(123) # set.seed for repeatable results
plot(glmnet(xR.train, yR.train, alpha = 1),xvar="lambda") #plot lasso prior to defining lambda
title("Lasso Coefficient Trace Plot", line = 3)
cv.lasso <- cv.glmnet(xR.train, yR.train, alpha = 1) # cross-validation on training data to get lambdas
mod.lasso <- glmnet(xR.train, yR.train, alpha = 1, lambda = cv.lasso$lambda.1se) # model creation using lambda 1se
pred.lasso <- predict(mod.lasso, s=cv.lasso$lambda.1se, newx=xR.test) # prediction model on the test data
#LASSO Model Performance Parameters
MSE.lasso <- round(mean((yR.test - pred.lasso)^2),2) # calculates the mean squared errors for LASSO
RMSE.lasso <- round(sqrt(MSE.lasso),2) # calculates the root mean squared error for LASSO
#mae.lasso <- round(mean((yR.test - abs(pred.lasso))),2) # mean absolute error
#RSS.lasso <- round(sum((yR.test - pred.lasso)^2),2) # calculates the sum squared errors
#Note these are all proportionately related to one another, can choose one over the other two
coef.lasso <- round(as.matrix(coef(cv.lasso,s=cv.lasso$lambda.1se)),2) # rounded coefficients table
###-------------------------------------------------------------------
### ELASTIC NET LOOP
###-------------------------------------------------------------------
#As a means of trying to "optimize" which alpha value creates the best model
# this section will loop through lambda.min and 1se for values of alpha
# ranging from 0 to 1, in 0.01 increments:
loop.result <- data.frame() # create a blank dataframe to be used in the loop
# NOTE(review): rbind inside a double loop grows loop.result incrementally;
# acceptable here (~101 iterations) but preallocation would be faster.
for (a in seq(0, 1, .01)){ # loop through a values from 0 to 1 in 0.01 increments
set.seed(123) # set seed for repeatability
cv.loop <- cv.glmnet(xR.train, yR.train, alpha = a) # perform cross-validation to determine lambda values
# NOTE(review): despite the comment below, only lambda.1se is iterated;
# lambda.min is never evaluated -- confirm whether that was intended.
for (val in c(cv.loop$lambda.1se)){ # for both lambda.min and lambda.1se loop through models
mod.loop <- glmnet(xR.train, yR.train, alpha = a, lambda = val) # create model object using loop values for a and lambda
pred.loop <- predict(mod.loop, s=val, newx=xR.test) # apply model to test data
MSE.loop <- round(mean((yR.test - pred.loop)^2),2) # calculate differences between test and actual values
RMSE.loop <- round(sqrt(MSE.loop),2) # calculate RMSE based on MSE
loop.result <- rbind(loop.result,c(a,round(val,2),RMSE.loop,round(100*(RMSE.OLS-RMSE.loop)/RMSE.OLS,3))) #create a result table
}
}
colnames(loop.result)<-c("alpha","lambda","RMSE","%Improvement") # assign headers to the result table
loop.result <- loop.result[order(loop.result$RMSE),] # sort the table by lowest RMSE
head(loop.result,5) # return the five model parameters coinciding with the lowest RMSE
# recreate a model based on parameters coinciding with the lowest RMSE
mod.enet <- glmnet(xR.train, yR.train,
alpha = loop.result[1,1],
lambda = loop.result[1,2])
# Predictions on test data from final elastic net model
# NOTE(review): `val` here is whatever value the loop variable held on the
# final iteration, not necessarily the best lambda -- confirm this is intended
# (mod.enet has a single lambda, so s has limited effect).
pred.enet <- predict(mod.enet, s=val, newx=xR.test)
#to alleviate any issues from using rounded values for alpha and lambda, return the lowest RMSE from the table
RMSE.enet <- min(loop.result$RMSE)
#mae.enet <- round(mean((yR.test - abs(pred.enet))),2) # mean absolute error # need to calc mae in loop
###-------------------------------------------------------------------
### Regularization Summary
###-------------------------------------------------------------------
#Results Table: RMSE for each of the four models, side by side
reg.results = data.frame(
"Model" = c("OLS", "Ridge Regression", "Lasso","Elastic Net"),
"RMSE" = c(RMSE.OLS,RMSE.ridge,RMSE.lasso,RMSE.enet))
reg.results
# Variable importance plots for regression models
vip.lasso <- vip(mod.lasso,num_features=41) + ggtitle("Lasso Model Predictors")
vip.ridge <- vip(mod.ridge,num_features=41) + ggtitle("Ridge Model Predictors")
vip.OLS <- vip(mod.OLS,num_features=41) + ggtitle("OLS Model Predictors")
vip.enet <- vip(mod.enet,num_features=41) + ggtitle("Elastic Net Model Predictors")
# NOTE(review): grid.arrange() comes from gridExtra, which is never installed
# or attached in this script -- this line will fail unless gridExtra was
# loaded elsewhere in the session. Confirm and add library(gridExtra).
grid.arrange(vip.OLS,vip.ridge,vip.lasso, vip.enet, ncol=4)
###-------------------------------------------------------------------
### Random Forest Regression Model
###-------------------------------------------------------------------
# Create new dataframe for random forest analysis
rf.bikes <- bikes
# Divide data into train and test sets
set.seed(123) # set.seed for repeatable results
testrows.rf <- sample(nrow(rf.bikes), round(nrow(rf.bikes)/5)) #randomly assign rows/observations for testing
rf.train <- rf.bikes[-testrows.rf,] # train dataframe, only non-"testrows"
rf.test <- rf.bikes[testrows.rf,] #test dataframe, only testrows
# New objects for storing error values from optimization loop
oob.err <- double(ncol(rf.bikes)-1) # new object for storing Out of Bag errors
###### Loop to find optimal value of mtry - WARNING: this takes a few minutes ###
for (mtry in 1:(ncol(rf.bikes)-1) ){
set.seed(42)
fit.rf <- randomForest(count ~ ., data=rf.train, mtry=mtry, ntree=100) # reduce ntree to faster loop
oob.err[mtry] <- sqrt(fit.rf$mse[100])
}
# What are the RMSE and mtry values associated w/ the best mtry (min RMSE)
best.mtry <- which(oob.err==min(oob.err)) # find which model gives min test error
rmse.best.mtry <- oob.err[best.mtry] # depending on set.seed, seems to be 9 or 10
par(mfrow=c(1,2)) # make 1x2 grid of RF plots (mtry and ntree)
# Plot RMSE from OOB vs mtry value
plot(1:mtry, oob.err,
pch = 23,
cex=ifelse(oob.err==rmse.best.mtry, 2, 1),
bg=ifelse(oob.err==rmse.best.mtry, "red", "white"),
col=ifelse(oob.err==rmse.best.mtry, "red", "blue"),
type = "b",
ylab="OOB Root Mean Squared Error",
xlab="mtry",
main="OOB RMSE vs. Value of mtry",
lwd=2
)
# Rerun Random Forest with best.mtry, larger number of trees, and importance=TRUE # takes a few minutes
set.seed(42)
rf.model <- randomForest(count ~ ., data=rf.train,
mtry=best.mtry, ntree=500, importance=TRUE)
# Revise default plot to use RMSE instead of MSE
plot(1:rf.model$ntree, sqrt(rf.model$mse), type="l", col="blue", lwd=2,
main="OOB RMSE vs. Number of Trees",
xlab="ntree",
ylab="OOB Root Mean Squared Error",
ylim=c(180,280)
)
# Plot shows error decreasing with number of trees. Looks pretty stable by 300
par(mfrow=c(1,1)) # return plotting to normal 1x1 layout
# # Modified plots for presentation ##
# par(mar=c(5,6,4,2))
# plot(1:mtry, oob.err,
# pch = 23,
# cex=ifelse(oob.err==rmse.best.mtry, 3, 1.5),
# bg=ifelse(oob.err==rmse.best.mtry, "red", "white"),
# col=ifelse(oob.err==rmse.best.mtry, "red", "blue"),
# type = "b",
# ylab="OOB Root Mean Squared Error",
# xlab="mtry",
# main="OOB RMSE vs. Value of mtry",
# lwd=2,
# cex.main=2,
# cex.lab=2,
# cex.axis=2,
# )
# #Revised plot for presentation
# plot(1:rf.model$ntree, sqrt(rf.model$mse), type="l", col="blue", lwd=2,
# main="OOB RMSE vs. Number of Trees",
# xlab="ntree",
# ylab="OOB Root Mean Squared Error",
# cex.main=2, cex.axis=2, cex.lab=2
# )
# par(opar) # Restore original graphics parameters
# variable importance plot, shows which vars have larger effect on the model
varImpPlot(rf.model,
main="Random Forest Variable Importance Plots",
pch=19, col="blue")
# # modified variable imp plot for presentation
# varImpPlot(rf.model,
# main="",
# pch=19, col="blue")
# Final Random Forest regression model summary
print(rf.model)
# Predict on the held-out test rows
pred.rf <- predict(rf.model, rf.test)
# RF model performance parameters
mse.rf <- round(mean((rf.test$count - pred.rf)^2),2) # test-set mean squared error
# BUG FIX: RMSE is now computed from the *unrounded* MSE; previously
# sqrt() was applied to the value already rounded to 2 decimals, so the
# rounding error propagated into rmse.rf.
rmse.rf <- round(sqrt(mean((rf.test$count - pred.rf)^2)), 2)
#mae.rf <- round(mean((abs(yR.test - pred.rf))),2) # mean absolute error
#rss.rf <- round(sum((rf.test$count - pred.rf)^2),2) # calculates the sum squared errors for RF
###-------------------------------------------------------------------
### Compare Regression Models
###-------------------------------------------------------------------
# Comparison of model errors
err.results = data.frame(
"Model" = c("OLS Model", "Ridge Regression Model", "Lasso Model", "Elastic Net Model","Random Forest"),
"RMSE" = c(RMSE.OLS,RMSE.ridge,RMSE.lasso, RMSE.enet, rmse.rf)
)
err.results
# Plot Predicted values vs Actual Values
layout(matrix(c(1,1,2,2,3,3,4,4,0,5,5,0), ncol=4, byrow=TRUE)) # create layout matrix for next 5 plots
# OLS
plot(yR.test, pred.OLS, col="blue",
xlim=c(0,3500), ylim=c(0,3500),
main="OLS Model",
xlab="Actual Test Value of Count",
ylab="Predicted Value from Model"
)
abline(0,1, lty="dotted") # Line indicated Predicted = Actual
abline(lm(pred.OLS~yR.test), col="blue", lwd=3) # best fit line of model
text(3000, 500, paste("RMSE = ", RMSE.OLS), cex=1.5) # RMSE of model
# Ridge
plot(yR.test, pred.ridge, col="blue",
xlim=c(0,3500), ylim=c(0,3500),
main="Ridge Model",
xlab="Actual Test Value of Count",
ylab="Predicted Value from Model"
)
abline(0,1, lty="dotted") # Line indicated Predicted = Actual
abline(lm(pred.ridge~yR.test), col="blue", lwd=3) # best fit line of model
text(3000, 500, paste("RMSE = ", RMSE.ridge), cex=1.5) # RMSE of model
# Lasso
plot(yR.test, pred.lasso, col="blue",
xlim=c(0,3500), ylim=c(0,3500),
main="LASSO Model",
xlab="Actual Test Value of Count",
ylab="Predicted Value from Model"
)
abline(0,1, lty="dotted") # Line indicated Predicted = Actual
abline(lm(pred.lasso~yR.test), col="blue", lwd=3) # best fit line of model
text(3000, 500, paste("RMSE = ", RMSE.lasso), cex=1.5) # RMSE of model
# Elastic Net
plot(yR.test, pred.enet, col="blue",
xlim=c(0,3500), ylim=c(0,3500),
main="Elastic Net Model",
xlab="Actual Test Value of Count",
ylab="Predicted Value from Model"
)
abline(0,1, lty="dotted") # Line indicated Predicted = Actual
abline(lm(pred.enet~yR.test), col="blue", lwd=3) # best fit line of model
text(3000, 500, paste("RMSE = ", RMSE.enet), cex=1.5) # RMSE of model
# Random Forest
plot(rf.test$count, pred.rf,
col="blue",
xlim=c(0,3500), ylim=c(0,3500),
main="Random Forest Model",
xlab="Actual Test Value of Count",
ylab="Predicted Value from Model"
)
abline(0,1, lty="dotted") # Line indicated Predicted = Actual
abline(lm(pred.rf~rf.test$count), col="blue", lwd=3) # best fit line of model
text(3000, 500, paste("RMSE = ", rmse.rf), cex=1.5) # RMSE of model
layout(1) # return plot parameter to just one at a time
###-------------------------------------------------------------------
### Random Forest Classification Model
###-------------------------------------------------------------------
# Create new dataframe for random forest classification
bclass <- bikes
# Calculate cutoffs for 15th percentile and 85th percentile
div <- quantile(bclass$count, c(0.15,0.85))
# div # 15%: 137; 85%: 1424.4
# Create new column for "demand" split at 15th/85th percentile and named low, mid, and high
bclass$demand <- cut(bclass$count, breaks=c(0,div[1],div[2], max(bclass$count)),
                     labels=c("low", "mid", "high"), include.lowest = TRUE)
# Remove count variable, since its directly correlated to demand level
bclass <- subset(bclass, select= -count)
# Split data into train and test sets (80/20)
set.seed(123) # set seed for reproducibility
testrows.bclass <- sample(nrow(bclass), round(nrow(bclass)/5)) #randomly assign rows/observations for testing
bclass.train <- bclass[-testrows.bclass,] # train dataframe, only non-"testrows"
bclass.test <- bclass[testrows.bclass,] #test dataframe, only testrows
# New objects for storing error values from optimization loop
ooberr.rfcl <- double(ncol(bclass)-1) # new object for storing estimated Out of Bag errors
# Loop for optimizing mtry
for (mtrycl in 1:(ncol(bclass)-1) ){
  set.seed(123)
  fit <- randomForest(demand ~ ., data=bclass.train, mtry=mtrycl, ntree=100) # reduce ntree to run faster
  # NOTE(review): err.rate is an ntree x k matrix whose first column is the
  # OOB rate; the linear index fit$ntree lands on row ntree of column 1,
  # i.e. the final OOB error. Correct, but fragile -- confirm intent.
  ooberr.rfcl[mtrycl] <- fit$err.rate[fit$ntree]
}
# What is the oob error and mtry values associated w/ the best mtry (min err)
bestmtry.rfcl <- which(ooberr.rfcl==min(ooberr.rfcl)) # find which model gives min test error
errbestmtry.rfcl <- ooberr.rfcl[bestmtry.rfcl]
par(mfrow=c(1,2)) # make 1x2 grid of RF plots (mtry and ntree)
# Plot estimate of error rate from OOB vs mtry value
plot(1:mtrycl, ooberr.rfcl,
pch = 23,
cex=ifelse(ooberr.rfcl==errbestmtry.rfcl, 2, 1),
bg=ifelse(ooberr.rfcl==errbestmtry.rfcl, "red", "white"),
col=ifelse(ooberr.rfcl==errbestmtry.rfcl, "red", "blue"),
type = "b",
ylab="OOB Estimate of Error Rate",
xlab="Value of mtry",
main="OOB Estimate of Error Rate vs. Value of mtry",
lwd=2
)
# Run final classification random forest model
set.seed(123)
rfclass <- randomForest(demand~., data=bclass.train, mtry=bestmtry.rfcl, ntree=500, type=classification, importance=TRUE)
# Plot model
plot((1:rfclass$ntree), rfclass$err.rate[,1], type="l", lwd=2, col="black",
main="Estimate of Error vs. Number of Trees in Model",
xlab="ntree",
ylab="Estimate of Error",
ylim=c(0.05,0.35)
)
lines((1:rfclass$ntree), rfclass$err.rate[,2], lwd=2, col="red", lty="dashed")
lines((1:rfclass$ntree), rfclass$err.rate[,3], lwd=2, col="green", lty="dotted")
lines((1:rfclass$ntree), rfclass$err.rate[,4], lwd=2, col="blue", lty="dotdash")
# error looks like it's stabilized at a low level
# add legend
legend("topright", colnames(rfclass$err.rate), col=1:4, lty=1:4, lwd=2) # add legend
layout(1)
# par(mar=c(5,6,4,2))
# # Plot estimate of error rate from OOB vs mtry value (revised for presentation)
# plot(1:mtrycl, ooberr.rfcl,
# pch = 23,
# cex=ifelse(ooberr.rfcl==errbestmtry.rfcl, 3, 1.5),
# bg=ifelse(ooberr.rfcl==errbestmtry.rfcl, "red", "white"),
# col=ifelse(ooberr.rfcl==errbestmtry.rfcl, "red", "blue"),
# type = "b",
# ylab="OOB Estimate of Error Rate",
# xlab="Value of mtry",
# main="OOB Estimate of Error Rate vs. Value of mtry",
# lwd=2, cex.main=2, cex.lab=2, cex.axis=2
# )
# # Plot model (revised for presentation)
# plot((1:rfclass$ntree), rfclass$err.rate[,1], type="l", lwd=2, col="black",
# main="Estimate of Error vs. Number of Trees in Model",
# xlab="ntree",
# ylab="Estimate of Error",
# ylim=c(0.05,0.35),
# cex.main=2, cex.axis=2, cex.lab=2
# )
# lines((1:rfclass$ntree), rfclass$err.rate[,2], lwd=2, col="red", lty="dashed", cex=2)
# lines((1:rfclass$ntree), rfclass$err.rate[,3], lwd=2, col="green", lty="dotted", cex=2)
# lines((1:rfclass$ntree), rfclass$err.rate[,4], lwd=2, col="blue", lty="dotdash", cex=2)
# # error looks like it's stabilized at a low level
# # add legend
# legend("topright", colnames(rfclass$err.rate), col=1:4, lty=1:4, lwd=2, cex=1.3) # add legend
#
# par(opar) # Restore original graphics parameters
# Print model summary
print(rfclass)
# variable importance plot for RF classification model.
varImpPlot(rfclass, scale=FALSE, main="Random Forest Variable Importance Plots",
pch=19, col="blue")
## variable importance plot for RF classification model. modified for presentation
#varImpPlot(rfclass, scale=FALSE, main="",
# pch=19, col="blue")
# Input test data into Random Forest Model
predcl <- predict(rfclass, bclass.test)
# Confusion Matrix / Accuracy of predicted results
results <- table(bclass.test[,"demand"], predcl) # rows = actual class, cols = predicted class
#results
class.error <- c(0,0,0) # per-class misclassification rates (low, mid, high)
class.error[1] <- (results[1,2] + results[1,3]) / rowSums(results[,1:3])[1] # Type 1 class error
class.error[2] <- (results[2,1] + results[2,3]) / rowSums(results[,1:3])[2] # Type 2 class error
class.error[3] <- (results[3,1] + results[3,2]) / rowSums(results[,1:3])[3] # Type 3 class error
results <- cbind(results, class.error) # combine class.error into results table
rownames(results) <- c("Actual low", "Actual mid","Actual high") # rename rows
print(results) # display
# Calculate Model Prediction Accuracy.
# BUG FIX: after the cbind() above, `results` has a 4th column holding the
# class.error rates, so the old sum(results) denominator also summed those
# rates and slightly deflated the reported accuracy. Restrict both the
# numerator and denominator to the 3x3 block of raw counts.
acc <- sum(diag(results[, 1:3])) / sum(results[, 1:3])
noquote(paste("Model Prediction Accuracy on Test Data is ", round(acc, 4))) # print result
# Restore original graphics parameters [LAST LINE OF CODE]
par(opar)
|
f04a621f2e3add441495ed657d7a584ac7ad91a6
|
c2cc9e7bde76f30f0c63f7068bdde39cf91aa0f8
|
/Unit 2 - Linear Regression/Unit_2_homework.R
|
65519ed01a6f1a85774f2b83e8da77841814b9ad
|
[] |
no_license
|
arubino322/The_Analytics_Edge
|
5319e3d538c682ace9c8c077792935581841cdfb
|
a6137fe80a8023eaab63a77700fb274f0785d4b6
|
refs/heads/master
| 2016-09-06T10:55:51.039815
| 2015-08-26T22:06:50
| 2015-08-26T22:06:50
| 41,329,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,202
|
r
|
Unit_2_homework.R
|
##CLIMATE CHANGE
# Fit temperature models on pre-2007 data, evaluate on 2007+ (out-of-time split).
climate <- read.csv("climate_change.csv")
summary(climate)
str(climate)
training <- subset(climate, Year < 2007)
testing <- subset(climate, Year > 2006)
# Full model: all candidate climate drivers
model1 <- lm(Temp ~ MEI + CO2 + CH4 + N2O + CFC.11 + CFC.12 + TSI + Aerosols, data = training)
summary(model1)
# Reduced model with a hand-picked subset of predictors
model2 <- lm(Temp ~ MEI + TSI + Aerosols + N2O, data = training)
summary(model2)
# Stepwise (AIC) simplification of the full model, then out-of-sample R^2;
# the baseline for SST is the mean temperature of the *training* period.
model1Step = step(model1)
stepPredict = predict(model1Step, newdata = testing)
SSE = sum((stepPredict - testing$Temp)^2)
SST = sum((mean(training$Temp) - testing$Temp)^2)
R2 = 1 - SSE/SST
###READING TEST SCORES
# Predict PISA 2009 reading scores; train/test files are pre-split.
pisaTrain = read.csv("pisa2009train.csv")
pisaTest = read.csv("pisa2009test.csv")
str(pisaTrain)
tapply(pisaTrain$readingScore, pisaTrain$male, mean)
summary(pisaTrain)
#remove observations with any missing value
pisaTrain = na.omit(pisaTrain)
pisaTest = na.omit(pisaTest)
nrow(pisaTrain)
nrow(pisaTest)
str(pisaTrain)
#include unordered factors in regression models
#Set the reference level for raceeth (the most common factor aka white)
pisaTrain$raceeth = relevel(pisaTrain$raceeth, "White")
pisaTest$raceeth = relevel(pisaTest$raceeth, "White")
# Full linear model on all remaining predictors
lmScore <- lm(readingScore ~ ., data = pisaTrain)
summary(lmScore)
# In-sample SSE and RMSE
SSE_pt = sum(lmScore$residuals^2)
RMSE_pt = sqrt(SSE_pt/nrow(pisaTrain))
summary(lmScore)
predTest = predict(lmScore, newdata = pisaTest)
summary(predTest)
# Out-of-sample SSE/SST/RMSE; the SST baseline is the training-set mean score
SSE_t = sum((predTest - pisaTest$readingScore)^2)
SST_t = sum((mean(pisaTrain$readingScore) - pisaTest$readingScore)^2)
RMSE_t = sqrt(SSE_t/nrow(pisaTest))
mean(pisaTrain$readingScore)
# test-set R^2
1 - SSE_t/SST_t
#####DETECTING FLU EPIDEMICS VIA SEARCH ENGINE QUERY DATA#######
# Model log(ILI) (influenza-like illness rate) from search-query frequency.
FluTrain <- read.csv("FluTrain.csv")
summary(FluTrain)
which.max(FluTrain$ILI) #this finds the row of the max ILI
FluTrain$Week[303] #this outputs the week for the row with highest ILI
which.max(FluTrain$Queries)
FluTrain$Queries[303]
hist(FluTrain$ILI)
plot(log(FluTrain$ILI), FluTrain$Queries)
#find the best models
modelx <- lm(log(ILI) ~ Queries, data = FluTrain)
modely <- lm(Queries ~ log(ILI), data = FluTrain)
summary(modelx)
summary(modely)
#modelx is the best
FluTrend1 <- lm(log(ILI) ~ Queries, data = FluTrain)
summary(FluTrend1)
cor(log(FluTrain$ILI),FluTrain$Queries)
#correlation^2 = R2 for a simple linear regression
FluTest <- read.csv("FluTest.csv")
# predictions are on the log scale, so exponentiate back to ILI units
PredTest1 = exp(predict(FluTrend1, newdata = FluTest))
which(FluTest$Week == "2012-03-11 - 2012-03-17") #11
PredTest1[11]
FluTest$ILI[11]
SSE_fluTest = sum((PredTest1 - FluTest$ILI)^2)
RMSE_flutest = sqrt(SSE_fluTest/nrow(FluTest))
# NOTE(review): installing a package inside a script is a side effect;
# consider removing once zoo is installed.
install.packages("zoo")
library(zoo)
# 2-week lag of ILI (zoo::lag with k = -2 shifts the series backwards)
ILILag2 = lag(zoo(FluTrain$ILI), -2, na.pad = TRUE)
FluTrain$ILILag2 = coredata(ILILag2)
summary(FluTrain$ILILag2)
plot(FluTrain$ILILag2, log(FluTrain$ILI))
FluTrend2 = lm(log(ILI) ~ Queries + log(ILILag2), data = FluTrain)
summary(FluTrend2)
#add ILILag2 to test data set
ILILag2 = lag(zoo(FluTest$ILI), -2, na.pad = TRUE)
FluTest$ILILag2 = coredata(ILILag2)
summary(FluTest$ILILag2)
# BUG FIX: `which(FluTest$ILILag2 == "NA")` compared against the *string*
# "NA", which never matches missing values (and NA == x yields NA anyway);
# use is.na() to locate the missing lag entries.
which(is.na(FluTest$ILILag2))
FluTest$ILILag2
# fill the first two test lags with the last two training-set ILI values
FluTest$ILILag2[1] = FluTrain$ILI[416]
FluTest$ILILag2[1] #1.852736
FluTest$ILILag2[2] = FluTrain$ILI[417]
FluTest$ILILag2[2] #2.12413
PredTest2 = exp(predict(FluTrend2, newdata = FluTest))
SSE_fluTest2 = sum((PredTest2 - FluTest$ILI)^2)
RMSE_flutest2 = sqrt(SSE_fluTest2/nrow(FluTest))
###################################OPTIONAL#######################################
# Life-expectancy analysis on the built-in state.x77 dataset.
data(state)
statedata = cbind(data.frame(state.x77), state.abb, state.area, state.center, state.division, state.name, state.region)
str(statedata)
plot(statedata$x, statedata$y)
tapply(statedata$HS.Grad, statedata$state.region, mean)
boxplot(statedata$Murder ~ statedata$state.region)
northeast <- subset(statedata, state.region == "Northeast")
summary(northeast)
which.max(northeast$Murder)
northeast$state.name[6]
# Full model on all candidate predictors
stateModel <- lm(Life.Exp ~ Population + Income + Illiteracy + Murder + HS.Grad + Frost + Area, data = statedata)
summary(stateModel)
plot(statedata$Income, statedata$Life.Exp)
#test more models by removing variables
stateModel2 = lm(Life.Exp ~ Population + Murder + HS.Grad + Frost, data = statedata)
summary(stateModel2)
#it would be up to you to leave population, but we'll leave it cuz it has just enough statistical relevancy to be okay
predState <- predict(stateModel2)
summary(predState)
#find lowest Life.Exp
sort(predict(stateModel2))
which.min(statedata$Life.Exp)
statedata$state.abb[40]
which.max(statedata$Life.Exp)
statedata$state.abb[11]
# in-sample sum of squared errors, then residuals sorted by magnitude
SSE = sum((predState - statedata$Life.Exp)^2)
sort(abs(stateModel2$residuals))
###########FORECASTING ELANTRA SALES##############
elantra = read.csv("elantra.csv")
# out-of-time split: train on pre-2013, test on 2013+
elantraTrain = subset(elantra, Year < 2013)
elantraTest = subset(elantra, Year > 2012)
elantraModel <- lm(ElantraSales ~ Unemployment + CPI_all + CPI_energy + Queries, data = elantraTrain)
summary(elantraModel)
emodel2 <- lm(ElantraSales ~ Unemployment + CPI_all + CPI_energy + Queries + Month, data = elantraTrain)
summary(emodel2)
# Month as a factor gives each month its own coefficient instead of a linear trend
elantraTrain$MonthFactor = as.factor(elantraTrain$Month)
emodel3 <- lm(ElantraSales ~ Unemployment + CPI_all + CPI_energy + Queries + MonthFactor, data = elantraTrain)
summary(emodel3)
# check pairwise correlations for multicollinearity
cor(elantraTrain$CPI_energy, elantraTrain$Month)
cor(elantraTrain$CPI_energy, elantraTrain$Unemployment)
cor(elantraTrain$CPI_energy, elantraTrain$Queries)
cor(elantraTrain$CPI_energy, elantraTrain$CPI_all)
#OR A BETTER WAY TO WRITE THIS
cor(elantraTrain[c("Unemployment","Month","Queries","CPI_energy","CPI_all")])
#remove variables from our model to make it more significant
emodel3 <- lm(ElantraSales ~ Unemployment + CPI_all + CPI_energy + MonthFactor, data = elantraTrain)
elantraTest$MonthFactor = as.factor(elantraTest$Month)
elantraPredict = predict(emodel3, newdata = elantraTest)
# test-set R^2 against the training-mean baseline
SSE = sum((elantraPredict - elantraTest$ElantraSales)^2)
mean(elantraTrain$ElantraSales) #baseline
SST = sum((mean(elantraTrain$ElantraSales) - elantraTest$ElantraSales)^2)
R2 = 1 - SSE/SST
# largest absolute prediction error and the month/year it occurred in
max(abs(elantraPredict - elantraTest$ElantraSales))
which.max(abs(elantraPredict - elantraTest$ElantraSales))
elantraTest$Month[5]
elantraTest$Year[5]
|
1446a932f64bfd2a52895d75c01224fa0197095c
|
90fe73abb06f5bec5aef3f1f5d41af328f5e359c
|
/complete.R
|
df4b8200d50edabce05e89f47807d644727aaf0d
|
[] |
no_license
|
mcfornier/Rprogramming
|
7e108d395ee5f83a03388ba61650cf6c560b5a09
|
dc21d4fd6725c53cc1adb2e988a30273d066af6e
|
refs/heads/master
| 2021-01-01T18:37:55.214773
| 2015-02-22T12:19:29
| 2015-02-22T12:19:29
| 31,159,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,185
|
r
|
complete.R
|
complete <- function(directory, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files (named 001.csv, 002.csv, ...),
  ## given relative to the working directory or as an absolute path.
  ## 'id' is an integer vector indicating the monitor ID numbers to be used.
  ## Return a data frame of the form:
  ##   id nobs
  ##   1  117
  ##   2  1041
  ##   ...
  ## where 'id' is the monitor ID number and 'nobs' is the
  ## number of complete cases (rows with no NA) in that monitor's file.
  ##
  ## Fixes vs. previous version:
  ##  - removed the hard-coded absolute Windows path, so `directory` is
  ##    actually honoured and the function is portable;
  ##  - the id column now reports the requested monitor IDs rather than the
  ##    loop index (previously complete(d, c(5, 12)) returned ids 1, 2);
  ##  - sprintf("%03d") replaces the manual zero-padding branches;
  ##  - results are collected with vapply instead of growing vectors.
  nobs <- vapply(id, function(monitor) {
    fname <- file.path(directory, sprintf("%03d.csv", monitor))
    dat <- read.csv(file = fname, header = TRUE) # load frame to read
    sum(complete.cases(dat))                     # rows with no missing values
  }, integer(1))
  data.frame(id = id, nobs = nobs)               # results frame
}
|
9cf21e983641d0e311b0c9b9b2e9de50883b1895
|
48e3c473d637b9eef3592471acdf6ee9d37650f8
|
/10.02_CalcSimilarityMatrices.Recon.r
|
3f1a815dfc24a36584ccdb5301bf1ee0d0a40da2
|
[] |
no_license
|
bishopsqueeze/k_soc
|
a82906c652791db044e8134f86722a8fe38b3f70
|
916e7e1ef5f1f47daeb7fa8fe693afeae493cb12
|
refs/heads/master
| 2021-01-10T00:53:58.127310
| 2014-06-21T19:00:19
| 2014-06-21T19:00:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,948
|
r
|
10.02_CalcSimilarityMatrices.Recon.r
|
##------------------------------------------------------------------
## This script is used to compute similarity matrices for the
## et of "reconstructed" egonets that were based on the original
## data provided by kaggle. See "06_CalcReconstructedEdgeNetwork"
## for details of the reconstruction process.
##
## This script takes the "reconstructed" egonets and computes
## similarity matrices for those networks.
##------------------------------------------------------------------
##------------------------------------------------------------------
## Load libraries
##------------------------------------------------------------------
library(igraph) ## contains graph functions
library(caTools)
library(linkcomm)
library(data.table)
##------------------------------------------------------------------
## Clear the workspace
##------------------------------------------------------------------
rm(list=ls())
##------------------------------------------------------------------
## Define the parallel flag
##------------------------------------------------------------------
DOPARALLEL <- TRUE
##------------------------------------------------------------------
## Register the clusters
##------------------------------------------------------------------
if (DOPARALLEL) {
library(foreach)
library(doMC)
registerDoMC(4)
}
##------------------------------------------------------------------
## Set the working directory
##------------------------------------------------------------------
setwd("/Users/alexstephens/Development/kaggle/social_circle/data/inputs")
##------------------------------------------------------------------
## Source the utility functions
##------------------------------------------------------------------
source("/Users/alexstephens/Development/kaggle/social_circle/k_soc/00_Utilities.r")
##------------------------------------------------------------------
## Read in the raw data files
##------------------------------------------------------------------
load("01_SocialCircle_RawData.Rdata") ## raw data
##load("02_SocialCircle_Edges.Rdata") ## edge lists
load("06_SocialCircle_ReconEdges.Rdata") ## reconstructed network edge lists
##------------------------------------------------------------------
## Loop over the egonets, define graph objects, save to file
##------------------------------------------------------------------
## extract egonet data
ego.names <- names(recon_egoedges.list)
ego.num <- length(ego.names)
## get the edge count for each, so we can work from smallest to largest
recon_egoedges.count <- unlist(lapply(recon_egoedges.list, ecount))
recon_egoedges.order <- order(recon_egoedges.count)
## define the output directory for individual edges files
output.dir <- paste(getwd(),"recon.sim.topo",sep="/")
## loop over each egonet and compute the similarity matrices
## NOTE(review): the loop bound is hard-coded to 110 and runs in reverse
## (largest-index egonet first). If length(recon_egoedges.list) != 110 this
## either skips egonets or indexes out of range -- consider ego.num:1.
for (i in 110:1) {
    ## set-up: resolve the i-th egonet (in ascending edge-count order)
    tmp.id <- ego.names[recon_egoedges.order[i]]
    tmp.edges <- recon_egoedges.list[[tmp.id]]
    ## echo progress
    cat("Iteration", i, "of", ego.num, " :: Similarity Matrix for", tmp.id, " :: <Reconstructed> # Edges =", ecount(tmp.edges), "\n")
    ## output files
    tmp.rdataName <- paste(output.dir, paste0(tmp.id,".ReconSimilarityMatrix.Rdata"), sep="/")
    ## compute the matrices (calcSimilarityMatrix sourced from 00_Utilities.r)
    tmp.reconSim <- calcSimilarityMatrix(tmp.edges)
    ## write intermediate results to a file
    save(tmp.reconSim, file=tmp.rdataName)
}
##------------------------------------------------------------------
## double check the results for the test case (i==40)
##------------------------------------------------------------------
#tmp.lc <- getLinkCommunities(get.data.frame(recon_egoedges.list[[tmp.id]]), hcmethod="single", plot=FALSE, verbose=FALSE)
#tmp.d <- as.dist(1-tmp.sim)
#tmp.h <- hclust(tmp.d, method="single")
#cbind(tmp.h$height, tmp.lc$hclust$height)
|
bb4a27729f78595ee13d04151dfed776c8648581
|
b7dbc8fa280edb6215a6260e1401e0f83b9954b0
|
/Optiver/Optd_l2.R
|
23808d8b6dcce85c692cacf25f82638de87c429f
|
[] |
no_license
|
cwcomiskey/Misc
|
071c290dad38e2c2e6a5523d366ea9602c4c4e44
|
1fad457c3a93a5429a96dede88ee8b70ea916132
|
refs/heads/master
| 2021-05-14T18:10:35.612035
| 2020-02-17T15:09:51
| 2020-02-17T15:09:51
| 116,065,072
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 621
|
r
|
Optd_l2.R
|
Optd_l2 <- function(winner, p1, p2, p3){
  ## Return the optimal d value(s) implied by which pair of intervals won.
  ## `winner` names two of "I1".."I4"; p1, p2, p3 are the interval cut points.
  won <- function(lbl) lbl %in% winner
  mid12 <- (p1 + p2)/2   # midpoint of [p1, p2]
  mid23 <- (p2 + p3)/2   # midpoint of [p2, p3]
  if (won("I1") && won("I2")) {
    d <- mid12
  } else if (won("I1") && won("I3")) {
    d <- mid23
  } else if (won("I1") && won("I4")) {
    # just inside each outer boundary by one machine epsilon
    d <- c(p1 - .Machine$double.eps, p3 + .Machine$double.eps)
  } else if (won("I2") && won("I3")) {
    d <- c(mid12, mid23)
  } else if (won("I2") && won("I4")) {
    d <- mid12
  } else if (won("I3") && won("I4")) {
    d <- mid23
  } else {
    stop("Something went wrong for Optd_l2")
  }
  d
}
|
ac13034aa6065de584a398f8048021562c45d948
|
902037115141ead7b315e7b63e437ec61c01c2c1
|
/man/rowHWEs.Rd
|
7487b0081164e2fe4fca4185d3ae1a0510274daf
|
[] |
no_license
|
cran/scrime
|
4bdc7e989ba9e648d004ca47cd2d10bb5e78a717
|
cf0033dbfe2a6fa807593a460ef4bcb0931db96a
|
refs/heads/master
| 2021-06-02T21:50:17.706604
| 2018-12-01T10:00:03
| 2018-12-01T10:00:03
| 17,699,500
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,571
|
rd
|
rowHWEs.Rd
|
\name{rowHWEs}
\alias{rowHWEs}
\title{Rowwise Test for Hardy-Weinberg Equilibrium}
\description{
Tests for each row of a matrix whether the Hardy-Weinberg Equilibrium holds for the SNP
represented by the row.
}
\usage{
rowHWEs(x, levels = 1:3, affy = FALSE, check = TRUE)
}
\arguments{
\item{x}{a matrix in which each row represents a SNP and each column a subject,
where the SNPs can take the values specified by \code{levels}. NAs are allowed.}
\item{levels}{a vector of length three specifying the values with which the three
genotypes of each SNP are represented. It is assumed that the second element of
\code{levels} represents the heterozygous genotype, whereas the first and the
third element represent the homozygous genotypes. Ignored if \code{affy = TRUE}.}
\item{affy}{logical specifying whether the SNPs in \code{x} are coded as in the Affymetrix
standard output. If \code{TRUE}, \code{levels = c("AA", "AB", "BB")} will be used.}
\item{check}{should some checks be done if, e.g., other than the specified \code{levels}
are used in \code{x}? It is highly recommended to leave \code{check = TRUE}. Setting
\code{check = FALSE} will reduce the computation time slightly.}
}
\value{
A list containing the values of the ChiSquare statistic for testing for deviation from HWE
(\code{stats}) and the raw p-values (\code{rawp}) computed by employing the ChiSquare distribution
with 1 degree of freedom.
}
\author{
Holger Schwender, \email{holger.schwender@udo.edu}
}
\keyword{htest}
\keyword{array}
|
70ef5fda02ec086aa3dbe8601ad99db1dccfb4c1
|
324e429f6047f9adb9c197f05c7677040c9275a8
|
/scripts/Geochemical-water_conditions/molecule_amount.R
|
78bad7f1488eb9fe708c5332bf43175982635b12
|
[] |
no_license
|
amunzur/Microbial_Diversity_of_the_Saanich_Inlet
|
f0dc9a7e20e3a064e8aae0f4eab7d7913bd8c4a5
|
5b5a7cb7ba1b201411c2da1ab5e4f85da3ea3d1f
|
refs/heads/master
| 2023-01-29T08:43:44.223217
| 2020-12-12T02:14:02
| 2020-12-12T02:14:02
| 320,462,384
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,638
|
r
|
molecule_amount.R
|
# Bar chart of molecule concentrations (O2, PO4, NO3, ...) at three depths for
# Saanich Inlet Cruise 72. Requires readr, dplyr, ggplot2 and wesanderson to
# be attached by the caller.
Saanich_Data <- read_csv("~/Desktop/MICB_405/MICB405_proj/Saanich_Data.csv")
df <- Saanich_Data %>% filter(Cruise == 72, Depth == 0.100 | Depth == 0.120 | Depth == 0.200)
# subset data: keep only the concentration columns of interest
idx <- c(5, 6, 7, 9, 10, 12, 14, 23, 25)
df <- df[, idx]
# NOTE(review): "N20" looks like a typo for "N2O" in the axis labels -- confirm
element_list <- c("O2", "PO4", "NO3", "NH4", "NO2", "H2S", "N20", "CH4")
element_list <- lapply(element_list, function(some_name) rep(some_name, 3))
# BUG FIX: this line previously read `length(elemen_list)` -- an undefined
# variable (typo for `element_list`) that made the script error out here.
depth <- rep(c("100 m", "120 m", "200 m"), length(element_list)) # repeat depth info for new df
# element_list <- as.list(names(df))
# element_list[[1]] <- NULL
# element_list <- lapply(element_list, function(some_name) rep(some_name, 3))
# collapse all values into one vector, dropping the first 3 entries
values <- unlist(df, use.names = FALSE)
values <- values[-c(1, 2, 3)]
df <- data.frame(values = values,
                 depth = as.factor(depth),
                 element_list = unlist(element_list))
# NOTE(review): rows 22-24 rescaled by 1000 -- presumably a unit conversion
# for the last molecule; confirm against the source data's units.
df[22, 1] <- df[22, 1]/1000
df[23, 1] <- df[23, 1]/1000
df[24, 1] <- df[24, 1]/1000
# grouped bar chart: one bar per molecule per depth, labelled with its value
p1 <- ggplot(data = df, aes(x = element_list, y = values, fill = depth)) +
  geom_bar(stat = "identity", position="dodge") +
  scale_fill_manual(values = wes_palette(9, name = "Cavalcanti1", type = "continuous"), name = "") +
  geom_text(aes(label=round(values, 0)), vjust=-.5, color="black",
            position = position_dodge(width=1), size=4)+
  theme_classic() +
  xlab("molecule name") +
  ylab("amount") +
  ggtitle(expression(paste("Amount of molecules in water ", "(", mu, "M", ")"))) +
  theme(panel.border = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.line = element_line(colour = "black", size = 1),
        axis.ticks = element_line(colour = "black", size = 2),
        axis.text = element_text(size=12),
        axis.text.x = element_text(vjust=0.5, colour = "black", size=15, angle = 45),
        axis.text.y = element_text(vjust=0.5, colour = "black", size=15),
        axis.title = element_text(size=15),
        legend.title = element_text(color = "black", size = 12),
        legend.text = element_text(color = "black", size = 12),
        axis.ticks.length=unit(0.15, "cm"),
        axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 0, l = 20)),
        axis.title.y = element_text(margin = margin(t = 0, r = 20, b = 0, l = 0)),
        plot.title = element_text(color="black", size=14, face="bold"),
        strip.text.x = element_text(size=10, color="black",
                                    face="bold"))
# save the figure to the project's figures directory
figure_path <- paste("../FINAL_PROJ/figures/molecule_amount.pdf", sep = "/")
p1
ggsave(figure_path, width = 19, height = 15, units = "cm")
|
511afbcd2957cc5757c71618187cf56074e43801
|
f73694b79bdebb1e686c4abe8164282453ea7ce2
|
/pkg/virta/R/init_virtualtables.R
|
8941a85fd3560c4d23ce8b77279e3dff5789dbbb
|
[] |
no_license
|
rsund/SurvoR
|
c664c64c14f830a6480074ae49628a75e8ea51a3
|
bc1fe2fe8cdc184bc2fd2981612454faf27043c6
|
refs/heads/master
| 2023-03-02T14:54:10.147124
| 2023-02-27T23:49:04
| 2023-02-27T23:49:04
| 190,526,434
| 4
| 0
| null | 2022-06-29T11:42:43
| 2019-06-06T06:25:32
|
C
|
UTF-8
|
R
| false
| false
| 1,092
|
r
|
init_virtualtables.R
|
# Borrowed from RSqlite.extfuns-package by Seth Falcon
#.allows_extensions <- function(db)
#{
# v <- dbGetInfo(db)[["loadableExtensions"]]
# isTRUE(v) || (v == "on")
#}
.lib_path <- function()
{
    ## Path of this package's loaded shared library. Relies on two facts
    ## noted by the original author:
    ##  - the NAMESPACE code puts .packageName in the package environment,
    ##    which is slightly better than hard-coding the package name;
    ##  - the DLL is loaded even though only SQLite actually needs it;
    ##    loading it appears harmless and makes the path easy to find
    ##    without worrying about arch issues.
    dlls <- getLoadedDLLs()
    dlls[[.packageName]][["path"]]
}
init_virtualtables <- function(db) # RS 7.2.2013 CHA init_extensions <- function(db)
{
    ## Load this package's shared library into the SQLite connection `db`
    ## as an extension. Returns TRUE when SQLite reports a single 1x1
    ## result set, FALSE otherwise.
    query <- sprintf("SELECT load_extension('%s')", .lib_path())
    res <- DBI::dbGetQuery(db, query)
    all(dim(res) == c(1, 1))
}
|
cef30d6e19982e9a55ba40de65bc4e3e1e53a0bf
|
fa42803a6d4079f67d989885a024ab66b3e13a03
|
/R_Programming/multivariate-dataviz/m5/5-2 - Quantiatiative Trivariate Analysis (Lattice).R
|
35050a648e852d9eb735c0ee4e35b1506ebc6d31
|
[] |
no_license
|
AnanyaPandey/QuarantineUpskill
|
1a5c75e9b57273be7f73f0e2bb4f8e227c36af68
|
77e51cb698848820ff069bb2c98a34eb210ec282
|
refs/heads/master
| 2023-02-24T06:45:57.006282
| 2021-01-21T13:38:48
| 2021-01-21T13:38:48
| 254,535,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,708
|
r
|
5-2 - Quantiatiative Trivariate Analysis (Lattice).R
|
# Three numerical variables: Runtime (x), Critic.Score (y), Box.Office (color/size)
# Create a gradient color-scale scatterplot (color bins Box Office into 5 groups)
xyplot(
  x = Critic.Score ~ Runtime,
  data = movies2014,
  col = gradient[cut(movies2014$Box.Office, 5)],
  pch = 16,
  main = "Runtime, Critic Score, and Box Office Revenue",
  xlab = "Runtime (min)",
  ylab = "Critic Score (%)",
  key = list(
    corner = c(0.05, 0.05),
    title = "Box Office ($M)",
    cex = 0.75,
    text = list(levels(cut(movies2014$Box.Office, 5))),
    points = list(
      pch = 16,
      col = gradient)))
# Create a divergent color-scale scatterplot (same plot, divergent palette)
xyplot(
  x = Critic.Score ~ Runtime,
  data = movies2014,
  col = divergent[cut(movies2014$Box.Office, 5)],
  pch = 16,
  main = "Runtime, Critic Score, and Box Office Revenue",
  xlab = "Runtime (min)",
  ylab = "Critic Score (%)",
  key = list(
    corner = c(0.05, 0.05),
    title = "Box Office ($M)",
    cex = 0.75,
    text = list(levels(cut(movies2014$Box.Office, 5))),
    points = list(
      pch = 16,
      col = divergent)))
# Create bubble chart with lattice; getSize is a helper defined elsewhere.
# NOTE(review): cex uses movies$Box.Office while the plot data is movies2014 --
# likely should be movies2014$Box.Office; confirm against the rest of the file.
xyplot(
  x = Critic.Score ~ Runtime,
  data = movies2014,
  cex = getSize(movies$Box.Office, 5),
  main = "Runtime, Critic Score, and Box Office Revenue",
  xlab = "Runtime (min)",
  ylab = "Critic Score (%)")
# Create 3D scatterplot (points plus drop lines to the base plane)
cloud(
  x = Box.Office ~ Critic.Score * Runtime,
  data = movies2014,
  type = c("p", "h"),
  pch = 16,
  main = "Runtime, Critic Score, and Box Office Revenue",
  xlab = "Runtime (min)",
  ylab = "Critic Score (%)",
  zlab = "Box Office\nRevenue\n($M)")
|
e24b0123f627b272c30c8ba9650162d49dc9248f
|
dfc914acd4a06aa611be1d39d5ca7a1751405eaf
|
/plot4.R
|
f41745b7bc817dce70b17e3d8df7a579127ee0c7
|
[] |
no_license
|
evdy13/ExData_Plotting1
|
ebe1dcbde239938d856e709671c076484446ade6
|
f1d750183d179b608e2efc262813bc87a6702e6d
|
refs/heads/master
| 2021-01-18T11:52:42.901923
| 2014-10-12T23:36:45
| 2014-10-12T23:36:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,071
|
r
|
plot4.R
|
# Quad-panel figure (2x2 grid) from the household power consumption data.
# Assumes the data frame `hpc` is already loaded in the session.
par(mfrow = c(2, 2), mar = c(2, 2, 1, 1))

# Top-left: global active power over time.
plot(hpc$Time_Date, hpc$Global_active_power, type = "n", xlab = "", ylab="Global Active Power")
lines(hpc$Time_Date, hpc$Global_active_power, type = "l")

# Top-right: voltage over time.
plot(hpc$Time_Date, hpc$Voltage, type = "n", xlab = "", ylab="Voltage")
lines(hpc$Time_Date, hpc$Voltage, type = "l")

# Bottom-left: the three sub-metering series overlaid, with a legend.
plot(hpc$Time_Date, hpc$Sub_metering_1, type = "n", xlab = "", ylab="Energy sub Metering")
lines(hpc$Time_Date, hpc$Sub_metering_1, col = "black")
lines(hpc$Time_Date, hpc$Sub_metering_2, col = "red")
lines(hpc$Time_Date, hpc$Sub_metering_3, col = "blue")
legend("topright", pch = "|", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2" , "Sub_metering_3"))

# Bottom-right: global reactive power over time.
plot(hpc$Time_Date, hpc$Global_reactive_power, type = "n", xlab = "", ylab="Global_reactive_power")
lines(hpc$Time_Date, hpc$Global_reactive_power, type = "l")

# Snapshot the screen device to a PNG file.
dev.copy(png, file = "plot4.png")
dev.off()
|
5904d7879591f65fbc5ef9e686d580865ecb78be
|
c37f1410153d7704ec13a4abaaf144214feec863
|
/app.R
|
e7afb48343091e0a73766df3145db974f5985a4a
|
[] |
no_license
|
MayaGans/testthat_example
|
15eaf78f7b433a0738c3f8fa6daa2ceee175ae95
|
cadf0a3b6da9d843c54ca286225d799a1c5412d2
|
refs/heads/master
| 2021-01-04T13:48:20.589273
| 2020-02-17T01:13:21
| 2020-02-17T01:13:21
| 240,582,265
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
app.R
|
library(shiny)
library(tidyverse)

# UI: a numeric threshold input, a text echo of the threshold, and the
# resulting filtered table.
ui <- fluidRow(
  numericInput("num_input", "Sepal Length Greater Than", min = 4.3, value = 6, max = 7.9),
  textOutput("text_test"),
  tableOutput("summary")
)

# Server: both outputs re-render whenever the threshold input changes.
server <- function(input, output) {
  output$text_test <- renderText(paste("Filtering by ", input$num_input))
  output$summary <- renderTable(filter(iris, Sepal.Length > input$num_input))
}

shinyApp(ui = ui, server = server)
|
421268b7234bf3888f086ec62d1a4d884c7ade99
|
b9a7317a4f83ec4d51f00cc574c7e492e5e5659f
|
/R/np.svar.R
|
34a1c82c245da9129289af7f0c2cae213e8b9284
|
[] |
no_license
|
rubenfcasal/npsp
|
98120f2d1196e1f96941d2a874b41fcbf5fd9694
|
9655e881102c642219cb792607de93062bf138a2
|
refs/heads/master
| 2023-05-02T01:14:01.909236
| 2023-04-22T09:59:19
| 2023-04-22T09:59:19
| 64,307,277
| 5
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,825
|
r
|
np.svar.R
|
#····································································
# np.svar.R (npsp package)
#····································································
# np.svar S3 class and methods
# np.svar() S3 generic
# np.svar.default(x, y, h, maxlag, nlags, minlag, degree,
# drv, hat.bin, ncv, ...)
# np.svar.svar.bin(x, h, degree, drv, hat.bin, ncv, ...)
# np.svariso(x, y, h, maxlag, nlags, minlag, degree,
# drv, hat.bin, ncv, ...)
# np.svariso.hcv(x, y, maxlag, nlags, minlag, degree,
# drv, hat.bin, loss, ncv, warn, ...)
# np.svariso.corr(lp, x, h, maxlag, nlags, minlag, degree, drv, hat.bin,
# tol, max.iter, plot, ylim)
#
# (c) R. Fernandez-Casal
#
# NOTE: Press Ctrl + Shift + O to show document outline in RStudio
#····································································
# PENDENTE:
# - svarisohcv o final da documentacion
# - engadir aniso, 2iso, niso
#····································································
#····································································
#' Local polynomial estimation of the semivariogram
#'
#' Estimates a multidimensional semivariogram (and its first derivatives)
#' using local polynomial kernel smoothing of linearly binned semivariances.
#' @aliases np.svar-class
#' @param x object used to select a method. Usually a matrix with the
#' coordinates of the data locations (columns correspond with dimensions and
#' rows with data).
#' @param ... further arguments passed to or from other methods.
#' @details Currently, only isotropic semivariogram estimation is supported.
#'
#' If parameter \code{nlags} is not specified is set to \code{101}.
# If parameter \code{nlags} is not specified is set to \code{max(50, rule.svar(x))}.
#'
#' The computation of the hat matrix of the binned semivariances (\code{hat.bin = TRUE})
#' allows for the computation of approximated estimation variances (e.g. in \code{\link{fitsvar.sb.iso}}).
#'
#' A multiplicative triweight kernel is used to compute the weights.
#' @return Returns an S3 object of class \code{np.svar} (locpol svar + binned svar + grid par.),
#' extends \code{\link{svar.bin}}, with the additional (some optional) 3 components:
#' \item{est}{vector or array with the
#' local polynomial semivariogram estimates. }
#' \item{locpol}{a list of 6 components:
#' \itemize{
#' \item{\code{degree} degree of the local polinomial used.}
#' \item{\code{h} smoothing matrix.}
#' \item{\code{rm} mean of residual semivariances.}
#' \item{\code{rss} sum of squared residual semivariances.}
#' \item{\code{ncv} number of cells ignored in each direction.}
#' \item{\code{hat} (if requested) hat matrix of the binned semivariances.}
#' \item{\code{nrl0} (if appropriate) number of cells with \code{binw > 0}
#' and \code{est == NA}.}
#' }}
#' \item{deriv}{(if requested) matrix of estimated first semivariogram derivatives.}
#' @seealso \code{\link{svar.bin}}, \code{\link{data.grid}}, \code{\link{locpol}}.
#' @references
#' Fernandez Casal R., Gonzalez Manteiga W. and Febrero Bande M. (2003)
#' Space-time dependency modeling using general classes of flexible stationary
#' variogram models, \emph{J. Geophys. Res.}, \bold{108}, 8779,
#' doi:10.1029/2002JD002909.
#'
#' Garcia-Soidan P.H., Gonzalez-Manteiga W. and Febrero-Bande M. (2003)
#' Local linear regression estimation of the variogram,
#' \emph{Stat. Prob. Lett.}, \bold{64}, 169-179.
#' @export
np.svar <- function(x, ...) {
#····································································
  # S3 generic: dispatches on class(x). Methods visible in this file:
  # np.svar.default (raw coordinate matrix) and np.svar.svar.bin (binned svar).
  UseMethod("np.svar")
} # S3 generic function
# Non parametric pilot estimation of an isotropic semivariogram
# Returns an S3 object of class "np.svar" (extends "svar.bin")
#····································································
#' @rdname np.svar
#' @aliases iso.np.svar
#' @method np.svar default
#' @param y vector of data (response variable).
#' @inheritParams locpol.default
#' @param maxlag maximum lag. Defaults to 55\% of largest lag.
#' @param nlags number of lags. Defaults to 101.
#' @param minlag minimun lag.
#' @param hat.bin logical; if \code{TRUE}, the hat matrix of the binned semivariances is returned.
# @param cov.bin covariance matrix of the binned semivariances.
# Defaults to identity.
#' @export
np.svar.default <- function(x, y, h = NULL, maxlag = NULL, nlags = NULL,
                            minlag = maxlag/nlags, degree = 1,
                            drv = FALSE, hat.bin = TRUE, ncv = 0, ...) {
  # Local polynomial pilot estimation of an isotropic semivariogram.
  # Binning cells without data are set to missing.
  # Interface to the Fortran routine "svar_iso_np" (package npsp); returns
  # the nonparametric semivariogram estimate plus the binning grid.
  #····································································
  y <- as.numeric(y)
  ny <- length(y)  # number of data
  x <- as.matrix(x)
  if ( !identical(ny, nrow(x)) )
    stop("arguments 'y' and 'x' have incompatible dimensions")
  drv <- as.logical(drv)
  degree <- as.integer(degree)
  if (!(degree %in% 0:2))
    stop("argument 'degree' must be 0, 1 or 2")
  if (drv && degree == 0)
    stop("'degree' must be greater than or equal to 1 if 'drv == TRUE'")
  # Drop observations with a missing value in either x or y.
  ok <- complete.cases(x, y)  # observations having no missing values across x and y
  if (any(!ok)) {
    warning("missing values removed")
    x <- x[ok,]
    y <- y[ok]
    ny <- length(y)
  }
  nd <- ncol(x)  # number of dimensions
  if (is.null(maxlag))
    maxlag <- 0.55*sqrt(sum(diff(apply(x, 2, range))^2))  # 55% of largest lag
  if (is.null(nlags)) nlags <- 101  # dimension of the binning grid
  if (is.null(h)) {
    stop("argument 'h' (bandwith) must be provided")
  } else if (!is.numeric(h) || length(h) != 1L)
    stop("bandwith 'h' is not a numeric value")
  hat.bin <- as.logical(hat.bin)
  # Preallocate optional Fortran outputs (NA when not requested).
  hat <- if (hat.bin) double(nlags*nlags) else NA_real_
  deriv <- if (drv) rep(NA_real_, nlags) else NA_real_
  # subroutine svar_iso_np( nd, x, ny, y, nlags, minlag, maxlag,
  #                         bin_lag, bin_med, bin_y, bin_w,
  #                         h, lpe, degree, ideriv, deriv, ihat, hatlp,
  #                         ndelcv, rm, rss, nrl0)
  ret <- .Fortran("svar_iso_np", nd = as.integer(nd), x = as.double(t(x)),
                  ny = as.integer(ny), y = as.double(y), nlags = as.integer(nlags),
                  minlag = as.double(minlag), maxlag = as.double(maxlag),
                  lag = double(1), med = double(1), biny = double(nlags),
                  binw = double(nlags), h = as.double(h),
                  elp = as.double(rep(NA_real_, nlags)), degree = as.integer(degree),
                  ideriv = as.integer(drv), deriv = deriv, ihat = as.integer(hat.bin),
                  hat = hat, ncv = as.integer(ncv), rm = double(1), rss = double(1),
                  nrl0 = integer(1), NAOK = TRUE, PACKAGE = "npsp")
  # BUG FIX: the original emitted the "Not enough data..." warning here AND
  # again in the result-building block below, so callers saw it twice; only
  # the later occurrence (which also records nrl0 in the result) is kept.
  # Build the result object.
  is.na(ret$biny) <- ret$binw == 0  # biny[binw == 0] <- NA
  # NOTE(review): `ret$min` partially matches `minlag` on read, but the
  # replacement creates a NEW `min` component, so this looks like dead code;
  # possibly `names(ret$minlag)` was intended — confirm. Kept as-is.
  names(ret$min) <- "h"
  result <- with( ret,
      data.grid(est = elp, biny = biny, binw = binw,
          grid = grid.par(n = nlags, min = minlag, lag = lag, dimnames = "h")) )
  result$data <- list(x = x, y = y, med = ret$med)
  result$svar <- list(type = "isotropic", estimator = "classical")
  result$locpol <- with( ret,
      list( degree = degree, h = h, rm = rm, rss = rss, ncv = ncv ))
  if (hat.bin) result$locpol$hat <- matrix(ret$hat, nrow = nlags)
  if (ret$nrl0 > 0) {
    warning("Not enough data in some neighborhoods ('NRL < NINDRL'): ", ret$nrl0)
    result$locpol$nrl0 <- ret$nrl0
  }
  if (drv) result$deriv <- ret$deriv
  oldClass(result) <- c("np.svar", "svar.bin", "bin.data", "bin.den", "data.grid")
  return(result)
  #····································································
} # svarisonp, iso.np.svar, np.svar.default
#····································································
# np.svar.svar.bin(x, h, degree, drv, hat.bin, ncv, ...) ----
#····································································
#' @rdname np.svar
#' @method np.svar svar.bin
#' @export
# Method for objects of class "svar.bin": identical to locpol.svar.bin
# (same formals, same behavior), so that function object is reused directly.
np.svar.svar.bin <- locpol.svar.bin
#····································································
# np.svariso(x, y, h, maxlag, nlags, minlag, degree, ----
# drv, hat.bin, ncv, ...)
#····································································
#' @rdname np.svar
#' @export
# Alias: isotropic estimation from raw data is exactly the default method,
# so the same function object is exported under this second name.
np.svariso <- np.svar.default
#····································································
#' @rdname np.svar
#' @inheritParams h.cv.svar.bin
#' @details \code{np.svariso.hcv} calls \code{\link{h.cv}} to obtain an "optimal"
#' bandwith (additional arguments \code{...} are passed to this function).
#' Argument \code{ncv} is only used here at the bandwith selection stage
#' (estimation is done with all the data).
#' @export
np.svariso.hcv <- function(x, y, maxlag = NULL, nlags = NULL, minlag = maxlag/nlags,
                           degree = 1, drv = FALSE, hat.bin = TRUE,
                           loss = c("MRSE", "MRAE", "MSE", "MAE"), ncv = 1, warn = FALSE, ...) {
  # Isotropic semivariogram estimation with automatic bandwidth choice:
  # bin the semivariances, select an "optimal" bandwidth by cross-validation
  # (extra arguments in `...` are forwarded to h.cv.svar.bin), then smooth
  # the binned values with a local polynomial fit using all the data.
  #····································································
  loss <- match.arg(loss)
  # Default lag range: 55% of the largest pairwise distance, 101-cell grid.
  if (is.null(maxlag)) {
    maxlag <- 0.55*sqrt(sum(diff(apply(x, 2, range))^2))
  }
  if (is.null(nlags)) {
    nlags <- 101
  }
  binned <- svariso(x, y, maxlag = maxlag, nlags = nlags, minlag = minlag,
                    estimator = "classical")
  cv <- h.cv.svar.bin(binned, loss = loss, degree = degree, ncv = ncv,
                      warn = warn, ...)
  locpol(binned, h = cv$h, degree = degree, drv = drv, hat.bin = hat.bin)
}
#····································································
#' @rdname np.svar
#' @param lp local polynomial estimate of the trend function (object of class
#' \code{\link{locpol.bin}}).
#' @param tol convergence tolerance. The algorithm stops if the average of the
#' relative squared diferences is less than \code{tol}. Defaults to 0.04.
#' @param max.iter maximum number of iterations. Defaults to 10.
#' @param plot logical; if \code{TRUE}, the estimates obtained at each iteration
#' are plotted.
#' @param verbose logical; if \code{TRUE}, the errors (averages of the
#' relative squared diferences) at each iteration are printed.
#' @param ylim y-limits of the plot (if \code{plot == TRUE}).
## @param col colors for lines and points if \code{plot == TRUE}.
#' @details
#' \code{np.svariso.corr} computes a bias-corrected nonparametric semivariogram
#' estimate using an iterative algorithm similar to that described in
#' Fernandez-Casal and Francisco-Fernandez (2014). This procedure tries to correct
#' the bias due to the direct use of residuals (obtained in this case from a
#' nonparametric estimation of the trend function) in semivariogram estimation.
# (additional arguments \code{...} are passed to \code{plot}).
#' @references
#' Fernandez-Casal R. and Francisco-Fernandez M. (2014)
#' Nonparametric bias-corrected variogram estimation under non-constant trend,
#' \emph{Stoch. Environ. Res. Ris. Assess}, \bold{28}, 1247-1259.
#' @export
np.svariso.corr <- function(lp, x = lp$data$x, h = NULL, maxlag = NULL, nlags = NULL,
                            minlag = maxlag/nlags, degree = 1, drv = FALSE, hat.bin = TRUE,
                            tol = 0.05, max.iter = 10, plot = FALSE, verbose = plot,
                            ylim = c(0,2*max(svar$biny, na.rm = TRUE))) {
  # Iterative bias-corrected semivariogram estimation from trend residuals.
  # `lp` is a local polynomial trend fit; each iteration computes the bias
  # of the residual-based semivariances implied by the trend hat matrix,
  # subtracts it from the binned semivariances and re-smooths, stopping when
  # the mean relative squared change drops below `tol` or after `max.iter`.
  # NOTE: the default `ylim` is evaluated lazily, after `svar` exists below.
  #····································································
  if (!inherits(lp, "locpol.bin"))
    stop("function only works for objects of class (or extending) 'locpol.bin'")
  # The hat matrix of the trend fit is required to compute the bias.
  if (is.null(lp$locpol$hat))
    stop("'lp' must have a '$locpol$hat' component")
  ny <- nrow(x)
  nd <- ncol(x) # number of dimensions
  if (is.null(maxlag))
    maxlag <- 0.55*sqrt(sum(diff(apply(x, 2, range))^2)) # 55% of largest lag
  if (is.null(nlags)) nlags <- 101 # dimension of the binning grid
  if(is.null(h)) {
    stop("argument 'h' (bandwith) must be provided")
    # h <- 1 # bandwidth matrix TODO
  } else if (!is.numeric(h) || length(h)!= 1L)
    stop("bandwith 'h' is not a numeric value")
  # Trend fit values and hat matrix; residuals feed the initial svar estimate.
  lpdat <- predict(lp, hat.data = TRUE)
  lp.resid <- lp$data$y - lpdat$y.est
  hat.trend <- lpdat$y.hat
  svar <- np.svariso(x, lp.resid, h = h, maxlag = maxlag, nlags = nlags,
                     degree = degree, drv = FALSE, hat.bin = FALSE)
  sv.lags <- coords(svar)
  if(plot) {
    # One color per iteration; iteration 1 is the uncorrected estimate.
    col <- rainbow(max.iter)
    plot(sv.lags, svar$biny, xlab = "distance", ylab = "semivariance",
         ylim = ylim, col = col[1])
    lines(sv.lags, svar$est, col = col[1])
  }
  svar.biased <- svar$biny  # uncorrected binned semivariances (bias baseline)
  svarold <- 0
  dists <- c(0, dist(x)) # 0 + lower triangle of the distance matrix
  for (iter in 2:max.iter) {
    # Debugging aid: iter <- 1; iter <- iter +1
    # cov.est <- varcov(svar, coords = x, sill = 2*max(svar$est))
    # Covariance matrix implied by the current semivariogram estimate.
    cov.est <- varcov(svar, coords = x)
    # Bias term: H C H' - H C - (H C)' where H is the trend hat matrix.
    cov.bias.est <- hat.trend %*% cov.est
    cov.bias.est <- cov.bias.est %*% t(hat.trend) - cov.bias.est - t(cov.bias.est)
    cov.bias.diag <- diag(cov.bias.est) / 2
    svar.bias <- cov.bias.diag + rep(cov.bias.diag, each = ny) - cov.bias.est
    svar.bias <- svar.bias[lower.tri(svar.bias)]
    # Bin the pairwise biases, interpolate onto the semivariogram lag grid,
    # and subtract from the (uncorrected) binned semivariances.
    tmp <- binning(dists, c(0, svar.bias), nbin = 2*svar$grid$n)
    tmp <- approx(coords(tmp), tmp$biny, sv.lags)$y
    svar$biny <- svar.biased - tmp
    # if (hat.bin && !drv) svar$est <- svar$locpol$hat %*% svar$biny
    svar <- locpol(svar, h = h,
                   degree = degree, drv = drv, hat.bin = hat.bin)
    # CAUTION: possible division by zero if svar$est contains zeros.
    # TODO: weight by the number of lags.
    error <- sqrt(mean((svarold/svar$est - 1)^2, na.rm = TRUE))
    if(plot) {
      lines(sv.lags, svar$est, col = col[iter])
      points(sv.lags, svar$biny, col = col[iter])
    }
    if (verbose) cat('Iteration ', iter, ': ', error, '\n')
    if (error < tol) break
    svarold <- svar$est
  } # for (iter in 2:sv.niter)
  # NOTE(review): this also warns when convergence happens exactly at the
  # final iteration (break does not change `iter`) — confirm intended.
  if (iter == max.iter)
    warning('The maximun number of iterations has been reached.')
  if(plot) {
    # Final estimate in bold black on top of the per-iteration curves.
    lines(sv.lags, svar$est, lwd = 2)
    points(sv.lags, svar$biny)
    legend("topleft", paste("Iteration", seq(iter)), col = c(col[seq(iter-1)], "black"), lty = 1)
  }
  # Record the estimator label and convergence diagnostics in the result.
  svar$svar$estimator <- "bias-corrected (residuals based)"
  svar$svar$iter <- iter
  svar$svar$error <- error
  return(svar)
  #····································································
} # np.svariso.corr
|
254a82d10adb9a8cb7927cf5f7370a085d5f0645
|
a6fd85360b899ee98d79a4080c05bcefc01f009b
|
/SourceCode/Analysis/Old/ExploreCrisis.R
|
cd05cfc2c818a54f59586393261df36ed21cdb5d
|
[] |
no_license
|
FGCH/amcData
|
951f4aeb9ab217e40509a9657efc1f5a934443a4
|
2209173c3011aa17aac95a36006f1a1d22304048
|
refs/heads/master
| 2020-05-15T01:17:35.288389
| 2014-05-12T09:41:56
| 2014-05-12T09:41:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,719
|
r
|
ExploreCrisis.R
|
#########
# Exploratory AMC analysis with the amcCrisisYear dataset
# Christopher Gandrud
# Updated 30 July 2012
#########

# Required packages
library(RCurl)
library(ggplot2)
library(Zelig)

# Fetch the CSV from GitHub and parse it into a data frame.
data_url <- "https://raw.github.com/christophergandrud/amcData/master/MainData/amcCrisisYear.csv"
crisis_raw <- getURL(data_url)
crisis_df <- read.csv(textConnection(crisis_raw))

# Recode AMC creation as a 0/1 dummy.
crisis_df$AMCDummy[crisis_df$AMC == "NoAMC"] <- 0
crisis_df$AMCDummy[crisis_df$AMC == "AMCCreated"] <- 1

# Numeric coding of AMC type: 0 = none, 1 = decentralised, 2 = centralised.
crisis_df$AMCTypeNum[crisis_df$AMCType == "None"] <- 0
crisis_df$AMCTypeNum[crisis_df$AMCType == "Decentralised"] <- 1
crisis_df$AMCTypeNum[crisis_df$AMCType == "Centralised"] <- 2

# Single- vs multi-party government from government fractionalisation.
crisis_df$SingleParty[crisis_df$govfrac > 0] <- "MultiParty"
crisis_df$SingleParty[crisis_df$govfrac == 0] <- "SingleParty"

##### Basic boxplots ####
qplot(AMC, execrlc, geom = "jitter", data = crisis_df) + theme_bw()

#### Exploratory models ####
# Multinomial model of AMC type on IMF program participation, with simulated
# quantities of interest for IMF program = "N" vs "Y".
fit_type_imf <- zelig(AMCType ~ IMFProgram, model = "mlogit.bayes", data = crisis_df)
imf_levels <- c("N", "Y")
fit_type_imf_set <- setx(fit_type_imf, IMFProgram = imf_levels)
fit_type_imf_sim <- sim(fit_type_imf, x = fit_type_imf_set)
plot.ci(fit_type_imf_sim)

# Further exploratory specifications for AMC creation / AMC type.
fit_party <- zelig(AMCDummy ~ SingleParty, model = "logit.bayes", data = crisis_df)
fit_imf_exec <- zelig(AMCDummy ~ IMFProgram + execrlc, model = "logit.bayes", data = crisis_df)
fit_npl <- zelig(AMCDummy ~ PeakNPLs, model = "logit.bayes", data = crisis_df)
fit_uds <- zelig(as.factor(AMCType) ~ UDS, model = "mlogit.bayes", data = crisis_df)
fit_govfrac <- zelig(AMCType ~ govfrac, model = "mlogit.bayes", data = crisis_df)
fit_gdp_imf <- zelig(AMCDummy ~ GDPperCapita + IMFProgram, model = "logit.bayes", data = crisis_df)
|
2ff72bb2ee61eead8ef5540e8ab12306512282d8
|
3b801d00b90dee6d58f4f2c68fb7b4a242a16071
|
/pre_made_scripts/ant_code/1_data_post_processing/source/3_apply_rotation_to_datfiles.R
|
969cde8b14c72def3262f5b0a032b2f8beba5ce7
|
[] |
no_license
|
connor-klopfer/animal_response_nets
|
421fe4319ad8524fd42432eab5967162dd7df032
|
31f9a561196f46a2a71224f77bf52c0deeb6e76e
|
refs/heads/master
| 2023-06-14T17:21:42.976099
| 2021-06-29T18:18:51
| 2021-06-29T18:18:51
| 327,428,275
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,117
|
r
|
3_apply_rotation_to_datfiles.R
|
####1_apply_rotation_to_datfiles.R#####
### Calls C++ functions datcorr created when compiling anttrackingUNIL/Antorient (https://github.com/laurentkeller/anttrackingUNIL)
### Takes tracking datfile and tagfile as an input and returns an oriented datfile
### Note that the tag files used as an input must have been oriented using Antorient (https://github.com/laurentkeller/anttrackingUNIL),
### had false detections removed,
### and include the frame of death / tag loss information
### The final processed tag files are provided in the subfolder original_data/tag_files and will be used in all later steps of the analysis
### Created by Nathalie Stroeymeyt
###################################
### Define directories (data_path and executables_path are set by the caller).
tag_path <- paste(data_path,"original_data/tag_files",sep="/")
input_path <- paste(data_path,"intermediary_analysis_steps/datfiles_uncorrected",sep="/")
output_path <- paste(data_path,"intermediary_analysis_steps/dat_files",sep="/")
### Create the output folder if necessary.
# BUG FIX: the original used exists(output_path), which checks whether an R
# *object* named after the path string exists (always FALSE here), not whether
# the directory exists; dir.exists() performs the intended filesystem check.
if (!dir.exists(output_path)) {
  dir.create(output_path, recursive = TRUE, showWarnings = FALSE)
}
### Navigate to the input folder and list the uncorrected datfiles.
setwd(input_path)
dat_list <- paste(input_path, list.files(pattern="\\.dat"), sep="/")
### Navigate to the tag folder and list the oriented tag files.
setwd(tag_path)
tag_list <- paste(tag_path, list.files(pattern="\\.tags"), sep="/")
for (dat_file in dat_list){
  # Base name of the datfile, without directory or ".dat" extension.
  name_root <- gsub("\\.dat","",unlist(strsplit(dat_file,split="\\/"))[grepl("\\.dat",unlist(strsplit(dat_file,split="\\/")))])
  # Candidate tag file: the name root with any "Treatment" token removed.
  tag_file <- paste(unlist(strsplit(name_root,"_"))[!grepl("Treatment",unlist(strsplit(name_root,"_")))],collapse="_")
  tag_file <- tag_list[grepl(tag_file,tag_list)]
  if (length(tag_file)>1){
    # Several matches: disambiguate using the "Treatment" token.
    tag_file <- tag_file[grepl(unlist(strsplit(name_root,"_"))[grepl("Treatment",unlist(strsplit(name_root,"_")))],tag_file)]
  }
  new_dat_file <- paste(output_path,"/",name_root,".dat",sep="")
  log_file <- paste(input_path,"/",name_root,".log",sep="")
  # Invoke the external binary: datcorr <in.dat> <tags> <out.dat> <log>.
  command_line <- paste(paste(executables_path,"/datcorr",sep=""),dat_file,tag_file,new_dat_file,log_file,sep=" ")
  print(command_line)
  system(command_line)
}
|
a4bf7b90b27962169492da290e33508c069addaf
|
8d6d6c66658ad8cd4b72d69d24b895a8cdb9a09c
|
/Using-R/Affair (Logi_Reg).R
|
8be8307d945f82c51603bca408ca38058da48676
|
[] |
no_license
|
hvgollar/Logistic_Regression
|
edac4d020a32a5651686143e064b6373bae83b38
|
03ccb84271952d26ac7f3a999f79093ed1981e1d
|
refs/heads/master
| 2022-04-11T18:04:07.724773
| 2020-04-03T08:39:37
| 2020-04-03T08:39:37
| 250,188,744
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,713
|
r
|
Affair (Logi_Reg).R
|
# Logistic regression on the Affairs dataset: split into train/test sets,
# recode variables, fit a logit model, and evaluate it with confusion
# matrices at two thresholds plus an ROC curve.
# NOTE(review): no set.seed() before sample.split(), so the split — and every
# count quoted in the comments below — varies between runs.
affair <- read.csv(file.choose()) # interactively choose the affairs CSV
str(affair)
summary(affair)
View(affair)
# NOTE(review): attach() is discouraged; later code uses affair$... anyway.
attach(affair)
table(affair$affairs) ## outcome distribution (author noted 0.625 here)
#sum(is.na(affair))
#affair <- na.omit(affair) # omitting NA values from the data
# na.omit => drops rows that have at least one NA value
dim(affair)
install.packages("caTools")
library(caTools)
table(affair$affairs) ## 451/601 = 0.75 (share of zeros; motivates the ratio)
# Random 75/25 train/test split stratified on the outcome.
split=sample.split(affair$affairs,SplitRatio = 0.75)
split
## Create the training and testing sets.
affairtrain=subset(affair,split==TRUE)
affairtest=subset(affair,split==FALSE)
View(affairtrain)
dim(affairtrain)
nrow(affairtrain) ## 450
nrow(affairtest) ## 151
## Logistic regression model
install.packages("AER")
library(AER)
??AER
install.packages("plyr")
library(plyr)
# Dichotomise the outcome: any affair (>0) -> 1, none -> 0.
affairtrain$affairs[affairtrain$affairs > 0] <- 1
affairtrain$affairs[affairtrain$affairs == 0] <- 0
# Recode gender/children to 0/1 factors.
# NOTE(review): only the training set is recoded; affairtest keeps the raw
# coding — confirm this is intended before predicting on the test set.
affairtrain$gender <- as.factor(revalue(affairtrain$gender,c("male"=1,"female"=0)))
affairtrain$children <- as.factor(revalue(affairtrain$children,c("yes"=1,"no"=0)))
## Fit the logistic model on all predictors.
affairlogi <- glm(affairs~.,data=affairtrain,family = binomial)
summary(affairlogi)
## Predicted probabilities on the training data.
predicttrain = predict(affairlogi,type="response")
summary(predicttrain)
#datatframe <- as.data.frame(claimants)
#datatframe
#View(datatframe)
?tapply
# Average predicted probability within each true outcome class.
tapply(predicttrain,affairtrain$affairs,mean)
## "tapply" computes the average prediction for each of the true outcomes
## 0 1
## 0.2149776 0.3512282  (author's run; values depend on the random split)
# Confusion matrix for threshold of 0.5
table(affairtrain$affairs,predicttrain > 0.5)
## FALSE TRUE
# 0 324 14
# 1 87 25
## Sensitivity: 25/(25+87) = 0.228
## Specificity: 324/(324+14) = 0.95
# Confusion matrix for threshold of 0.7
table(affairtrain$affairs,predicttrain > 0.7)
## FALSE TRUE
# 0 335 3
# 1 109 3
## Sensitivity: 3/(3+109) = 0.02
## Specificity: 335/(335+3) = 0.991
## Raising the threshold lowers sensitivity and raises specificity.
#### The ROC curve helps decide which threshold value works best.
install.packages("ROCR")
library(ROCR)
?prediction
ROCRpred <- prediction(predicttrain,affairtrain$affairs)
## Performance object: true positive rate vs false positive rate.
ROCRpperf <- performance(ROCRpred,"tpr","fpr")
# ROC curve => used to evaluate the quality of the classifier;
# more area under the ROC curve means a better model.
# ROC curves apply to any classification technique, not only logistic.
plot(ROCRpperf,colorize=TRUE)
# Same curve with cutoff labels printed along it.
plot(ROCRpperf,colorize=TRUE,print.cutoffs.at=seq(0,1,by=0.1),text.adj=c(-0.2,1.7))
|
a25396986e6def6df7977e1317b6b48cc7041ea7
|
880ab4f16d1eabf34d9b728fe673a90402f5f052
|
/assets/docs/EDA-Student-Alcohol-Consumption.R
|
26b474e8608ec5b182f090a781ae07af3ad1732d
|
[
"MIT"
] |
permissive
|
TiewKH/TiewKH.github.io
|
6a08fd867142afaff794535a94e6dc9c8af3b0a1
|
a089b3120313ea6847a277618896727ebbfeabce
|
refs/heads/master
| 2023-05-03T10:37:59.911886
| 2022-01-07T06:51:14
| 2022-01-07T06:51:14
| 76,365,100
| 0
| 0
|
MIT
| 2023-04-17T18:29:59
| 2016-12-13T14:17:20
|
HTML
|
UTF-8
|
R
| false
| false
| 2,833
|
r
|
EDA-Student-Alcohol-Consumption.R
|
# Plotting (ggplot2/plotly/gridExtra) and data wrangling (reshape2/plyr/dplyr).
# NOTE(review): plyr is attached before dplyr, so dplyr's verbs mask plyr's;
# loading order matters here — confirm intended.
library(ggplot2)
library(plotly)
library(gridExtra)
library(reshape2)
library(plyr)
library(dplyr)
# Student maths performance data; expects "student-mat.csv" in the working dir.
df <- read.csv("student-mat.csv", header = TRUE)
is.special <- function(x) {
  # Flag "special" values elementwise: for numeric vectors these are the
  # non-finite entries (NA, NaN, Inf, -Inf); for any other type, the NAs.
  if (!is.numeric(x)) {
    return(is.na(x))
  }
  !is.finite(x)
}
# Per-column flags of "special" (NA / non-finite) values; the outer
# parentheses force the result to print.
(data.frame(sapply(df, is.special)))
# Boxplots of first/second/final period grades by workday alcohol use (Dalc).
boxplot1 <- ggplot(df, aes(x=Dalc, y=G1, fill=Dalc))+
  geom_boxplot(aes(group = Dalc))+
  theme_bw()+
  xlab("Alcohol consumption")+
  ylab("First period grade")+
  ggtitle("First period grade") + theme(legend.position = "none")
boxplot2 <- ggplot(df, aes(x=Dalc, y=G2, fill=Dalc))+
  geom_boxplot(aes(group = Dalc))+
  theme_bw()+
  xlab("Alcohol consumption")+
  ylab("Second period grade")+
  ggtitle("Second period grade") + theme(legend.position = "none")
boxplot3 <- ggplot(df, aes(x=Dalc, y=G3, fill=Dalc))+
  geom_boxplot(aes(group = Dalc))+
  theme_bw()+
  xlab("Alcohol consumption")+
  ylab("Final period grade")+
  ggtitle("Final period grade") + theme(legend.position = "none")
grid.arrange(boxplot1, boxplot2, boxplot3, ncol = 3)
# Mean grade per period for each Dalc level, reshaped long for plotting.
toPlot <- group_by(df, Dalc) %>% summarize(G1Mean = mean(G1), G2Mean = mean(G2), G3Mean = mean(G3))
toPlot.m <- melt(toPlot,id.var="Dalc")
colnames(toPlot.m)[3] <- "MeanGradeMarks"
ggplotly(ggplot(toPlot.m, aes(x=Dalc, y=MeanGradeMarks, fill=variable)) + geom_bar(position="dodge",stat="identity") + ggtitle("Workday Alcohol Consumption Levels (Dalc) against Mean Grade Marks"))
# Cross-tabulation of Dalc vs first-period grade, then a jittered scatter
# of the final grade against Dalc.
table(df$Dalc, df$G1)
ggplot(df, aes(x=Dalc, y=G3, color = Dalc))+ geom_jitter() + scale_colour_gradientn(colours=rainbow(4))
# Mean workday/weekend alcohol consumption by family-relationship quality.
toPlot2 <- group_by(df, famrel) %>% summarize(WorkdayAlcoholConsumption = mean(Dalc), WeekendAlcoholConsumption = mean(Walc))
toPlot2.m <- melt(toPlot2, id.var = "famrel")
colnames(toPlot2.m)[3] <- "AlcoholMean"
ggplotly(ggplot(toPlot2.m, aes(x=famrel, y=AlcoholMean, color = variable)) + geom_line() + geom_point() + ggtitle("Family Relationship against Mean Alcohol Consumption"))
# Mean alcohol consumption by relationship status ("yes" -> Couple).
toPlot3 <- group_by(df, romantic) %>% summarize(WorkdayAlcocholConsumption = mean(Dalc), WeekendAlcoholConsumption = mean(Walc))
toPlot3.m <- melt(toPlot3, id.var = "romantic")
colnames(toPlot3.m)[1] <- "RelationshipStatus"
colnames(toPlot3.m)[3] <- "AlcoholMean"
toPlot3.m[1] <- apply(toPlot3.m[1], 1, function(x) {ifelse(x == 'yes', 'Couple', 'Single')})
ggplotly(ggplot(toPlot3.m, aes(x=variable, y=AlcoholMean, fill=RelationshipStatus)) + geom_bar(position="dodge",stat="identity") + ggtitle("Relationship Status against Mean Alcohol Consumption"))
# Mean alcohol consumption by self-reported health status.
toPlot4 <- group_by(df, health) %>% summarize(WorkdayAlcoholConsumption = mean(Dalc), WeekendAlcoholConsumption = mean(Walc))
toPlot4.m <- melt(toPlot4, id.var = "health")
colnames(toPlot4.m)[3] <- "AlcoholMean"
ggplotly(ggplot(toPlot4.m, aes(x=health, y=AlcoholMean, color = variable)) + geom_line() + geom_point() + ggtitle("Health against Mean Alcohol Consumption"))
|
5ae479dfcc911678bf11550cf0dcb1ae248b5ded
|
3284004194ec74cb7c6d646306a94e487d38068f
|
/data-analysis/4_compute_cand_scores.r
|
f20bd9e04e82e4b01b785047cb500ba6779c24d1
|
[] |
no_license
|
florence-nocca/mps-autonomy
|
8f3a943adab30125f4a08391da95e0459139514f
|
21a705177d37d9b361593886aef4fabcd3fe7ece
|
refs/heads/master
| 2021-01-22T22:13:43.164703
| 2017-10-03T13:16:46
| 2017-10-03T13:16:46
| 92,764,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,016
|
r
|
4_compute_cand_scores.r
|
## 4_compute_cand_scores.r transforms the corpuses into dfms and performs different models on them to compute candidates and parties similarity
## It needs to be executed again when a new score is computed to append it to cand_scores.csv
# NOTE(review): clearing the whole workspace at the top of a script is an
# anti-pattern (it wipes the caller's session); kept as-is here.
rm(list = ls(all = TRUE))
library(quanteda)
library(readtext)
library(stringi)
library(ggplot2)
library(cowplot)
## library(graphics) # For dendrogram
## library(ape) # For dendrogram
## Print exact values instead of scientific notation.
options(scipen=999)
## Load the prebuilt corpuses (provides twCorpus / ptwCorpus used below).
load("data/corpus.Rdata")
## load("data/net_corpus.Rdata")
## ## --- First execution only ---
## ## --- Retrieving candidates social and political characteristics ---
## require(readstata13)
## data = read.dta13("data/french_cand_data.dta")
## colnames(data) = tolower(colnames(data))
## ## Correct wrongly assigned account
## index = which(tolower(data$compte) == "lecallennec")
## data$compte[index] = "ilecallennec"
## data$compte = tolower(data$compte)
## ## Keep only candidates mps
## data = data[data$represente == 1,]
## ## Replace starting date by starting year only
## data$debutmandat = as.numeric(unlist(regmatches(as.character(data$debutmandat)
## , regexec("^[^-]+", as.character(data$debutmandat)
## ))))
## data$naissance = as.numeric(unlist(regmatches(as.character(data$naissance)
## , regexec("^[^-]+", as.character(data$naissance)
## ))))
## ## Keep only candidates from twCorpus
## to_match = data.frame(account = tolower(twCorpus$documents$name))
## cand_data = merge(x=to_match, y=data, by.x='account', by.y='compte')
## ## --- End of first execution ---
## --- Not first execution ---
## Read the candidate score database; na.strings="NA" codes missing values.
cand_data = read.csv("data/cand_scores.csv", header = TRUE, na.strings="NA")
## --- End ---
## Use each party's account handle as its document name.
docnames(ptwCorpus) = paste(docvars(ptwCorpus, "name"))
ptwCorpus$documents$party = c("SOC","LR","PG","PCF","FN","REM","ECO","UDI","FI")
## Keep only the three selected party accounts.
ptwCorpus = corpus_subset(ptwCorpus, name == "partisocialiste" | name == "lesrepublicains" | name == "enmarchefr")
## ptwCorpus = corpus_subset(ptwCorpus, name == "partisocialiste" | name == "lesrepublicains" | name == "enmarchefr" | name == "fn_officiel" | name == "franceinsoumise" )
## Attach each candidate's party (nuance) as a docvar, keyed by account.
cand_party = data.frame(party = cand_data$nuance, row.names = cand_data$account)
twCorpus = corpus_subset(twCorpus, name %in% as.character(cand_data$account))
twCorpus$documents$party = as.character(cand_party[as.character(twCorpus$documents$name),])
## twCorpus = corpus_subset(twCorpus, party %in% c("SOC","REM","LR"))
## Candidate document names become "account party".
docnames(twCorpus) = paste(docvars(twCorpus, "name"), docvars(twCorpus, "party"))
## Extra French stopwords, accent-stripped to Latin-ASCII.
my_stopwords = as.vector(unlist(read.table("stopwords-fr.txt")))
my_stopwords = stri_trans_general(c(my_stopwords, "faire", "faut", "veux","veut","oui","non"), "Latin-ASCII")
## --- Create a document-frequency matrix ---
to_dfm <- function(corpus, groups = NULL) {
  # Shared document-feature-matrix construction: lowercase, drop the French
  # and English stopword lists plus the project-specific `my_stopwords`
  # (a global defined above), punctuation, Twitter markup and numbers.
  # Stemming is applied separately by the caller.
  cleaned <- dfm(corpus,
                 remove = c(stopwords("french"), stopwords("english"), my_stopwords),
                 tolower = TRUE,
                 remove_punct = TRUE,
                 stem = FALSE,
                 remove_twitter = TRUE,
                 remove_numbers = TRUE,
                 groups = groups)
  invisible(cleaned)
}
# Build the candidate and party dfms with the shared settings.
twdfm = to_dfm(twCorpus)
ptwdfm = to_dfm(ptwCorpus)
## Stem both dfms with the French stemmer.
ptwdfm = dfm_wordstem(ptwdfm, language = "french")
twdfm = dfm_wordstem(twdfm, language = "french")
## Peek at feature labels and document names.
head(featnames(twdfm), 30)
head(docnames(ptwdfm))
## 200 most frequent features in the candidate dfm.
top_feat = topfeatures(twdfm, n=200)
## Side-by-side wordclouds of the most frequent features (candidates | parties).
par(mfrow=c(1,2), oma = c(0, 0, 10, 0))
textplot_wordcloud(twdfm, max=100)
textplot_wordcloud(ptwdfm, max=100)
## Combine the two corpuses, tagging their origin in the `docset` docvar.
docvars(ptwCorpus, "docset") = 1
docvars(twCorpus, "docset") = 2
allCorpus = ptwCorpus + twCorpus
## Transform the combined corpus into a (stemmed) dfm.
alldfm = to_dfm(allCorpus)
alldfm = dfm_wordstem(alldfm, language = "french")
tokenInfo = summary(allCorpus, n = ndoc(allCorpus))
## Barplot of token counts by account, with mean (blue) / median (red) lines.
barplot(tokenInfo$Tokens, las = 2, ylab = "Nombre de tokens", main = "Nombre de tokens par compte", cex.main = 1.5, xaxt="n")
labs = tolower(tokenInfo$name)
abline(h=mean(tokenInfo$Tokens), col = "blue")
abline(h=median(tokenInfo$Tokens), col = "red")
axis(1, at =(1:length(labs) * 1.2 - 0.5), labels=labs, las=2)
legend('topright',legend = c("Moyenne","Médiane") , lty=1, col=c("blue", "red"), bty='n', cex=.75)
## For a barplot with sorted values: sort(tokenInfo$Tokens, decreasing = TRUE)
## Histogram of tokens per candidate, again with mean/median lines.
hist(tokenInfo$Tokens, ylab = "Nombre de candidats", xlab = "Nombre de tokens par candidats", main = "Nombre de tokens par candidats", cex.main = 1.5, col = "gray", xlim = c(min(tokenInfo$Tokens),max(tokenInfo$Tokens)+10000), ylim = c(0,200), breaks = 20)
abline(v=mean(tokenInfo$Tokens), col = "blue")
abline(v=median(tokenInfo$Tokens), col = "red")
legend('topright',legend = c("Moyenne","Médiane") , lty=1, col=c("blue", "red"), bty='n', cex=.75)
## --- Text analysis ---
## --- Wordscores model ---
## Fit a Wordscores model and predict positions for all documents.
##
## The reference documents (with known `ref_scores`) come first in `dfm`;
## the `virgin_docs` to be scored get NA placeholders so the model treats
## them as out-of-sample.
predictWordscores <- function(dfm, virgin_docs, ref_scores) {
  all_scores <- c(ref_scores, rep(NA, ndoc(virgin_docs)))
  fitted_model <- textmodel_wordscores(dfm, all_scores)
  predict(fitted_model)
}
## model_1 = predictWordscores(alldfm, twCorpus, ref_scores = c(3.83, 7.67, 9.64, 5.91, 1.7))
model_1 = predictWordscores(alldfm, twCorpus, ref_scores = c(3.83, 7.67, 5.91))
## Scores predicted by the model
scores = model_1@textscores$textscore_raw
std_err = model_1@textscores$textscore_raw_se
## Differentiate scores from mps and parties
parties_scores = scores[1:ndoc(ptwCorpus)]
mps_scores = scores[(ndoc(ptwCorpus)+1):length(scores)]
mps_std_err = std_err[(ndoc(ptwCorpus)+1):length(std_err)]
## Create empty data frame
## mps_scores = data.frame(account = twCorpus$documents$name, wordscores = mps_scores, stringsAsFactors = FALSE)
mps_scores = data.frame(account = twCorpus$documents$name, ws_se = mps_std_err, stringsAsFactors = FALSE)
## Remove previously computed wordscores
## cand_data = subset(cand_data, select = -wordscores)
## Merge by account names
cand_data = merge(cand_data, mps_scores, by = "account")
### Graphical representations
## Graphical parameters
## parties_colors = data.frame(colors = c("red", "blue", "yellow"), row.names = ptwCorpus$documents$name)
## parties_colors = data.frame(colors = c("red", "blue", "black","yellow","indianred"), row.names = ptwCorpus$documents$name)
parties_colors = data.frame(colors = c("red", "blue","yellow"), row.names = ptwCorpus$documents$name)
## cand_colors = data.frame(colors = c("blue", "red", rep("gray",4), "yellow", rep("gray", 6)), row.names = unique(cand_data$nuance))
cand_colors = data.frame(colors = c("red", "yellow", "blue"), row.names = unique(twCorpus$documents$party))
colors = as.character(cand_colors[as.character(twCorpus$documents$party),])
## Resolution options for plot
width = 1300 * 0.7
height = 768 * 0.7
dpi = 200
ratio = dpi / 72
### Plot
## y-axis
len = length(mps_scores$wordscores)
## Labels
labels = twCorpus$documents$name
## Legend
pnames = c("SOC","LR","REM")
## Save plot as png
## png("Graphs/Plot_wordscores_french_cand.png", width=width * ratio, height=height * ratio, res=dpi)
pdf("Graphs/Plot_wordscores_french_cand.pdf", width=width * ratio, height=height * ratio, res=dpi)
x = mps_scores$wordscores
plot(x=x, y=1:len, xlim=c(min(parties_scores,x), max(parties_scores,x)), xlab="Scores sur une échelle gauche-droite", ylab="Index des comptes", col=colors, main="", cex.main=1.5, pch=1, cex=0)
text(x=x, y=1:length(labels), labels=labels, cex=0.5, col=colors)
abline(v=parties_scores, col=as.character(parties_colors[ptwCorpus$documents$name,]))
legend(x=6.4, y=30, pnames, fill=as.character(parties_colors[ptwCorpus$documents$name,]), cex=0.8)
dev.off()
## Histograms by party
## Save histograms as pdf
pdf("Graphs/Hist_wordscores_french_cand.pdf")
par(mfrow=c(3,1), oma = c(0, 0, 0, 0))
lapply(1:length(pnames), function(n) {
x = cand_data$wordscores[cand_data$wordscores>0][cand_data$nuance == pnames[n]]
hist(x = x, breaks=seq(4.5,7,by=0.1), xlim=c(4.5,7),main=paste0(pnames[n]," (n = ",length(x),")"), xlab="", ylab="")
abline(v=parties_scores[n], col=as.character(as.data.frame(parties_colors)$colors[n]))
})
dev.off()
## NOMINATE and wordscores
## Add nominatepr
## require(readstata13)
## database = read.dta13("data/DonneesRFS.dta")
## colnames(database) = tolower(colnames(database))
## to_match = data.frame(account = tolower(cand_data$account))
## col_matched = data.frame(account = database$compte, nom2d1 = database$nominate_1d, nom2d2 = database$nominate_2d)
## cand_data = merge(x=cand_data, y=col_matched, by='account')
## ## Write changes to cand_scores file
## write.csv(cand_data, "data/cand_scores.csv", row.names = FALSE)
## ## Add membres of parliamentary group "Constructifs" to database
## constructifs = data.frame(account = cand_data$account, constructif = rep(0, length(cand_data$account)))
## to_add = c("marinebrenier","antoineherth","vincentledoux59","morelpierre","franckriester","solere92","warsmann")
## constructifs[constructifs$account %in% to_add,]$constructif = 1
## cand_data = merge(cand_data, constructifs, by = "account")
## ## Write changes to cand_scores file
## write.csv(cand_data, "data/cand_scores.csv", row.names = FALSE)
## Plot NOMINATE and wordscores
to_be_removed = which(is.na(cand_data$nom2d1))
data = cand_data[-to_be_removed,]
x = data$wordscores
y = data$nom2d1
data$nuance = as.character(data$nuance)
data[data$frondeur == 1,]$nuance = gsub("SOC","Frondeur",data[data$frondeur == 1,]$nuance)
data[data$constructif == 1,]$nuance = gsub("LR","Constructif",data[data$constructif == 1,]$nuance)
z = as.factor(data$nuance)
Palette = c("cyan","black","blue","gold1","red")
p = ggplot(data, aes(x, y, colour=factor(z), label = data$account)) +
geom_point(## size=3, shape=19
) + labs(x = "Wordscores", y = "Nominate (dimension 1)") + scale_colour_manual(values=Palette) + labs(colour='Parti') + geom_text(x = 6.5, y = 1, label = "LR", colour = "blue", size = 3, vjust = 1) + geom_text(x = min(x), y = min(y), label = "SOC", colour = "red", size = 3) + geom_text(x = 5.82, y = 0, label = "REM", colour = "gold", size = 3, vjust = 2) + theme_classic() + theme(legend.key = element_rect(colour = '#bdbdbd', size = 0.6) )
## To add labels
## + geom_text(aes(label = data$account), hjust = 0, vjust = 0, size = 5)
save_plot("Graphs/Plot_NOM_wordscores_french_cand.pdf", p, base_height = 6, base_width = 7)
## --- Wordfish model ---
## Fit a Wordfish scaling model on the candidate tweets whose party is in
## `cand_party`, pooled with the official party accounts listed in
## `parties_to_keep`.
##
## `dir` anchors the latent dimension on the two party documents, which sit
## immediately after the candidate documents in the combined corpus.
predictWordfish <- function(twCorpus, ptwCorpus, cand_party, parties_to_keep) {
  cand_corpus <- corpus_subset(twCorpus, party %in% cand_party)
  party_corpus <- corpus_subset(ptwCorpus, name %in% parties_to_keep)
  combined <- cand_corpus + party_corpus
  combined_dfm <- dfm_wordstem(to_dfm(combined), language = "french")
  n_cand <- length(cand_corpus$documents$name)
  textmodel_wordfish(combined_dfm, dir = c(n_cand + 1, n_cand + 2))
}
wfm = predictWordfish(twCorpus, ptwCorpus, c("REM","SOC"), c("enmarchefr", "partisocialiste"))
textplot_scale1d(wfm)
## ------ Machine Learning ------
## --- Compute cosine similarity ---
## simil = as.matrix(textstat_simil(dfm_weight(alldfm, "relFreq"), c("partisocialiste", "lesrepublicains","enmarchefr","franceinsoumise","fn_officiel","udi_off","eelv")), margin = "documents", method = "cosine")
simil = as.matrix(textstat_simil(dfm_weight(alldfm, "relFreq"), c("partisocialiste", "lesrepublicains","enmarchefr")), margin = "documents", method = "cosine")
## Set minimum value to 0
simil = pmax(simil, 0)
simil = as.data.frame(simil)[-c(1,2,3),]
simil = as.data.frame(simil)
## colnames(simil) = gsub("^","net_",colnames(simil))
simil$account = twCorpus$documents$name
cand_data = merge(cand_data, simil, by = "account")
## cand_data = subset(cand_data, select = -(73:76))
## --- k-means ---
simil = subset(simil, select = -account)
k = 3
km_simil = kmeans(simil, k, algorithm = "Hartigan-Wong", nstart=100, iter.max = 100)
## Find each party's class number looking at centers
km_simil$centers
classes = lapply(1:k, function(n){
unique(twCorpus$documents[km_simil$cluster == n,]$name)
})
## Look at class composition
n = 1
table(cand_data[cand_data$account %in% classes[[n]],]$nuance)
## k = 3 with tweets' content does not find the 3 parties, but k = 4 does, with a fourth "unclassable" class
## Create empty data frame
km_on_simil = data.frame(account = character(), net_km_simil = numeric(), stringsAsFactors = FALSE)
## Append data frame with accounts and their corresponding class
for(i in 1:k){
km_on_simil = rbind(km_on_simil, data.frame(account = tolower(classes[[i]]), net_km_simil = rep(i, length(classes[[i]]))))
}
cand_data = merge(cand_data, km_on_simil, by = "account")
## km_on_simil$km_simil = pred_party
parties_classes = data.frame(party = c("SOC","LR","REM"), row.names = c(1, 2,3))
pred_party = as.character(parties_classes[cand_data$net_km_simil,])
## cand_data$km_simil = (pred_party != cand_data$nuance)
cand_data$net_km_pred = pred_party
cand_data = subset(cand_data, select = -net_km_simil)
## --- Naive Bayesian ---
## First run: party document as training data
## trainingset = dfm_subset(alldfm, docset == 1)
## predictionset = dfm_subset(alldfm, docset == 2)
## trainingclass = factor(allCorpus$documents$party[1:3])
## Other runs: parties and "alwaysloyalists" as training data
trainingset = dfm_subset(alldfm, name %in% c(always_loyalists,ptwCorpus$documents$name))
predictionset = dfm_subset(alldfm, docset == 2)
trainingclass = factor(allCorpus$documents[allCorpus$documents$name %in% c(always_loyalists,ptwCorpus$documents$name),]$party)
model = textmodel_NB(trainingset, trainingclass)
## Party prediction
success = unlist(lapply(1:length(twCorpus$documents$name), function(n){
prediction = predict(model, newdata = predictionset[n])
pred_party = prediction$nb.predicted
true_party = twCorpus$documents$party[n]
## return(pred_party == true_party)
return(pred_party)
})
)
net_naive_pred_bis = success
## success_rate = sum(success) / length(twCorpus$documents$text)
## success rate of 79\% (92 with networks) on first run
## 85\% (89 for networks) with always_loyalists as training data
## Party predicted
naive_lr = twCorpus$documents$name[which(net_naive_pred_bis == "LR")]
naive_soc = twCorpus$documents$name[which(net_naive_pred_bis == "SOC")]
naive_rem = twCorpus$documents$name[which(net_naive_pred_bis == "REM")]
## Merge by account names
naive_party_pred = data.frame(account = naive_lr, net_naive_pred_bis = "LR")
naive_party_pred = rbind(naive_party_pred, data.frame(account = naive_soc, net_naive_pred_bis = "SOC"))
naive_party_pred = rbind(naive_party_pred, data.frame(account = naive_rem, net_naive_pred_bis = "REM"))
cand_data = merge(cand_data, naive_party_pred, by = "account")
## Remove previously computed naive
## cand_data = subset(cand_data, select = -naive_diss)
## Merge by account names
## naive = data.frame(account = naive_dissidents, naive_diss = TRUE)
## naive = rbind(naive, data.frame(account = naive_loyalists, naive_diss = FALSE))
## cand_data = merge(cand_data, naive, by = "account")
## --- Wordshoal ---
wordshoalfit = textmodel_wordshoal(alldfm, dir = c(1,2),
groups = docvars(allCorpus, "party"),
authors = docvars(allCorpus, "name"))
fitdf = merge(as.data.frame(summary(wordshoalfit)),
docvars(allCorpus),
by.x="row.names", by.y="name")
fitdf = subset(fitdf,duplicated(party))
aggregate(theta ~ party, data = fitdf, mean)
par(mfrow=c(3,1), oma = c(0, 0, 0, 0))
lapply(ptwCorpus$documents$party, function(party){
hist(as.numeric(fitdf[fitdf$party == party,]$theta), main = party)
}
)
## --- SVM ---
library(e1071)
model = svm(trainingset,trainingclass, kernel = "sigmoid")
pred = predict(model, predictionset)
pred = as.character(pred)
table(pred, twCorpus$documents$party)
## First run: success rate of 78\% (84 on networks))
# Following runs: 85\% (59 on networks !)
success = unlist(lapply(1:length(twCorpus$documents$name), function(n){
pred_party = pred[n]
true_party = twCorpus$documents$party[n]
return(pred_party == true_party)
})
)
success_rate = sum(success) / length(twCorpus$documents$text)
## Party predicted
svm_lr = twCorpus$documents$name[which(pred == "LR")]
svm_soc = twCorpus$documents$name[which(pred == "SOC")]
svm_rem = twCorpus$documents$name[which(pred == "REM")]
## Merge by account names
svm_party_pred = data.frame(account = svm_lr, net_svm_pred = "LR")
svm_party_pred = rbind(svm_party_pred, data.frame(account = svm_soc, net_svm_pred = "SOC"))
svm_party_pred = rbind(svm_party_pred, data.frame(account = svm_rem, net_svm_pred = "REM"))
cand_data = merge(cand_data, svm_party_pred, by = "account")
## Rerun models with loyalists as training data
svm_loyalists = cand_data[cand_data$nuance == cand_data$net_svm_pred,]$account
naive_loyalists = cand_data[cand_data$nuance == cand_data$net_naive_pred,]$account
## km_pred = gsub("NA",as.character("NA"),cand_data$km_pred)
km_loyalists = cand_data[cand_data$nuance == cand_data$net_km_pred,]$account
always_loyalists = Reduce(intersect, list(svm_loyalists,naive_loyalists,km_loyalists))
## mps_predicted = simil[!(simil$account %in% c(always_loyalists,ptwCorpus$documents$name)),]$account
## --- KNN ---
library(class)
## KNN on cosine similarity does not work well (62\%)
## train_seq = which(simil$account %in% always_loyalists)
## test_seq = 4:length(simil$account)
## train = simil[c(1:3,train_seq),1:3]
## labels = allCorpus$documents[c(1:3,train_seq),]$party
## test = simil[test_seq,1:3]
## pred = knn(train,test,labels,3)
## KNN on dfm is better (79\%, but still less than other models with always_loyalists training data) (74 with networks)
pred = knn(trainingset,predictionset,trainingclass,3)
success = unlist(lapply(1:length(twCorpus$documents$name), function(n){
pred_party = pred[n]
true_party = twCorpus$documents$party[n]
## return(pred_party == true_party)
return(pred_party)
})
)
## success_rate = sum(success) / length(twCorpus$documents$text)
knn_lr = twCorpus$documents$name[which(pred == "LR")]
knn_soc = twCorpus$documents$name[which(pred == "SOC")]
knn_rem = twCorpus$documents$name[which(pred == "REM")]
## Merge by account names
knn_party_pred = data.frame(account = knn_lr, net_knn_pred_bis = "LR")
knn_party_pred = rbind(knn_party_pred, data.frame(account = knn_soc, net_knn_pred_bis = "SOC"))
knn_party_pred = rbind(knn_party_pred, data.frame(account = knn_rem, net_knn_pred_bis = "REM"))
cand_data = merge(cand_data, knn_party_pred, by = "account")
## Write changes to cand_scores file
write.csv(cand_data, "data/cand_scores.csv", row.names = FALSE)
## Detect systematic dissidents
svm_dissidents = cand_data[cand_data$nuance != cand_data$svm_pred,]$account
naive_dissidents = cand_data[cand_data$nuance != cand_data$naive_pred,]$account
## cand_data$km_pred = gsub("NA",as.character("NA"),cand_data$km_pred)
km_dissidents = cand_data[as.character(cand_data$nuance) != cand_data$km_pred,]$account
always_dissidents = Reduce(intersect, list(svm_dissidents,naive_dissidents,km_dissidents))
both_dissidents = Reduce(intersect, list(net_always_dissidents,always_dissidents))
svm_bis_dissidents = cand_data[as.character(cand_data$nuance) != as.character(cand_data$svm_pred_bis),]$account
naive_bis_dissidents = cand_data[as.character(cand_data$nuance) != as.character(cand_data$naive_pred_bis),]$account
knn_bis_dissidents = cand_data[as.character(cand_data$nuance) != as.character(cand_data$knn_pred_bis),]$account
|
c1e1c786f7aec62ea28512de7fc6b404329e93ed
|
8388995e45cde141163d52c461d1aaeeb01aded4
|
/app.R
|
997659c5ed77e73abe53f2576446b489d8c082c0
|
[] |
no_license
|
kearney-stats/kearney-stats.github.io
|
e3db3be44a7cfad97dd5e003e8233e141252fc9f
|
8bbae84a52dc3f816aae4347caa7afc4e50d2f61
|
refs/heads/main
| 2023-03-01T14:11:04.697215
| 2021-01-21T15:08:28
| 2021-01-21T15:08:28
| 331,662,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,455
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(locfit)
library(knitr)
library(DT)
library(tidyverse)
library(dplyr)
data(penny)
# Define UI for application that draws a histogram
# UI: a sidebar with radio buttons choosing how many pennies to sample,
# and a main panel showing the sampled years in a table.
ui <- fluidPage(
  # Application title
  titlePanel("Sample Penny Data"),
  # Sidebar with radio buttons selecting the sample size
  # (template comment said "slider", but these are radio buttons)
  sidebarLayout(
    sidebarPanel(
      radioButtons("num_pennies",
                   # FIX: label typo "pennnies" -> "pennies"
                   h3("Number of pennies to sample:"),
                   choices = list("5" = 5,
                                  "10" = 10,
                                  "25" = 25,
                                  "50" = 50)
      )
    ),
    # Main panel: the table of sampled penny years rendered by the server
    mainPanel(
      tabsetPanel(
        tabPanel("pennies", tableOutput("pennyData"))
      )
    )
  )
)
# Define server logic required to draw a histogram
# Server: reactively draw `input$num_pennies` random rows from the penny
# data and render their minting years, sorted ascending, as a table.
# NOTE(review): radio-button values arrive as character strings; confirm
# sample_n() handles that coercion for `size`.
server <- function(input, output) {
  sampled_years <- reactive({
    penny %>%
      sample_n(size = input$num_pennies) %>%
      arrange(year) %>%
      select(year)
  })
  output$pennyData <- renderTable(sampled_years())
}
# Run the application
shinyApp(ui = ui, server = server)
|
a222a0aca8fefa593c9ef6e5835c779eb96d0ee4
|
388934fe38dffcff8770d29633cf4725dc80890b
|
/Primer_sesion.R
|
18922cd1f10e85f0705bbec597190f4a853bdaf4
|
[] |
no_license
|
GerarLDz/RInicio
|
be0236c518794dae799ec69fe5a49af49ef6e10c
|
8d07891c041cbcad994423e6e016a1bf884c5c0f
|
refs/heads/master
| 2022-10-23T09:12:43.409791
| 2020-06-17T02:39:28
| 2020-06-17T02:39:28
| 272,860,941
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 13,531
|
r
|
Primer_sesion.R
|
## Basic R atomic types: character, double, integer (L suffix) and logical.
variable.char <- 'Hola R'
variable_num <- 3.1416
variable.int <- 149213L
variable.logical <- TRUE
## typeof() reports the internal storage type; class() the (S3) class.
typeof(variable_num)
typeof(variable.char)
class(variable_num)
print(class(variable.char))
## BUG FIX: the original referenced `variable.num`, which was never created
## (the double above is `variable_num`), raising "object not found".
print(class(variable_num))
print(class(variable.int))
print(class(variable.logical))
## Simple scalar arithmetic.
x.1 <- 1
print((x.1*0.582)^2)
x.2 <- 2
print((x.2*0.582)^2)
vector.part1 <- c(1,2,3,4,5)
print(vector.part1)
vector.part2 <- c(7,8,9,10)
print(vector.part2)
vector.complete <- c(vector.part1, 6)
print(vector.complete)
vector.complete <- c(vector.part1, 6, vector.part2)
print(vector.complete)
##############################################
# Para crear un vector con secuencia
vector.seq.byone <- seq(from=1, to=10, by=1)
print(vector.seq.byone)
vector.seq.bytwo <- seq(from=1, to = 10, by=2)
print(vector.seq.bytwo)
vector.seq.negative <- seq(from = 10, to = 1, by=-1)
print(vector.seq.negative)
##############################################
# Para crear un vector con valores repetidos
vector.rep <- rep(x=1, times = 3)
print(vector.rep)
vector.rep.each <- rep(x = c(1,2), each = 3)
print(vector.rep.each)
vector.rep.complete <- rep(x=c(1,2), each = 3, times = 2)
print(vector.rep.complete)
#################################################
# Para ordenar un vector
vector.sorted.increasing <- sort(vector.rep.complete)
print(vector.sorted.increasing)
vector.sorted.decreasing <- sort(vector.rep.complete, decreasing = TRUE)
print(vector.sorted)
# Para ver el tamaño de un vector
length(vector.sorted.decreasing)
length(vector.sorted.decreasing)/2
# Para extraer el primer elemento de un vector
vector.multiply.four[1]
# Para extrar el primer y segundo elemento
vector.multiply.four[1:2]
# Para extraer el primer y tercer elemento
vector.multiply.four[c(1,3)]
# Para extraer el último elemento del vector
loc.last <- length(vector.multiply.four)
print(loc.last)
vector.multiply.four[loc.last]
# Para extrar el penúltimo elemento del vector
vector.multiply.four[loc.last-1]
# Para extrar todos los elementos menos el primero
vector.multiply.four[-1]
# Para extraer todos los elementos menos el último
vector.multiply.four[-loc.last]
# Para extraer todos los elementos menos el primero y el último
vector.multiply.four[-c(1, loc.last)]
# Para sustituir el valor de un elemento, por ejemplo el primero
vector.multiply.four[1] <- 100
print(vector.multiply.four)
# Para crear un dataframe
df.example <- data.frame(clave=c(1,2,3),grado=c(4,4,5),sexo=c('F','M','M'))
############ RETO 1#########
mi.vector<-seq(from=0, to=500,by=10)
print(mi.vector)
length(mi.vector)
mi.vector[21]<- -1
mi.vector.transformado<- 0.85*(mi.vector) + 10
print(mi.vector.transformado)
sort(mi.vector.transformado,decreasing = TRUE)
#Utilizaremos el dataset iris
iris
#Para ver el número de columnas de un dataframe
ncol(iris)
#Para ver el número de renglones
nrow(iris)
#Para ver el número de renglones y columnas
dim(iris)
#Para ver el nombre de las columnas utilizamos names
names(iris)
#Primer renglon, primer columna
iris[1,1]
#Tercer renglón, primer columna
iris[3,1]
#Tercer renglón, segunda columna
iris[3,2]
#Para traer renglones completos de un data frame
iris[1,]
#Los tres primeros renglones
iris[10:35,]
# Todos los renglones menos el primero
iris[-1,]
# La primer columna por el número de columna que es
iris[,1]
# La primer columna por el nombre de columna que es
iris[,'Sepal.Length']
# La primer columna por el nombre de columna que es
iris$Sepal.Length
# Las primeras dos columnas
iris[,1:2]
# Todas las columnas menos la primera
iris[,-1]
# Primer renglón, dos primeras columnas
iris[1, 1:2]
iris[1, c('Sepal.Length', 'Sepal.Width')]
# Primeros tres renglones, dos últimas columnas
iris[1:3, 2:3]
# Primeros tres renglones, primer y última columna
iris[1:3, c(1,3)]
iris[1:3, c('Sepal.Length', 'Petal.Length')]
# Para agreger un nuevo renglón utilizamos rbind
iris.parte2 <- data.frame(Sepal.Length = c(6,7),
Sepal.Width = c(2.5,3.5),
Petal.Length = c(3,3),
Petal.Width = c(0.01,2.08),
Species = c('virginica','versicolor'))
iris
iris.parte2
df.iris <- rbind(iris, iris.parte2) # Esta función reconoce automaticamente los frames y pega las tablas
df.iris
# Para agregar una columna
df.example.newcolumn <- data.frame(calificacion = c(9,6,10))
df.example.newcolumn
df.example <- cbind(df.example, df.example.newcolumn)
df.example
# Para transformar una columna
df.example$calificacion <- df.example$calificacion + 0.5
df.example
# Para transformar una columna y agregar la transformación como una nueva
df.example$nueva_col <- (df.example$calificacion + 8)/2
df.example
# Para eliminar una columna
df.example$grado <- NULL
df.example
# Para ver los primeros 8 elementos del dataframe
head(iris, 8)
# Para ver los ultimos 6 elementos del dataframe
tail(iris,10)
# Ver los distintos elementos de una columna
unique(iris$Species)
# Cambiar los nombres de las columnas
names(iris)
names(iris) <- c("Longitud.Sepalo","Ancho.Sepalo","Longitud.Petalo","Ancho.Petalo","Especies")
head(iris)
#Podemos compara los valores de las columnas de un dataframe
iris$Sepal.Width > 2
#Podemos usar la comparación para filtrar un dataframe
iris[iris$Sepal.Width > 2,c(1,2)]
iris[iris$Sepal.Width > mean(iris$Sepal.Width),]
#Para ver la estructura de un dataframe
str(iris)
#Para ver los estadisticos básicos de las columnas de un dataframe
summary(iris)
############## RETO 3 #############
max(mtcars$cyl)
min(mtcars$qsec)
mean.cyl<-mean(mtcars$cyl)
mean.qsec<-mean(mtcars$qsec)
mtcars[mtcars$cyl>mean.cyl,]
mtcars.filtrados<-mtcars[mtcars$cyl>mean.cyl & mtcars$qsec>mean.qsec,]
## Para ver la ruta dónde estamos trabajando
getwd()
#Para bajar dos niveles de directorio abajo
setwd()
#Para leer un archivo que tenemos local
dataframe.traffic<-read.csv('Metro_Interstate_Traffic_Volume.csv')
########### Declarar Funciones ############
#nombre.funcion <- function(argumento.1, argumento.2,...,argumento.n){
# ...operaciones entre argumentos...
# ...guardar resultado a mostrar en una variable (var.resultado)...
# return(var.resultado)
#}
## Return the greeting "Hola <nombre>" (space-separated via paste()).
saludo <- function(nombre) {
  paste("Hola", nombre)
}
## Arithmetic mean of a numeric vector, computed by hand as sum/length
## (equivalent to mean() for a plain numeric vector).
mi.promedio <- function(vector) {
  sum(vector) / length(vector)
}
# Declaramos una función
## Custom three-step arithmetic pipeline on scalars a, b, c:
##   step1 = 4*(a + b) + 2*a
##   step2 = step1^2 + c/2
##   result = step2^3
operacion.personalizada <- function(a, b, c) {
  step1 <- 4 * (a + b) + 2 * a
  step2 <- step1^2 + c / 2
  step2^3
}
# Llamada a la función
operacion.personalizada(1,2,3)
operacion.personalizada(7,8,9)
############# Ejercicio 07 ##############
library(dplyr)
# Vemos la estructura de iris
str(iris)
## La notación %>% indica consecución, es decir se puede seguir sobre la misma consulta o comando ejecutado
## En los ejemplos primero se toma iris y despues sobre iris se hace el filter, podríamos hacer un segundo filtro
##sobre el primer filtro
#Nos quedamos con los renglones que tengan Sepal.Length mayor o igual a
iris %>% filter(Sepal.Length>=6)
# Para una doble condición
iris %>% filter((Sepal.Length>=6) & (Petal.Length>=4.5))
## Seleccionamos todas las columnas que empiecen con S
iris %>%
select(-Species)%>% head(5)
## Seleccionamos las columnas que empiecen con S
iris %>%
select(starts_with('S')) %>%
head(5)
iris %>%
arrange(Sepal.Length) %>%
head(5)
# Ordenamos descendentemente por la columna Sepal.Length y seleccionamos primeros 5 renglones
iris %>%
arrange(desc(Sepal.Length)) %>%
head(5)
#Para cambiar el nombre de alguna columna
iris %>%
rename(Especies = Species) %>%
head(5)
#Para agregar o transformar columnas, utilizamos mutate.
iris %>%
mutate(Mult.Width = Sepal.Width*Petal.Width) %>%
head(5)
iris %>%
group_by(Species) %>%
count()
iris %>%
group_by(Species) %>%
summarise(Mean.Sepal.Length = mean(Sepal.Length),
Median.Petal.Length = median(Sepal.Length),
Max.Petal.Width = max(Petal.Width),
Min.Petal.Length = min(Petal.Length))
####### Reto 7 ##############
#1. Haz un llamado a la libreria dplyr
#2. Lee el archivo Metro_Interstate_Traffic_Volume.csv y guardalo en df.traffic
#3. Ve la estructura del dataframe y los tipos de dato de cada columna
#4. Calcula el promedio de la columna traffic_volume y guardala en mean.traffic
#5. Selecciona solo las columnas weather_main y traffic_volume
#6. Cambia de nombre las columnas: weather_main a clima y traffic_volume a trafico
#7. Filtra a las observaciones donde la columna trafico sea mayor o igual a mean.traffic
#8. Guarda el dataframe filtrado como df.traffic.filter
#9. ¿Cuántos renglones y columnas tiene df.traffic.filter?
#10. Con df.traffic.filter, agrupa por clima y saca el min de trafico y max de trafico, guardalo en df.traffic.grouped
#11. Agrega una columna a df.traffic.grouped que sea la diferencia entre max y min del grupo
library(dplyr)
df.traffic<-read.csv('Metro_Interstate_Traffic_Volume.csv')
head(df.traffic)
str(df.traffic)
mean.traffic<-mean(df.traffic$traffic_volume)
df.traffic %>% select(weather_main,traffic_volume)
df.traffic %>% rename(clima = weather_main,trafico = traffic_volume) %>% head(5)
df.traffic %>% rename(trafico = traffic_volume) %>% head(5)
new_trafic<-df.traffic %>% rename(clima = weather_main,trafico = traffic_volume)
new_trafic %>% filter(trafico>=mean.traffic)
df.traffic.filter <- new_trafic %>% filter(trafico>=mean.traffic)
str(df.traffic.filter)
df.traffic.grouped<- df.traffic.filter %>% group_by(clima) %>% summarise(
min_trafic= min(trafico),
max_trafico= max(trafico))
df.traffic.grouped %>% mutate(diferencia = max_trafico - min_trafico) %>%head(5)
iris %>% head(5)
iris %>% group_by(Species) %>% summarise(m_sepal=mean(Sepal.Length),m_width=mean(Sepal.Width))
################# SESION 3 ###############################
A<- 8
B<- 17
C<- 10
D<- 10
ifelse(A>B | C==D, print("TRUE"), print("false"))
####### for
for(i in 1:10){
print(i)
}
#Recorre los valores contenidos en el vector
for(n in c(2,5,10,20,50)){
print(n)
}
arrayString <- c("Loops.","ForLoop","WhileLoop","DoWhileLoop","WhileLoop")
for (n in arrayString) {
print(n)
}
head(mtcars)
mtcars <- mtcars
for(i in 1:100) {
print(mtcars$`nombre carro`[i])
}
## Return the largest element of a vector.
val_mayor <- function(vect) {
  max(vect)
}
airquality
str(airquality)
#Cuantas columnas, cuantos renglones y que tipo son
#Ver resumen de los datos estadisticos
head (airquality)
summary(airquality)
k<-0;q<-0;p<-0;
for (i in 1:length(airquality$Wind)){
if(airquality$Wind[i]>7){
k<-k+1
}
if(airquality$Wind[i]<4){
q<-q+1
}
if(airquality$Wind[i]>4 &airquality$Wind[i]<7){
p<-p+1
}
}
k<-0;q<-0;p<-0;
for(i in airquality$Wind){
if(i<4){
q<-q+1;
}
else if(i>7){
k<-k+1;
}
else{
p<-p+1;
}
}
######### while ##########
while (test_expression) {
statement
}
##########################
precio <- 20
while(precio <= 2500){
print(precio)
precio <- precio + 5
}
##########################
v <- c("Hello","while loop")
cnt <- 2
while (cnt < 7) {
print(v)
cnt = cnt + 1
}
##############
x <- 0
while(x < 5){
x <- x+1;
if (x == 3)
next;
print(x);
}
###############
#While sobre la columna wind de airquality e imprimir todos mientras el
#elemento sea mayor a 4
i<-1
while(airquality$Wind[i]>4){
print(airquality$Wind[i])
i<-i+1;
}
print(i)
print(airquality$Wind[i])
######### Suceción de Fibonacci #####
f0<-0
f1<-1
fp<-c(0,1)
i=1
while(i==10){
# fp[i]<- i
i<-i+1
}
#####################
rm(list=ls()) ############# limpiar workspace
## Remainder of num divided by divisor (modulo; divisor defaults to 2).
remainder <- function(num, divisor = 2) {
  num %% divisor
}
remainder(11,4)
# Función que reciba una función que exista en R (mean, min, max) y un vector
#aplicar la función de R en el vector
## Apply an existing R function (e.g. mean, min, max) to a vector.
## Demonstrates that functions are first-class values in R.
calc <- function(vect, fn) {
  fn(vect)
}
calc(c(18,56,3,78,24,94),max)
######### Ejemplo 3 - sesion 2 ###########
### los tres puntos nos indican que los argumentos de la funcion pueden ser cuales sean y se van a almacenar
## puntitos se llaman elipsis
## Variadic-argument example: `...` (the ellipsis) forwards any number of
## arguments. telegram() wraps them between "START" and "STOP", joined with
## spaces by paste(), e.g. telegram("a", "b") -> "START a b STOP".
## Note paste() vectorizes: passing a vector argument yields one telegram
## per element.
telegram<- function(...){
paste("START",...,"STOP")
}
## Mad Libs generator: builds a sentence from a place, an adjective and a
## noun passed positionally through `...` (in that order).
##
## BUG FIX: the original ended with `paste("New",)` -- the trailing empty
## argument is a runtime error in R ("argument 2 is empty"), and the three
## words unpacked from `...` were never used.
MAD_LIBS <- function(...) {
  ## Unpack the arguments
  args <- list(...)
  place <- args[[1]]
  adjective <- args[[2]]
  noun <- args[[3]]
  paste("News from", place, "today where", adjective,
        "students took to the streets in protest of the new", noun,
        "being installed on campus.")
}
######### RETO ###########
### True o False: puedes tener ... y un argumento llamado clima en una función
## Reto: shows that `...` can be combined with a named argument (`clima`).
## Because `clima` comes AFTER `...`, it can only be matched by name in a
## call; any positional argument falls into `...`.
## NOTE(review): `place` is assigned but never used, and paste() on the
## list `args` coerces each element with as.character() before pasting.
ret<-function(...,clima=35){
args<- list(...)
place<- args[[1]]
print(paste(args,clima))
}
## Here 40 is NOT matched to `clima` (it is positional, so it lands in
## `...`); clima keeps its default 35. paste() then vectorizes over the
## two-element list, printing c("Clima: 35", "40 35").
ret('Clima:',40)
## Print a sentence naming a person's children and return the list of names.
##
## Args:
##   nhijos: number of children (used only in the printed message).
##   ...:    the children's names; exactly two are expected.
## Returns: the list of names passed through `...`.
hijos <- function(nhijos, ...) {
  arg <- list(...)
  ## FIX: use `[[` to extract the elements themselves. The original used
  ## `arg[1]` / `arg[2]`, which return one-element sublists that paste()
  ## only handled via silent coercion -- and printed "NULL" when a name
  ## was missing instead of signalling an error.
  nom_hijos <- arg[[1]]
  nom_hijos1 <- arg[[2]]
  print(paste("Usted tiene", nhijos, "hijos llamados", nom_hijos, "y", nom_hijos1))
  return(arg)
}
b <- hijos(2, "Juan", "Beto")
|
17b30daf53b303c525924a95bc97bc454cdef0af
|
d4bee912d31490aaa7a35eb2afa5bbab22305a6d
|
/man/theme_cb.Rd
|
0d74f980dfde43e2fe8039c56aa7ae28aa5bbe51
|
[] |
no_license
|
cole-brokamp/CB
|
8fec717343d4a67516514e06a9d8cfa88376a4c1
|
7ea91855c70c9586ee9546ecbdb10b044f3ef100
|
refs/heads/master
| 2022-11-06T08:09:11.511057
| 2019-02-26T22:29:04
| 2019-02-26T22:29:04
| 39,470,699
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 419
|
rd
|
theme_cb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_cb.R
\name{theme_cb}
\alias{theme_cb}
\title{Custom CB ggplot2 theme}
\usage{
theme_cb(base_size = 11, ...)
}
\arguments{
\item{base_size}{base font size}
\item{...}{other arguments passed to \code{ggplot2::theme_*()}}
}
\description{
Custom CB ggplot2 theme
}
\examples{
\dontrun{
library(ggplot2)
ggplot(mtcars, aes(wt, mpg)) +
  geom_point() +
  theme_cb()
}
}
|
cb6ce2fd9c7692e181066a4bd6d90db41b6e5798
|
0cd4ff55a44c27ffd2ec1a3a9cdec8e3d0a4e3f9
|
/lesson5/Main.R
|
59ecdc6ac71099e3a724e78a0f6c79ae0f9c7c87
|
[] |
no_license
|
agex5/GeoScripting
|
572d54572d5a359fcd9077824de5ffdc68446377
|
42b23d5c5371a4edac211fbab8cffd4264488596
|
refs/heads/master
| 2021-01-11T23:40:57.301225
| 2017-02-02T18:06:40
| 2017-02-02T18:06:40
| 78,621,847
| 0
| 5
| null | 2017-01-17T19:02:04
| 2017-01-11T09:09:18
|
R
|
UTF-8
|
R
| false
| false
| 1,161
|
r
|
Main.R
|
# TScriptLadies Nadine Drigo and Amy Gex
# 13 January 2017
# NDVI change detection between Landsat 5 (1990) and Landsat 8 (2014).
# Import raster/GDAL libraries
library(raster)
library(rgdal)
# Source the helper functions (down_data, list_data, maskCloudsNA, ndvi,
# change) -- defined in R/NDVIFun.R, not visible in this script.
source('R/NDVIFun.R')
#
# Import datasets: both URLs are defined as variables to be used as arguments
# in the download function below.
# If you change the URLs, make sure the trailing Dropbox flag is dl=1, not dl=0
# (dl=1 forces a direct download instead of the preview page).
A1990 = 'https://www.dropbox.com/s/akb9oyye3ee92h3/LT51980241990098-SC20150107121947.tar.gz?dl=1'
B2014 = 'https://www.dropbox.com/s/i1ylsft80ox6a32/LC81970242014109-SC20141230042441.tar.gz?dl=1'
# Download both archives; the second argument is the year string used to
# build the local file names.
down_data(A1990,'1990')
down_data(B2014,'2014')
# Build raster stacks for both images from the extracted GeoTIFFs
lt5 = list_data('LT5*.tif')
lc8 = list_data('LC8*.tif')
# Mask out clouds (set to NA) in both images
masked5 = maskCloudsNA(lt5)
masked8 = maskCloudsNA(lc8)
# Calculate NDVI. Band indices differ per sensor: red/NIR are layers 5/6 for
# the Landsat 5 stack and 4/5 for the Landsat 8 stack -- TODO confirm these
# match the band ordering produced by list_data().
ndvi5 = overlay(masked5[[5]], masked5[[6]],fun=ndvi)
ndvi8 = overlay(masked8[[4]], masked8[[5]],fun=ndvi)
# Align the extents of the two rasters and compute the NDVI difference
# between the years, then plot the change map.
changeNDVI = change(ndvi5,ndvi8)
plot(changeNDVI)
|
b902c32203236550858456c43d38c1effc650b60
|
6e38b67c8f57b114f11fde7ad1192084d1b37ad0
|
/MiscLectureMaterials/ml_lecture_r_demo_1.R
|
aab1154c31dc48f9fe2c6ff62da6a55a5ebb8356
|
[] |
no_license
|
dcolley99/public-repo
|
0a6c0a03d6b171ad545fd217b371bd64bfba3813
|
5cc6cc57dfc1613945fa3b5861e1c2d40efb54f8
|
refs/heads/master
| 2023-01-07T15:02:25.713989
| 2023-01-03T10:16:37
| 2023-01-03T10:16:37
| 155,693,466
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,811
|
r
|
ml_lecture_r_demo_1.R
|
# Pre-requisite: Install the following packages using the Packages menu in R.
#    RODBC (database connectivity; supplies odbcDriverConnect/sqlQuery)
#    nnet (neural network functions)
#    arules (associative rule functions)
#    e1071 (mixed ML functions)
#    plotly (graphing)
# BUG FIX: odbcDriverConnect() and sqlQuery() below belong to the RODBC
# package; the DBI-based 'odbc' package (loaded originally) has no such
# functions, so this script could not run.
library(RODBC)
library(plotly)
# Set up the connection to the CottageIndustries database
connString = "Driver={Sql Server Native Client 11.0};
Server=localhost;
Initial Catalog=CottageIndustries;
UID=DemoUser;
PWD=LetMeInPlease!"
conn <- odbcDriverConnect(connString)
# Fetch, for each website visit, its duration plus any product sold during it.
# Visits with no sale come through as 'No sale' / quantity 0 via the LEFT JOINs,
# so we can later ask: does visit duration correlate with the chance of a sale?
query = "SELECT v.IPAddress, v.LocationNearestCity, v.ReferrerID, v.VisitDurationS,
ISNULL(p.ProductName, 'No sale') [Product], ISNULL(i.Quantity, 0) [QuantityBought]
FROM dbo.Visitor v
LEFT JOIN dbo.SaleHeader h ON v.VisitorID = h.VisitorID
LEFT JOIN dbo.SaleLineItem i ON h.SaleID = i.SaleID
LEFT JOIN dbo.Product p ON i.ProductID = p.ProductID"
importedData <- sqlQuery(conn, query)
# see a summary of the data
summary(importedData)
# list the data itself
importedData
# now we try to answer the question, given visit duration, how likely is it that
# a sale will result?
# we need two items here - the duration, and the fact (or otherwise) of a sale.
# we need to reduce our dataset, and also codify one of the columns.
# this is because (generally) ML is designed for ordinal data, not nominal data.
# first, slice the two interesting columns out of our data
dataSlice <- importedData[c("VisitDurationS","Product")]
dataSlice
# next, we need to create a code that means 'sale or no sale'
# at this point we are not interested in WHAT was bought, just the fact of it.
# so let's replace every value in col 2 (Product) with 0 if 'no sale', 1 otherwise.
# so 1 indicates an item was sold and 0 indicates there was no sale.
# this is called codification.
dataSlice$Product <- as.character(dataSlice$Product)
dataSlice$Product[dataSlice$Product != "No sale"] <- "1"
dataSlice$Product[dataSlice$Product != "1"] <- "0"
dataSlice$Product <- as.numeric(dataSlice$Product)
dataSlice
# now, this dataset includes one row for every sale item (not sale)
# this is a problem since a sale with > 1 item will be counted more than once
# and this could skew the result.
# we can safely group the data by VisitDurationS since there (should) be no
# case where someone bought a sale item and 'No sale' in the same visit.
# NOTE(review): this aggregate result is printed but never assigned, so the
# de-duplicated data is NOT used below -- dataSlice still contains one row per
# sale item.  Assign the result back to dataSlice if de-duplication is intended.
aggregate(Product ~ VisitDurationS, dataSlice, max)
# let's first take a quick look at the data and see if we can spot any trends
# NOTE(review): ggplot()/geom_bar() come from ggplot2, which is not attached
# here (only plotly is loaded) -- confirm library(ggplot2) runs beforehand.
# Also the plot object is built but never print()ed.
plot <- ggplot(data=dataSlice, aes(x=Product)) + geom_bar()
# OK, we can see there are about 800 sales vs. 480 no-sales,
# or about 62.5% conversion.
noSales <- subset(dataSlice, Product == 0)
sales <- subset(dataSlice, Product == 1)
summary(noSales)
summary(sales)
# mean visit duration for visitors who bought something = 362 seconds
# mean visit duration for visitors who did not buy anything = 264 seconds
# this indicates there could be a significant difference
# and a phenomena worth investigating.
# We can check the correlation using the function 'cor'.
cor(dataSlice$VisitDurationS, dataSlice$Product)
# This results in a low value - but not too low. Typically, correlation below
# 0.2 (on a scale of 0-1) indicates no relationship, but there is a weak
# indicator of a relationship here.
# now let's try and build a linear regression model.
model <- lm(Product ~ VisitDurationS, dataSlice)
model
# this provides two co-efficients which plug into the formula:
#   Product = 0.3694143 + (0.0007716 * VisitDurationS)
# don't forget Product is really the fact or not of a sale, so let's
# re-label it as 'FactOfSale'
# Let's take a look at the statistics.
summary(model)
# The p-values determine how statistically significant the model is, and
# particularly how significant the input variables (VisitDuration, in this case) are.
# We have minute p-values so we can be confident that statistical significance exists.
# Let's graph it out using plot_ly, an alternative to ggplot.
# we create an object to hold our linear regression function, built from the
# fitted model coefficients: FactOfSale = intercept + slope * VisitDurationS
# BUG FIX: the original body had an unbalanced "(" and did not parse.
lrf <- function(VisitDurationS) {
  0.3694143 + (0.0007716 * VisitDurationS)
}
# Tabulate the likelihood of a sale for visit durations of 1..800 seconds.
# lrf() is vectorised, so we build the whole table at once (the original grew
# a data frame row-by-row with rbind, which is O(n^2)); predicted values are
# clamped to the valid probability range [0, 1] exactly as before.
durations <- 1:800
final <- data.frame(
  VisitDurationS = durations,
  LikelihoodOfSale = pmin(pmax(lrf(durations), 0), 1)
)
final
# and finally, plot it out.
plot <- plot_ly(final, x=~VisitDurationS, y=~LikelihoodOfSale, type="scatter", mode="lines")
|
1de17706b0df65332e8c5e2ffc42bb9391d0eaf3
|
fd2a324a9505ed29e6136a06216edce999fa97a1
|
/R/rMVNmixture2.R
|
8450ad46ee45e102cebbfd98dd6df50cf51c7221
|
[] |
no_license
|
cran/mixAK
|
995c88ac9b1f70ab2dac51b4fc1347b9b1356eed
|
adc4c2229d8ad3573e560fd598158e53e5d1da76
|
refs/heads/master
| 2022-09-27T10:45:02.953514
| 2022-09-19T13:46:13
| 2022-09-19T13:46:13
| 17,697,529
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,981
|
r
|
rMVNmixture2.R
|
##
## PURPOSE: Random number generation from the mixture of the multivariate normal distributions
## * mixing performed in C++ code
##
## AUTHOR: Arnost Komarek (LaTeX: Arno\v{s}t Kom\'arek)
## arnost.komarek[AT]mff.cuni.cz
##
## CREATED: 07/11/2008
## 15/03/2017 .C call uses registered routines
##
## FUNCTION: rMVNmixture2
##
## ======================================================================
## *************************************************************
## rMVNmixture2
## *************************************************************
## Draw a random sample of size n from a K-component mixture of (multivariate)
## normal distributions.
##   n      - number of points to sample (must be positive)
##   weight - K mixture weights (non-negative; rescaled to sum to 1 below)
##   mean   - component means: length-K vector (or one-column matrix) for the
##            univariate case, otherwise a K x p matrix
##   Q      - component precisions (vector of length K, or list of K p x p
##            matrices); alternative to Sigma
##   Sigma  - component covariances, same shapes as Q
## Exactly one of Q / Sigma must be supplied; the other is derived by inversion.
## Returns list(x = sampled points (vector for p = 1, n x p matrix otherwise),
##              dens = mixture density evaluated at the sampled points).
## The actual mixing/sampling is delegated to compiled C code in mixAK (.C below).
rMVNmixture2 <- function(n, weight, mean, Q, Sigma)
{
  thispackage <- "mixAK"
  if (n <= 0) stop("n must be positive")
  ## number of components of the mixture + checking the weights
  if (any(weight < 0)) stop("weights must be non-negative")
  K <- length(weight)
  if (K == 0) stop("weight is of zero length")
  weight <- weight/sum(weight)
  ## dimension of the normal distribution + precision/covariance matrix
  ## (a plain vector or a one-column matrix of means signals the univariate case)
  UNIVARIATE <- FALSE
  if (!is.matrix (mean)) #n checking for malformed ev - argument
    UNIVARIATE <- TRUE
  else if (ncol (mean) == 1) UNIVARIATE <- TRUE
  if (UNIVARIATE){ ### univariate
    p <- 1
    if (length(mean) != K) stop(paste("mean must be of length ", K, sep=""))
    if (missing(Sigma)){
      if (missing(Q)) stop("Sigma or Q must be given")
      if (length(Q) != K) stop(paste("Q must be of length ", K, sep=""))
      if (is.list(Q)){
        lQ <- sapply(Q, length)
        if (any(lQ != 1)) stop("all Q elements must be of length 1")
        Q <- unlist(Q)
      }
      if (any(Q <= 0)) stop("all Q elements must be positive")
      ## infinite precision marks a degenerate (point-mass) component
      ## NOTE(review): 'degener' is computed but not used further in this
      ## function as visible here.
      degener <- is.infinite(Q)
      Sigma <- 1/Q
    }else{
      if (length(Sigma) != K) stop(paste("Sigma must be of length ", K, sep=""))
      if (is.list(Sigma)){
        lSigma <- sapply(Sigma, length)
        if (any(lSigma != 1)) stop("all Sigma elements must be of length 1")
        Sigma <- unlist(Sigma)
      }
      if (any(Sigma < 0)) stop("all Sigma elements must be nonnegative")
      ## zero variance marks a degenerate (point-mass) component
      degener <- (Sigma==0)
      Q <- 1/Sigma
    }
  }else{ ### multivariate
    p <- ncol(mean)
    if (nrow(mean) != K) stop(paste("mean must have ", K, " rows", sep=""))
    if (missing(Sigma)){
      if (missing(Q)) stop("Sigma or Q must be given")
      if (is.matrix(Q)){
        if (K != 1) stop("Q must be a list of matrices")
        Q <- list(Q)
      }
      if (length(Q) != K) stop(paste("Q must be of length ", K, sep=""))
      Sigma <- list()
      QLT <- numeric(0)
      ## invert each precision matrix via its Cholesky factor; QLT collects
      ## the packed lower triangles of the precisions (passed as 'Li' below)
      for (j in 1:K){
        if (!is.matrix(Q[[j]])) stop("all elements of Q must be matrices")
        if (nrow(Q[[j]]) != p | ncol(Q[[j]]) != p) stop(paste("all elements of Q must be squared matrices with ", p, " rows and columns", sep=""))
        Sigma[[j]] <- chol2inv(chol(Q[[j]]))
        QLT <- c(QLT, Q[[j]][lower.tri(Q[[j]], diag=TRUE)])
      }
    }else{
      if (is.matrix(Sigma)){
        if (K != 1) stop("Sigma must be a list of matrices")
        Sigma <- list(Sigma)
      }
      if (length(Sigma) != K) stop(paste("Sigma must be of length ", K, sep=""))
      Q <- list()
      QLT <- numeric(0)
      for (j in 1:K){
        if (!is.matrix(Sigma[[j]])) stop("all elements of Sigma must be matrices")
        if (nrow(Sigma[[j]]) != p | ncol(Sigma[[j]]) != p) stop(paste("all elements of Sigma must be squared matrices with ", p, " rows and columns", sep=""))
        Q[[j]] <- chol2inv(chol(Sigma[[j]]))
        QLT <- c(QLT, Q[[j]][lower.tri(Q[[j]], diag=TRUE)])
      }
    }
  }
  ## sample
  if (p == 1){
    ## univariate sampler; note the standard deviations sqrt(Sigma) are passed
    SAMPLE <- .C(C_rmixNorm_R, x = double(n),
                 dens = double(n),
                 cumw = double(K),
                 K = as.integer(K),
                 w = as.double(weight),
                 mu = as.double(mean),
                 sigma = as.double(sqrt(Sigma)),
                 npoints= as.integer(n),
                 PACKAGE=thispackage)
    x <- SAMPLE$x
  }else{
    SAMPLE <- .C(C_rmixMVN_R, x = double(p*n),
                 dens = double(n),
                 w.dets = as.double(weight),
                 cumw = double(K),
                 Li = as.double(QLT),
                 work = double(p),
                 err = integer(1),
                 K = as.integer(K),
                 mu = as.double(t(mean)),
                 nx = as.integer(p),
                 npoints= as.integer(n),
                 PACKAGE=thispackage)
    if (SAMPLE$err) stop("Something went wrong.")
    ## the C code returns the sample point-by-point => fill the matrix by rows
    x <- matrix(SAMPLE$x, nrow=n, ncol=p, byrow=TRUE)
  }
  return(list(x=x, dens=SAMPLE$dens))
}
|
171b6b5efac764377659c6a7d70ea0346429c19d
|
35d453ab1756e5b648e9d8d7d89cdfd33c4f8f38
|
/R/compareTwoPlayerAves.r
|
1cd79cc7a9ba76c3a37a018544fa024561414cc6
|
[] |
no_license
|
nickzani/Cricinfo
|
1c4b099aa4e67347a6d8ce5ecac606677bd7c989
|
6d6bb3489f6690c8d57ae38bbae11614ae6019bf
|
refs/heads/master
| 2021-01-10T06:28:02.987967
| 2016-02-29T19:34:33
| 2016-02-29T19:34:33
| 36,493,623
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,335
|
r
|
compareTwoPlayerAves.r
|
#' compareTwoPlayerAves
#'
#' Uses the getPlayerInfo function to return a dataframe with the two player averages.
#' (Doc fix: the @param names/order below now match the actual signature.)
#' @param Player1Name Name of the first player
#' @param Player1Country Country of the first player
#' @param Player2Name Name of the second player
#' @param Player2Country Country of the second player
#' @return A data frame with one row per player: a PlayerName column bound to
#'   the batting-average columns returned by getPlayerInfo.
#' @export
#' @examples
#' compareTwoPlayerAves("Stuart Broad", "England", "Alastair Cook", "England")
compareTwoPlayerAves <- function(Player1Name,Player1Country,Player2Name,Player2Country){
  # Fetch cleaned batting averages for one player.
  fetch_aves <- function(name, country) {
    getPlayerInfo(PlayerSearch = name, PlayerCountry = country, PlayerNumber = "",
                  BattingBowling = "Batting", ViewType = "Ave", Clean = "Y")
  }
  # A 10-column result lacks the BF/SR/No4s/No6s columns of the wider layout;
  # pad them with NA so the two frames can be row-bound.
  pad_missing_cols <- function(df) {
    df$BF <- NA
    df$SR <- NA
    df$No4s <- NA
    df$No6s <- NA
    df
  }
  df1 <- fetch_aves(Player1Name, Player1Country)
  df2 <- fetch_aves(Player2Name, Player2Country)
  # Only pad when the column counts differ (same de-duplicated logic as the
  # original three-branch if/else chain).
  if (length(names(df1)) != length(names(df2))) {
    if (length(names(df1)) == 10) {
      df1 <- pad_missing_cols(df1)
    } else if (length(names(df2)) == 10) {
      df2 <- pad_missing_cols(df2)
    }
  }
  colBind <- data.frame(PlayerName = c(Player1Name, Player2Name), stringsAsFactors = FALSE)
  cbind(colBind, rbind(df1, df2))
}
|
9edd335c01674c1a95d215847270f1b10b721a8f
|
7fe3e5d85e046a4777b741484bd9a372387218f3
|
/knn.r
|
e310403005caaffe1d1a3da9d60fcb8de6f6658a
|
[
"MIT"
] |
permissive
|
CodeMySky/KDD99
|
3485bf6b6e6efc78165162f73f9b564fae8f2192
|
3c1bd6f0cb967a877bad7c7ea9fb05c555c03ed6
|
refs/heads/master
| 2016-08-07T08:01:11.772513
| 2015-05-17T04:33:20
| 2015-05-17T04:33:20
| 33,835,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
knn.r
|
# 1-nearest-neighbour classification after an SVD-based dimensionality reduction.
# NOTE(review): knn() comes from the 'class' package, which is not loaded here
# (only MASS is) -- confirm library(class) is attached elsewhere.
library('MASS')
x_train_n = data.matrix(x_train)
x_test_n = data.matrix(x_test)
# u [44461 * 41]
u = svd(x_train_n)$u
# p_c = [20 * 41]
# NOTE(review): this takes the first 20 *rows* of U (i.e. 20 observations),
# not the top-20 right singular vectors; a conventional PCA projection would
# use svd(x_train_n)$v[, 1:20] instead -- TODO confirm this is intentional.
prominent_component = u[1:20,]
# invp [41 * 20] -- Moore-Penrose pseudo-inverse (MASS::ginv)
invp = ginv(prominent_component)
# x_train_n = [44461 * 41]; project train and test into the reduced space
x_train_n_pca = x_train_n %*% invp
x_test_n_pca = x_test_n %*% invp
y_hat = knn(x_train_n_pca, x_test_n_pca, y_train, use.all = FALSE, k=1)
|
3377f2bc5a8acdeeefc0ad85a2691e9a0b3351b1
|
be5d6168799188c437cf5448d9d606bad24fa199
|
/src/Update_Tankbeurt.R
|
410d18d7526ef3928f65a81fd97c06c2c7b48abd
|
[
"MIT"
] |
permissive
|
SanderDevisscher/monies
|
61f4a16ed99036e677855142efa5b0c8732f802e
|
a29492a13f79c277b884078c798241e8642c807c
|
refs/heads/master
| 2022-11-25T10:14:47.918610
| 2022-02-20T13:40:33
| 2022-02-20T13:40:33
| 246,482,305
| 0
| 0
|
MIT
| 2020-03-11T06:01:03
| 2020-03-11T05:25:01
|
R
|
UTF-8
|
R
| false
| false
| 2,840
|
r
|
Update_Tankbeurt.R
|
# Update_Tankbeurt (Dutch: "refuel update"): simulate the heating-oil
# ("Mazout") tank level forward in time, day by day, to estimate the date on
# which it will reach the refill threshold.  Runs 10 simulation iterations
# against a Google Sheet (which randomises the daily-consumption pivot on each
# write) and writes the median/min/max predicted refill date back to the sheet.
library(tidyverse)
library(googlesheets4)
# Google account used for sheet access comes from the environment.
bo_email <- Sys.getenv("bo_email")
gs4_auth(email = bo_email)
# Refresh the current tank level in the sheet before reading it.
source("./src/Update_Level.r")
update_level()
# Current level (V10:V11) and average daily consumption (V2:V3) from the sheet.
Data <- read_sheet("1YLYWYwPsXXAeTEFz1Mpi2tV6J13sXUveIIKY8HBDncI", sheet = "Mazout", range = "V10:V11")
avg <- read_sheet("1YLYWYwPsXXAeTEFz1Mpi2tV6J13sXUveIIKY8HBDncI", sheet = "Mazout", range = "V2:V3")
current_level <- Data$...1[1]
current_date <- strptime(as.character(Sys.Date()), "%Y-%m-%d", tz = "UTC")
# NOTE(review): [[1]] on a POSIXlt extracts the first element; behaviour
# differs between R versions (component vs. datetime) -- TODO confirm.
current_date <- current_date[[1]]
avg <- nth(avg, 1)
i <- 1
# 'log' collects one predicted refill date per iteration (shadows base::log).
log <- data.frame(1:10)
colnames(log) <- "datum"
repeat{
# Re-read the daily consumption ("Verbruik") pivot; it changes after each
# sheet_write below, which is what randomises the iterations.
Verbruik_sim <- read_sheet("1YLYWYwPsXXAeTEFz1Mpi2tV6J13sXUveIIKY8HBDncI", sheet = "Pivot Table 2", range = "J:P", col_types = "Tnnnnnn")
#Verbruik_sim <- Verbruik_sim %>%
#  mutate(Maand = format(Dag, "%m"),
#         Dag2 = format(Dag, "%d"),
#         Datum = as.Date(paste0(format(Sys.Date(), "%Y"), "-", Maand, "-", Dag2)))
#Verbruik_sim$Datum <- strptime(as.character(Verbruik_sim$Datum), "%Y-%m-%d", tz = "UTC")
#summary(Verbruik_sim$Datum)
new_level <- current_level
new_date <- current_date + 24*3600
# Stop the simulation once the tank drops to 100 units.
threshold <- 100
while(new_level > threshold){
# Rebase the pivot's day/month onto the simulated year so dates compare.
jaar <- format(new_date, "%Y")
Verbruik_sim <- Verbruik_sim %>%
mutate(Maand = format(Dag, "%m"),
Dag2 = format(Dag, "%d"),
Datum = as.Date(paste0(jaar, "-", Maand, "-", Dag2)))
Verbruik_sim$Datum <- strptime(as.character(Verbruik_sim$Datum), "%Y-%m-%d", tz = "UTC")
Verbruik_sim$Datum <- as.Date(paste0(jaar,"-",format(Verbruik_sim$Datum, "%m"),"-",format(Verbruik_sim$Datum, "%d")))
# Daily consumption for the simulated date (first match wins).
verbruik <- Verbruik_sim$Verbruik[Verbruik_sim$Datum == new_date]
verbruik <- verbruik[1]
# Feb 29 has no pivot entry in non-leap years; fall back to the average.
if(format(new_date, "%m-%d") == "02-29"){
verbruik <- avg
}
# Any other missing date also falls back to the average consumption.
if(is.na(verbruik)){
jaar <- format(new_date, "%Y")
verbruik <- avg
}
print(verbruik)
new_level <- new_level - verbruik
new_date <- new_date + 24*3600
print(new_level)
print(new_date)
}
# Record this iteration's predicted refill date.
log$datum[i] <- new_date[[1]]
new_date <- as.data.frame(new_date)
#maintain structure
new_date$max <- ""
new_date$min <- ""
new_date$diff <- paste0("running iteration number: [",i+1,"]")
#Trigger randomisation in gsheets
sheet_write(new_date, sheet = "Update_Mazout", ss = "1YLYWYwPsXXAeTEFz1Mpi2tV6J13sXUveIIKY8HBDncI")
i <- i + 1
if (i == 11){
print("repeat loop ends");
break
}
}
log$datum2 <- as.Date.POSIXct(log$datum, tz = "UTC", origin = "1900-01-01")
#Average of 10 iterations
# NOTE(review): 'mean' below is actually the median of the 10 predicted dates.
log2 <- log %>%
summarise(mean = median(datum2),
max = max(datum2),
min = min(datum2),
diff = paste0(difftime(max, min, "days"), " dagen"))
sheet_write(log2, sheet = "Update_Mazout", ss = "1YLYWYwPsXXAeTEFz1Mpi2tV6J13sXUveIIKY8HBDncI")
|
5a8545f13b95ca89d6b4ea5744137b8e8047456c
|
569a4a4f753c2a77b54d57a471a39158862139c0
|
/matchupSummary.R
|
c79977fc470ef05d097d1bbf451aa70cc185789b
|
[] |
no_license
|
karigunnarsson/midMatchup
|
e75684a2ab9efc8d4982ffab5057673b16fe518a
|
05345cc0963199735bd2c970d51bae686b056737
|
refs/heads/master
| 2021-06-13T13:47:36.093903
| 2017-04-14T00:59:28
| 2017-04-14T00:59:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,192
|
r
|
matchupSummary.R
|
#######
# Build the final matchup datamart: for every (hero1, hero2) pairing compute
# the win ratio plus the gold and XP differences between the heroes.  The
# output (up to 113x113 hero pairs) is saved as input for the Shiny app.
#######
library(plyr)
library(dplyr)

combinedFinal <- readRDS("combinedFinal.RDS")
heroList <- unique(combinedFinal$heroID)

# For each hero, pair its per-match stats with the stats of every opposing
# hero in the same match.  Building a list of frames and binding them once
# replaces the original grow-by-rbind.fill loop, which copied the accumulated
# frame on every iteration (O(n^2)); all pieces share the same columns, so
# bind_rows() yields the identical result.
pairings <- lapply(heroList, function(i) {
  heroGames <- select(subset(combinedFinal, combinedFinal$heroID == i),
                      matchID, radiantWin, heroID, gold, xp, side)
  # A hero won when its side matches the winning side of the match.
  heroGames$win <- ifelse(heroGames$radiantWin == 1 & heroGames$side == "Radiant", 1,
                          ifelse(heroGames$radiantWin == 0 & heroGames$side == "Dire", 1, 0))
  heroGamesClean <- heroGames[, c(1, 3, 4, 5, 7)]
  colnames(heroGamesClean) <- c("matchID", "hero1", "gold1", "xp1", "win")
  otherHeroGames <- subset(combinedFinal, combinedFinal$heroID != i)
  otherHeroGamesClean <- otherHeroGames[, c(1, 3, 4, 5)]
  colnames(otherHeroGamesClean) <- c("matchID", "hero2", "gold2", "xp2")
  merge(heroGamesClean, otherHeroGamesClean, by = "matchID")
})
totalMerged <- bind_rows(pairings)

# Now to summarize the data into the final dataset
sumData <- totalMerged %>%
  group_by(hero1, hero2) %>%
  summarise(meanXP1 = mean(xp1), meanXP2 = mean(xp2),
            meanGold1 = mean(gold1), meanGold2 = mean(gold2),
            winRate = sum(win)/n(), numGames = n())
# Drop pairings with too few games for the estimates to be meaningful.
sumDataClean <- subset(sumData, sumData$numGames>40)

# Add hero names to make more readable
heroNames <- read.csv2("heronames.csv")
colnames(heroNames) <- c("hero_id", "heroName1")
name1 <- merge(sumDataClean, heroNames, by.x = "hero1", by.y = "hero_id", all.x = TRUE)
colnames(heroNames) <- c("hero_id", "heroName2")
name2 <- merge(name1, heroNames, by.x = "hero2", by.y = "hero_id", all.x = TRUE)
name2$XPDiff <- name2$meanXP1-name2$meanXP2
name2$goldDiff <- name2$meanGold1-name2$meanGold2
finalData <- select(name2, heroName1, heroName2, XPDiff, goldDiff, winRate)
finalData <- arrange(finalData, heroName1)

# Round numbers to make pretty
finalData$XPDiff <- round(finalData$XPDiff)
finalData$goldDiff <- round(finalData$goldDiff)
finalData$winRate <- round(finalData$winRate, digits=3)
saveRDS(finalData, "MidHeroShiny/midData.rds")

# Overall most successful hero (regardless of matchup) measured by XP, gold
# and win rate.
topHero <- totalMerged %>%
  group_by(hero1) %>%
  summarise(meanXP = mean(xp1 - xp2),
            meanGold = mean(gold1 - gold2),
            winRate = sum(win)/n(),
            numGames = n())
topHeroClean <- subset(topHero, topHero$numGames > 200)
topHeroClean <- select(merge(topHeroClean, heroNames, by.x = "hero1", by.y = "hero_id", all.x = TRUE),
                       heroName2, meanXP, meanGold, winRate, numGames)
colnames(topHeroClean) <- c("hero", "xpDiff", "goldDiff", "winPct", "numGames")
# Round numbers to make pretty
topHeroClean$xpDiff <- round(topHeroClean$xpDiff)
topHeroClean$goldDiff <- round(topHeroClean$goldDiff)
topHeroClean$winPct <- round(topHeroClean$winPct, digits=3)
|
6a88e92d5f2c7a56f895c26507a62958e9f82ecb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/labstatR/examples/gen.vc.Rd.R
|
d254c0c6abc35e8f5e0c249e7717860b4af24f94
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
gen.vc.Rd.R
|
library(labstatR)
### Name: gen.vc
### Title: Simula una variabile casuale discreta
### Aliases: gen.vc
### Keywords: distribution
### ** Examples
# Support points and probabilities of the discrete random variable.
x <- c(-2,3,7,10,12)
p <- c(0.2, 0.1, 0.4, 0.2, 0.1)
# Draw 1000 simulated values.  replicate(..., simplify = FALSE) + unlist() is
# exactly equivalent to the original c()-append loop whatever length gen.vc()
# returns, but avoids copying the growing vector on every iteration (O(n^2)).
y <- unlist(replicate(1000, gen.vc(x, p), simplify = FALSE))
# Empirical relative frequencies; should approximate p for large samples.
table(y)/length(y)
|
111ffb46f94173738d1a64246af693897fa0dec0
|
6cc6a8f3cfae2e25a86b7a85b78896357f1bba0f
|
/Code/dataooopen.R
|
92c9e4875d16567234426a16708de69ab0dd402b
|
[] |
no_license
|
RyanYaNg7/dataopencitadel
|
dc2abe859bb4ada07b089ff67b29609c2c94e5e9
|
ae91d217bf0bc45fc1d696f8c08a47041218ce0f
|
refs/heads/master
| 2020-04-02T08:46:09.341935
| 2018-10-23T04:04:11
| 2018-10-23T04:04:11
| 154,260,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,098
|
r
|
dataooopen.R
|
# load data
library(readr)
library(dplyr)
library(stringr)
library(data.table)
# Raw CSVs live under Desktop/dataopen; each is converted to a data.table so
# the := / by= syntax used throughout the rest of the script works.
chemicals <- read_csv("Desktop/dataopen/chemicals.csv") %>% as.data.table()
industry_occupation <- read_csv("Desktop/dataopen/industry_occupation.csv") %>% as.data.table()
water_usage <- read_csv("Desktop/dataopen/water_usage.csv") %>% as.data.table()
water_usage_dictionary <- read_csv("Desktop/dataopen/water_usage_dictionary.csv") %>% as.data.table()
earnings <- read_csv("Desktop/dataopen/earnings.csv") %>% as.data.table()
# exploring fips issue
names(industry_occupation)
# Flag which non-missing fips codes in data$fips are exactly 5 characters long.
# Returns an unnamed list of logicals (callers unlist() the result), matching
# the original lapply-based interface, but computes the flags in one
# vectorised step instead of one closure call per element.  nchar() on the
# character form is equivalent to the original stringr::str_length() here
# (NAs are filtered out first by complete.cases()).
digit5 <- function(data) {
  fips <- data$fips[complete.cases(data$fips)]
  as.list(nchar(as.character(fips)) == 5)
}
digit5(chemicals) %>% unlist() %>% sum() #number of fips with 5 digits
index <- digit5(chemicals) %>% unlist()
chemicals$fips[index] # so most of the fips are 5 digits
# merge data
# NOTE(review): water_usage is merged in twice here -- first by fips+year,
# then again by fips only; the second merge likely meant a different table
# (chemicals?), and 'merged' is never used afterwards -- TODO confirm intent.
merged <- merge(water_usage, industry_occupation, by = c('fips', 'year'), all=TRUE)
merged <- merge(merged, water_usage, by='fips', all=TRUE)
# exploring chemicals
# number of contaminated cities
chemicals[contaminant_level=='Greater than MCL', .N] #13545
# create a flag for every fips if there is a city that has contaminant_level greater than MCL
chemicals[, contaminated := as.integer(any(contaminant_level=='Greater than MCL')), by=c('fips', 'year')]
# number of counties with contaminated at least one city
chemicals[contaminated == 1L, .(uniqueN(fips))]
# extract the counties by year
years <- chemicals[contaminated == 1L, .(sort(unique(year)))]
# assign a contaminated regions population and its count of contaminated cities
chemicals[contaminant_level=='Greater than MCL', populated_affected := sum(pop_served), by=c('fips', 'year')]
chemicals[contaminant_level=='Greater than MCL', cities_contanimanted := .N, by=c('fips', 'year')]
# population affected for each region in each year
chemicals[complete.cases(populated_affected),unique(populated_affected), by=c('fips', 'year')]
population_affected <- chemicals[complete.cases(populated_affected),unique(populated_affected), by=c('fips', 'year')]
# Normalise 4-digit fips codes by left-padding with a zero to 5 digits.
population_affected[, fips := as.character(fips)]
population_affected[str_length(population_affected$fips)==4, fips := str_pad(fips,5,'left','0')]
write_csv(population_affected,"Desktop/dataopen/population_affected.csv")
# cities affected for each region in each year
chemicals[complete.cases(populated_affected),unique(cities_contanimanted), by=c('fips', 'year')]
city_affected <- chemicals[complete.cases(cities_contanimanted),unique(cities_contanimanted), by=c('fips', 'year')]
city_affected[, fips := as.character(fips)]
city_affected[str_length(city_affected$fips) == 4, fips:= str_pad(fips,5,'left', '0')]
write_csv(city_affected,"Desktop/dataopen/city_affected.csv")
# create time series for population affected (wide: one column per year)
population_affected_ts <- dcast(population_affected, fips ~year, value.var=c('V1'))
population_affected_ts <- ts(population_affected_ts)
# industry_occupation and earnings to see industry size
summary(industry_occupation)
summary(earnings)
names(industry_occupation) %in% names(earnings) %>% sum()
names(industry_occupation)
names(earnings)
# Subset industry_occupation to the shared sector columns; the "...1" suffix
# in the renamed copy distinguishes occupation counts from earnings.
industry_occupation_sub <- industry_occupation[,.(fips, agriculture, construction, manufacturing, wholesale_trade,
retail_trade, transport_utilities, information, finance_insurance_realestate,
prof_scientific_waste, edu_health, arts_recreation, public_admin,
other)]
setnames(x = industry_occupation_sub, old=names(industry_occupation_sub), new = c('fips1', 'agriculture1', 'construction1', 'manufacturing1', 'wholesale_trade1',
'retail_trade1', 'transport_utilities1', 'information1', 'finance_insurance_realestate1',
'prof_scientific_waste1', 'edu_health1', 'arts_recreation1', 'public_admin1',
'other1'))
# Align earnings column names with the occupation sector names.
earnings_sub <- earnings[,.(fips, agriculture=agri_fish_hunt, construction, manufacturing, wholesale_trade,
retail_trade, transport_warehouse_utilities, information, finance_insurance_realestate=fin_ins_realest,
prof_scientific_waste=prof_sci_tech, edu_health=total_edu_health_social,
arts_recreation=arts_ent_rec, public_admin=pub_admin, other=other_ser, year)]
industry_occupation_sub$fips %>%uniqueN()
earnings_sub$fips %>% uniqueN()
# NOTE(review): industry_occupation_sub was renamed to 'fips1' above, so this
# merge by 'fips' can only use earnings_sub's key -- TODO verify the result.
industry_merged <- merge(industry_occupation_sub, earnings_sub, by='fips', all=TRUE)
write_csv(earnings_sub, "Desktop/dataopen/earnings_sub.csv")
# chem: total chemical value per county-year, long and wide forms
chem_val <- chemicals[, sum(value), by=c('fips', 'year')]
chem_val_ts <- chemicals[, sum(value), by=c('fips', 'year')] %>% dcast(fips ~ year, value.var='V1')
chem_val[, fips := as.character(fips)]
chem_val[str_length(chem_val$fips)==4, fips := str_pad(fips,5,'left','0')]
write_csv(chem_val, "Desktop/dataopen/chem_val.csv")
manufacturing_ts <- dcast(earnings, fips ~ year, value.var = 'manufacturing')
|
59425a5c682f32c8c6f25f567d3c3d510752a246
|
d1f039f7a355c4a004037de196106a2798c53aee
|
/Big5VarianceAnalysis/VarianceAnalysis.R
|
a4dbfee7aacf18332015d72683a25fe9c67e3287
|
[] |
no_license
|
PetraBurdejova/SPL-1
|
b7d59f9c70fa9032e2afc7be80563efbae3b3b4d
|
09e68aa0a8c7e89819bfae6c1a30ea4bf7834d8e
|
refs/heads/master
| 2020-03-28T14:26:16.105598
| 2018-08-10T15:12:02
| 2018-08-10T15:12:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,470
|
r
|
VarianceAnalysis.R
|
# Load the prepared Big5/Grit dataset; getCombinedData() is defined in the
# sourced preparation script.
source("SPL_Big5GritDataPreparation/SPL_Big5GritDataPreparation.R")
DATA = getCombinedData(FALSE)
#Do the analysis of variance
aov1f = aov(Agree~age,data=DATA) #an example of analysis of variance for Agree ~age
summary(aov1f) #show the summary table
#Creating one-way ANOVA p-values for the personality traits (columns 9 to the
#end of DATA, i.e. 9:13 for the standard layout) against each independent
#variable: gender, age, hand, race.  Returns a matrix of Pr(>F) values with
#one row per trait and one column per independent variable.
anova_multi <- function(DATA,set1, set2, set3, set4){
  #BUG FIX: the original computed the loop bound as
  #  length(DATA[i:max(length(DATA))])
  #which read the *global* variable `i` (undefined inside this function), so
  #the loop bounds depended on leftover workspace state and gave the wrong
  #count.  The trait columns start at column 9 and run to the last column.
  trait_cols <- 9:length(DATA)
  aov_test <- matrix(nrow = length(trait_cols), ncol = 4)
  rownames(aov_test) <- names(DATA)[trait_cols]
  colnames(aov_test) <- c('Test Stat - Gender',"Test Stat - Age","Test Stat - Hand","Test Stat - Race")
  for(i in trait_cols){
    #first p-value of the one-way ANOVA table for trait ~ predictor
    aov_test[i-8,1] <- anova(lm(DATA[,i] ~ set1))$"Pr(>F)"[1]
    aov_test[i-8,2] <- anova(lm(DATA[,i] ~ set2))$"Pr(>F)"[1]
    aov_test[i-8,3] <- anova(lm(DATA[,i] ~ set3))$"Pr(>F)"[1]
    aov_test[i-8,4] <- anova(lm(DATA[,i] ~ set4))$"Pr(>F)"[1]
  }
  return(aov_test)
}
#DATA run: p-value matrix of all five traits against the four predictors
v = anova_multi(DATA,DATA[,2],DATA[,3],DATA[,4],DATA[,5])
#add a variable: two-way ANOVA with age and gender as factors
aov2f = aov(Openess~age+gender,data=DATA) #do the analysis of variance
summary(aov2f) #show the summary table
#print(model.tables(aov2f,"means"),digits=3)
#report the means and the number of subjects/cell - still in thinking..
#attach(DATA)
#interaction.plot(Neuro,gender,age)   #another way to graph the means
#detach(DATA)
|
520a04d15e7476f898b011316a82012ab8d3ce4b
|
de7d600029c0a14195b033df58ea6506ba2012f1
|
/plot1.R
|
6c2c1fad202b8d59a3b1f1ed8b6b853e1b6f6874
|
[] |
no_license
|
mmottahedi/ExData_Plotting1
|
76e781f5340efabce15a73c83cbda881053e2d84
|
cca9f65df1eb45b280938b075fc4d5b148c071de
|
refs/heads/master
| 2021-01-17T15:50:25.295753
| 2015-05-10T03:27:16
| 2015-05-10T03:27:16
| 35,355,114
| 0
| 0
| null | 2015-05-10T03:14:09
| 2015-05-10T03:14:09
| null |
UTF-8
|
R
| false
| false
| 468
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power from the household power dataset.
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable; prefer relative paths.
setwd("/home/mfc/cwd/data.science/EDA/Project1")
# NOTE(review): data.table is loaded but base read.table() is used below.
library(data.table)
# "?" marks missing values in the source file.
data <- read.table("/home/mfc/cwd/data.science/EDA/Project1/data.txt",sep = ";",header = T,na.strings = "?")
# Combine the Date and Time columns into a single timestamp column.
date <- strptime(paste(data$Date,data$Time),"%d/%m/%Y %H:%M:%S")
data <- cbind(date,data)
png(filename="plot1.png",width =480 , height = 480)
# NOTE(review): the hard-coded slice 1:4321 presumably limits the histogram to
# the first few days of observations -- TODO confirm it matches the intended
# 2007-02-01/2007-02-02 window rather than filtering by date.
hist(data$Global_active_power[1:4321],col="red", freq=T,
xlab = "Global Active Power (killowatts)",main="Global Active Power")
dev.off()
|
074c9a9c4562b18fd295429591a2c41ebcccdbdd
|
791c7e3f6a37d11f73c5b125f693e4bccc0d76cf
|
/src/BrowsingHistoryAnalysis/feature_extraction_for_model_generation.r
|
d683f13652baf1e9b02d4fbc3ad02109470acf32
|
[] |
no_license
|
akshaynayak/Behavioural-Targeting-Tool
|
64757a9f458a933a2eabcf628e94d2c81c651a53
|
fa94b1b90501ac4703398d301e76f9919007d1ce
|
refs/heads/master
| 2016-08-12T06:45:02.702486
| 2016-01-24T21:05:10
| 2016-01-24T21:05:10
| 50,301,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,023
|
r
|
feature_extraction_for_model_generation.r
|
# Build a document-term matrix from page titles in the browsing history for
# later model training.  The tm_map() transformations below are applied in
# sequence and their order matters (e.g. whitespace is stripped both before
# and after the word-level filters).
library(tm)
# Expect four columns in the browsing-history frame; 'title' feeds the corpus.
colnames(unskewed_data)<-c("title","no","label","url")
colnames(unskewed_data)
myCorpus = Corpus(VectorSource(unskewed_data$title))
#class(myCorpus)
#myCorpus = Corpus(VectorSource(sample))
myCorpus=tm_map(myCorpus,stripWhitespace)
myCorpus = tm_map(myCorpus, content_transformer(tolower))
myCorpus = tm_map(myCorpus, removePunctuation)
myCorpus = tm_map(myCorpus, removeNumbers)
myCorpus = tm_map(myCorpus, removeWords, stopwords("english"))
myCorpus=tm_map(myCorpus,stripWhitespace)
#myCorpus=tm_map(myCorpus,stemDocument)
#write.csv(file="chrome_history_wbt.csv",x=chrome_history)
#writeCorpus(myCorpus)
#stemmedcol=wordStem(training$Title)
# PlainTextDocument conversion is required before DocumentTermMatrix in
# this version of tm.
myCorpus <- tm_map(myCorpus, PlainTextDocument)
#myDTM = DocumentTermMatrix(corpus_clean, control = list(minWordLength = 1))
myDTM=DocumentTermMatrix(myCorpus)
#mydtm_hist=DocumentTermMatrix(tweets.text.corpus)
# NOTE(review): library(tm) is loaded a second time here -- harmless but
# redundant.  slam::col_sums handles the sparse DTM efficiently.
library(tm)
library(slam)
colTotals <- col_sums(myDTM)
#colTotals<-col_sums(dtMatrix)
#colTotals
# Keep only terms that occur at least once across the corpus.
dtm2 <- myDTM[,which(colTotals >0)]
#dim(dtm2)
|
9eb4b3c1a655f93c6c3902144cdb04b1686dba6d
|
34c1b039e379665053ec1b439c531d130731b0ba
|
/plot4.R
|
001ef677dc20558d91cd1c9ff2c338a2be31a18c
|
[] |
no_license
|
iambritishdaniel/ExploratoryDataAnalysis_Assignment2
|
6839df0fc5ae2b6c6cf4fca974567ff090b4fe94
|
b39c544b1f99a5369717fcbd073599f456615894
|
refs/heads/master
| 2021-01-16T19:16:53.387278
| 2014-06-19T22:56:09
| 2014-06-19T22:56:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,419
|
r
|
plot4.R
|
# Plot 4: PM2.5 emissions from coal-combustion sources over time, faceted by
# EPA sector, with a linear trend per facet.
# require() only returns FALSE on failure; library() errors immediately, which
# is what we want for a hard dependency.
library(ggplot2)
# check for data directory and create if necessary
if (!file.exists("data")) {
  dir.create("data")
}
# check for data file else download and extract archive to data directory
dataFile1 = "data/Source_Classification_Code.rds"
dataFile2 = "data/summarySCC_PM25.rds"
if (!file.exists(dataFile1) || !file.exists(dataFile2)) {
  url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip";
  download.file(url, destfile="data/data.zip", method="curl")
  unzip("data/data.zip", exdir="data")
}
# read data: src = source classification codes, emi = emission records
src <- readRDS(dataFile1)
emi <- readRDS(dataFile2)
# ensure we are only dealing with PM25-PRI emissions data
emi <- emi[emi$Pollutant=='PM25-PRI',]
# open image for writing
png("plot4.png", width=800, height=600)
# define a color palette to use for all charts
cPalette <- c("#ff0000",
              "#00ff00",
              "#0000ff",
              "#ff00ff",
              "#ff8000",
              "#33ffff",
              "#aacc00")
# limit emissions to those from coal combustion sources
# BUG FIX: the original indexed an undefined object `sources`; the source
# classification table is loaded above as `src`.
coalEmi <- emi[emi$SCC %in% src[grep("coal", src$EI.Sector, ignore.case=TRUE), 1], ]
# merge sector name with emission data
srcNames <- src[, c(1, 4)]
coalEmi <- merge(coalEmi, srcNames, by.x = "SCC", by.y = "SCC")
# define a function to rename our facets with something easier to read
facetLabeller <- function(var,val){
  val <- as.character(val)
  if (var=="EI.Sector") {
    val[val=="Fuel Comb - Electric Generation - Coal"] <- "Electric Generation"
    val[val=="Fuel Comb - Comm/Institutional - Coal"] <- "Comm./Institutional"
    val[val=="Fuel Comb - Industrial Boilers, ICEs - Coal"] <- "Industrial Boilers, ICEs"
  }
  return(val)
}
# create faceted plot by sector
plot4 <- (ggplot(coalEmi, aes(x=year, y=Emissions))
          + geom_point(aes(color=EI.Sector), size=8, alpha=0.2, shape=18)
          + facet_grid(.~EI.Sector, labeller=facetLabeller)
          + geom_smooth(size=1, col="black", linetype=1, method="lm")
          + labs(title="Emissions from Coal-Combustion Sources",
                 x="Year",
                 y="Emissions (Tons)")
          + theme_bw()
          + theme(legend.position="none")
          + scale_colour_manual(values=cPalette)
          + scale_x_continuous(breaks=c(seq(1999, 2008, by=3))))
print(plot4)
# close graphics device
dev.off()
|
ed436979c017c02aa9fa4e66aba3681ee1d12fbd
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/tclust/R/R_restr.eigen.R
|
73359c2ec5346ca6d9fa4930f4f4c38514269e07
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 10,225
|
r
|
R_restr.eigen.R
|
## Enforce the tclust eigenvalue-ratio restriction.
##
## Given a p x K matrix of per-cluster eigenvalues, return eigenvalues
## truncated into [m, restr.fact * m] so that the ratio between the largest
## and smallest restricted eigenvalue is at most restr.fact.  The truncation
## level m is chosen by maximising a cluster-size-weighted normal
## log-likelihood term over a finite set of candidate values.
##
## autovalues  p x K matrix of eigenvalues, one column per cluster
## ni.ini      length-K vector of current cluster sizes (may be fractional)
## restr.fact  maximal allowed eigenvalue ratio (>= 1)
## zero.tol    numerical tolerance used when testing against zero
## Returns a p x K matrix of restricted eigenvalues.
.restr2_eigenv <- function (autovalues, ni.ini, restr.fact, zero.tol)
{
ev <- autovalues
###### function parameters:
###### ev: matrix containin eigenvalues #n proper naming - changed autovalue to ev (eigenvalue)
###### ni.ini: current sample size of the clusters #n proper naming - changed ni.ini to cSize (cluster size)
###### factor: the factor parameter in tclust program
###### init: 1 if we are applying restrictions during the smart inicialization
###### 0 if we are applying restrictions during the C steps execution
###### some inicializations
if (!is.matrix (ev)) #n checking for malformed ev - argument (e.g. a vector of determinants instead of a matrix)
if (is.atomic (ev)) #n
ev <- t (as.numeric (ev)) #n
else #n
ev <- as.matrix (ev) #n
stopifnot (ncol (ev) == length (ni.ini)) #n check wether the matrix autovalues and ni.ini have the right dimension.
d <- t (ev)
p <- nrow (ev)
K <- ncol (ev)
n <- sum(ni.ini)
nis <- matrix(data=ni.ini,nrow=K,ncol=p)
#m MOVED: this block has been moved up a bit. see "old position of "block A" for it's old occurrence
idx.nis.gr.0 <- nis > zero.tol #n as nis € R we have to be carefull when checking against 0
used.ev <- ni.ini > zero.tol #n
ev.nz <- ev[,used.ev] #n non-zero eigenvalues
#m
#o if ((max (d[nis > 0]) <= zero.tol)) #m
#i if ((max (d[idx.nis.gr.0]) <= zero.tol)) #n "idx.nis.gr.0" instead of (nis > 0)
## All eigenvalues of non-empty clusters are (numerically) zero -> nothing
## to restrict; return a zero matrix of the right shape.
if ((max (ev.nz) <= zero.tol)) #n simplify syntax
return (matrix (0, nrow = p, ncol = K)) #m
#m
###### we check if the eigenvalues verify the restrictions #m
#m
#o if (max (d[nis > 0]) / min (d[nis > 0]) <= restr.fact) #m
#i if (max (d[idx.nis.gr.0]) / min (d[idx.nis.gr.0]) <= restr.fact) #n "idx.nis.gr.0" instead of (nis > 0)
if (max (ev.nz) / min (ev.nz) <= restr.fact) #n simplify syntax
{ #m
#o d[!idx.nis.gr.0] <- mean (d[idx.nis.gr.0]) #n "idx.nis.gr.0" instead of (nis > 0)
## Restriction already satisfied; only fill eigenvalues of empty clusters
## with the mean of the non-empty ones.
ev[,!used.ev] <- mean (ev.nz) #n simplify syntax
return (ev) #m
} #m
###### d_ is the ordered set of values in which the restriction objective function change the definition
###### points in d_ correspond to the frontiers for the intervals in which this objective function has the same definition
###### ed is a set with the middle points of these intervals
#o d_ <- sort (c (d, d / restr.fact))
d_ <- sort (c (ev, ev / restr.fact)) #n using ev instead of d
dim <- length (d_) ##2do: continue here cleaning up .restr2_eigenv
d_1 <- d_
d_1[dim+1] <- d_[dim] * 2
d_2 <- c (0, d_)
ed <- (d_1 + d_2) / 2
dim <- dim + 1;
##o
##o old position of "block A"
##o
###### the only relevant eigenvalues are those belong to a clusters with sample size greater than 0.
###### eigenvalues corresponding to a clusters with 0 individuals has no influence in the objective function
###### if all the eigenvalues are 0 during the smart initialization we assign to all the eigenvalues the value 1
###### we build the sol array
###### sol[1],sol[2],.... this array contains the critical values of the interval functions which defines the m objective function
###### we use the centers of the interval to get a definition for the function in each interval
###### this set with the critical values (in the array sol) contains the optimum m value
t <- s <- r <- array(0, c(K, dim))
sol <- sal <- array(0, c(dim))
## For every candidate interval midpoint ed[mp_]:
##   r = number of eigenvalues outside [ed, ed*restr.fact] per cluster,
##   s = sum of eigenvalues below ed,  t = sum of eigenvalues above it;
## sol is the closed-form optimal truncation level on that interval and
## sal its (weighted) log-likelihood value.
for (mp_ in 1:dim)
{
for (i in 1:K)
{
r[i,mp_] <- sum ((d[i,] < ed[mp_])) + sum((d[i,] > ed[mp_]*restr.fact))
s[i,mp_] <- sum (d[i,]*(d[i,] < ed[mp_]))
t[i,mp_] <- sum (d[i,]*(d[i,] > ed[mp_] * restr.fact))
}
sol[mp_] <- sum (ni.ini / n * (s[,mp_] + t[,mp_] / restr.fact)) / (sum(ni.ini /n * (r[, mp_])))
e <- sol[mp_] * (d < sol[mp_]) +
d * (d >= sol[mp_]) * (d <= restr.fact * sol[mp_]) +
(restr.fact*sol[mp_]) * (d > restr.fact * sol[mp_])
o <- -1/2 * nis / n * (log(e) + d / e)
sal[mp_] <- sum(o)
}
###### m is the optimum value for the eigenvalues procedure
#o eo <- which.max (c (sal)) ## remove c ()
# m <- sol[eo]
m <- sol[which.max (sal)] #n
###### based on the m value we get the restricted eigenvalues
## Truncate every eigenvalue into [m, restr.fact * m] and transpose back
## to the p x K layout of the input.
t (m * (d < m) + d * (d >= m) * (d <= restr.fact * m) + (restr.fact * m) * (d > restr.fact * m)) ## the return value
}
## Scale column j of matrix `a` by b[j]; numerically equivalent to
## a %*% diag(b), but implemented as an elementwise product so that the
## dimnames of `a` survive.
.multbyrow <- function (a, b) {
  fac <- as.numeric (b)
  a * rep (fac, each = nrow (a))
}
## Enforce the tclust *determinant* restriction: constrain the ratio of the
## largest to the smallest cluster covariance determinant to restr.fact.
## The determinant constraint is reduced to an eigenvalue constraint by
## working with det^(1/p) and restr.fact^(1/p) and delegating to
## .restr2_eigenv; shape-normalised eigenvalues come from .HandleSmallEv.
##
## autovalues  p x K matrix of eigenvalues, one column per cluster
## ni.ini      length-K vector of current cluster sizes
## restr.fact  maximal allowed determinant ratio (>= 1)
## zero.tol    numerical tolerance used when testing against zero
## Returns a p x K matrix of restricted eigenvalues.
.restr2_deter_ <- function (autovalues, ni.ini, restr.fact, zero.tol = 1e-16)
{
###### function parameters:
###### autovalues: matrix containing eigenvalues
###### ni.ini: current sample size of the clusters
###### factor: the factor parameter in tclust program
###### some initializations
p = nrow (autovalues)
## In one dimension determinant and eigenvalue restrictions coincide.
if (p == 1)
return (.restr2_eigenv (autovalues, ni.ini, restr.fact, zero.tol))
K = ncol (autovalues)
es = apply (autovalues, 2, prod)
idx.ni.ini.gr.0 <- ni.ini > zero.tol #n as ni.ini € R we have to be carefull when checking against 0
###### we check if all the determinants in no empty populations are 0
#o if (max(es[ni.ini > 0]) <= zero.tol) ## all eigenvalues are somehow zero.
if (max(es[idx.ni.ini.gr.0]) <= zero.tol) #n "idx.ni.ini.gr.0" instead of (ni.ini > 0)
return (matrix (0, p, K)) ## -> return zero mat
###### we put in d the determinants of the populations (converting es into a matrix of dim 1 x K)
d = t(es) #### --> dim (d) = 1 x K (has once been "d <- matrix (es, nrow = 1)")
#n put this block into a function (for improved readability)
autovalues_det <- .HandleSmallEv (autovalues, zero.tol) ## handling close to zero eigenvalues here
#cat ("\n1d^(1/p):\t", d^(1/p), "\n")
###### we check if all the determinants verify the restrictions
#o if (max (d[ni.ini > 0]) / min (d[ni.ini > 0]) <= restr.fact)
if (max (d[idx.ni.ini.gr.0]) / min (d[idx.ni.ini.gr.0]) <= restr.fact) #n "idx.ni.ini.gr.0" instead of (ni.ini > 0)
{
#o d [ni.ini == 0] <- mean (d[ni.ini > 0]) ## and get the mean - determinants for all clusters without observations.
d [!idx.ni.ini.gr.0] <- mean (d[idx.ni.ini.gr.0]) #n "idx.ni.ini.gr.0" instead of (ni.ini > 0)
dfin <- d^(1/p)
}
else
dfin <- .restr2_eigenv (d^(1/p), ni.ini, restr.fact^(1/p), zero.tol)
###### we apply the restriction to the determinants by using the .restr2_eigenv function
###### In order to apply this function is neccessary to transform d and factor with the power (1/p)
#cat ("\nfin:\t", dfin, "\n")
## Re-scale the shape-normalised eigenvalues by the restricted
## per-cluster determinant factors.
.multbyrow (autovalues_det, dfin) ## autovalues_det %*% diag (dfin)
}
## Helper of .restr2_deter_: treat clusters that are (almost) contained in
## a hyperplane, i.e. have at least one near-zero eigenvalue, and return
## eigenvalues normalised so that each column's product (determinant) is 1.
.HandleSmallEv <- function (autovalues, zero.tol)
{
  ev <- autovalues
  ## Clip every eigenvalue from below so none is numerically zero.
  ev[ev <= zero.tol] <- zero.tol
  ## Clusters whose min/max eigenvalue ratio is still below the tolerance
  ## are close to singular: cap their large eigenvalues at min / zero.tol,
  ## keeping one small eigenvalue and the rest far from zero.
  col.min <- apply (ev, 2, min)
  col.max <- apply (ev, 2, max)
  for (j in which (col.min / col.max <= zero.tol)) {
    cap <- col.min[j] / zero.tol
    ev[ev[, j] > cap, j] <- cap
  }
  ## Divide each column by det^(1/p) so its eigenvalue product equals 1.
  n.dim <- nrow (ev)
  col.det <- apply (ev, 2, prod)
  .multbyrow (ev, col.det^(-1 / n.dim))
}
## C-backed variant of .restr2_deter_: wraps the compiled
## "RestrictEigenValues_deter" routine from the tclust shared library.
## Requires the tclust native code to be loaded; results should ALWAYS be
## EXACTLY the same as those of the pure-R .restr2_deter_.
.restr2_deter_.C <- function (autovalues, ni.ini, restr.fact, p = nrow (autovalues), K = ncol (autovalues), zero.tol = 1e-16)
{
## this is an auxiliary function, wrapping the RestrictEigenValues_deter - C++ function.
## results should ALWAYS be EXACTLY the same as returned by .restr2_deter_ .
## 20120831: removed "name =" argmuent name from .C call
ret <- .C ("RestrictEigenValues_deter", PACKAGE = "tclust"
, as.integer (c(p, K))
, nParOut = integer (1)
, as.double (c (restr.fact, zero.tol))
, dParOut = double (1)
, EV = as.double (autovalues)
, as.double (ni.ini)
)
## nParOut is a success flag set by the C routine: non-zero -> restricted
## eigenvalues in EV; zero -> degenerate case, return a zero matrix.
if (ret$nParOut[1])
matrix (ret$EV, nrow = p)
else
matrix (0, nrow = p, ncol = K)
}
## C-backed variant of .restr2_eigenv: wraps the compiled
## "RestrictEigenValues" routine from the tclust shared library.
## Requires the tclust native code to be loaded; results should ALWAYS be
## EXACTLY the same as those of the pure-R .restr2_eigenv.
.restr2_eigen_.C <- function (autovalues, ni.ini, restr.fact, p = nrow (autovalues), K = ncol (autovalues), zero.tol = 1e-16)
{
## this is an auxiliary function, wrapping the RestrictEigenValues - C++ function.
## results should ALWAYS be EXACTLY the same as returned by .restr2_eigenv.
## 20120831: removed "name =" argmuent name from .C call
ret <- .C ("RestrictEigenValues", PACKAGE = "tclust"
, as.integer (c(p, K))
, nParOut = integer (1)
, as.double (c (restr.fact, zero.tol))
, dParOut = double (1)
, EV = as.double (autovalues)
, as.double (ni.ini)
)
## nParOut is a success flag set by the C routine: non-zero -> restricted
## eigenvalues in EV; zero -> degenerate case, return a zero matrix.
if (ret$nParOut[1])
matrix (ret$EV, nrow = p)
else
matrix (0, nrow = p, ncol = K)
}
|
0ced84bca21eca9c267304a5b362c5d6c0ca1c64
|
ed157f4d1f9f309b50c228bbeb58900ca276116d
|
/man/trim.Rd
|
97064cf64cbaa6478e5dfe8c70eb9ffaef3b222f
|
[] |
no_license
|
ugenschel/rotations
|
fad38b96a98de9d811b51912f399d2d2faaa91e0
|
d52a5c1063962f19e48389287baf21f5c97ba3b6
|
refs/heads/master
| 2021-01-24T01:30:11.660323
| 2012-08-17T16:35:32
| 2012-08-17T16:35:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
rd
|
trim.Rd
|
\name{trim}
\alias{trim}
\title{Function to Trim the Sample}
\usage{
trim(Rs, alpha)
}
\arguments{
\item{Rs}{The sample of random rotations}
\item{alpha}{The percent of observations to be trimmed}
}
\value{
S3 \code{trim} object; a sample of size n-(n*alpha) of
random rotations
}
\description{
This function will take a sample of size n random
rotations, find the observations beyond the alpha/2
percentile and delete them from the sample. To determine
which observations to remove, the average distance of
each sample point from the estimated central direction is
used.
}
|
e0faa49d2c89a1c35f3de5cd3bfb6f711711b767
|
1bd67a3f0b937d3e76fc8fce02c06dc73884ecc4
|
/Exercise-2.R
|
327f4b128c32f736bc9e864362379bb952a6590d
|
[] |
no_license
|
kvipin16/R-Exercise
|
6068de1a0bb89de6edea39ecf4bebaa19cc9c7ab
|
0dc80109baa40133a90567dec5c9ee72c5ecd949
|
refs/heads/master
| 2023-04-04T04:52:53.194342
| 2021-04-25T18:30:43
| 2021-04-25T18:30:43
| 361,378,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 148
|
r
|
Exercise-2.R
|
#2. Write a R program to get the details of the objects in memory
# Create a few objects of different types so the workspace is non-empty.
name<-"vipin"
int1 <- 1.5
int2 <- 6
nums <- c(1,2,3,5)
# ls() lists the names of objects in the current environment; ls.str()
# additionally shows each object's structure (type and value).
print(ls())
print(ls.str())
|
189deaf01f41cca37b5c0938213ab75ed2460d41
|
46906266c769d91a63bb13f39d5bfef1a0c1d4a6
|
/global.R
|
4a8b285f95696bdcb970708595a871bb3f3ae78a
|
[] |
no_license
|
Mihobsine/Workshop_COVID19_tracker
|
e1cc5e2d1ae5c19de032483abbdb51582c17d6cb
|
fdc87a8c6cd371011b38fac537ce4fa6fbea1ae8
|
refs/heads/master
| 2022-09-27T07:41:42.743895
| 2020-06-05T15:32:22
| 2020-06-05T15:32:22
| 267,881,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 334
|
r
|
global.R
|
#Files for all global values of the app
# Shiny global.R: objects created here are shared by ui.R and server.R.
# Only the package loads are active; the remaining lines are workshop
# placeholders to be filled in by participants.
library(dplyr)
library(geojsonio)
library(lubridate)
# Step 1 - Open your files here and prepare the dataframe to merge it with geojson here
# Step 4 - Prepare your data for plotting here
# Use this line to set up the format of the day column
#your_data_frame$jour = ymd(your_data_frame$jour)
|
d898ed8ee04afcd77829703dfac8704b38ffd6ce
|
ef88c5d7857b9d8f3a7b7f0c2a11475774f83ab2
|
/R/kaps.R
|
ec103ec1dd490a4df88177307cc6ac78c9202696
|
[] |
no_license
|
cran/kaps
|
bb8e5695ff48704172802d86cf13d71350088761
|
130a0db675e6293cb7d159f76e4fd0e57bf52668
|
refs/heads/master
| 2021-01-21T12:21:25.733266
| 2014-11-01T00:00:00
| 2014-11-01T00:00:00
| 17,696,879
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,305
|
r
|
kaps.R
|
##########################################################################
####
#### Multiway-splits adaptive partitioning with distribution free approach
####
#### Soo-Heang EO and HyungJun CHO
####
#### Version 1.0.0
####
#### 30 Dec, 2013
####
##########################################################################
## Main entry point of the kaps (K-adaptive partitioning for survival data)
## algorithm.  Fits kaps.fit for each candidate number of subgroups in K
## and selects the optimal K either by permutation ("perm"), by test
## estimates ("test", reached via kaps.test), or not at all ("NULL").
##
## formula  a Formula object (survival outcome ~ split variable)
## data     data.frame with the observations
## K        vector of candidate numbers of subgroups (each > 1)
## mindat   minimum subgroup size; defaults to 5% of nrow(data)
## type     selection strategy: "perm" (default) or "NULL"
## ...      minor options forwarded to kaps.control()
## Returns an S4 kaps object (slots @Z, @X, @groups, @results, ...).
kaps <- function(formula, data, K = 2:4, mindat, type = c("perm", "NULL"), ...){
##########################
##### pre-processing step
#options(warn = -1)
if(missing(mindat)) mindat = floor(nrow(data) * 0.05)
if(!is.Formula(formula)) {
formula <- as.Formula(formula)
#class(formula) <- "Formula"
}
# for minor options used in kaps algorithm
# minors = kaps.control()
minors = kaps.control(...)
if(any(K == 1)) stop("the number of subgroups (K) have to be greater than subjects.")
n <- nrow(data) # total number of observations concerning with time
rownames(data) <- 1:n
if(n == 0L) stop("0 (non-NA) cases.")
if(length(K) < 1) stop("the minimum number of subgroups (K) is greater than 1.")
if(length(K) > 10) stop("the maximum number of subgroups (K) is too large.")
call <- match.call()
######################################################
### Finding optimal subgroups
type <- match.arg(type)
## Shortcut: a single candidate K and no selection strategy -> one fit.
## NOTE(review): scalar condition uses elementwise `&`; `&&` would be the
## idiomatic scalar operator here.
if(length(K) == 1 & type == "NULL"){
result = kaps.fit(formula = formula, data = data, K = K, mindat = mindat, minors = minors)
test.stat2 = adj.test(result)
result@call <- call
result@Z <- test.stat2[1] # overall statistic
result@X <- test.stat2[2] # test statistic for the worst pair
return(result)
} else if(type == "test"){
cat("Now, finding optimal number of subgroups (K) by test estimates. \n")
# choose maximal pairwise permutation p-value
# NEED TO MODIFY
# lapply(K, kaps.fit, formula = formula, data = data, mindat = mindat, minors = minors)
test.stat = kaps.test(formula, data, K, minors)
} else if(type == "perm"){
cat("Now, finding optimal number of subgroups (K) by KAPS-permutation. \n")
## Fit the model for every candidate K, then pick the largest K whose
## worst-pair permutation p-value is still significant (<= 0.05).
fit = lapply(K, kaps.fit, formula = formula, data = data, mindat = mindat, minors = minors)
# 1: overall test statistic
# 2: overall p-value
# 3: worst pair test statistic
# 4: worst pair p-value
test.stat = sapply(fit, kaps.perm, permute = TRUE)
# 1: overall p-value
# 2: worst pair p-value
# choose worst pairwise permutation test
test.stat2 = sapply(fit, adj.test)
test.stat2 <- as.matrix(test.stat2)
index = 1:ncol(test.stat)
index.pair = test.stat[2,] <= 0.05
#index.pair <- ifelse(is.na(index.pair), FALSE, index.pair)
if(all(index.pair == FALSE)){
# No significant subgroups
result <- fit[[1]]
result@index <- as.integer(0)
result@groups <- K
attr(result@groups,"names") <- paste("K<",K,sep="")
} else{
## sum(index.pair) = number of significant candidates; used as the index
## of the selected K in the (ordered) candidate vector.
index <- index[sum(index.pair)]
cat("Now, selecting a set of cut-off points...\n")
result <- fit[[index]]
result@index <- as.integer(index)
result@groups <- K
attr(result@groups,"names") <- paste("K=",K,sep="")
}
result@Z <- as.vector(test.stat2[1, ]) #overall statistic
result@X <- as.vector(test.stat2[2, ]) #optimal pair p-value
result@results <- fit # results objects
result@test.stat <- test.stat # Bonferroni corrected p-values
result@call <- call
result@Options <- minors
return(result)
} else if (type == "NULL"){
index = 1
}
######################################################
# Obtain log-rank statistics at K
### parallel computing in order to find optimal k subgroups
## Fallback path (type == "test" or "NULL" with several K): refit for all
## candidates and return the fit at `index`.
cat("Now, selecting a set of cut-off points...\n")
#fit = kaps.fit(formula = formula, data = data, K = K.sel, mindat = mindat, minors = minors)
fit = lapply(K, kaps.fit, formula = formula, data = data, mindat= mindat, minors = minors)
test.stat2 = sapply(fit, adj.test)
test.stat2 <- as.matrix(test.stat2)
result <- fit[[index]]
result@index <- as.integer(index)
result@groups <- K
attr(result@groups,"names") <- paste("K=",K,sep="")
result@Z <- as.vector(test.stat2[1, ]) # overall test statistic
result@X <- as.vector(test.stat2[2, ]) # test statistic for the worst pair
result@results <- fit # result objects
#if(type != "NULL") {
# result@test.stat <- test.stat
# result@over.stat.sample <- fit.boot$boot.over.stat
# result@pair.stat.sample <- fit.boot$boot.pair.stat
#}
result@call <- call
result@Options <- minors
return(result)
}
############################################################
# Fit KAPS with test estimates approach
############################################################
## Select the number of subgroups by a single learning/test split (70/30).
## Fits kaps.fit on the learning set for each candidate K and evaluates the
## permutation p-values on the held-out test set via kaps.perm.
## NOTE(review): uses sample() without a fixed seed, so results vary
## between calls unless the caller sets the RNG seed.
##
## formula  model formula (see kaps)
## data     data.frame with the observations
## K        vector of candidate numbers of subgroups
## minors   minor-option list from kaps.control()
## Returns a length(K) x 4 matrix; only columns "over_pval" and
## "pair_pval" are filled in (the *_stat columns are placeholders).
kaps.test <- function(formula, data, K = 2:5, minors = kaps.control()){
#options(warn = -1)
if(any(K == 1)) stop("the number of subgroups (K) is greater than 1.")
n = nrow(data) # total number of observations concerning with time
rownames(data) <- 1:n
if(n == 0L) stop("0 (non-NA) cases.")
if(length(K) < 1) stop("the minimum number of subgroups (K) is greater than 1.")
#if(length(K) > 10) stop("the maximum number of subgroups (K) is too large.")
#call = match.call()
# kaps by test estimates approach
fold.over.stat = fold.pair.stat = matrix(0, nrow = 1, ncol = length(K))
result = matrix(0, nrow = length(K), ncol = 4)
colnames(result) <- c("over_pval", "over_stat", "pair_pval", "pair_stat")
rownames(result) <- paste("K=",K,sep="")
## CHECK ME: reduce computing time by parallel computing
## 70% learning / 30% test split.
index = sample(1:n, floor(n*0.7))
learning = data[index,, drop = FALSE]
test = data[-index,, drop = FALSE]
mindat = floor(nrow(learning) * 0.05)
fit = lapply(K, kaps.fit, formula = formula, data = learning, mindat = mindat, minors = minors)
test.stat = sapply(fit, kaps.perm, newdata = test)
rownames(test.stat) = c("perm_overall_pval", "perm_min_pair_pval")
colnames(test.stat) = paste("K=",K,sep="")
print(round(test.stat,3))
fold.over.stat[1,] <- test.stat[1,]
fold.pair.stat[1,] <- test.stat[2,]
### output
result[,1] <- fold.over.stat
#result[,2] <- apply(fold.over.stat, 2, sd, na.rm = TRUE) / sqrt(V)
result[,3] <- fold.pair.stat
#result[,4] <- apply(fold.pair.stat, 2, sd, na.rm = TRUE) / sqrt(V)
#result <- as.data.frame(result)
return(result)
}
# END @ Nov 12, 2013
|
783433a1925aec9afb3c28517752838a9a700b8d
|
6fa7c44eefa97557cde6c6daefa91959f9ff2264
|
/carmen/src/calc_pwilcox.r
|
3b27c3370fdb31e27f9761febb2f6dad5f0f627f
|
[] |
no_license
|
DavidQuigley/QuantitativeGenetics
|
a8b24019f0233f70be468c17175c7fe3b3ae9139
|
31c19fef62901f823b1d329298f72428d190abd0
|
refs/heads/master
| 2020-12-22T08:59:28.749267
| 2017-12-01T16:37:41
| 2017-12-01T16:37:41
| 3,517,532
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
calc_pwilcox.r
|
# For each pair of group sizes (i, j) with i < j <= max_val, scan the
# two-tailed Mann-Whitney (Wilcoxon rank-sum) p-values pwilcox(u, i, j)*2
# for u = 3..10000 and append "i j u-1" to the output file, where u is the
# first statistic whose p-value climbs back above max_p after having been
# below it.
#
# max_val: largest group size to tabulate
# max_p:   two-tailed significance cutoff
# Returns 0 (kept for compatibility with the original sentinel return).
# Side effect: appends lines to c:\code\SingleQTL\m_w_pvalues.txt.
write_to_max_p = function(max_val, max_p){
    for( i in seq_len(max_val) ){
        # BUG FIX: the original tested `if (i < j)` BEFORE `j` was defined,
        # which raises "object 'j' not found" at run time.  The pair guard
        # now lives inside the inner loop where `j` exists.
        for( j in i:max_val ){
            if( i >= j ) next  # only tabulate pairs with i < j
            legal = FALSE
            for( test_val in 3:10000 ){
                pval = pwilcox(test_val, i, j) * 2 # 2 tailed
                # remember once the p-value has dropped below the cutoff
                if( !legal && pval < max_p ){
                    legal = TRUE
                }
                # first statistic back above the cutoff: record the
                # previous (still significant) value and stop scanning
                if( legal && pval > max_p ){
                    write( c(i, j, test_val-1), 'c:\\code\\SingleQTL\\m_w_pvalues.txt', append=TRUE )
                    break
                }
            }
        }
    }
    0
}
## Driver: tabulate Mann-Whitney critical values for a descending series of
## two-tailed significance cutoffs.  Each section of the output file is
## introduced by a sentinel line "-1 <p> 0"; a final "-1 0 0" marks the end.
out_file = 'c:\\code\\SingleQTL\\m_w_pvalues.txt'
unlink(out_file)
max_value = 40
## The cutoffs are kept as strings so the sentinel lines match the original
## fixed-decimal format exactly (R would otherwise print 0.00001 as 1e-05).
for( p_str in c("0.01", "0.001", "0.0001", "0.00001", "0.000001", "0.0000001") ){
    write( paste("-1", p_str, "0"), out_file, append=TRUE )
    max_p_value = as.numeric(p_str)
    write_to_max_p(max_value, max_p_value)
}
write( "-1 0 0", out_file, append=TRUE )
|
348e24dc03249593feae3e5e6a0edcce1cef0440
|
3db42fef7b85bdd3d51a220deb261193365d4d3b
|
/man/weight.Rd
|
350082e38d6d83e69b618798916123fe77184f7b
|
[] |
no_license
|
mwheymans/psfmi
|
db30501dd207a057803e0f57e9003f96a8fe3d01
|
6afb51f1f1d9d7df11e91e5ffc64b14c3a663e33
|
refs/heads/master
| 2023-06-23T14:23:27.086994
| 2023-06-17T12:57:35
| 2023-06-17T12:57:35
| 129,861,191
| 15
| 6
| null | 2022-10-19T17:12:03
| 2018-04-17T07:05:18
|
R
|
UTF-8
|
R
| false
| false
| 753
|
rd
|
weight.Rd
|
\name{weight}
\alias{weight}
\docType{data}
\title{Dataset of persons from the The Amsterdam Growth and Health Longitudinal Study (AGHLS)}
\description{
Dataset of persons from the The Amsterdam Growth and Health Longitudinal Study (AGHLS)
}
\usage{data(weight)}
\format{
A data frame with 450 observations on the following 7 variables.
\describe{
\item{\code{ID}}{continuous}
\item{\code{SBP}}{continuous: Systolic Blood Pressure}
\item{\code{LDL}}{continuous: Cholesterol}
\item{\code{Glucose}}{continuous}
\item{\code{HDL}}{continuous: Cholesterol}
\item{\code{Gender}}{dichotomous: 1=male, 0=female}
\item{\code{Weight}}{continuous: bodyweight}
}
}
\examples{
data(weight)
## maybe str(weight)
}
\keyword{datasets}
|
81d0fde6e6820bd286e4fd4b248ded933a8d4eb6
|
bee49fb2eb33c04541e372ef0142f7b763824319
|
/session1_tasks.R
|
2a437f4968692b7cb6442bc189c25f31a9355543
|
[] |
no_license
|
mrzhsp/session_1
|
4b20a300412cfd997ad25c2f33340f120fdceeb1
|
e67aa14632d5714ab328c4f185212e9b60460d3b
|
refs/heads/master
| 2020-06-13T10:15:49.284079
| 2019-07-01T14:39:17
| 2019-07-01T14:39:17
| 194,624,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,740
|
r
|
session1_tasks.R
|
# Workshop exercises: ggplot2 plotting (Tasks 1-5) and dplyr/tidyr data
# manipulation (Tasks 6-11) on the built-in mpg dataset.
# NOTE(review): tbl_df() (Tasks 1/6) and gather()/spread() (Tasks 9/11) are
# deprecated/superseded in current tidyverse (as_tibble(), pivot_*()); kept
# here unchanged.
# Task 1
library(tidyverse)
mpg %>% tbl_df()
ggplot(data = mpg, aes(x = displ, y = hwy, color = trans)) +
geom_point(color = "red") +
geom_smooth()
# Task 2
ggplot(data = mpg, aes(x = displ, y = hwy)) +
geom_point(color = "red") +
geom_smooth()
# Task 3
ggplot(data = mpg, aes(x = displ, y = hwy, color = drv)) +
geom_point(color = "red") +
geom_smooth(method = "lm", se = FALSE)
# Task 4
ggplot(data = mpg, aes(x = displ, y = hwy, color = factor(cyl))) +
geom_point() +
geom_smooth(method = "lm", se = FALSE)
# Task 5: log-log axes, per-cylinder regressions, facet by model year
ggplot(mpg, aes(x = displ, y = hwy, colour = factor(cyl))) +
geom_point() +
scale_x_log10(breaks = c(2, 3, 4, 5, 6, 7)) +
scale_y_log10(breaks = c(20, 30, 40)) +
geom_smooth(method = "lm", se = FALSE) +
labs(
x = "Engine Size Displacement (litres)",
y = "Highway miles per gallon",
colour = "Cylinders",
title = "Fuel Economy and Engine Size"
) +
facet_wrap(~year, labeller = as_labeller(c(
"1999" = "Model Year 1999",
"2008" = "Model Year 2008"
)))
# Task 6
data(mpg, package = "ggplot2")
mpg %>% tbl_df
mpg2 <- mpg %>% select(manufacturer, model, displ, year, cyl, trans, cty, hwy)
# Task 7: derived columns - squared displacement and volume per cylinder
mpg3 <- mpg2 %>% mutate(displ2 = displ^2,
vol_per_cyl = round(displ / cyl, 2))
# Task 8
mpg3 %>% arrange(desc(vol_per_cyl))
mpg3 %>% filter(manufacturer == "chevrolet") %>%
arrange(desc(vol_per_cyl))
mpg4 <- mpg3 %>%
group_by(manufacturer, year) %>%
summarise(max_vol_per_cyl = max(vol_per_cyl))
# Task 9: wide format - one column per model year
mpg5 <- mpg4 %>% spread(year, max_vol_per_cyl)
# Task 10
mpg6 <- mpg5 %>% mutate(change = `2008` - `1999`)
# Task 11
mpg6 %>%
rename(max_vpc_1999 = `1999`, max_vpc_2008 = `2008`) %>%
gather(variable, value) %>% View
|
df2ad9feadc3b0f8f375ef478111e6defd5becd9
|
4ec548c762474908e50080adb57410063899f85a
|
/man/isSubsetArbitrary.Rd
|
715d90e0caf98c0b25b57af6ca2f212eaf919418
|
[] |
no_license
|
jaspeir/NIJ_Tabitha
|
d6e37ae02ba14620967ca2eb0530fed1478c7337
|
37ec34c0a446d19c5c4413b2e340555adc373b10
|
refs/heads/master
| 2022-10-01T11:05:41.720909
| 2020-04-01T21:56:33
| 2020-04-01T21:56:33
| 268,837,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 472
|
rd
|
isSubsetArbitrary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helperFunctions.R
\name{isSubsetArbitrary}
\alias{isSubsetArbitrary}
\title{Function to determine whether A is a subset of B - arbitrary sets.}
\usage{
isSubsetArbitrary(A, B)
}
\arguments{
\item{A}{the left operand - vector}
\item{B}{the right operand - vector}
}
\value{
the boolean value of the operation
}
\description{
Function to determine whether A is a subset of B - arbitrary sets.
}
|
daecbcaf4e31d1225412c94ffa18df306fe0cc81
|
1bf4bc16ae1e516b82788227118778f9fa008dda
|
/Hmsc_CD/oregon_ada/code_GIS/Pred4_3_clamped_plots.r
|
1a015a7f74e44bf3e0809b609ff5bb1bf409e3d3
|
[
"MIT"
] |
permissive
|
dougwyu/HJA_analyses_Kelpie
|
93b6b165e4d2de41f46bacd04c266afa7af0541e
|
4ab9de1072e954eac854a7e3eac38ada568e924b
|
refs/heads/master
| 2021-11-03T19:45:17.811298
| 2021-10-26T09:45:15
| 2021-10-26T09:45:15
| 245,229,951
| 1
| 1
|
MIT
| 2021-09-10T07:19:04
| 2020-03-05T17:44:55
|
HTML
|
UTF-8
|
R
| false
| false
| 7,804
|
r
|
Pred4_3_clamped_plots.r
|
## Setup: load packages, set machine-specific paths, and pull in the sample
## sites, the raster template (r.msk + indNA index of non-NA cells) and the
## unscaled predictor data extracted previously.
## NOTE(review): setwd() and absolute drive paths make this script
## machine-specific; paths for a second machine are kept as comments.
library(dplyr)
library(ggplot2)
library(raster)
setwd("J:/UEA/gitHRepos/HJA_analyses_Kelpie/Hmsc_CD/oregon_ada")
# setwd("D:/CD/UEA/gitHRepos/HJA_analyses_Kelpie/Hmsc_CD/oregon_ada")
gis_in <- "J:/UEA/Oregon/gis/raw_gis_data"
gis_out <- "J:/UEA/Oregon/gis/processed_gis_data"
# gis_in <- "D:/CD/UEA/Oregon/gis/raw_gis_data"
# gis_out <- "D:/CD/UEA/Oregon/gis/processed_gis_data"
## create a reduced prediction area - convex hull around (all points + HJA) + buffer
## Load sample site points
load(file.path(gis_out, "sample_sites.rdata"))
xy.utm; xy.all.utm
## bring in raster template to check new data
load("J:/UEA/gitHRepos/HJA_analyses_Kelpie/Hmsc_CD/oregon_ada/data/gis/templateRaster.rdata") # r.msk, indNA,
rm(r.aoi.pred, aoi.pred.sf)
## check scale of background to range of variables at sample sites
load("J:/UEA/Oregon/gis/processed_gis_data/r_oversize/newData_unscaled.rdata") # allVars, newData, indNA
head(allVars) # extracted predictor values at all sample point sites/
colnames(newData) # all data (without NAs) from all predictor layers across study area
# remove these
# varsOut <- c("SiteName", "trap", "period", "geometry", "cut_r", "cut_msk", "cut_40msk", "insideHJA",
# "aspect30","maxT_annual","meanT_annual","minT_annual","precipitation_mm")
# Just use these -- - just those in model
## Final set of VIF chosen predictors
## (the 26 predictors retained after VIF selection; used throughout below)
vars11 <- c("gt4_500", "cut_r1k","cut_r250","cut40_r1k","cut40_r250","be30","tri30","Nss30",
"Ess30","twi30","tpi250","tpi1k","l_rumple","nbr_stdDev_r100","ndvi_p5_r100",
"ndvi_p5_r500","ndvi_p50_r100","ndvi_p50_r500","ndmi_p95_r100",
"LC08_045029_20180726_B1","LC08_045029_20180726_B5","lg_DistStream",
"lg_DistRoad","lg_cover2m_max","lg_cover2m_4m","lg_cover4m_16m") # insideHJA
#
## Build (commented out; result is loaded from disk below) a long-format
## comparison of predictor values at sample sites vs the whole background,
## then summarise per predictor and plot histograms with the sample
## min/max marked, coloured by whether the sample range covers the
## background's 5/95% data-value span.
# tmp1 <- allVars %>%
# filter(period == "S1") %>%
# dplyr::select(all_of(vars11)) %>%
# tidyr::pivot_longer(cols = everything(), names_to = "predictors", values_to = "value")%>%
# mutate(set = "samples")%>%
# arrange(predictors)
#
# head(tmp1)
#
# compVars <- newData %>%
# dplyr::select(all_of(vars11)) %>%
# tidyr::pivot_longer(cols = everything(), names_to = "predictors", values_to = "value")%>%
# mutate(set = "background")%>%
# arrange(predictors)%>%
# rbind(tmp1)
#
# rm(tmp1)
# head(compVars)
# save(compVars, file = "code_GIS/compVars.rdata")
load("code_GIS/compVars.rdata")
## Per predictor and set: min/max, 5/95% quantiles, and the 5th/95th points
## of an even 100-step grid over the range (valL/valH).
sumVars <- compVars %>%
group_by(predictors, set)%>%
summarise(min = min(value),
max = max(value),
pcL = quantile(value, prob = 0.05),
pcH = quantile(value, prob = 0.95),
valL = seq(min,max, length.out = 100)[5],
valH = seq(min,max, length.out = 100)[95])%>%
#filter(set == "samples") %>%
tidyr::pivot_wider(names_from = set, values_from = c(min, max, pcL, pcH, valL, valH))%>%
mutate(colL = if_else(max_samples >= valH_background, "green", "red"), # pcH_background
colH = if_else(min_samples <= valL_background, "green", "red"))# pcL_background
# select(c(1,3,5,6,8,10,12,14,15))
sumVars
#sumVars[,c(1,3,5,6,8,10,12,14,15)]
ggplot(compVars, aes(x = value))+
geom_histogram()+
geom_vline(data = sumVars, aes(xintercept = max_samples, colour = colH))+
geom_vline(data = sumVars, aes(xintercept = min_samples, col = colL))+
scale_colour_identity(guide = "legend", labels = c("outside", "within"),
name = "5/95 % data value")+ # name = "5/95 % data value" name ="5/95 percentile"
#geom_vline(data = sumVars, aes(xintercept = pcL_background), col = "blue")+
#geom_vline(data = sumVars, aes(xintercept = pcH_background), col = "blue")+
facet_wrap(~predictors, scales = "free", ncol = 5, nrow = 6) #
0.025* nrow(newData)
getwd()
## ggsave() saves the last plot; both file names refer to the same figure.
ggsave("../local/plots/sample_background_quantile.png")
ggsave("../local/plots/sample_background_data_value.png")
### Make new data masking values out of range of sample sites.
## IS specific to variables used in model. So needs to be made for each new set of vars...
## Two variants are built from sumVars: newData_ir sets out-of-sample-range
## values to NA ("in range" only), newData_clamp clips them to the sample
## min/max. NOTE(review): rowwise() + per-row lookups into sumVars is O(rows)
## scalar work and slow on large backgrounds — a join would be faster.
sumVars
# probably faster to copy max/min values as extra column, but used more memory in RAM>
## uses sumVars - specific to vars and can change cutoff value there.
newData_ir <- newData %>%
dplyr::select(all_of(vars11)) %>%
tidyr::pivot_longer(cols = everything(), names_to = "predictors", values_to = "value") %>%
rowwise() %>%
mutate(value2 =
case_when(
value < sumVars$min_samples[sumVars$predictors == predictors] |
value > sumVars$max_samples[sumVars$predictors == predictors] ~ NA_real_, # must be same type...
TRUE ~ value
))%>%
arrange(predictors)
newData_clamp <- newData %>%
dplyr::select(all_of(vars11)) %>%
tidyr::pivot_longer(cols = everything(), names_to = "predictors", values_to = "value") %>%
rowwise() %>%
mutate(value2 =
case_when(
value < sumVars$min_samples[sumVars$predictors == predictors]
~ sumVars$min_samples[sumVars$predictors == predictors],
value > sumVars$max_samples[sumVars$predictors == predictors]
~ sumVars$max_samples[sumVars$predictors == predictors],
TRUE ~ value
))%>%
arrange(predictors)%>%
as.data.frame()
newData_clamp
save(newData_ir,newData_clamp, file = "J:/UEA/Oregon/gis/processed_gis_data/r_oversize/newData_ir.rdata")
load("J:/UEA/Oregon/gis/processed_gis_data/r_oversize/newData_ir.rdata")
newData[1:10, vars11]
summary(newData_ir[newData_ir$predictors == "be30",])
## Change to wide format
## (one column per predictor, rebuilt from the long value2 column)
newData_cl_w <- do.call(data.frame,
lapply(vars11, function(x) newData_clamp[newData_clamp$predictors == x, "value2"]))
colnames(newData_cl_w) <- vars11
head(newData_cl_w)
newData_ir_w <- do.call(data.frame,
lapply(vars11, function(x) newData_ir[newData_ir$predictors == x, "value2"]))
colnames(newData_ir_w) <- vars11
head(newData_ir_w)
save(newData_cl_w, newData_ir_w,
file = "J:/UEA/Oregon/gis/processed_gis_data/r_oversize/newData_clamp_w.rdata")
## Make raster stack to check area of NAs
## For each predictor, fill the template raster's non-NA cells (indNA) with
## the masked (newData_ir) values and stack the results for inspection.
x <- vars11[1]
## make rasters
rList <- lapply(vars11, function(x) {
tmp <- r.msk
tmp[indNA] <- newData_ir$value2[newData_ir$predictors == x]
tmp
})
# plot(tmp)
rStack_ir <- stack(rList)
names(rStack_ir) <- vars11
rStack_ir
plot(rStack_ir[[1:10]], colNA = "black")
## sum to get all NAs
smStack_ir <- is.na(sum(rStack_ir))
plot(smStack_ir, colNA = "black")
# how many extra NAs?
sum(indNA) - sum(is.na(values(smStack_ir)))
## Clamp stack
## Same construction using the clamped values (no new NAs expected here).
rList <- lapply(vars11, function(x) {
tmp <- r.msk
tmp[indNA] <- newData_clamp$value2[newData_clamp$predictors == x]
tmp
})
# plot(tmp)
rStack_clamp <- stack(rList)
names(rStack_clamp) <- vars11
rStack_clamp
plot(rStack_clamp[[1:10]], colNA = "black")
rStack_ir[[1]]
rStack_clamp[[1]]
save(rStack_ir, rStack_clamp, file = "J:/UEA/Oregon/gis/processed_gis_data/r_oversize/rStack_ir_clamp.rdata")
## Make newData (in wide format)
# load("J:/UEA/Oregon/gis/processed_gis_data/r_oversize/rStack_ir_clamp.rdata") # rStack_ir, rStack_clamp
# newData_cl_w <- raster::values(rStack_clamp)
# head(newData_cl_w)
# newData_cl_w <- data.frame(newData_cl_w[indNA, ])
#
## Scale whole data set - apart from categorical predictors
## NOTE(review): `allBrck` is not created anywhere in this script — it is
## presumably loaded in an earlier script/session; confirm before running.
allBrck.sc <- scale(dropLayer(allBrck, c("insideHJA", "cut_r" , "cut_msk", "cut_40msk")))
# stores scale parameters in the @data slot
allBrck.sc # in memory
# str(allBrck.sc)
inMemory(allBrck.sc[[1]])
sapply(1:nlayers(allBrck.sc), function(x) inMemory(allBrck.sc[[x]]))
## add back categorical - but bring into memory first
catRasters <- readAll(allBrck[[c("insideHJA", "cut_r" , "cut_msk", "cut_40msk")]])
catRasters[[1]]
allBrck.sc <- addLayer(allBrck.sc, catRasters)
names(allBrck.sc)
|
818eeb83a699c39a208371652f27a02c63dfc905
|
b81875d1dc66033329e6e82914cd08727dffc8bf
|
/R/var.R
|
5837d792bede8870244e621e635e4756bfc30b97
|
[] |
no_license
|
cran/Bolstad
|
b4cb3d49c8edca8ebcc51fe89539a3d144e8de32
|
3dc15d83e44e4e5e120e91465ae7ca213ba4e699
|
refs/heads/master
| 2021-01-21T00:52:47.450624
| 2020-10-05T05:50:02
| 2020-10-05T05:50:02
| 17,678,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
var.R
|
#' Variance generic
#'
#' Masks \code{stats::var} with an S3 generic so that package classes
#' (e.g. \code{Bolstad}) can supply their own variance method; other
#' objects fall through to \code{var.default} and hence to
#' \code{stats::var}.
#'
#' @param x an object for which we want to compute the variance
#' @param \dots Any additional arguments to be passed to \code{var}.
#' @export
var = function(x, ...){
UseMethod("var")
}
#' Default variance method: defer to \code{stats::var}.
#' @export
var.default <- function(x, ...) stats::var(x, ...)
#' Variance method for Bolstad posterior objects.
#'
#' If the object already carries a stored variance component, return it
#' directly; otherwise compute the posterior variance numerically as
#' E[(x - E[x])^2] over the parameter grid.
#' @export
var.Bolstad <- function(x, ...) {
## Short-circuit: an element whose name contains "var" holds the answer.
if (any(grepl("var", names(x)))) {
return(x$var)
}
grid <- x$param.x
post.mean <- mean(x, ...)
## Interpolate (x - mean)^2 * posterior density and integrate over the grid.
integrand <- approxfun(grid, (grid - post.mean)^2 * x$posterior)
integrate(integrand, min(grid), max(grid))$value
}
|
6fcc1524cee74b1a54fa927e9749059975d2b5c7
|
9e1efc0746759ecfb9f9d4cd56490f2dfe50ed48
|
/R code/population and line chart.R
|
2ce6d121c5aaeec6ba650d4eb69a53e71c9b432a
|
[] |
no_license
|
yungclee/Dialysis-Analysis
|
ee84f3af67b25b816ff0685825f3a7b7124ef262
|
29955cefc8cc8e4f47356b56ae577885a61b0516
|
refs/heads/master
| 2022-03-06T04:25:31.490496
| 2019-10-21T00:07:47
| 2019-10-21T00:07:47
| 105,085,300
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 3,317
|
r
|
population and line chart.R
|
# Exploratory script: dialysis patient/population counts by hospital area and
# a line chart of drug-day frequencies.
# NOTE(review): axis/title strings below are GB18030-encoded Chinese that has
# been mojibake'd in transit; they are kept byte-for-byte because they are
# runtime output, not comments.
total.id.harea=read.csv("D:/data1/temp/total_id_hosparea.csv")
total.id.harea
summary(total.id.harea)
sort(total.id.harea$id_birthday)
# NOTE(review): this subset() call looks broken — `by` is not a subset()
# argument, and 'id_birthday'==18000101 compares a string to a number
# (always FALSE). Probably intended: subset(total.id.harea, id_birthday==18000101).
subset(total.id.harea,by='id_birthday'==18000101)
cd=read.csv("D:/data1/temp/20141227/corrected data(total_cd_list_hosparea_2002).csv")
total.id.harea=cd
total.id.harea.id=read.csv("D:/data1/temp/20141227/corrected data(total_cd_list_hosparea_2002_id).csv")
# Derive year-of-treatment and year-of-birth (dates stored as yyyymmdd ints)
# and tabulate patient age at treatment.
func.date=floor(total.id.harea.id$func_date/10000)
id.birth=floor(total.id.harea.id$id_birthday/10000)
table(func.date-id.birth)
hist(func.date-id.birth)
# Yearly population extracts, 2000 and 2002-2009.
id.pop.2000=read.csv("D:/data1/temp/20141218/ID_population.csv")
id.pop.2002=read.csv("D:/data1/id_population/id_population2002.csv")
id.pop.2003=read.csv("D:/data1/id_population/id_population2003.csv")
id.pop.2004=read.csv("D:/data1/id_population/id_population2004.csv")
id.pop.2005=read.csv("D:/data1/id_population/id_population2005.csv")
id.pop.2006=read.csv("D:/data1/id_population/id_population2006.csv")
id.pop.2007=read.csv("D:/data1/id_population/id_population2007.csv")
id.pop.2008=read.csv("D:/data1/id_population/id_population2008.csv")
id.pop.2009=read.csv("D:/data1/id_population/id_population2009.csv")
# Bar charts of population by area code, one per year. Counts are divided by
# 10000 for most years (axis is in units of 10k); [-1] drops the first
# (presumably missing/blank area) level — TODO confirm against the raw codes.
barplot(table(id.pop.2000$AREA_NO_I)[-1])
barplot(table(id.pop.2002$AREA_NO_I)[-1],main='2002跋羆计',xlab='跋(絏)',ylab="计",col='royalblue2')
barplot(table(id.pop.2003$AREA_NO_I)/10000,main='2003跋羆计',xlab='跋(絏)',ylab="计(窾)",col='royalblue2',ylim=c(0,7))
barplot(table(id.pop.2004$AREA_NO_I)/10000,main='2004跋羆计',xlab='跋(絏)',ylab="计(窾)",col='royalblue2',ylim=c(0,7))
barplot(table(id.pop.2005$AREA_NO_I)/10000,main='2005跋羆计',xlab='跋(絏)',ylab="计(窾)",col='royalblue2',ylim=c(0,7))
barplot(table(id.pop.2006$AREA_NO_I)/10000,main='2006跋羆计',xlab='跋(絏)',ylab="计(窾)",col='royalblue2',ylim=c(0,7))
barplot(table(id.pop.2007$AREA_NO_I),main='2007跋羆计',xlab='跋(絏)',ylab="计",col='royalblue2')
barplot(table(id.pop.2008$AREA_NO_I)/10000,main='2008跋羆计',xlab='跋(絏)',ylab="计(窾)",col='royalblue2',ylim=c(0,7))
barplot(table(id.pop.2009$AREA_NO_I)[-length(table(id.pop.2008$AREA_NO_I))]/10000,main='2009跋羆计',xlab='跋(絏)',ylab="计(窾)",col='royalblue2',ylim=c(0,7))
# NOTE(review): `id.pop` is never defined in this script (only id.pop.YYYY
# variables are) — these two lines will fail unless it exists in the session.
table(id.pop$AREA_NO_I)
names(id.pop)
# NOTE(review): describe() is not base R (Hmisc/psych) and no library() call
# loads it here — confirm the intended package is attached.
describe(cd$drug_amt)
describe(subset(cd$drug_amt, drug_amt!=0))
cd=read.csv("D:/data1/temp/20141227/corrected data(total_cd_list_hosparea_2002).csv")
# Frequency of prescription drug days, plotted as a bar chart with an overlay.
table.out=table(cd$drug_day)
bar.out=barplot(table(cd$drug_day))
points(bar.out,c(0:99),type="o",pch=20,cex=2,lwd=3)
plot.new()
# Line chart of the first 50 drug-day values. table.out[i] indexes by
# position; the corresponding day label is i-1 (days start at 0).
plot(y=table.out[1:50],x=c(0:49),col='royalblue3',ylim=c(0,80000),xlab='矪よぱ计',ylab='Ω计',main='矪よぱ计ч絬瓜',pch=20)
lines(y=table.out[1:50],x=c(0:49),col='royalblue2')
# Label only the prominent points (count > 1000) with their day number.
for(i in 1:50){
if (table.out[i]>1000){text(x=i-1,y=table.out[i]+4000,i-1)}
}
# Same chart again with English labels.
plot(y=table.out[1:50],x=c(0:49),col='royalblue3',ylim=c(0,80000),xlab='Drug Day',ylab='Count',main='Line Chart of Drug Day',pch=20)
lines(y=table.out[1:50],x=c(0:49),col='royalblue2')
for(i in 1:50){
if (table.out[i]>1000){text(x=i-1,y=table.out[i]+4000,i-1)}
}
points(table.out[1:100])
|
b34ff74777d74cbda0ae7aeb0ee6d2e662408a0c
|
30eb33fa62cd3f2a8b4b4de48d156647e79df700
|
/R/iViewDatasetControls.R
|
15d1af8260aee447a68d8bc5ccbd676eb4c31024
|
[] |
no_license
|
MindscapeNexus/CardinaliView
|
857d205ba9ad27c35c8108a8b0c50adfa7542915
|
7bd5ff5c92a57b28e1aa84048342ee5755d83327
|
refs/heads/master
| 2021-01-18T10:30:55.814124
| 2014-06-16T10:21:31
| 2014-06-16T10:21:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,219
|
r
|
iViewDatasetControls.R
|
#### Class selecting a dataset ####
## --------------------------------
## Reference class for the dataset-selection control group.
## Builds a framed combobox listing "(no dataset)" plus every MSImageSet in
## the global environment, and wires a changed-handler to it.
## NOTE(review): uuid/interface/plist/widgets/handlers are assigned with <<-,
## so they are presumably fields declared on the parent class iViewControls —
## confirm there; they are not declared in this file's visible portion.
.iViewDatasetControls <- setRefClass("iViewDatasetControls",
contains = "iViewControls",
methods = list(
initialize = function(...) {
# Unique id for this control instance (Cardinal-internal generator).
uuid <<- Cardinal:::uuid()
# Top-level gWidgets container for this control group.
interface <<- ggroup(...)
# Currently selected dataset name; starts as the placeholder.
plist$dataset <<- "(no dataset)"
widgets$dataset.frame <<- gframe(
container=interface,
horizontal=FALSE,
expand=TRUE,
text="Dataset")
# Combobox of selectable datasets: placeholder first, then every
# MSImageSet found in the global environment.
widgets$dataset <<- gcombobox(
container=widgets$dataset.frame,
expand=TRUE,
items=c("(no dataset)", .ls.MSImageSet()))
# React to selection changes; `action=.self` lets the handler reach
# back into this control object.
handlers$dataset <<- addHandlerChanged(
obj=widgets$dataset,
handler=.changed.dataset,
action=.self)
}))
## Handler fired when the dataset combobox selection changes.
## h$obj is the combobox; h$action is the owning iViewDatasetControls object.
.changed.dataset <- function(h, ...) {
# Name of the dataset the user just selected.
dataset <- svalue(h$obj)
# Refresh the combobox item list with the handler blocked, so rewriting
# the items does not re-trigger this handler recursively.
blockHandler(h$action$widgets$dataset, h$action$handlers$dataset)
h$obj[] <- .ls.MSImageSet()
unblockHandler(h$action$widgets$dataset, h$action$handlers$dataset)
# Propagate the new selection to the enclosing view group.
elt <- h$action$findParent("iViewGroup")
elt$update(dataset=dataset)
# Rename the current notebook tab after the selected dataset.
elt <- h$action$findParent("iViewNotebook")
tab <- svalue(elt$interface)
names(elt$interface)[tab] <- dataset
}
## List the names of all MSImageSet objects in the global environment.
## Returns a character vector (empty when none exist).
.ls.MSImageSet <- function() {
flags <- unlist(eapply(globalenv(), is, "MSImageSet"))
names(flags)[flags]
}
|
9840fdb5830e84f790d97a3f343f9457b1bf0f16
|
ac1368f9fd5ef76bec2ee541c91f3b0bc2d975ef
|
/man/list_body.Rd
|
81e932e78ff401346a944d92c275c50c55c436bc
|
[] |
no_license
|
cran/frite
|
9bcf904b11118e32a485ca5f0d28eb26d1609e5d
|
493efb605501751d91f8e841bf3b62c6d9543214
|
refs/heads/master
| 2020-03-27T03:51:56.846778
| 2018-07-01T14:00:06
| 2018-07-01T14:00:06
| 145,894,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 432
|
rd
|
list_body.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_body.R
\name{list_body}
\alias{list_body}
\title{Converts the body of a function to a list}
\usage{
list_body(.f)
}
\arguments{
\item{.f}{A non-primitive function}
}
\value{
A list
}
\description{
This helps you see which code you are changing when using the other functions in
this package.
}
\examples{
list_body(strwrap)
}
|
b412651ec5fc6eedabb898b233354c9a51fb9775
|
4c4450be5daa591b195ebb9517c822fd2ab898fb
|
/Data_Deep_Dive_EP.R
|
a27f362dbbb3e43cb9c9930cb313e7ea307591ec
|
[] |
no_license
|
aweisberg44/nflscrapR_EPA_AW
|
f065eb9573dafe0c1f146d55efcf88ad99b4a531
|
87c33e93fd257c8366eeb5b63a94483db5dc7aad
|
refs/heads/master
| 2020-04-29T01:12:02.356786
| 2019-03-15T01:19:41
| 2019-03-15T01:19:41
| 175,722,571
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,861
|
r
|
Data_Deep_Dive_EP.R
|
# Title: Data_Deep_Dive_EP.R
# Author: Aaron Weisberg
# Created: Feb 04, 2017
# Last Edit: Feb 09, 2017
# Description: This script takes NFL play by play data
#  from 2009 to 2016 and explores the EP training data for relationships
#  between points added and key scoring drivers in football.
#  Examples of such fields include field position, number
#  of timeouts, down and distance, scoring differential,
#  and time on the clock. There is some data cleaning
#  performed at the end of the script to allow for
#  further data exploration later.
# Load required packages
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail loudly here and is the safer choice.
require(utils)
require(dplyr)
require(tidyr)
require(plyr)
require(ggplot2)
require(scales)
require(zoo)
require(corrplot)
require(rpart)
require(rpart.plot)
require(data.table)
require(stats)
# Set up file paths
# NOTE(review): rm(list=ls()) wipes the caller's workspace — a known
# anti-pattern for shared scripts; consider removing it.
rm(list=ls())
path_data <- "C:\\Users\\aweis\\OneDrive\\Documents\\NFL_Analytics\\Data\\"
path_log <- "C:\\Users\\aweis\\OneDrive\\Documents\\NFL_Analytics\\Log_Files\\"
path_out <- "C:\\Users\\aweis\\OneDrive\\Documents\\NFL_Analytics\\Analysis\\"
# Set up log file
# Date-stamped log filename; sink() mirrors console output into it
# (split = T keeps output on the console as well).
log_filename <- paste(path_log, "Data_Deep_Dive_",
gsub(":", "_", gsub("-", "_", gsub(" ", "_", Sys.Date()))), ".txt", sep="")
sink(file = log_filename, append = F, type = c("output", "message"), split = T)
# Bring in training and validation data sets
filename <- paste(path_data, "EPA_Next_Score_Method_Trn.rdata", sep="")
load(filename)
filename <- paste(path_data, "EPA_Next_Score_Method_Val.rdata", sep="")
load(filename)
print("Names of training data fields")
names(trn_data)
print("Names of validation data fields")
names(val_data)
print("Classes of training data fields")
sapply(trn_data, class)
print("Classes of validation data fields")
sapply(val_data, class)
# Create data frame with names and classes of every field
data.info <- NULL
data.info <- data.frame(sapply(trn_data, class))
colnames(data.info) <- "Class"
data.info$Variable <- rownames(data.info)
filename <- paste(path_data, "EP_trn_field_class_info.rdata", sep="")
save(data.info, file = filename, compress = T)
filename <- gsub(".rdata", ".csv", filename)
write.csv(data.info, file = filename)
# Ok after a closer look, we can get rid of the existing WPA and EPA fields
# Since we are going to build our own model, we do not need theirs until later for benchmarking
# But for development, we can get rid of them
# Everything else is a valid predictor for now
WP.Vars <- grep("WP", data.info$Variable, value = T)
ExpPts.Vars <- grep("ExpPts", data.info$Variable, value = T)
trn_data <- trn_data[ , !(names(trn_data) %in% WP.Vars) & !(names(trn_data) %in% ExpPts.Vars)]
val_data <- val_data[ , !(names(val_data) %in% WP.Vars) & !(names(val_data) %in% ExpPts.Vars)]
# Ok now that we have slightly trimmed down our data set, lets start searching for predictors of points added
# Lets make a correlation table. Do not care about covariance since the scaling is often way off for that
# Subset to only numeric useful fields
numeric.vars <- sapply(trn_data, is.numeric)
Not.Necessary.Vars <- c("Play.ID", "Points.Scored", "PlayAttempted")
trn_numeric <- trn_data[ , numeric.vars & !(names(trn_data) %in% Not.Necessary.Vars)]
pearson.table <- data.frame(cor(trn_numeric, method = "pearson", use ='na.or.complete'))
spearman.table <- data.frame(cor(trn_numeric, method = "spearman", use ='na.or.complete'))
filename <- paste(path_out, "Pearson_Correlation_Matrix", gsub("-", "_", Sys.Date()), ".rdata", sep="")
save(pearson.table, file = filename)
gsub(".rdata", ".csv", filename)
write.csv(pearson.table, file = filename)
filename <- paste(path_out, "Spearman_Correlation_Matrix", gsub("-", "_", Sys.Date()), ".rdata", sep="")
save(spearman.table, file = filename)
gsub(".rdata", ".csv", filename)
write.csv(pearson.table, file = filename)
pdf.filename <- paste(path_out, "Pearson_Correlation_Matrix", gsub("-", "_", Sys.Date()), ".pdf", sep="")
pdf(file = pdf.filename)
corrplot(cor(trn_numeric, method = "pearson", use ='na.or.complete'), method = "circle", na.label = "NA", na.label.col = "purple")
dev.off()
pdf.filename <- paste(path_out, "Spearman_Correlation_Matrix", gsub("-", "_", Sys.Date()), ".pdf", sep="")
pdf(file = pdf.filename)
corrplot(cor(trn_numeric, method = "spearman", use ='na.or.complete'), method = "circle", na.label = "NA", na.label.col = "purple")
dev.off()
# Check key character fields and factors for average points added by level
trn_non_numeric <- trn_data[ , (!(names(trn_data) %in% names(trn_numeric)) & !(names(trn_data) %in% Not.Necessary.Vars)) | names(trn_data) %in% "Points.Added"]
names(trn_non_numeric)
Not.Neccessary.Vars2 <- c("GameID", "Date", "Tackler1", "Tackler2", "Partition", "BlockingPlayer", "desc")
trn_non_numeric <- trn_non_numeric[ , !(names(trn_non_numeric) %in% Not.Neccessary.Vars2)]
names(trn_non_numeric)
# Ok lets look at some of our time related variables:
# qtr and Half are 2 obvious ones
trn_non_numeric <- data.table(trn_non_numeric)
qtr.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "qtr"]
Half.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "Half"]
title <- "Points Scored by Quarter"
qtr.plot <- ggplot(data = qtr.data, aes(x = qtr, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("Quarter")
qtr.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = qtr.plot)
title <- "Points Scored by Half"
Half.plot <- ggplot(data = Half.data, aes(x = Half, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("Half")
Half.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = Half.plot)
# Ok now lets check some down, field position, and distance variables
# down, SideofField
down.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "down"]
SideofField.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "SideofField"]
title <- "Points Scored by Down"
down.plot <- ggplot(data = down.data, aes(x = down, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("Down")
down.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = down.plot)
title <- "Points Scored by Side of Field"
SideofField.plot <- ggplot(data = SideofField.data, aes(x = SideofField, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("SideofField")
SideofField.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = SideofField.plot)
# Ok lets take a quick peak at some play attributes
# PlayType, PassLocation, RunLocation, RunGap, PenaltyType
PlayType.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "PlayType"]
PassLocation.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "PassLocation"]
title <- "Points Scored by Play Type"
PlayType.plot <- ggplot(data = PlayType.data, aes(x = PlayType, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("Play Type")
PlayType.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = PlayType.plot)
title <- "Points Scored by Pass Location"
PassLocation.plot <- ggplot(data = PassLocation.data, aes(x = PassLocation, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("PassLocation")
PassLocation.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = PassLocation.plot)
RunLocation.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "RunLocation"]
RunGap.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "RunGap"]
title <- "Points Scored by Run Location"
RunLocation.plot <- ggplot(data = RunLocation.data, aes(x = RunLocation, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("Run Location")
RunLocation.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = RunLocation.plot)
title <- "Points Scored by Run Gap"
RunGap.plot <- ggplot(data = RunGap.data, aes(x = RunGap, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("Run Gap")
RunGap.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = RunGap.plot)
PenaltyType.data <- trn_non_numeric[ , list(avg.points.added = mean(Points.Added)), by = "PenaltyType"]
title <- "Points Scored by Penalty Type"
PenaltyType.plot <- ggplot(data = PenaltyType.data, aes(x = PenaltyType, y = avg.points.added)) + ggtitle(title) + geom_bar(stat = "identity") +
coord_flip() + ylab("Average Points Added") + xlab("PenaltyType")
PenaltyType.plot
filename <- paste(path_out, gsub(" ", "_", title), "_", gsub("-", "_", Sys.Date()), ".png", sep = "")
ggsave(filename = filename, plot = PenaltyType.plot)
########################### CLEAN DATA FOR POTENTIAL RANDOM FOREST EVALUATION #############################
# Ok this was pretty helpful. At least now we know that on average these fields make a difference
# We also learned there is still some data cleaning to be done
# Lets clean up a few fields, then maybe we can try a random forest or CHAID tree to help us with some variable selection
# First data cleaning item - fix all of the NAs for things like extra points, field goals, ect with binary indicator variables
sum(is.na(trn_data$ExPointResult))
sum(is.na(trn_data$TwoPointConv))
sum(is.na(trn_data$DefTwoPoint))
trn_data <- trn_data %>% mutate(PAT_Attempt = ifelse(!(is.na(ExPointResult)) | !(is.na(TwoPointConv)) | !(is.na(DefTwoPoint)),
1, 0))
sum(is.na(trn_data$PAT_Attempt))
trn_data <- data.table(trn_data)
trn_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("ExPointResult")]
trn_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("TwoPointConv")]
trn_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("DefTwoPoint")]
trn_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("DefTwoPoint", "TwoPointConv", "ExPointResult")]
# Ok looks like our PAT_Attempt flag is constructed properly
# Lets fill in some binary indicators now
trn_data <- trn_data %>% mutate(ExPointMade = ifelse(ExPointResult == "Made" & !(is.na(ExPointResult)), 1, 0))
trn_data <- trn_data %>% mutate(TwoPointMade = ifelse(TwoPointConv == "Success" & !(is.na(TwoPointConv)), 1, 0))
trn_data <- trn_data %>% mutate(DefTwoPointMade = ifelse(DefTwoPoint == "Success" & !(is.na(DefTwoPoint)), 1, 0))
trn_data[ , list(SUM_ExPointMade = sum(ExPointMade)), by = c("ExPointResult")]
trn_data[ , list(SUM_TwoPointMade = sum(TwoPointMade)), by = c("TwoPointConv")]
trn_data[ , list(SUM_DefTwoPointMade = sum(DefTwoPointMade)), by = c("DefTwoPoint")]
# Those flags look like they are properly constructed
# Lets Fix Play attempted
sum(is.na(trn_data$PlayAttempted))
# Nevermind it doesn't need to be fixed
# Lets check rush attempt and pass attempt
sum(is.na(trn_data$RushAttempt))
sum(is.na(trn_data$PassAttempt))
# Those look good as well
# Lets check our challenge related fields
sum(is.na(trn_data$Challenge.Replay))
sum(is.na(trn_data$ChalReplayResult))
trn_data[ , list(Count = sum(PlayAttempted)), by = c("ChalReplayResult")]
trn_data <- trn_data %>% mutate(Challenge.Occurred = ifelse(!(is.na(ChalReplayResult)), 1, 0),
Play.Overturned = ifelse(ChalReplayResult == 'Reversed' & !(is.na(ChalReplayResult)), 1, 0),
Play.Upheld = ifelse(ChalReplayResult == 'Upheld' & !(is.na(ChalReplayResult)), 1, 0))
# Those should help us figure out challenge related items
# Lets check out our fields from the kicking game and see if we can spruce some of those up with indicator variables as well
sum(is.na(trn_data$Onsidekick))
sum(is.na(trn_data$PuntResult))
sum(is.na(trn_data$FieldGoalResult))
sum(is.na(trn_data$FieldGoalDistance))
# Ok onside kick is fine, but the other 3 need to be taken care of before we can do exploratory variable searching since
# kicking plays may have a unique effect on points added
# Kicking-game indicator columns (built BEFORE the NA fill, so every string
# comparison is guarded against NA explicitly):
#   PuntBlocked / FieldGoalMade / FieldGoalBlocked — 1 on the matching result.
#   KickBlocked — 1 when EITHER the punt OR the field goal was blocked.
# BUG FIX: the original KickBlocked condition required BOTH PuntResult and
# FieldGoalResult to be non-NA, but a play is at most one kick type (the
# other column is always NA), so the flag could never be set. It is now
# derived from the two per-kick flags; mutate() evaluates its arguments
# sequentially, so columns created above are visible here.
trn_data <- trn_data %>% mutate(
  PuntBlocked = ifelse(PuntResult == 'Blocked' & !(is.na(PuntResult)), 1, 0),
  FieldGoalMade = ifelse(FieldGoalResult == 'Good' & !(is.na(FieldGoalResult)), 1, 0),
  FieldGoalBlocked = ifelse(FieldGoalResult == 'Blocked' & !(is.na(FieldGoalResult)), 1, 0),
  KickBlocked = ifelse(PuntBlocked == 1 | FieldGoalBlocked == 1, 1, 0))
trn_data$FieldGoalDistance <- as.numeric(trn_data$FieldGoalDistance)
trn_data$FieldGoalDistance[is.na(trn_data$FieldGoalDistance)] <- 0
sum(is.na(trn_data$FieldGoalDistance))
# Ok those look better now
# Lets take a look at making some return game variables to clean up missing values there as well
sum(is.na(trn_data$ReturnResult))
trn_data <- trn_data %>% mutate(ReturnTD = ifelse(ReturnResult == 'Touchdown' & !(is.na(ReturnResult)), 1, 0),
FairCatch = ifelse(ReturnResult == 'Fair Catch' & !(is.na(ReturnResult)), 1, 0),
ReturnTouchback = ifelse(ReturnResult == "Touchback" & !(is.na(ReturnResult)), 1, 0))
# Ok now lets check some passing game related fields like completions, deep pass, and short pass to make sure
# we clean up any potential missing values there as well
sum(is.na(trn_data$PassOutcome))
sum(is.na(trn_data$PassLength))
trn_data <- trn_data %>% mutate(CompletedPass = ifelse(PassOutcome == "Complete" & !(is.na(PassOutcome)), 1, 0),
DeepPass = ifelse(PassLength == "Deep" & !(is.na(PassLength)), 1, 0))
############################### Add these new fields to validation data as well ############################################
# Ok this was pretty helpful. At least now we know that on average these fields make a difference
# We also learned there is still some data cleaning to be done
# Lets clean up a few fields, then maybe we can try a random forest or CHAID tree to help us with some variable selection
# First data cleaning item - fix all of the NAs for things like extra points, field goals, ect with binary indicator variables
sum(is.na(val_data$ExPointResult))
sum(is.na(val_data$TwoPointConv))
sum(is.na(val_data$DefTwoPoint))
val_data <- val_data %>% mutate(PAT_Attempt = ifelse(!(is.na(ExPointResult)) | !(is.na(TwoPointConv)) | !(is.na(DefTwoPoint)),
1, 0))
sum(is.na(val_data$PAT_Attempt))
val_data <- data.table(val_data)
val_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("ExPointResult")]
val_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("TwoPointConv")]
val_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("DefTwoPoint")]
val_data[ , list(SUM_PAT_ATTEMPT = sum(PAT_Attempt)), by = c("DefTwoPoint", "TwoPointConv", "ExPointResult")]
# Ok looks like our PAT_Attempt flag is constructed properly
# Lets fill in some binary indicators now
val_data <- val_data %>% mutate(ExPointMade = ifelse(ExPointResult == "Made" & !(is.na(ExPointResult)), 1, 0))
val_data <- val_data %>% mutate(TwoPointMade = ifelse(TwoPointConv == "Success" & !(is.na(TwoPointConv)), 1, 0))
val_data <- val_data %>% mutate(DefTwoPointMade = ifelse(DefTwoPoint == "Success" & !(is.na(DefTwoPoint)), 1, 0))
val_data[ , list(SUM_ExPointMade = sum(ExPointMade)), by = c("ExPointResult")]
val_data[ , list(SUM_TwoPointMade = sum(TwoPointMade)), by = c("TwoPointConv")]
val_data[ , list(SUM_DefTwoPointMade = sum(DefTwoPointMade)), by = c("DefTwoPoint")]
# Those flags look like they are properly constructed
# Lets Fix Play attempted
sum(is.na(val_data$PlayAttempted))
# Nevermind it doesn't need to be fixed
# Lets check rush attempt and pass attempt
sum(is.na(val_data$RushAttempt))
sum(is.na(val_data$PassAttempt))
# Those look good as well
# Lets check our challenge related fields
sum(is.na(val_data$Challenge.Replay))
sum(is.na(val_data$ChalReplayResult))
val_data[ , list(Count = sum(PlayAttempted)), by = c("ChalReplayResult")]
val_data <- val_data %>% mutate(Challenge.Occurred = ifelse(!(is.na(ChalReplayResult)), 1, 0),
Play.Overturned = ifelse(ChalReplayResult == 'Reversed' & !(is.na(ChalReplayResult)), 1, 0),
Play.Upheld = ifelse(ChalReplayResult == 'Upheld' & !(is.na(ChalReplayResult)), 1, 0))
# Those should help us figure out challenge related items
# Lets check out our fields from the kicking game and see if we can spruce some of those up with indicator variables as well
sum(is.na(val_data$Onsidekick))
sum(is.na(val_data$PuntResult))
sum(is.na(val_data$FieldGoalResult))
sum(is.na(val_data$FieldGoalDistance))
# Ok onside kick is fine, but the other 3 need to be taken care of before we can do exploratory variable searching since
# kicking plays may have a unique effect on points added
# Kicking-game indicator columns for the validation set (pre-NA-fill; every
# string comparison is NA-guarded).
# BUG FIX: the original KickBlocked expression here had an operator-precedence
# bug (`A | B & !na & !na` parses as `A | (B & ...)`), which produced NA for
# field-goal plays and also disagreed with the training-set construction.
# KickBlocked is now derived from the per-kick flags, mirroring trn_data;
# mutate() evaluates its arguments sequentially, so the columns created above
# are visible here.
val_data <- val_data %>% mutate(
  PuntBlocked = ifelse(PuntResult == 'Blocked' & !(is.na(PuntResult)), 1, 0),
  FieldGoalMade = ifelse(FieldGoalResult == 'Good' & !(is.na(FieldGoalResult)), 1, 0),
  FieldGoalBlocked = ifelse(FieldGoalResult == 'Blocked' & !(is.na(FieldGoalResult)), 1, 0),
  KickBlocked = ifelse(PuntBlocked == 1 | FieldGoalBlocked == 1, 1, 0))
val_data$FieldGoalDistance <- as.numeric(val_data$FieldGoalDistance)
val_data$FieldGoalDistance[is.na(val_data$FieldGoalDistance)] <- 0
sum(is.na(val_data$FieldGoalDistance))
# Ok those look better now
# Lets take a look at making some return game variables to clean up missing values there as well
sum(is.na(val_data$ReturnResult))
val_data <- val_data %>% mutate(ReturnTD = ifelse(ReturnResult == 'Touchdown' & !(is.na(ReturnResult)), 1, 0),
FairCatch = ifelse(ReturnResult == 'Fair Catch' & !(is.na(ReturnResult)), 1, 0),
ReturnTouchback = ifelse(ReturnResult == "Touchback" & !(is.na(ReturnResult)), 1, 0))
# Ok now lets check some passing game related fields like completions, deep pass, and short pass to make sure
# we clean up any potential missing values there as well
sum(is.na(val_data$PassOutcome))
sum(is.na(val_data$PassLength))
val_data <- val_data %>% mutate(CompletedPass = ifelse(PassOutcome == "Complete" & !(is.na(PassOutcome)), 1, 0),
DeepPass = ifelse(PassLength == "Deep" & !(is.na(PassLength)), 1, 0))
# Ok all the indicators we wanted are set up. Let's do some work on cleaning up the factor data
names(Filter(is.factor, trn_data))
# Ok none of those should be missing, but lets double check
sum(is.na(trn_data$qtr))
sum(is.na(trn_data$down))
sum(is.na(trn_data$Half))
# Fix Down
trn_data$down <- as.character(trn_data$down)
trn_data$down[is.na(trn_data$down)] <- "No Down"
sum(is.na(trn_data$down))
# Lets check all of our character fields. Some of these we made indicators for, but we should fill the NAs in
# with something for use in random forests
names(Filter(is.character, trn_data))
# alright lets double check for some obvious ones we know shouldn't have an NAs
sum(is.na(trn_data$GameID))
sum(is.na(trn_data$time))
sum(is.na(trn_data$SideofField))
sum(is.na(trn_data$posteam))
sum(is.na(trn_data$DefensiveTeam))
sum(is.na(trn_data$desc))
sum(is.na(trn_data$HomeTeam))
sum(is.na(trn_data$AwayTeam))
sum(is.na(trn_data$Scoring.Team))
sum(is.na(trn_data$Partition))
# Turns out defensive team and side of field need some light work
trn_data$SideofField[is.na(trn_data$SideofField)] <- "Missing"
trn_data$DefensiveTeam[is.na(trn_data$DefensiveTeam)] <- "Missing"
# Some of these we don't need for expected points modeling - get rid of them
trn_data$Passer <- NULL
trn_data$Interceptor <- NULL
trn_data$Rusher <- NULL
trn_data$Receiver <- NULL
trn_data$Returner <- NULL
trn_data$BlockingPlayer <- NULL
trn_data$Tackler1 <- NULL
trn_data$Tackler2 <- NULL
trn_data$RecFumbPlayer <- NULL
trn_data$PenalizedPlayer <- NULL
# For the rest of these - fill in with applicable values
# Check missing first
sum(is.na(trn_data$ExPointResult))
sum(is.na(trn_data$TwoPointConv))
sum(is.na(trn_data$DefTwoPoint))
sum(is.na(trn_data$PuntResult))
sum(is.na(trn_data$PlayType))
sum(is.na(trn_data$PassOutcome))
sum(is.na(trn_data$PassLength))
sum(is.na(trn_data$PassLocation))
sum(is.na(trn_data$RunLocation))
sum(is.na(trn_data$RunGap))
sum(is.na(trn_data$ReturnResult))
sum(is.na(trn_data$FieldGoalResult))
sum(is.na(trn_data$RecFumbTeam))
sum(is.na(trn_data$ChalReplayResult))
sum(is.na(trn_data$PenalizedTeam))
sum(is.na(trn_data$PenaltyType))
# Besides PlayType, all of these have missing values to fill in
trn_data$ExPointResult[is.na(trn_data$ExPointResult)] <- "No Attempt"
table(trn_data$ExPointResult)
trn_data$TwoPointConv[is.na(trn_data$TwoPointConv)] <- "No Attempt"
table(trn_data$TwoPointConv)
trn_data$DefTwoPoint[is.na(trn_data$DefTwoPoint)] <- "No Attempt"
table(trn_data$DefTwoPoint)
trn_data$PuntResult[is.na(trn_data$PuntResult)] <- "No Punt"
table(trn_data$PuntResult)
trn_data$PassOutcome[is.na(trn_data$PassOutcome)] <- "No Pass"
table(trn_data$PassOutcome)
trn_data$PassLength[is.na(trn_data$PassLength)] <- "No Pass"
table(trn_data$PassLength)
trn_data$PassLocation[is.na(trn_data$PassLocation)] <- "No Pass"
table(trn_data$PassLocation)
trn_data$RunLocation[is.na(trn_data$RunLocation)] <- "No Rush"
table(trn_data$RunLocation)
trn_data$RunGap[is.na(trn_data$RunGap)] <- "No Rush"
table(trn_data$RunGap)
trn_data$ReturnResult[is.na(trn_data$ReturnResult)] <- "No Return"
table(trn_data$ReturnResult)
trn_data$FieldGoalResult[is.na(trn_data$FieldGoalResult)] <- "No FG"
table(trn_data$FieldGoalResult)
trn_data$RecFumbTeam[is.na(trn_data$RecFumbTeam)] <- "No Fumble"
table(trn_data$RecFumbTeam)
trn_data$ChalReplayResult[is.na(trn_data$ChalReplayResult)] <- "No Challenge"
table(trn_data$ChalReplayResult)
trn_data$PenalizedTeam[is.na(trn_data$PenalizedTeam)] <- "No Penalty"
table(trn_data$PenalizedTeam)
trn_data$PenaltyType[is.na(trn_data$PenaltyType)] <- "No Penalty"
table(trn_data$PenaltyType)
# All good, but penalty type is a bit long. Lets get rid of it since it isn't really suitable for modeling
# If we need it later, we can bring it back from the master data file
trn_data$PenaltyType <- NULL
# We should also check the numeric fields for NAs. Most of these should be fine, but lets double check
names(Filter(is.numeric, trn_data))
# Ok first lets check the ones we know are fine
sum(is.na(trn_data$Drive))
sum(is.na(trn_data$TimeUnder))
sum(is.na(trn_data$TimeSecs))
sum(is.na(trn_data$PlayTimeDiff))
sum(is.na(trn_data$yrdln))
sum(is.na(trn_data$yrdline100))
sum(is.na(trn_data$ydstogo))
sum(is.na(trn_data$ydsnet))
sum(is.na(trn_data$GoalToGo))
sum(is.na(trn_data$FirstDown))
sum(is.na(trn_data$PlayAttempted))
sum(is.na(trn_data$Yards.Gained))
sum(is.na(trn_data$sp))
sum(is.na(trn_data$Touchdown))
sum(is.na(trn_data$Safety))
sum(is.na(trn_data$Onsidekick))
sum(is.na(trn_data$PassAttempt))
sum(is.na(trn_data$InterceptionThrown))
# ---- trn_data: numeric NA audit & repair -------------------------------------
# Print an NA count per column so the sink() log records the audit.
# (Accepted.Penalty was audited twice in the original; the duplicate is kept
# so the log line count is unchanged.)
trn_num_chk_cols <- c("RushAttempt", "Reception", "Fumble", "Sack",
                      "Challenge.Replay", "Accepted.Penalty", "Penalty.Yards",
                      "PosTeamScore", "DefTeamScore", "ScoreDiff",
                      "AbsScoreDiff", "Accepted.Penalty", "Play.ID",
                      "Points.Added", "Season")
for (col in trn_num_chk_cols) print(sum(is.na(trn_data[[col]])))
# Ok we have to fix TimeSecs, PlayTimeDiff, yrdln, yrdline100, GoalToGo,
# FirstDown, PlayAttempted, PosTeamScore, DefTeamScore, ScoreDiff, AbsScoreDiff
# Columns where a missing value means "nothing happened": set to 0.
for (col in c("PlayTimeDiff", "PlayAttempted", "FirstDown", "GoalToGo")) {
  trn_data[[col]][is.na(trn_data[[col]])] <- 0
}
# Game-state columns: carry the value forward from the last completed play
# within each game (LOCF via zoo::na.locf, grouped by GameID with ave()),
# then log the remaining NA count for each column.
trn_locf_cols <- c("TimeSecs", "yrdln", "yrdline100", "PosTeamScore",
                   "DefTeamScore", "ScoreDiff", "AbsScoreDiff")
for (col in trn_locf_cols) {
  trn_data[[col]] <- ave(trn_data[[col]], trn_data$GameID,
                         FUN = function(v) na.locf(v, na.rm = FALSE, fromLast = FALSE))
  print(sum(is.na(trn_data[[col]])))
}
# ---- val_data: factor & character clean-up -----------------------------------
# Apply the same cleaning steps to the validation data set.
# Inspect the factor columns first.
names(Filter(is.factor, val_data))
# None of these should be missing, but double check (counts go to the log).
for (col in c("qtr", "down", "Half")) print(sum(is.na(val_data[[col]])))
# "down" is NA on kicking situations and extra points; make that an explicit
# level instead of a missing value.
val_data$down <- as.character(val_data$down)
val_data$down[is.na(val_data$down)] <- "No Down"
sum(is.na(val_data$down))
# Character fields: some already have indicator columns, but the NAs still
# need filling with something for use in random forests.
names(Filter(is.character, val_data))
# Columns that should never be missing — audit only.
val_key_cols <- c("GameID", "time", "SideofField", "posteam", "DefensiveTeam",
                  "desc", "HomeTeam", "AwayTeam", "Scoring.Team", "Partition")
for (col in val_key_cols) print(sum(is.na(val_data[[col]])))
# Player-level fields are not needed for expected-points modeling; drop them.
val_player_cols <- c("Passer", "Interceptor", "Rusher", "Receiver", "Returner",
                     "BlockingPlayer", "Tackler1", "Tackler2", "RecFumbPlayer",
                     "PenalizedPlayer")
for (col in val_player_cols) val_data[[col]] <- NULL
# Audit the event-outcome fields (same order as the original script so the
# log lines match), then fill each with an explicit "did not occur" level.
val_audit_cols <- c("ExPointResult", "TwoPointConv", "DefTwoPoint",
                    "PuntResult", "PlayType", "PassOutcome", "PassLength",
                    "PassLocation", "RunLocation", "RunGap", "ReturnResult",
                    "FieldGoalResult", "RecFumbTeam", "ChalReplayResult",
                    "PenalizedTeam", "PenaltyType")
for (col in val_audit_cols) print(sum(is.na(val_data[[col]])))
# Besides PlayType, all of these have missing values to fill in.
val_na_fill <- c(ExPointResult    = "No Attempt",
                 TwoPointConv     = "No Attempt",
                 DefTwoPoint      = "No Attempt",
                 PuntResult       = "No Punt",
                 PassOutcome      = "No Pass",
                 PassLength       = "No Pass",
                 PassLocation     = "No Pass",
                 RunLocation      = "No Rush",
                 RunGap           = "No Rush",
                 ReturnResult     = "No Return",
                 FieldGoalResult  = "No FG",
                 RecFumbTeam      = "No Fumble",
                 ChalReplayResult = "No Challenge",
                 PenalizedTeam    = "No Penalty")
for (col in names(val_na_fill)) {
  val_data[[col]][is.na(val_data[[col]])] <- val_na_fill[[col]]
  print(table(val_data[[col]]))
}
# PenaltyType values are long and high-cardinality — unsuitable for modeling.
# Drop it; it can be recovered from the master data file if needed later.
val_data$PenaltyType <- NULL
# ---- val_data: numeric NA audit & repair -------------------------------------
# Most numeric fields should be fine, but double check every one (same order
# as the original script so the log lines match).
names(Filter(is.numeric, val_data))
val_num_chk_cols <- c("Drive", "TimeUnder", "TimeSecs", "PlayTimeDiff",
                      "yrdln", "yrdline100", "ydstogo", "ydsnet", "GoalToGo",
                      "FirstDown", "PlayAttempted", "Yards.Gained", "sp",
                      "Touchdown", "Safety", "Onsidekick", "PassAttempt",
                      "InterceptionThrown", "RushAttempt", "Reception",
                      "Fumble", "Sack", "Challenge.Replay", "Accepted.Penalty",
                      "Penalty.Yards", "PosTeamScore", "DefTeamScore",
                      "ScoreDiff", "AbsScoreDiff", "Accepted.Penalty",
                      "Play.ID", "Points.Added", "Season")
for (col in val_num_chk_cols) print(sum(is.na(val_data[[col]])))
# Ok we have to fix TimeSecs, PlayTimeDiff, yrdln, yrdline100, GoalToGo,
# FirstDown, PlayAttempted, PosTeamScore, DefTeamScore, ScoreDiff, AbsScoreDiff
# Columns where a missing value means "nothing happened": set to 0.
for (col in c("PlayTimeDiff", "PlayAttempted", "FirstDown", "GoalToGo")) {
  val_data[[col]][is.na(val_data[[col]])] <- 0
}
# Game-state columns: carry the value forward from the last completed play
# within each game (LOCF via zoo::na.locf, grouped by GameID with ave()),
# then log the remaining NA count for each column.
val_locf_cols <- c("TimeSecs", "yrdln", "yrdline100", "PosTeamScore",
                   "DefTeamScore", "ScoreDiff", "AbsScoreDiff")
for (col in val_locf_cols) {
  val_data[[col]] <- ave(val_data[[col]], val_data$GameID,
                         FUN = function(v) na.locf(v, na.rm = FALSE, fromLast = FALSE))
  print(sum(is.na(val_data[[col]])))
}
# ---- persist the cleaned data sets in both .rdata and .csv form --------------
filename <- paste0(path_data, "trn_data_post_deep_dive.rdata")
save(trn_data, file = filename)
filename <- gsub(".rdata", ".csv", filename)
write.csv(trn_data, file = filename)

filename <- paste0(path_data, "val_data_post_deep_dive.rdata")
save(val_data, file = filename)
filename <- gsub(".rdata", ".csv", filename)
write.csv(val_data, file = filename)

# Close the sink() log opened at the top of the script
sink()
|
838448ce4d3f6ccab21189588b44e82e32104499
|
a177b4eb653bbd30224bc02b61eccfe37f17b073
|
/r-for-data-science/Chapter16_Vectors/vectors.R
|
635cd0f71f98659eadc946988aec2f73a92c3993
|
[] |
no_license
|
cholzkorn/data-science
|
566e8a40080622c0810ddd5e2812736dabdf3e82
|
f7e2c2898132a63e30ea632604d1ec97724cfbd2
|
refs/heads/master
| 2021-06-08T23:32:41.494590
| 2020-01-03T20:25:09
| 2020-01-03T20:25:09
| 148,336,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,767
|
r
|
vectors.R
|
rm(list=ls())
# We will use functions from the purr package, which is included in the tidyverse
library(tidyverse)
# There are two types of vectors:
# Atmomic vectors: logical, integer, double, character, complex and raw
# integer and double are also called numeric vectors
# Lists, which are sometimes called recursive vectors because they may
# contain other lists
# Every vector has two key properties
# 1. Its type, which you can determine with typeof()
typeof(letters)
typeof(1:10)
# 2. Its length, which you can determine with length()
x <- list("a", "b", 1:10)
length(x)
# Vectors can also contain arbitrary additional metadata in the form
# of attributes. These attributes are used to create augmented vectors,
# which build on additional behavior. There are four important types
# of augmented vector:
# Factors are built on top of integer vectors.
# Dates and date-times are built on top of numeric vectors.
# Data frames and tibbles are built on top of lists.
# ---- IMPORTANT TYPES OF ATOMIC VECTOR ------------------------------------------- #
## LOGICAL
# Logical vectors take three types of values: TRUE, FALSE and NA
# They are usually constructed with comparison operators
1:10 %% 3 == 0
c(TRUE, FALSE, TRUE, TRUE, NA)
## NUMERIC
# Integer and double vectors are known collectively as numeric vec-
# tors. In R, numbers are doubles by default. To make an integer, place
# a L after the number:
typeof(1)
typeof(1L)
1.5L
# Doubles are approximations. Doubles represent floating-point
# numbers that cannot always be precisely represented with a
# fixed amount of memory. This means that you should consider
# all doubles to be approximations. For example, what is the square of
# the square root of two?
x <- sqrt(2) ^ 2
x
x - 2
# Integers have one special value, NA, while doubles have four, NA,
# NaN, Inf, and -Inf. All three special values can arise during
# division:
c(-1, 0 , 1) / 0
# Avoid using == to check for these other special values. Instead
# use the helper functions is.finite(), is.infinite(), and
# is.nan():
## CHARACTER
# R uses a global string pool. This means
# that each unique string is only stored in memory once, and every
# use of the string points to that representation. This reduces the
# amount of memory needed by duplicated strings. You can see this
# behavior in practice with pryr::object_size()
x <- "This is a reasonably long string."
pryr::object_size(x)
y <- rep(x, 1000)
pryr::object_size(y)
## MISSING VALUES
# Note that each type of atomic vector has its own missing value:
NA # logical
NA_integer_ # integer
NA_real_ # double
NA_character_ # character
# ---- USING ATOMIC VECTORS -------------------------------------------------------- #
## COERCION
# There are two ways to convert, or coerce, one type of vector to
# another:
# 1. Explicitly: as.logical(), as.integer(), as.double(), as.character()
# 2. Implicit coercion happens when you use a vector in a specific
# context that expects a certain type of vector. For example, when
# you use a logical vector with a numeric summary function, or
# when you use a double vector where an integer vector is
# expected.
x <- sample(20, 100, replace = TRUE)
y <- x > 10
sum(y) # how many are greater than 10?
mean(y) # what proportion are greater than 10?
# You may see some code (typically older) that relies on implicit coer-
# cion in the opposite direction, from integer to logical:
if (length(x)) {
# do something
}
# In this case, 0 is converted to FALSE and everything else is converted
# to TRUE. I think this makes it harder to understand your code, and I
# don't recommend it. Instead be explicit: length(x) > 0.
# It's also important to understand what happens when you try and
# create a vector containing multiple types with c()-the most com-
# plex type always wins:
typeof(c(TRUE, 1L))
typeof(c(1L, 1.5))
typeof(c(1.5, "a"))
# An atomic vector cannot have a mix of different types because the
# type is a property of the complete vector, not the individual ele-
# ments. If you need to mix multiple types in the same vector, you
# should use a list, which you'll learn about shortly
# ---- TEST FUNCTIONS --------------------------------------------------------- #
# The base R functions like typeof() often return misleading results
# It's better to use the is_* functions of the purr package
# is_logical()
# is_integer()
# is_double()
# is_numeric()
# is_character()
# is_atomic()
# is_list()
# is_vector()
# Each predicate also comes with a "scalar" version, like
# is_scalar_atomic(), which checks that the length is 1. This is use-
# ful, for example, if you want to check that an argument to your func-
# tion is a single logical value.
# ---- SCALARS AND RECYCLING RULES --------------------------------------------- #
# As well as implicitly coercing the types of vectors to be compatible,
# will also implicitly coerce the length of vectors. This is called vec-
# tor recycling, because the shorter vector is repeated, or recycled, to
# the same length as the longer vector.
# This is generally most useful when you are mixing vectors and
# "scalars." I put scalars in quotes because R doesn't actually have
# scalars: instead, a single number is a vector of length 1. Because
# there are no scalars, most built-in functions are vectorized, meaning
# that they will operate on a vector of numbers. That's why, for exam-
# ple, this code works:
sample(10) + 100
runif(10) > 0.5
# Recycling: the shorter vector is repeated to the longer vector's length
1:10 + 1:2
1:10 + 1:3 # 10 is not a multiple of 3, so recycling warns
# the vectorized functions in tidyverse will throw errors when you recycle
# anything other than a scalar
tibble(x = 1:4, y = 1:2)
# If you do want to recycle, you'll need to do it yourself with rep()
# (FIX: the original left this column unnamed — `rep(1:2, 2)` became a
# machine-generated column name instead of demonstrating the explicit fix)
tibble(x = 1:4, y = rep(1:2, 2))
# ---- NAMING VECTORS ---------------------------------------------------------- #
# All types of vectors can be named. You can name them during cre-
# ation with c():
c(x = 1, y = 2, z = 4)
# ... or afterwards wir purrr::set_names()
purrr::set_names(1:3, c("a", "b", "c"))
# ---- SUBSETTING ---------------------------------------------------------------- #
# dplyr::filter only works with a tibble, so we need something else for vectors
# There are four types of things that you can subset a vector with:
# A numeric vector containing only integers. The integers must
# either be all positive, all negative, or zero.
# Subsetting with positive integers keeps the elements at those
# positions:
x <- c("one", "two", "three", "four", "five")
x[c(3, 2, 5)]
# By repeating a position, you can actually make a longer output than input:
x[c(1, 1, 5, 5, 5, 2)]
# Negative values drop the elements at the specified positions:
x[c(-1, -3, -5)]
# It's an error to mix positive and negative values:
x[c(-1, 1)]
# The error message mentions subsetting with zero, which returns no values:
x[0]
# Subsetting with a logical vector keeps all values corresponding
# to a TRUE value. This is most often useful in conjunction with
# the comparison functions:
x <- c(10, 3, NA, 5, 8, 1, NA)
# All non-missing values of x
x[!is.na(x)]
# All even (or missing!) values of x
x[x %% 2 == 0]
# If you have a named vector, you can subset it with a character vector:
x <- c(abc = 1, def = 2, xyz = 5)
x[c("abc", "xyz")]
# The simplest type of subsetting is nothing, x[], which returns
# the complete x. This is not useful for subsetting vectors, but it is
# useful when subsetting matrices (and other high-dimensional
# structures) because it lets you select all the rows or all the col-
# umns, by leaving that index blank.
# ---- RECURSIVE VECTORS (LISTS) ---------------------------------------------------- #
# Lists are a step up in complexity from atomic vectors, because lists
# can contain other lists. This makes them suitable for representing
# hierarchical or tree-like structures. You create a list with list():
x <- list(1, 2, 3)
x
# A very useful tool for working with lists is str() because it focuses
# on the structure, not the contents:
str(x)
x_named <- list(a = 1, b = 2, c = 3)
str(x_named)
# Unlike atomic vectors, lists() can contain a mix of objects:
y <- list("a", 1L, 1.5, TRUE)
str(y)
# Lists can even contain other lists!
z <- list(list(1, 2), list(3, 4))
str(z)
# ---- SUBSETTING LISTS ------------------------------------------------------------ #
# There are three ways to subset a list, which I'll illustrate with a:
a <- list(a = 1:3, b = "a string", c = pi, d = list(-1, -5))
a
# [ extracts a sublist. The result will always be a list:
str(a[1:2])
# [[ extracts a single component from a list. It removes a level of
# hierarchy from the list:
str(y[[1]])
str(y[[4]])
# $ is a shorthand for extracting named elements of a list. It works
# similarly to [[ except that you don't need to use quotes:
a$a
a[["a"]]
# The distinction between [ and [[ is really important for lists,
# because [[ drills down into the list while [ returns a new, smaller list.
# ---- LIST ATTRIBUTES --------------------------------------------------------------- #
# Any vector can contain arbitrary additional metadata through its
# attributes. You can think of attributes as a named list of vectors that
# can be attached to any object. You can get and set individual
# attribute values with attr() or see them all at once with attributes():
x <- 1:10
attr(x, "greeting")
attr(x, "greeting") <- "Hi!"
attr(x, "farewell") <- "Bye!"
attributes(x)
x
# There are three very important attributes that are used to implement
# fundamental parts of R:
# Names are used to name the elements of a vector
# Dimensions (dims, for short) make a vector behave like a matrix or array
# Class is used to implement the S3 object-oriented system.
# You've seen names earlier, and we won't cover dimensions because
# we don't use matrices in this book. It remains to describe the class,
# which controls how generic functions work. Generic functions are
# key to object-oriented programming in R, because they make func-
# tions behave differently for different classes of input. A detailed dis-
# cussion of object-oriented programming is beyond the scope of this
# book, but you can read more about it in Advanced R.
# Here's what a typical generic function looks like:
as.Date
# The call to "UseMethod" means that this is a generic function, and it
# will call a specific method, a function, based on the class of the first
# argument. (All methods are functions; not all functions are meth-
# ods.) You can list all the methods for a generic with methods():
methods("as.Date")
# For example, if x is a character vector, as.Date() will call
# as.Date.character(); if it's a factor, it'll call as.Date.factor().
# You can see the specific implementation of a method with
# getS3method():
getS3method("as.Date", "default")
getS3method("as.Date", "numeric")
# The most important S3 generic is print(): it controls how the
# object is printed when you type its name at the console. Other
# important generics are the subsetting functions [, [[, and $.
# ---- AUGMENTED VECTORS ---------------------------------------------------- #
# factors and dates are called augmented vectors, because they have additional
# attributes, including class. Because augmented vectors have a class, they behave
# differently to the atomic vector on which they are built. In this book, we make use
# of four important augmented vectors:
## FACTORS
# Factors are designed to represent categorical data that can take a
# fixed set of possible values. Factors are built on top of integers, and
# have a levels attribute:
x <- factor(c("ab", "cd", "ab"), levels = c("ab", "cd", "ef"))
typeof(x)
attributes(x)
## DATES AND DATE-TIMES
# Dates in R are numeric vectors that represent the number of days since
# 1 January 1970
x <- as.Date("1971-01-01")
unclass(x)
typeof(x)
attributes(x)
# Date-times are numeric vectors with class POSIXct that represent
# the number of seconds since 1 January 1970. (In case you were won-
# dering, "POSIXct" stands for "Portable Operating System Interface,"
# calendar time.)
x <- lubridate::ymd_hm("1970-01-01 01:00")
unclass(x)
# The tzone attribute is optional. It controls how the time is printed,
# not what absolute time it refers to:
attr(x, "tzone") <- "US/Pacific"
x
attr(x, "tzone") <- "US/Eastern"
x
# There is another type of date-times called POSIXlt. These are built
# on top of named lists:
y <- as.POSIXlt(x)
typeof(y)
attributes(y)
# POSIXlts are rare inside the tidyverse. They do crop up in base R,
# because they are needed to extract specific components of a date,
# like the year or month. Since lubridate provides helpers for you to
# do this instead, you don't need them. POSIXct's are always easier to
# work with, so if you find you have a POSIXlt, you should always
# convert it to a regular date-time with lubridate::as_datetime().
# ---- TIBBLES ----------------------------------------------------------- #
# Tibbles are augmented lists. They have three classes: tbl_df, tbl,
# and data.frame. They have two attributes: (column) names and
# row.names.
tb <- tibble::tibble(x = 1:5, y = 5:1)
typeof(tb)
attributes(tb)
# Traditional data.frames have a very similar structure:
# (FIX: the original read `x = 1.5`, a typo for `x = 1:5` — a scalar that was
# silently recycled instead of paralleling the tibble(x = 1:5, y = 5:1) above)
df <- data.frame(x = 1:5, y = 5:1)
typeof(df)
attributes(df)
# The main difference is the class. The class of tibble includes
# "data.frame," which means tibbles inherit the regular data frame
# behavior by default.
# The difference between a tibble or a data frame and a list is that all
# of the elements of a tibble or data frame must be vectors with the
# same length. All functions that work with tibbles enforce this con-
# straint.
|
17731a750bb19418062b395e2fe812da4160aa4c
|
72329732fb914e85559b59109bcdd7b609367051
|
/file_selector.R
|
f8ceda089a9be320e7ff52a18e242f6b9ab26768
|
[] |
no_license
|
Altanastor/Momoshop
|
7e94a22f0ea78b264399b1e66f5ecebd42046579
|
04f3cb519fee1bf69e42d9e67ac07c8f24f54b6e
|
refs/heads/master
| 2020-04-19T06:56:18.698362
| 2018-02-14T16:19:12
| 2018-02-14T16:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,180
|
r
|
file_selector.R
|
# Dependencies -----
library(tidyverse)
library(magick)
# shiny related
library(shiny)
library(shinydashboard)
library(shinyFiles)
library(DT)
# domestic functions
# remove path, keep filename
# basename() is the purpose-built base helper for this; it replaces the
# previous strsplit("/") + sapply() round trip (and avoids sapply's
# type instability on empty input).
trim_path <- function(x){
  basename(x)
}
# remove extension, keep filename
# Strips one trailing dot + 3 alphanumeric characters (".jpg", ".png", ".mom").
# Direct sub() call instead of the magrittr pipe / `.` placeholder; sub() is
# sufficient because the $-anchored pattern can match at most once.
trim_ext <- function(x){
  sub("\\.[[:alnum:]]{3}$", "", x)
}
# remove path and extension, keep filename
# Plain function composition in place of the magrittr pipeline.
trim_both <- function(x){
  trim_ext(trim_path(x))
}
## ui.R ##
# Sidebar: two navigation entries, "Files" (the main tab) and a placeholder
# "Widgets" tab; tabName values must match the tabItem()s in ui_body below.
ui_sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem("Files", tabName = "files", icon = icon("file-photo-o")),
    menuItem("Widgets", icon = icon("th"), tabName = "more")
  )
)
# Body: the "files" tab hosts the folder picker, the current-file display,
# prev/next navigation buttons, and the file table rendered by the server.
ui_body <- dashboardBody(
  tabItems(
    # Files tab
    tabItem(tabName = "files",
      h2("Files"),
      # Button to pick a folder (handled by shinyDirChoose in the server)
      shinyDirButton('directory', 'Folder select', 'Select the folder containing images'),
      # Current file (path of the active image, from output$active_file)
      h3("Current file:"),
      verbatimTextOutput("active_file"),
      br(),
      # Buttons to navigate files backwards / forwards
      actionButton("minus", icon("chevron-left")),
      actionButton("plus", icon("chevron-right")),
      # Display selected folder (output$dir)
      textOutput("dir"),
      # File table (output$lf, a DT datatable)
      DTOutput("lf")
    ),
    # More tab — placeholder, no content yet
    tabItem(tabName = "more",
      h2("...")
    )
  )
)
# Put them together into a dashboardPage
ui <- dashboardPage(
  dashboardHeader(title = "Folder select"),
  ui_sidebar,
  ui_body
)
# Shiny server: wires the folder picker to a reactive file listing, renders
# the listing as a DT table, and tracks/navigates the "active" image file.
server <- function(input, output, session) {
  # Files ==================================================
  # select a folder (roots restricted to the user's home directory)
  volumes <- c(volumes="~")
  shinyDirChoose(input, 'directory', roots=volumes, session=session)
  # print selected folder (or a placeholder before anything is chosen)
  output$dir <- renderPrint({
    if (length(input$directory)){
      cat(paste("Working in", parseDirPath(volumes, input$directory)))
    } else {
      cat("None folder selected")
    }
  })
  # reactive data.frame for images and associated .mom files:
  # one row per image (name, path, mom = whether a matching .mom exists)
  lf <- reactive({
    dir <- parseDirPath(volumes, input$directory)
    # if dir has not been selected yet
    if (length(dir)==0)
      return(NULL)
    # list all files in dir
    # NOTE(review): `rec=` is a partial match for `recursive=` — works but
    # fragile; confirm against list.files() before relying on it.
    lf <- list.files(dir, full.names=TRUE, rec=TRUE)
    # images among them
    # NOTE(review): alternation precedence makes this match ".jpg" anywhere
    # OR a trailing "png"; the intended pattern is likely "\\.(jpg|png)$".
    lf_img <- grep("\\.(jpg)|(png)$", lf, value=TRUE)
    # .mom among them
    lf_mom <- grep("\\.mom$", lf, value=TRUE)
    # prepare a df to store this
    df <- data.frame(name=trim_both(lf_img),
                     path=lf_img,
                     mom=FALSE,
                     stringsAsFactors = FALSE)
    # img with a .mom file receive a TRUE (matched by bare filename)
    img_with_mom <- match(trim_both(lf_mom), trim_both(lf_img))
    df$mom[img_with_mom] <- TRUE
    # handling edited files
    # img_with_edited <- x[x %in% trim_suffix(x)]
    # finally return the table
    df
  })
  # output lf() with DT; rows with a .mom companion are highlighted green
  output$lf <- renderDT({
    if (is.null(lf()))
      return(NULL)
    # # prepare the color vector for active file
    # active_color <- rep("white", nrow(lf()))
    # active_color[active_file_id()] <- "red"
    # now render the df
    lf() %>%
      datatable(filter = 'top',
                options = list(autoWidth=TRUE, pageLength = 50)) %>%
      formatStyle('path', 'mom',
                  backgroundColor = styleEqual(c(0, 1), c('gray90', 'greenyellow')))
  }
  )
  # active image id ----------------------------------------
  # 1-based row index into lf(), moved by the prev/next buttons and clamped
  # to [1, nrow(lf())]
  active_file_id <- reactiveVal(1)
  observeEvent(input$minus, {
    new_file <- active_file_id() - 1
    # prevent below 1 values
    if (new_file < 1)
      new_file <- 1
    active_file_id(new_file)
  })
  observeEvent(input$plus, {
    new_file <- active_file_id() + 1
    # prevent above nrow values
    if (!is.null(lf()) && new_file > nrow(lf()))
      new_file <- nrow(lf())
    active_file_id(new_file)
  })
  # active image -------------------------------------------
  # full path of the currently selected row, or NULL before a folder is chosen
  active_file <- reactive({
    if (is.null(lf()))
      return(NULL)
    df <- lf()
    df$path[active_file_id()]
  })
  output$active_file <- renderText(
    active_file()
  )
  # end Files ==============================================
}
|
f1c9af4f60ec72ebac2b3b2b062e13122aec8c5d
|
3e508d7cd0798f70f70242a837f99123397fc1c4
|
/tests/sim/20210919-sim/util.R
|
66f674b52e742dd87f0f3983f498a657d32ac623
|
[] |
no_license
|
jlivsey/Dvar
|
f0d142df9866a4232947d114891111097c4c5851
|
824cdcc822829d655b905bfeca54a40b82f16e36
|
refs/heads/master
| 2023-07-26T19:17:28.531305
| 2023-07-06T13:55:28
| 2023-07-06T13:55:28
| 237,249,747
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,182
|
r
|
util.R
|
# printf(): C-style formatted printing to stdout (no trailing newline).
printf <- function(msg, ...) {
  formatted <- sprintf(msg, ...)
  cat(formatted)
}
# logger(): like printf(), but prefixes each message with the current
# wall-clock time and " - ".
logger <- function(msg, ...) {
  stamp <- as.character(Sys.time())
  cat(stamp, "-", sprintf(msg, ...))
}
# print_vector(): render a vector as R constructor syntax, e.g. "c(1,2,3)".
# Returns the string; does not print it.
print_vector <- function(x) {
  body <- paste(x, collapse = ",")
  sprintf("c(%s)", body)
}
#' Convert valid alpha array to probabilities under log-linear formulation
#'
#' @param A alpha array. Must be valid and provide identifiable log-linear
#'   specification
#' @param J index List of variable interactions to include
#' @param I vector. Number of categories for each variable
#'
#' @return an array of dimension I with probabilities
#' @export
#'
alpha2prob <- function(A, J, I){
  Jcheck(J)                 # validate the interaction list (project helper)
  lenJ <- sapply(J, length)
  P <- array(dim = I)
  # all indices of P array (by construction all values are NA)
  Pidx <- which(is.na(P), arr.ind = TRUE)
  for(i in seq_len(nrow(Pidx))){
    x <- Pidx[i, ]
    Px <- 0
    for(k in 1:max(lenJ)) { # loop over different lengths of J
      for(j in which(lenJ == k)){
        # accumulate the alpha term for interaction j at cell x
        Px <- Px + A[[j]][matrix(x[J[[j]]], nrow = 1)]
      }
    }
    # inverse-logit transform of the accumulated log-linear term
    P[matrix(x, nrow = 1)] <- exp(Px) / (1 + exp(Px))
  }
  # BUG FIX: the original ended on the for loop, so the function returned
  # NULL invisibly, contradicting the documented @return. Return P.
  P
} # end alpha2prob()
# -----------------------------------------------------------------------------
#' Evaluate Array at Specified margin
#'
#' Subset an array by a list of per-dimension index vectors. Generalized from
#' the original hard-coded 6-dimensional subscript to arrays of any rank;
#' backward compatible (a 6-element mar on a 6-d array behaves identically).
#'
#' @param A array
#' @param mar list of index vectors, one per dimension of A
#'
#' @return A subset by mar (dimensions dropped as with `[`)
#' @export
#'
arrayEval <- function(A, mar){
  do.call(`[`, c(list(A), mar))
}
# -----------------------------------------------------------------------------
#' Initialize array with zeros and ones
#'
#' Generalized from the original hard-coded 6-dimensional assignment to any
#' rank: all combinations of the per-dimension indices are materialized as a
#' row-index matrix, and matrix indexing sets exactly those cells to 1.
#'
#' @param arrayDim Dimension of output array
#' @param idx list of index vectors (one per dimension) specifying the ones
#'
#' @return array of dimension arrayDim with ones at idx, zeros elsewhere
#' @export
#'
arraySetOnes <- function(arrayDim, idx){
  Z <- array(0, arrayDim)
  Z[as.matrix(expand.grid(idx))] <- 1
  return(Z)
}
# -----------------------------------------------------------------------------
#' Convert zero entries of mar to full vector
#'
#' Any margin whose first element is the 0 sentinel is expanded to the full
#' index range 1:dim for that dimension.
#'
#' @param mar list of margins
#' @param A full array used only for dimensionality; alternatively pass mydim
#' @param mydim dimensionality of full array (ignored when A is given)
#'
#' @return mar with zero entries replaced by the full index vector
#' @export
#'
marginZero2vec <- function(mar, mydim = NULL, A = NULL){
  # is.list() instead of the fragile class(mar) != 'list' string comparison
  if(!is.list(mar)){
    stop('mar needs to be a list')
  }
  if(!is.null(A)) mydim <- dim(A)
  # TRUE wherever an element equals 0; only the first entry is consulted below
  zeroBool <- lapply(mar, function(x){x == 0})
  # seq_along() instead of 1:length() (safe for empty input)
  for(i in seq_along(zeroBool)){
    if(zeroBool[[i]][1]) {
      mar[[i]] <- 1:mydim[i]
    }
  }
  return(mar)
}
# -----------------------------------------------------------------------------
#' Recode or condense a marginal of histogram
#'
#' Collapses categories of one margin of array A: each element of newCode is a
#' vector of old category indices that are summed into one new category.
#'
#' NOTE(review): the assignment `Aout[,,,,,i]` hard-codes a 6-dimensional
#' array AND assumes the recoded margin is placed last (the original author's
#' comment below acknowledges this); it appears correct only for
#' margin == 6 on a 6-d array — confirm before using other margins.
#'
#' @param A array to be recoded
#' @param margin margin that will be recoded
#' @param newCode list of vectors that specify the recode
#'
#' @return recoded array
#' @export
#'
recode <- function(A, margin, newCode){
  d <- dim(A)
  dl <- length(dim(A))
  # dimension of recoded array
  newd <- d
  newd[margin] <- length(newCode)
  # initialize empty mar object (full range on every dimension)
  mar <- vector(mode = 'list', length = dl)
  for(i in 1:dl) mar[[i]] <- 1:d[i] # will change mar[[margin]] later
  # loop over all newCodes
  Aout <- array(NA, dim = newd)
  for(i in 1:length(newCode)){
    # create margin object for i^th newCode
    mar[[margin]] <- newCode[[i]]
    # WILL NEED TO BE CHANGED WHEN GENERALIZING TO MULTIPLE DIMENSIONS
    # DON'T KNOW HOW TO HANDLE LHS OF THIS ASSIGNEMENT IN GENERAL.
    Aout[,,,,,i] <- apply(arrayEval(A, mar), (1:dl)[-margin], sum)
  }
  return(Aout)
}
# All proper non-empty sub-tuples of v: every subset of size 1..length(v)-1,
# ordered by size and then by combn() column order.
all_subtouples <- function(v){
  A <- list()
  # seq_len() rather than 1:(length(v)-1): for a length-1 input the original
  # counted 1, 0 and so evaluated combn(v, 0), appending a spurious
  # zero-length element; seq_len(0) correctly yields an empty list.
  for(n in seq_len(length(v) - 1)){
    A <- c(A, combn(v, n, simplify = FALSE))
  }
  return(A)
}
# Validate an interaction list J: every element must be a numeric vector,
# sorted in increasing order, and preceded in J by all of its sub-tuples.
# Returns TRUE when valid, otherwise a character error message.
# (NOTE: despite the original header comment, the "valid" return is TRUE,
# not NULL.)
check_valid <- function(J){
  flag <- TRUE
  # TODO (from original): also check that J's vector sizes are non-decreasing
  for(i in seq_along(J)){
    current_J <- J[[i]]
    # Check that the i-th element is a numeric vector
    if(!is.numeric(current_J)){
      flag <- paste0(i, "th element of J is not of class numeric")
      return(flag)
    }
    # Done if length is one
    current_n <- length(current_J)
    if(current_n == 1){
      next
    }
    # Check that the i-th element is sorted in increasing order.
    # BUG FIX: all.equal() returns a character vector on mismatch, so the
    # original `!all.equal(...)` raised "invalid argument type" for any
    # unsorted element; isTRUE() makes it a proper logical test.
    if(!isTRUE(all.equal(current_J, sort(current_J)))){
      flag <- paste0(i, "th element of J is not a increasing index order")
      return(flag)
    }
    # All prior elements of J must contain all sub-tuples
    A <- all_subtouples(current_J)
    for(k in seq_along(A)){
      # seq_len(i - 1) is empty when i == 1, so a multi-element first tuple
      # (whose sub-tuples cannot have appeared earlier) is correctly rejected
      if(!any(unlist(lapply(J[seq_len(i - 1)], function(x) identical(x, A[[k]]))))){
        flag <- paste0("subtouple does not exisit in list")
        return(flag)
      }
    }
  }
  return(flag)
}
#' Check Geography associated with given margin
#'
#' @param mar list of length 6 with the 6th element being the Geo margin
#'
#' @return Geo level in ('state', 'county', 'tract')
#' @export
#'
geo_of_mar <- function(mar){
  n_geo <- length(mar[[6]])
  if(n_geo > 6){
    return("state")
  } else if(n_geo > 1 ){
    return("county")
  } else if(n_geo == 1){
    # BUG FIX: the original tested length(mar[[6]] == 1) — the length of the
    # comparison vector, truthy for any non-empty margin. The intended test
    # (exactly one element) only worked by accident of branch ordering.
    return("tract")
  } else{
    stop("Geo of margin is not an expected length")
  }
}
#' Convert geo character to epsilon modifier
#'
#' @param geoMod vector of modifier levels from largest to smallest in
#'   geographic size (state, county, tract)
#' @param geo character valued geography
#'
#' @return entry of geoMod associated with geo
#' @export
#'
geoChar2geoMod <- function(geoMod, geo){
  # switch() dispatch in place of the if/else chain; the final unnamed
  # alternative is the error for unrecognized levels.
  switch(geo,
         state  = geoMod[1],
         county = geoMod[2],
         tract  = geoMod[3],
         stop("problem with geoChar2geoMod function - didn't enter a geo level"))
}
#' Check query associated with a given margin
#'
#' @param mar list of length 6; elements 1-5 are the margins associated with
#'   hhgq, sex, cenrace, age, hisp, in that order
#'
#' @return query from ("detail", "hhgq", "votingAge_hisp_cenrace", "age_sex")
#' @export
#'
query_of_mar <- function(mar){
  # TRUE where a margin's first entry is the 0 ("full margin") sentinel
  first_zero <- vapply(mar, function(m) m[1] == 0, logical(1))
  if(first_zero[2] && first_zero[3] && first_zero[4] && first_zero[5]){
    return("hhgq")
  }
  if(first_zero[1] && first_zero[2]){
    return("votingAge_hisp_cenrace")
  }
  if(first_zero[1] && first_zero[3] && first_zero[5]){
    return("age_sex")
  }
  "detail"
}
#' Convert query character to epsilon modifier
#'
#' @param queryMod vector of modifier levels, ordered
#'   (detailed, hhgq, votingAge_hisp_cenrace, age_sex)
#' @param query character valued query name
#'
#' @return entry of queryMod associated with query
#' @export
#'
queryChar2queryMod <- function(queryMod, query){
  # BUG FIX: query_of_mar() returns "detail", but this function only matched
  # "detailed", so every detailed query fell through to the error branch.
  # Accept both spellings for backward compatibility.
  if(query %in% c("detailed", "detail")){
    return(queryMod[1])
  }else if(query == "hhgq"){
    return(queryMod[2])
  }else if(query == "votingAge_hisp_cenrace"){
    return(queryMod[3])
  }else if(query == "age_sex"){
    return(queryMod[4])
  }else{
    # was "geoChar2geoMod" (copy-paste); corrected to name this function
    stop("problem with queryChar2queryMod function - didn't enter a query level")
  }
}
|
31ddf554ee4f91790de1ca46c9e4bf1a80442e7b
|
9b893f56477a12e7634cf2ed0cb6d16d981c9ed4
|
/notes/bglmer_runs.R
|
59d9880f728b519b8456489dd334ec62325e61a8
|
[] |
no_license
|
bbolker/mixedmodels-misc
|
32ac8b2d8bc86e841fdc102078daef63dcd86ec8
|
b75c0f548a7f02e10b2cea31d92dbc4dc6fa60d3
|
refs/heads/master
| 2023-06-26T11:39:35.474015
| 2023-04-07T21:31:22
| 2023-04-07T21:31:22
| 32,484,880
| 110
| 44
| null | 2022-09-05T15:57:32
| 2015-03-18T21:17:36
|
HTML
|
UTF-8
|
R
| false
| false
| 2,128
|
r
|
bglmer_runs.R
|
# Fit the same binomial mixed model with five packages (blme, lme4, glmmTMB,
# brms, MCMCglmm) on one simulated dataset, timing each fit, and save all
# fits plus timings to bglmer_runs.rds.
library(lme4)
library(blme)
library(glmmTMB)
library(brms)
library(MCMCglmm)
## @knitr setup_runs
# 3-way factorial fixed effects with crossed random intercepts for
# subject (id) and item (item.new).
form <- contrast~c.con.tr*c.type.tr*c.diff.tr+(1|id)+(1|item.new)
# Fully crossed design: 3 x 2 x 2 conditions x 10 subjects x 10 items.
mydata <- expand.grid(c.con.tr=factor(1:3),
c.type.tr=factor(1:2),
c.diff.tr=factor(1:2),
id=factor(1:10),
item.new=factor(1:10))
## @knitr simulate_data
set.seed(101)
# form[-2] drops the response from the formula; simulate a binary response
# with all fixed-effect coefficients = 1 and both random-effect SDs = 1.
mydata$contrast <- simulate(form[-2],
newdata=mydata,
newparams=list(beta=rep(1,12),
theta=rep(1,2)),
family=binomial,
weights=rep(1,nrow(mydata)))[[1]]
## @knitr run_models
# blme: same model as glmer but with a weakly informative Normal(0, 9*I)
# prior on the 12 fixed-effect coefficients.
t.bglmer <- system.time(fit.bglmer <- bglmer(form,
data=mydata, family=binomial (link='logit'),
fixef.prior= normal(cov = diag(9,12))))
t.glmer <- system.time(fit.glmer <- glmer(form,
data=mydata, family=binomial (link='logit')))
t.glmmTMB <- system.time(fit.glmmTMB <- glmmTMB(form,
data=mydata, family=binomial (link='logit')))
# brms: take the default prior structure and replace the first (population-
# level) prior with Normal(0, 3) to roughly match the blme prior.
prior <- get_prior(form,
data=mydata, family=bernoulli)
prior$prior[1] <- "normal(0,3)"
t.brms <- system.time(fit.brms <- brm(contrast~c.con.tr*c.type.tr*c.diff.tr+(1|id)+(1|item.new),
prior=prior,
chains=3,
data=mydata, family=bernoulli))
# MCMCglmm: B is the fixed-effect prior (matching diag(9,12) above);
# R and G are residual and random-effect variance priors.
# NOTE(review): the second G element uses V1=1 - this looks like a typo
# for V=1 (cf. the first G element); confirm against MCMCglmm docs.
t.MCMCglmm <- system.time(fit.MCMCglmm <- MCMCglmm(contrast~c.con.tr*c.type.tr*c.diff.tr,
random=~id+item.new,
data=mydata,
prior=list(B=list(mu=rep(0,12),V=diag(9,12)),
R=list(V=1,nu=0),
G=list(list(V=1,nu=0),
list(V1=1,nu=0)))))
# Bundle all five fits; timings ride along as an attribute.
resList <- list(blme=fit.bglmer,lme4=fit.glmer,glmmTMB=fit.glmmTMB,
brms=fit.brms,MCMCglmm=fit.MCMCglmm)
attr(resList,"times") <-
list(t.bglmer,t.glmer,t.glmmTMB,t.brms,t.MCMCglmm)
saveRDS(resList,file="bglmer_runs.rds")
|
6f1c3b5b769b0fe7bab15ccea15a11ea24fdaad6
|
1e3df770a5a917c22ee0a53666cb5ef2c9083361
|
/R/confSim.R
|
3de390c9ab26904136502694afb6bee2b1a056cc
|
[] |
no_license
|
cran/visualizationTools
|
1c7d75ef5535381498ec154b1880e24ee090c8ff
|
7a58ca10d2e406eab889f3e36d1d8cb498801239
|
refs/heads/master
| 2020-05-18T18:04:21.695412
| 2011-08-01T00:00:00
| 2011-08-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,736
|
r
|
confSim.R
|
# Animated/static demonstration of confidence-interval coverage: draws
# `trials` samples of size `n` from Normal(mu, stdev), computes a CI for the
# mean (t-interval) or for the SD (chi-square interval) at `conf.level`, and
# plots the intervals, coloring those that miss the true parameter.
# sim=TRUE animates a scrolling window of N intervals; sim=FALSE draws all
# trials at once. Invisibly returns list(q1, q2) of interval endpoints.
confSim <-
function(fun=mean,conf.level=0.95,mu=0,stdev=1,sleep=0.2,trials=100,n=8,N=20,xlim,sim=TRUE)
{
# Only mean and sd intervals are implemented.
if(identical(fun,mean)==FALSE && identical(fun,sd)==FALSE)
stop("function is not yet supported! ")
# Default x-range: sampling-distribution scale for the mean; [0, ~max] for sd.
if(missing(xlim))
{if(identical(fun,mean))
xlim=c(mu-7*stdev/sqrt(n),mu+10*stdev/sqrt(n))
if(identical(fun,sd))
xlim=c(0,1.75*stdev*sqrt(n-1))
}
# Save and restore graphics parameters on exit.
old.par <- par(no.readonly = TRUE)
on.exit(par(old.par))
# q1/q2: interval endpoints per trial; count: running tally of misses;
# color/lwd/lty: per-trial line styling (2 = red/thick/dashed for misses).
val=numeric(trials);q1=numeric(trials);q2=numeric(trials);count=0;temp=0;
color=numeric(trials);lwd.vec=numeric(trials);lty.vec=numeric(trials)
for (i in 1:trials)
{
if(identical(fun,mean))
{
xlab=expression(bar(x))
# t-interval for the mean of a fresh sample.
val=t.test(rnorm(n,mu,stdev),conf.level=conf.level) #t.test
q1[i]=val$conf.int[[1]]
q2[i]=val$conf.int[[2]]
# Style interval red/dashed when it misses mu.
if (mu<q1[i]||mu>q2[i])
{
color[i]=2
lwd.vec[i]=2
lty.vec[i]=2
}
else
{
color[i]=1
lwd.vec[i]=1
lty.vec[i]=1
}
}
if(identical(fun,sd))
{
xlab=expression(sd)
val=sd(rnorm(n,mu,stdev))
# Chi-square interval for sigma. Note q1 uses the lower chi-square
# quantile, so q1 is the UPPER bound and q2 the LOWER bound; the
# miss test below is written accordingly.
q1[i]=val*sqrt((n-1)/qchisq((1-conf.level)/2,df=n-1))
q2[i]=val*sqrt((n-1)/qchisq(conf.level+(1-conf.level)/2,df=n-1))
if (stdev>q1[i]||stdev<q2[i])
{
color[i]=2
lwd.vec[i]=2
lty.vec[i]=2
}
else
{
color[i]=1
lwd.vec[i]=1
lty.vec[i]=1
}
}
}
# Animated mode: redraw after each trial, scrolling once more than N
# intervals exist.
if(sim==TRUE)
{
for(i in 1:trials)
{
if(i<=N) #Add first intervals
{
plot(mu,0,col="transparent",ylim=c(0,N),xlim=xlim,xlab=xlab, #Basic plot
ylab="Trial No.",main="Confidence Intervals")
axis(2,at=0:N,labels=NA,tcl=-0.25)
# Vertical reference line at the true parameter value.
if(identical(fun,mean))
abline(v=mu)
if(identical(fun,sd))
abline(v=stdev)
for(j in 1:i)
{
if(conf.level==1)
abline(h=j)
# Horizontal interval with small end ticks.
lines(x=c(q1[j],q2[j]),y=c(j,j),col=color[j],lwd=lwd.vec[j],lty=lty.vec[j])
lines(x=c(q1[j],q1[j]),y=c(j+0.01*N,j-0.01*N),col=color[j],lwd=lwd.vec[j],lty=lty.vec[j])
lines(x=c(q2[j],q2[j]),y=c(j+0.01*N,j-0.01*N),col=color[j],lwd=lwd.vec[j],lty=lty.vec[j])
}
Sys.sleep(sleep)
# Tally the miss for the interval just added.
if(identical(fun,mean))
{
if (mu<q1[i]||mu>q2[i])
count=count+1
}
if(identical(fun,sd))
{
if (stdev>q1[i]||stdev<q2[i])
count=count+1
}
}
if(i>N)
{
# Scrolling window: show the most recent N trials.
plot(mu,0,col="transparent",ylim=c(i-N,i),xlim=xlim,xlab=xlab, #Basic plot
ylab="Trial No.",main="Confidence Intervals")
axis(2,at=seq(i-N+1,i,length=N),labels=NA,tcl=-0.25)
if(identical(fun,mean))
abline(v=mu)
if(identical(fun,sd))
abline(v=stdev)
for(j in 0:N)
{
if(conf.level==1)
abline(h=i-j)
lines(x=c(q1[i-j],q2[i-j]),y=c(i-j,i-j),col=color[i-j],lwd=lwd.vec[i-j],lty=lty.vec[i-j]) #Add further intervals
lines(x=c(q1[i-j],q1[i-j]),y=c((i-j)+0.01*N,(i-j)-0.01*N),col=color[i-j],lwd=lwd.vec[i-j],lty=lty.vec[i-j])
lines(x=c(q2[i-j],q2[i-j]),y=c((i-j)+0.01*N,(i-j)-0.01*N),col=color[i-j],lwd=lwd.vec[i-j],lty=lty.vec[i-j])
}
Sys.sleep(sleep)
if(identical(fun,mean))
{
if (mu<q1[i]||mu>q2[i])
count=count+1
}
if(identical(fun,sd))
{
if (stdev>q1[i]||stdev<q2[i])
count=count+1
}
}
legend("topright",legend=c(paste("Trials:",trials),paste("Size:",n),paste("mean:",mu),
paste("sd:",stdev),paste("conf.level:",conf.level),paste("Out:",count)),inset=0.04,bg="white")
}
}
# Static mode: draw all intervals in one plot.
if(sim==FALSE)
{
plot(mu,0,col="transparent",ylim=c(0,trials),xlim=xlim,xlab=xlab, #Basic plot
ylab="Trial No.",main="Confidence Intervals of normal random numbers")
axis(2,at=0:trials,labels=NA,tcl=-0.25)
if(identical(fun,mean))
abline(v=mu)
if(identical(fun,sd))
abline(v=stdev)
for(i in 1:trials)
{
if(conf.level==1)
abline(h=i)
lines(x=c(q1[i],q2[i]),y=c(i,i),col=color[i],lwd=lwd.vec[i],lty=lty.vec[i])
lines(x=c(q1[i],q1[i]),y=c(i+0.005*trials,i-0.005*trials),col=color[i],lwd=lwd.vec[i],lty=lty.vec[i])
lines(x=c(q2[i],q2[i]),y=c(i+0.005*trials,i-0.005*trials),col=color[i],lwd=lwd.vec[i],lty=lty.vec[i])
if(identical(fun,mean))
{
if (mu<q1[i]||mu>q2[i])
count=count+1
}
if(identical(fun,sd))
{
if (stdev>q1[i]||stdev<q2[i])
count=count+1
}
}
legend("topright",legend=c(paste("Trials:",trials),paste("Size:",n),paste("mean:",mu),
paste("sd:",stdev),paste("conf.level:",conf.level),paste("Out:",count)),inset=0.04,bg="white")
}
# Return interval endpoints without printing.
invisible(list(q1,q2))
}
|
04ac5d38aaf3eef8cf75bad0c5ad1d19a9cbb3bd
|
c39efa69fd31f46c64ce9bda5b1f2b88229592d5
|
/autotable/man/offender_characteristics_relationship_to_victim.Rd
|
6576f0c54352d0c107833661a3751303fb9c0451
|
[] |
no_license
|
ONS-centre-for-crime-and-justice/nature-of-crime-r
|
e0126bb48df8fae64a5f7000a8df40b43af121fa
|
117cb5a56b34795421b8574395b0b1adf86a1c89
|
refs/heads/master
| 2022-12-04T18:50:38.816978
| 2020-09-01T13:23:17
| 2020-09-01T13:23:17
| 288,486,934
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 569
|
rd
|
offender_characteristics_relationship_to_victim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/offender_characteristics_relationship_to_victim.R
\name{offender_characteristics_relationship_to_victim}
\alias{offender_characteristics_relationship_to_victim}
\title{offender_characteristics_relationship_to_victim}
\usage{
offender_characteristics_relationship_to_victim(
config,
master_config,
subtable
)
}
\arguments{
\item{config}{To do.}
\item{master_config}{To do.}
\item{subtable}{To do.}
}
\value{
To do.
}
\description{
To do.
}
\details{
To do.
}
\examples{
To do.
}
|
1b488f6a5f4599d5f999817bb79f8e6b7383b5a0
|
fae9abe8a466ef7db9d124032b9b7254391ea68b
|
/plot1.R
|
9a9767d6146c30fa70e69828a3bd206b953a2358
|
[] |
no_license
|
sankaraju/ExData_Plotting1
|
58bc9adaa0a3262503a7b5d2ac016259d0da5b69
|
d25a238b969876a263f6720801f3468d976548f0
|
refs/heads/master
| 2020-12-27T10:32:52.998852
| 2015-01-12T01:16:15
| 2015-01-12T01:16:15
| 29,109,429
| 0
| 0
| null | 2015-01-11T23:04:08
| 2015-01-11T23:04:08
| null |
UTF-8
|
R
| false
| false
| 753
|
r
|
plot1.R
|
# Course assignment script: read the UCI household power consumption data,
# subset to 2007-02-01..02, draw a red histogram of Global_active_power,
# and copy the plot to plot1.png.
## Getting full dataset
# NOTE(review): header=T / check.names=F use the reassignable T/F shortcuts;
# TRUE/FALSE would be safer.
power <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
## Subsetting the data
# Date comparisons against "YYYY-MM-DD" strings work because Date coerces
# the strings via as.Date.
powerFiltered <- subset(power, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(power)
## Converting dates
#datetime <- paste(as.Date(powerFiltered$Date), data$Time)
#powerFiltered$Datetime <- as.POSIXct(datetime)
## Plot 1
hist(powerFiltered$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file
# dev.copy duplicates the screen device into a PNG; opening png() before
# hist() would render directly to file, but that would change behavior.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
c1fb9bfeae3c4e116a80b5c7be00969b2886ff36
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Letombe/renHorn/renHorn_400CNF1800_2aQBF_24/renHorn_400CNF1800_2aQBF_24.R
|
273438a9d29132f8ecf69c33a6d0474160b53082
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 77
|
r
|
renHorn_400CNF1800_2aQBF_24.R
|
9a71c1613431d45efa6d438254c146e8 renHorn_400CNF1800_2aQBF_24.qdimacs 400 1800
|
27f947a5e792350fec595f0a7edd91f446ed393c
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/get_location_code_mapping_GAUL.R
|
3bd74076546ea6986a127da00f44e7acded5c508
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,404
|
r
|
get_location_code_mapping_GAUL.R
|
#' @title Get GAUL location code mapping
#' @description Builds a location lookup table (id, names, ihme_lc_id, GAUL
#'   code) from the shared database.
#' @param remove_diacritics logical; if TRUE, strip diacritics from the
#'   location name columns before returning.
#' @return a data.table with columns loc_id, loc_name, loc_nm_sh,
#'   ihme_lc_id, GAUL_CODE
#' @details Queries shared.location plus shared.location_metadata_history
#'   (metadata version 19) for GAUL codes (metadata type 26) and ISO3 codes
#'   (metadata type 1); ihme_lc_id is derived from path_to_top_parent via
#'   get_ihme_lc_id().
#' @examples
#' \dontrun{
#' if (interactive()) {
#' # EXAMPLE1
#' }
#' }
#' @seealso
#' \code{\link[DBI]{dbDisconnect}}
#' @rdname get_location_code_mapping_GAUL
#' @export
#' @importFrom DBI dbDisconnect
get_location_code_mapping_GAUL <- function(remove_diacritics) {
  conn <- get_shared_db_conn()
  # Fix: disconnect via on.exit so the connection is released even when a
  # query below errors (the original only disconnected on the success path,
  # leaking the connection on failure).
  on.exit(DBI::dbDisconnect(conn), add = TRUE)
  # Retrieve core fields and path_to_top_parent (used to calculate ihme_lc_id)
  all_locations_sql <- paste(
    "SELECT",
    "location_id AS loc_id,",
    "location_name AS loc_name,",
    "location_name_short AS loc_nm_sh,",
    "path_to_top_parent AS path_to_top_parent",
    "FROM shared.location"
  )
  locs <- run_sql_query(conn, all_locations_sql)
  # Get GAUL_CODE metadata
  gaul_code_sql <- paste(
    "SELECT",
    "location_id AS loc_id,",
    "location_metadata_value AS GAUL_CODE",
    "FROM shared.location_metadata_history",
    # id 26 is GAUL codes
    "WHERE location_metadata_type_id = 26",
    # 19 is 2017.g - the latest version of the metadata before
    # the location_metadata table was blown away ~25 June
    " AND location_metadata_version_id = 19"
  )
  gaul_codes <- run_sql_query(conn, gaul_code_sql)
  gaul_codes["GAUL_CODE"] <- as.numeric(gaul_codes$GAUL_CODE)
  # Get ISO3 names (part of ihme_lc_id)
  iso_lookup_sql <- paste(
    "SELECT",
    "location_id AS loc_id,",
    "location_metadata_value AS iso_name",
    "FROM shared.location_metadata_history",
    # id 1 is "short identifier"/ISO3 code
    "WHERE location_metadata_type_id = 1",
    " AND location_metadata_version_id = 19"
  )
  iso_lookup <- run_sql_query(conn, iso_lookup_sql)
  data <- merge(locs, gaul_codes, by = "loc_id")
  data["ihme_lc_id"] <- sapply(data$path_to_top_parent, function(path_str) {
    get_ihme_lc_id(iso_lookup, path_str)
  })
  # remove path_to_top_parent
  data <- subset(data, select = c(
    "loc_id",
    "loc_name",
    "loc_nm_sh",
    "ihme_lc_id",
    "GAUL_CODE"
  ))
  data <- data.table(data)
  # Fix diacritics
  if (remove_diacritics) {
    data$loc_name <- fix_diacritics(data$loc_name)
    data$loc_nm_sh <- fix_diacritics(data$loc_nm_sh)
  }
  # cast to data.table for usability/backwards compatibility
  return(data)
}
|
07bd707689ea179c40cc755f6bca05441379865b
|
325d076c5fcdba87e8bad019a147b37eeb677e90
|
/man/train.Rd
|
65b2166dae2fc2863dabce27a6852b1d357f9e3f
|
[
"CC-BY-4.0"
] |
permissive
|
iiasa/ibis.iSDM
|
8491b587b6ccc849477febb4f164706b89c5fa3c
|
e910e26c3fdcc21c9e51476ad3ba8fffd672d95e
|
refs/heads/master
| 2023-08-26T12:38:35.848008
| 2023-08-19T21:21:27
| 2023-08-19T21:21:27
| 331,746,283
| 11
| 1
|
CC-BY-4.0
| 2023-08-22T15:09:37
| 2021-01-21T20:27:17
|
R
|
UTF-8
|
R
| false
| true
| 9,085
|
rd
|
train.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.R
\name{train}
\alias{train}
\alias{train,}
\alias{train-method}
\title{Train the model from a given engine}
\usage{
train(
x,
runname,
filter_predictors = "none",
optim_hyperparam = FALSE,
inference_only = FALSE,
only_linear = TRUE,
method_integration = "predictor",
aggregate_observations = TRUE,
clamp = FALSE,
verbose = getOption("ibis.setupmessages"),
...
)
\S4method{train}{BiodiversityDistribution,character,character,logical,logical,logical,character,logical,logical,logical}(x,runname,filter_predictors,optim_hyperparam,inference_only,only_linear,method_integration,aggregate_observations,clamp,verbose,...)
}
\arguments{
\item{x}{\code{\link[=distribution]{distribution()}} (i.e. \code{\linkS4class{BiodiversityDistribution}}) object).}
\item{runname}{A \code{\link{character}} name of the trained run.}
\item{filter_predictors}{A \code{\link{character}} defining if and how highly
correlated predictors are to be removed prior to any model estimation.
Available options are:
\itemize{
\item \code{"none"} No prior variable removal is performed (Default).
\item \code{"pearson"}, \code{"spearman"} or \code{"kendall"} Makes use of pairwise comparisons to identify and
remove highly collinear predictors (Pearson's \code{r >= 0.7}).
\item \code{"abess"} A-priori adaptive best subset selection of covariates via the \code{"abess"} package (see References).
Note that this effectively fits a separate generalized linear model to
reduce the number of covariates.
\item \code{"boruta"} Uses the \code{"Boruta"} package to identify non-informative features.
}}
\item{optim_hyperparam}{Parameter to tune the model by iterating over input
parameters or selection of predictors included in each iteration. Can be
set to \code{TRUE} if extra precision is needed (Default: \code{FALSE}).}
\item{inference_only}{By default the engine is used to create a spatial
prediction of the suitability surface, which can take time. If only
inferences of the strength of relationship between covariates and
observations are required, this parameter can be set to \code{TRUE} to
ignore any spatial projection (Default: \code{FALSE}).}
\item{only_linear}{Fit model only on linear baselearners and functions.
Depending on the engine setting this option to \code{FALSE} will result in
non-linear relationships between observations and covariates, often
increasing processing time (Default: \code{TRUE}). How non-linearity is
captured depends on the used engine.}
\item{method_integration}{A \code{\link{character}} with the type of integration that
should be applied if more than one \code{\linkS4class{BiodiversityDataset}} object is
provided in \code{x}. Particular relevant for engines that do not support
the integration of more than one dataset. Integration methods are generally
sensitive to the order in which they have been added to the
\code{\link{BiodiversityDistribution}} object.
Available options are:
\itemize{
\item \code{"predictor"} The predicted output of the first (or previously fitted) models are
added to the predictor stack and thus are predictors for subsequent models
(Default).
\item \code{"offset"} The predicted output of the first (or previously fitted) models are
added as spatial offsets to subsequent models. Offsets are back-transformed
depending on the model family. This option might not be supported for every
\code{\link{Engine}}.
\item \code{"interaction"} Instead of fitting several separate models, the observations from each dataset
are combined and incorporated in the prediction as a factor interaction
with the "weaker" data source being partialed out during prediction. Here
the first dataset added determines the reference level (see Leung et al.
2019 for a description).
\item \code{"prior"} In this option we only make use of the coefficients from a previous model to define priors to be used in the next model.
Might not work with any engine!
\item \code{"weight"} This option only works for multiple biodiversity datasets with the same type (e.g. \code{"poipo"}).
Individual weight multipliers can be determined while setting up the model
(\strong{Note: Default is 1}). Datasets are then combined for estimation and
weighted respectively, thus giving for example presence-only records less
weight than survey records.
}
\strong{Note that this parameter is ignored for engines that support joint likelihood estimation.}}
\item{aggregate_observations}{\code{\link{logical}} on whether observations covering
the same grid cell should be aggregated (Default: \code{TRUE}).}
\item{clamp}{\code{\link{logical}} whether predictions should be clamped to the range
of predictor values observed during model fitting (Default: \code{FALSE}).}
\item{verbose}{Setting this \code{\link{logical}} value to \code{TRUE} prints out
further information during the model fitting (Default: \code{FALSE}).}
\item{...}{further arguments passed on.}
}
\value{
A \link{DistributionModel} object.
}
\description{
This function trains a \code{\link[=distribution]{distribution()}} model with the specified
engine and furthermore has some generic options that apply to all engines
(regardless of type). See Details with regards to such options.
Users are advised to check the help files for individual engines for advice
on how the estimation is being done.
}
\details{
This function acts as a generic training function that - based on
the provided \code{\linkS4class{BiodiversityDistribution}} object creates a new
distribution model. The resulting object contains both a \code{"fit_best"}
object of the estimated model and, if \code{inference_only} is \code{FALSE}
a \link{SpatRaster} object named \code{"prediction"} that contains the spatial
prediction of the model. These objects can be requested via
\code{object$get_data("fit_best")}.
Other parameters in this function:
\itemize{
\item \code{"filter_predictors"} The parameter can be set to various options to remove highly correlated variables or those
with little additional information gain from the model prior to any
estimation. Available options are \code{"none"} (Default) \code{"pearson"}
for applying a \code{0.7} correlation cutoff, \code{"abess"} for the
regularization framework by Zhu et al. (2020), or \code{"RF"} or
\code{"randomforest"} for removing the least important variables according
to a randomForest model. \strong{Note}: This function is only applied on
predictors for which no prior has been provided (e.g. potentially
non-informative ones).
\item \code{"optim_hyperparam"} This option allows to make use of hyper-parameter search for several models, which can improve
prediction accuracy although through the a substantial increase in
computational cost.
\item \code{"method_integration"} Only relevant if more than one \code{\link{BiodiversityDataset}} is supplied and when
the engine does not support joint integration of likelihoods. See also
Miller et al. (2019) in the references for more details on different types
of integration. Of course, if users want more control about this aspect,
another option is to fit separate models and make use of the \link{add_offset},
\link{add_offset_range} and \link{ensemble} functionalities.
\item \code{"clamp"} Boolean parameter to support a clamping of the projection predictors to the range of values observed
during model training.
}
}
\note{
There are no silver bullets in (correlative) species distribution
modelling and for each model the analyst has to understand the objective,
workflow and parameters than can be used to modify the outcomes. Different
predictions can be obtained from the same data and parameters and not all
necessarily make sense or are useful.
}
\examples{
\dontrun{
# Fit a linear penalized logistic regression model via stan
x <- distribution(background) |>
# Presence-absence data
add_biodiversity_poipa(surveydata) |>
# Add predictors and scale them
add_predictors(env = predictors, transform = "scale", derivates = "none") |>
# Use Stan for estimation
engine_stan(chains = 2, iter = 1000, warmup = 500)
# Train the model
mod <- train(x, only_linear = TRUE, filter_predictors = 'pearson')
mod
}
}
\references{
\itemize{
\item Miller, D.A.W., Pacifici, K., Sanderlin, J.S., Reich, B.J., 2019. The recent past and promising future for data integration methods to estimate species’ distributions. Methods Ecol. Evol. 10, 22–37. https://doi.org/10.1111/2041-210X.13110
\item Zhu, J., Wen, C., Zhu, J., Zhang, H., & Wang, X. (2020). A polynomial algorithm for best-subset selection problem. Proceedings of the National Academy of Sciences, 117(52), 33117-33123.
\item Leung, B., Hudgins, E. J., Potapova, A. & Ruiz‐Jaen, M. C. A new baseline for countrywide α‐diversity and species distributions: illustration using >6,000 plant species in Panama. Ecol. Appl. 29, 1–13 (2019).
}
}
\seealso{
\link{engine_gdb}, \link{engine_xgboost}, \link{engine_bart}, \link{engine_inla},
\link{engine_inlabru}, \link{engine_breg}, \link{engine_stan}
}
|
d14b2dd8ba5b51ccfdea2683ead1e3ece169914e
|
87a01a4adfb9bb4b6bd46210b76763c486e15049
|
/man/getb0.biseq.Rd
|
8d210728e4c327e63acc9f0b383c40018f48f4b6
|
[] |
no_license
|
Shicheng-Guo/deconvSeq
|
14ac09738dae750d59f8372b6d7b9f531d3fd4d6
|
b4de0d17545b09f7f2235814454805761ff75fe6
|
refs/heads/master
| 2020-11-26T07:14:05.522586
| 2019-08-30T23:10:39
| 2019-08-30T23:10:39
| 228,999,883
| 0
| 1
| null | 2019-12-19T07:31:17
| 2019-12-19T07:31:17
| null |
UTF-8
|
R
| false
| true
| 777
|
rd
|
getb0.biseq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/withDoc.R
\name{getb0.biseq}
\alias{getb0.biseq}
\title{compute b0, projection matrix, given methylation counts}
\usage{
getb0.biseq(methmat, design, sigg = NULL)
}
\arguments{
\item{methmat}{a matrix of counts with rows corresponding to methylation sites. Columns are: chr start end strand coverage1 numCs1 numTs1 coverage2 numCs2 numTs2 ....}
\item{design}{design matrix, output of model.matrix}
\item{sigg}{predetermined signature CpG sites. Format sites as chromosome
name, chromosome location, strand: chrN_position(+/-). For example, chr1_906825-}
}
\value{
b0 projection matrix. Coefficients are beta.
}
\description{
compute b0, projection matrix, given methylation counts
}
|
d17c9256c584c537b8112c38bbc4186f56e41b02
|
5edf3ebc52f12c8b7ed4dbc1aa5f97a8e8929605
|
/models/openml_lupus/regression_TIME/eddc80da42940ee05ed92bea2fa85b15/code.R
|
29875ba33701c18fcc07f00496b143a7595036e1
|
[] |
no_license
|
lukaszbrzozowski/CaseStudies2019S
|
15507fa459f195d485dd8a6cef944a4c073a92b6
|
2e840b9ddcc2ba1784c8aba7f8d2e85f5e503232
|
refs/heads/master
| 2020-04-24T04:22:28.141582
| 2019-06-12T17:23:17
| 2019-06-12T17:23:17
| 171,700,054
| 1
| 0
| null | 2019-02-20T15:39:02
| 2019-02-20T15:39:02
| null |
UTF-8
|
R
| false
| false
| 1,324
|
r
|
code.R
|
# Trains a ranger random forest on OpenML dataset 472 (response TIME), hashes
# the model specification, and cross-validates the fit.
#:# libraries
library(caret)
library(digest)
library(OpenML)
#:# config
set.seed(1)
#:# data
data <- getOMLDataSet(data.id = 472L)
# Fix: extract the data.frame BEFORE inspecting it; the original called
# head(df) before df existed, which errors (or silently shows a stale df).
df <- data$data
head(df)
#:# preprocessing
head(df)
#:# model
regr_rf <- train(TIME ~ ., data = df, method = "ranger", tuneGrid = expand.grid(
  mtry = 3,
  splitrule = "variance",
  min.node.size = 5))
#:# hash
#:# eddc80da42940ee05ed92bea2fa85b15
# Hash of the model spec (formula, data, method, tuning grid) used as the
# run identifier above.
hash <- digest(list(TIME ~ ., df, "ranger", expand.grid(
  mtry = 3,
  splitrule = "variance",
  min.node.size = 5)))
hash
#:# audit
# 5-fold CV with the same fixed tuning parameters, reporting RMSE.
train_control <- trainControl(method="cv", number=5)
regr_rf_cv <- train(TIME ~ ., data = df, method = "ranger", tuneGrid = expand.grid(
  mtry = 3,
  splitrule = "variance",
  min.node.size = 5),
  trControl = train_control,
  metric = "RMSE")
print(regr_rf_cv)
results <-regr_rf_cv$results
results
#:# session_info
# Record session info for reproducibility; sink redirects console output
# to the file until the closing sink().
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
bd43994cd8f0096c3db0885e93bf9ad1e368e1e8
|
e2b0f1bac85bf906766e69d65d187228bdaf7e8c
|
/R/gfunction.the.alp.cc.R
|
632168aa0e85b2ba77e704cb38553a7344b10a55
|
[] |
no_license
|
zhangh12/gim
|
0741ecd98fdf0e8d841a7a3316b16025f6b49489
|
f8ebca8e670882928f73a1cbdad3b085e75f82fd
|
refs/heads/master
| 2021-11-16T00:14:08.944377
| 2021-08-26T05:39:20
| 2021-08-26T05:39:20
| 122,691,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
gfunction.the.alp.cc.R
|
# Builds cross-derivative blocks of the g-function with respect to theta and
# the case-control alpha parameters, one entry per (theta index j, alpha
# index l) pair, keyed "j-l". Returns NULL when no model contributes any
# alpha parameters.
# Assumptions (TODO confirm against callers): para is a named parameter
# vector indexed by the map list; ref is a reference covariate data frame
# with n rows; Delta and delta[, i] are per-observation weights; ncase/nctrl
# hold per-model case/control counts on their diagonals; xi (n x k) and
# pr (n-vector) project the n-row blocks down to the returned matrices.
gfunction.the.alp.cc <- function(para, map, ref, Delta, delta, ncase, nctrl, xi, pr){
nmodel <- length(map$bet)
the <- para[map$the]
g.the.alp <- list()
nthe <- length(the)
n <- nrow(ref)
# Per-model constant factor shared by all (j, l) pairs below:
# -X * Delta * rho * delta / (1 + rho * delta)^2, where rho is the
# case/control ratio of model i.
const <- list()
for(i in 1:nmodel){
id <- c(alp.index.cc(map, i), map$bet[[i]])
gam <- para[id]
rx <- as.matrix(ref[, names(gam), drop = FALSE])
rho.i <- ncase[i, i] / nctrl[i, i]
const[[i]] <- -rx * (Delta * rho.i * delta[, i] / (1 + rho.i * delta[, i])^2)
}
nlam <- max(map$lam)
# Column positions below are offset so lambda columns start at 1.
offset <- max(map$the)
# Key generator for the output list: "<theta index>-<alpha index>".
foo <- function(j, l){
paste0(j,'-',l)
}
for(j in 1:nthe){
fxj <- ref[, names(the)[j]]
for(i in 1:nmodel){
id.a <- alp.index.cc(map, i)
# Models without case-control alpha parameters contribute nothing.
if(is.null(id.a)){
next
}
id <- c(alp.index.cc(map, i), map$bet[[i]])
tmp <- const[[i]] * fxj
for(l in id.a){
fxl <- ref[, names(para)[l]]
# n x (nlam-1) block, nonzero only in this model's columns.
gt <- matrix(0, nrow = n, ncol = nlam - 1)
gt[, id - offset] <- tmp * fxl
#g.the.alp[[foo(j,l)]] <- gt
# Store the projected block rather than the raw n-row matrix.
g.the.alp[[foo(j,l)]] <- t(gt %*% xi) %*% pr
rm(gt)
}
}
}
if(length(g.the.alp) == 0){
g.the.alp <- NULL
}
g.the.alp
}
|
207d79923d613cb8b03b4e3e9b5ac6e6ea0da057
|
14c467c1f779dc3f5139d91bacb845ab05b9e3cc
|
/R/segHT v1.4.0/man/run_scenario.Rd
|
7e018d0b82dacf5f7070e4800f15743731b7dad2
|
[] |
no_license
|
milleratotago/Independent_Segments_R
|
3706ba6117b04621d2f57056e8919558c6887c89
|
c54ea4936ef97e794e2d636c10173ec0737512f1
|
refs/heads/master
| 2023-08-20T14:55:33.606406
| 2021-10-28T22:19:36
| 2021-10-28T22:19:36
| 275,279,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,350
|
rd
|
run_scenario.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SegmentedHypTestEngine.R
\name{run_scenario}
\alias{run_scenario}
\title{run_scenario.SegmentedHypTestEngine}
\usage{
run_scenario(
segmented_hyp_test_engine,
segmented_researcher,
stat_procedure,
effects_list
)
}
\arguments{
\item{segmented_hyp_test_engine}{A SegmentedHypTestEngine instance for method
dispatch}
\item{segmented_researcher}{Correctly initialised instance of
SegmentedResearcher}
\item{stat_procedure}{Correctly initialised instance of any class descended
from StatProcedureBase, e.g. OneSampleT, PearsonR, etc.}
\item{effects_list}{A list of TrueEffect instances. This must be a list, not
simply a vector. Cast with list() if necessary. Each TrueEffect instance
holds fields effect_size and effect_size_probability. A TrueEffect with
effect_size 0 (i.e. H0 is true) may be included. Probabilities across all effect
sizes must sum to 1 or an exception is thrown.}
}
\value{
A SegmentedHypTestResult instance which holds the average expected outcomes
across all effect sizes, weighted by associated probability.
}
\description{
\code{run_scenario.SegmentedHypTestEngine} numerically determines expected
outcomes for a complete study scenario. It wraps various internal methods of
class SegmentedHypTestEngine (developers can cf. source code).
}
|
746b73a0804decb840835938845b152a9552ffb5
|
5fec6f72ea804f2c48046c8d9bb48f4586169b8d
|
/man/auditWorkers.Rd
|
4e285832d62f72a1ba0dc7b1bd8fc860d2db1cad
|
[] |
no_license
|
agencer/labelR
|
1d8d81ee64a88c49413f0732ef888217b8bc5d02
|
0dfd92afcce706f31436615c83d1b5bdcebe7db2
|
refs/heads/master
| 2022-11-10T02:14:45.624475
| 2020-06-19T05:10:55
| 2020-06-19T05:10:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 966
|
rd
|
auditWorkers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auditWorkers.R
\name{auditWorkers}
\alias{auditWorkers}
\title{Audit Mechanical Turk workers}
\usage{
auditWorkers(current_experiment_results, reference_results = NULL)
}
\arguments{
\item{current_experiment_results}{An object returned from \code{getResults}
containing HIT results from Mechanical Turk.}
\item{reference_results}{An optional object of HIT results from previous
experiments. When available, this improves the accuracy of worker quality scores.}
}
\value{
A dataframe containing workers' posterior means and HIT frequencies.
}
\description{
\code{auditWorkers} estimates worker quality scores from collected MTurk data.
}
\details{
The function audits worker quality using a Bayesian hierarchical model. Worker
means less than one are generally considered poor quality, though means can be
unreliable for workers with a low number of total HITs.
}
\author{
Ryden Butler
}
|
256ad3a7656c01c8fd9c5fa7fd03b332b5a5ff31
|
62d085c276575b6a6a86d3e0957074db890efb4e
|
/man/appalmswts.Rd
|
a49229a8cf9476bf7638b01957ed0b03dc28bb30
|
[] |
no_license
|
PrecisionLivestockManagement/DMApp
|
d72fbd1561e0f6e559d2ac1a69f83635ac56e976
|
989590e395f529925020b309e6454d974164607e
|
refs/heads/master
| 2023-08-07T22:49:10.413284
| 2023-08-01T03:44:42
| 2023-08-01T03:44:42
| 228,276,174
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,816
|
rd
|
appalmswts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appalmswts.R
\name{appalmswts}
\alias{appalmswts}
\title{Retrieves data for the ALMS Weights graph from the DataMuster database}
\usage{
appalmswts(
property,
sex,
category,
alms,
zoom,
start,
rangewt1,
rangewt2,
timezone,
username,
password
)
}
\arguments{
\item{property}{the name of the property to search the database}
\item{sex}{the sex of the cattle to be returned, determined by the "Males or Females" filter}
\item{category}{the category of cattle to be returned, determined by the "Breeders or Growers" filter}
\item{alms}{the ALMS allocation of the cattle to be returned, determined by selecting an ALMS from the drop down menu}
\item{zoom}{indicates whether to return cattle from the whole property or to filter cattle by paddock, determined by the "Paddock Groups" filter}
\item{start}{the minimum date of data to be returned, determined by the "Period for ALMS graphs" filter}
\item{rangewt1}{the lower value of the ALMS live weight range scale on the "ALMS live weight range" filter}
\item{rangewt2}{the upper value of the ALMS live weight range scale on the "ALMS live weight range" filter}
\item{timezone}{the timezone of the property to display the weekly weight data}
\item{username}{a username to access the DataMuster database}
\item{password}{a password to access the DataMuster database}
}
\value{
A dataframe of summarised data showing the average weight of cattle by date and the number of cattle included in the analysis
}
\description{
This function retrieves weekly weight data from the DataMuster database and prepares the data for graphical display on the DataMuster website
}
\author{
Dave Swain \email{d.swain@cqu.edu.au} and Lauren O'Connor \email{l.r.oconnor@cqu.edu.au}
}
|
555c4511062f14308ef42fcc01c6e9ffb4aefc4a
|
44598c891266cd295188326f2bb8d7755481e66b
|
/DbtTools/pareto/R/PDEscatterApprox.R
|
2a44dbbbf554ae276dd7725c7a738dc40f52ce18
|
[] |
no_license
|
markus-flicke/KD_Projekt_1
|
09a66f5e2ef06447d4b0408f54487b146d21f1e9
|
1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4
|
refs/heads/master
| 2020-03-13T23:12:31.501130
| 2018-05-21T22:25:37
| 2018-05-21T22:25:37
| 131,330,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,867
|
r
|
PDEscatterApprox.R
|
#' Fast approximation of a PDE scatterplot density.
#'
#' For each point (x[i], y[i]) this approximates the number of data points
#' inside the Pareto sphere around it by averaging eight shifted 2D
#' histograms (computed with miniHist2Tiles) instead of evaluating exact
#' pairwise distances. Port of the MATLAB function PDEscatterApprox
#' (author ALU 2004, /dbt/Pareto/).
#'
#' NOTE(review): the MATLAB original finishes by drawing the result via
#' zplot(); that call is still missing in this port ("zplot-Aufruf fehlt"),
#' so drawTopView, nrOfContourLines and dataPointLineSpec are currently
#' unused. They are kept to preserve the call interface.
#'
#' @param x numeric vector, data in the x dimension.
#' @param y numeric vector, data in the y dimension (same length as x).
#' @param paretoRadius the Pareto radius; if 0 (default) it is derived from
#'        the data via paretoRadius1D() on each dimension separately.
#' @param drawTopView TRUE means a contour (top) view is intended, FALSE a
#'        3D plot (currently unused, see note above).
#' @param nrOfContourLines number of contour lines to draw (currently unused).
#' @param dataPointLineSpec line spec for the data points (currently unused).
#' @param nInPspheres optional precomputed counts of points inside the
#'        Pareto sphere of each point; if NaN (default) they are estimated.
#' @return invisibly, a list with components nInPspheres (numeric vector,
#'         one count per non-NaN input point) and paretoRadius (the radius
#'         actually used).
`PDEscatterApprox` <- function(x, y, paretoRadius = 0, drawTopView = TRUE,
                               nrOfContourLines = 20, dataPointLineSpec = 'b',
                               nInPspheres = NaN) {
  # Drop pairs where either coordinate is NaN (mirrors the MATLAB NaN removal).
  noNaNInd <- !is.nan(x) & !is.nan(y)
  x <- x[noNaNInd]
  y <- y[noNaNInd]

  if (paretoRadius == 0) {
    # Radius not supplied: derive per-dimension bin widths from the data and
    # combine them Euclidean-style into the 2D Pareto radius.
    xBinWidth <- paretoRadius1D(x, 1000, FALSE)
    yBinWidth <- paretoRadius1D(y, 1000, FALSE)
    paretoRadius <- sqrt(xBinWidth^2 + yBinWidth^2)
  } else {
    xBinWidth <- paretoRadius
    # BUG FIX: was 'ParetoRadius' (capital P), an undefined name that made
    # every call with an explicit non-zero radius fail.
    yBinWidth <- paretoRadius
  }

  if (is.nan(nInPspheres)) {
    # Estimate the per-point sphere counts as the mean of eight 2D histograms
    # whose grids are shifted against each other by fractions of a bin width;
    # this smooths out the grid-alignment artefacts of a single histogram.
    xMin <- min(x)
    xMax <- max(x)
    yMin <- min(y)
    yMax <- max(y)
    xedge1 <- seq(xMin, (xMax + xBinWidth), xBinWidth)
    yedge1 <- seq(yMin, (yMax + yBinWidth), yBinWidth)
    xedge2 <- xedge1 - 0.5 * xBinWidth
    yedge2 <- yedge1 - 0.5 * yBinWidth
    xedge3 <- xedge1 - 0.3 * xBinWidth
    yedge3 <- yedge1 - 0.3 * yBinWidth
    n1 <- miniHist2Tiles(x, y, xedge1, yedge1)
    n2 <- miniHist2Tiles(x, y, xedge2, yedge2)
    n3 <- miniHist2Tiles(x, y, xedge1, yedge2)
    n4 <- miniHist2Tiles(x, y, xedge2, yedge1)
    n5 <- miniHist2Tiles(x, y, xedge1, yedge3)
    n6 <- miniHist2Tiles(x, y, xedge3, yedge1)
    n7 <- miniHist2Tiles(x, y, xedge2, yedge3)
    n8 <- miniHist2Tiles(x, y, xedge3, yedge3)
    nInPspheres <- (n1 + n2 + n3 + n4 + n5 + n6 + n7 + n8) / 8
  }

  #####################
  # zplot call still missing (see note in the header); once available it
  # should be invoked here with drawTopView / nrOfContourLines /
  # dataPointLineSpec, analogous to the MATLAB original.
  #####################

  # Return the computed quantities so callers can use them; the original
  # returned them only incidentally (or not at all when nInPspheres was given).
  invisible(list(nInPspheres = nInPspheres, paretoRadius = paretoRadius))
}
#' Count, for every point, the occupancy of its own 2D histogram tile.
#'
#' Given bin edge vectors for x and y, each point (x[i], y[i]) is assigned
#' to a tile of the 2D grid, and the returned vector gives — per input
#' point — the number of points falling into the same tile.
#'
#' @param x,y numeric data vectors of equal length.
#' @param xedge,yedge numeric vectors of bin edges; they are extended
#'        automatically when the data range exceeds them.
#' @return integer vector, same length as x: nInTile[i] is the number of
#'         points in the tile containing (x[i], y[i]).
miniHist2Tiles <- function(x, y, xedge, yedge) {
  # Extend the edge vectors if the data ranges beyond them.
  # BUG FIX: the original used rbind(), which turns the numeric edge vector
  # into a (recycled) matrix; c() keeps it a plain numeric vector.
  if (min(x) < xedge[1]) xedge <- c(min(x), xedge)
  if (max(x) > xedge[length(xedge)]) xedge <- c(xedge, max(x))
  if (min(y) < yedge[1]) yedge <- c(min(y), yedge)
  if (max(y) > yedge[length(yedge)]) yedge <- c(yedge, max(y))

  # 1-based bin index of every point in each dimension.
  xBinNr <- findInterval(x, hist(x, xedge, plot = FALSE)$breaks)
  yBinNr <- findInterval(y, hist(y, yedge, plot = FALSE)$breaks)

  xnbin <- length(xedge)
  ynbin <- length(yedge)

  # Linearised tile index (column-major: y varies fastest, as in the
  # original (xBinNr - 1) * ynbin + yBinNr layout).
  xyBin <- (xBinNr - 1L) * ynbin + yBinNr

  # BUG FIX: the original counted tiles with hist() over breaks
  # 1:(xnbin*ynbin), which yields one count fewer than there are tiles;
  # matrix() then silently recycled the counts (warnings were suppressed
  # via options(warn=-1)), corrupting the last tile. tabulate() returns
  # exactly one count per tile with no recycling and no warnings.
  nrInTiles <- matrix(tabulate(xyBin, nbins = xnbin * ynbin),
                      nrow = ynbin, ncol = xnbin)

  # Vectorised lookup of each point's own tile occupancy (replaces the
  # original element-by-element for loop).
  nInTile <- nrInTiles[cbind(yBinNr, xBinNr)]
  return(nInTile)
}
|
2af08ebc1f4c7ac3ae8938aea237d5ab7bfebf8a
|
6bbed7494fe345f49c6ab2743280110e640015be
|
/man/fill_treatment_selection.Rd
|
d27488cb38ab521b936321fe6a1b2575d67cb7c6
|
[
"MIT"
] |
permissive
|
tpmp-inra/tpmp_shiny_common
|
43912398f92e420e27c9bc97322b7ffa17091192
|
06bb71932e10ef26662ed2f7df7e33489e60fdbe
|
refs/heads/master
| 2021-05-19T09:53:39.758148
| 2020-04-01T10:09:44
| 2020-04-01T10:09:44
| 251,639,231
| 0
| 0
| null | 2020-04-01T10:09:45
| 2020-03-31T15:05:06
|
R
|
UTF-8
|
R
| false
| true
| 585
|
rd
|
fill_treatment_selection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_common_all.R
\name{fill_treatment_selection}
\alias{fill_treatment_selection}
\title{fill_treatment_selection}
\usage{
fill_treatment_selection(
df,
input_id = "cbTreatmentSelection",
label_id = "Select treatments to be displayed",
selected_text_format = "count > 3"
)
}
\arguments{
\item{df}{A dataframe}
\item{input_id}{A widget's Id}
\item{label_id}{The widget's label}
\item{selected_text_format}{Format to use when selecting test}
}
\value{
}
\description{
fill_treatment_selection
}
|
86f998bda2fa849b2e38423add23cd42d6b0392f
|
17a46d56660cd2a48ca1505b6ede37653f63c0f5
|
/run_model_and_decomp.R
|
49590f07f1b097f630be069334aff81d3151e468
|
[] |
no_license
|
ngraetz/rwjf_counties
|
9b4f21fae2705caa6330f6adeb915b23e500da7e
|
8f205d59150fbb19f93e72716d3ee47af79cbc0f
|
refs/heads/master
| 2020-03-24T14:12:58.477014
| 2019-06-26T13:53:30
| 2019-06-26T13:53:30
| 142,762,592
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,471
|
r
|
run_model_and_decomp.R
|
## Set location of repo.
repo <- '/share/code/geospatial/ngraetz/rwjf_counties/'
## Load all libraries and functions.
library(INLA)
library(data.table)
library(ggplot2)
library(raster)
library(rgdal)
library(rgeos)
library(gridExtra)
library(grid)
library(plyr)
library(RColorBrewer)
library(Hmisc)
library(spdep)
source(paste0(repo, 'functions.R'))
source(paste0(repo, 'functions_shapley.R'))
input_dir <- paste0(repo, 'nmx_clean_data/')
cov_dir <- paste0(repo, 'covariate_clean_data/')
#out_dir <- 'C:/Users/ngraetz/Dropbox/Penn/papers/rwjf/paa_materials/'
out_dir <- paste0(repo, '/results')
## Set options for this run (data domain and covariates).
## Current datasets: 25-64 ASDR for national NHW male, national NHW female, South NHW male, South NWH female, South NHB male, South NHB female.
## Current possible covariates:
## BEA: "percent_transfers","percent_wage_salary_employment","income_per_capita","total_employees"
## BLS: "labor_force","employed","unemployed","percent_unemployment"
## SAIPE: "poverty_all","poverty_0_17","poverty_5_17","median_hh_income"
## FactFinder (Census + ACS): "fb","less_12","hs","assoc","college"
## Pops: "perc_25_64"
## AHRF: "mds_pc"
race <- 'nhw'
sex_option <- 1
domain <- 'national'
covs <- c('college','poverty_all','log_hh_income','percent_transfers','percent_unemployment') ## To add: eviction_rate, perc_manufacturing
#covs <- c('log_mds_pc','chr_mammography','chr_diabetes_monitoring') ## To add: insurance (SAHIE)
#covs <- c('as_diabetes_prev','current_smoker_prev','obesity_prev','as_heavy_drinking_prev')
#covs <- c('fb','perc_25_64') ## To add: perc_black, perc_hispanic, perc_native, net_migration
year_range <- c(2000,2010,2015)
plot_trends <- FALSE
start_year <- min(year_range)
end_year <- max(year_range)
sex_name <- ifelse(sex_option==1,'Male','Female')
output_name <- paste0(capitalize(domain), ' ', toupper(race), ' ', capitalize(sex_name))
## Load master shapefile.
counties <- readOGR(paste0(repo, "/cb_2016_us_county_20m"), 'cb_2016_us_county_20m')
counties@data <- transform(counties@data, fips = paste0(STATEFP, COUNTYFP)) # create unique county 5-digit fips
counties <- counties[counties@data$fips != "02016", ] # Drop Aleutians West, AK - screws up plots
counties <- counties[counties$STATEFP != '02' &
counties$STATEFP != '15' &
counties$STATEFP != '72',]
background_counties <- counties[counties$STATEFP != '02' &
counties$STATEFP != '15' &
counties$STATEFP != '72',]
background.dt <- as.data.table(fortify(background_counties, region = 'STATEFP'))
map_colors <- c('#a50026','#d73027','#f46d43','#fdae61','#fee08b','#ffffbf','#d9ef8b','#a6d96a','#66bd63','#1a9850','#006837')
counties$state <- as.numeric(counties$STATEFP)
states <- gUnaryUnion(counties, id = counties@data$state)
background.dt.states <- as.data.table(fortify(states, region = 'state'))
## Load mortality data, subset to race/domain, merge all contextual covariates, and merge total population to create age-standardized deaths for binomial model.
#mort <- readRDS(paste0(input_dir, 'asdr_', race, '_', domain, '.RDS'))
mort <- readRDS(paste0(input_dir, 'age_specific_mort.RDS'))
setnames(mort, c('total_deaths','pooled_year'), c('deaths','year'))
pop_nhw <- readRDS(paste0(input_dir, 'nhw', '_total_pop.RDS'))
pop_nhb <- readRDS(paste0(input_dir, 'nhb', '_total_pop.RDS'))
pop <- rbind(pop_nhw,pop_nhb)
pop <- readRDS(paste0(input_dir, race, '_total_pop.RDS'))
all_covs <- readRDS(paste0(cov_dir, 'combined_covs.RDS'))
all_covs[, log_mds_pc := log(mds_pc)]
## Old method by race
# mort <- merge(mort, pop, by=c('fips','year','sex','race'))
# mort <- merge(mort, all_covs, by=c('fips','year','sex','race'))
# race_option <- ifelse(race=='nhw',0,1)
# mort <- mort[sex==sex_option & race==race_option, ]
# mort[, deaths := round(nmx * total_pop)]
## Now with all-race, collapse covs using pops
pop <- merge(pop, all_covs, by=c('fips','year','sex','race'))
pop <- pop[year %in% year_range, lapply(.SD, weighted.mean, w=total_pop, na.rm=TRUE), by=c('fips','year','sex'), .SDcols=covs]
mort <- merge(mort, pop, by=c('fips','year','sex'))
mort[is.na(total_pop) | total_pop == 0, total_pop := 1]
mort <- mort[year %in% year_range, ]
mort <- mort[sex == sex_option, ]
## Drop Alaska, Hawaii, Puerto Rico
mort <- mort[!grep(paste(c('^02','^15','^72'),collapse="|"), fips, value=TRUE), ]
## Merge region and metro codes.
metro_codes <- fread(paste0(repo, 'covariate_clean_data/FIPSmetroregion.csv'))
metro_codes[, fips := as.character(fips)]
metro_codes[nchar(fips)==4, fips := paste0('0',fips)]
mort <- merge(mort, metro_codes[, c('fips','metroname','regionname','metro','region')], by=c('fips'), all.x=TRUE)
mort[metroname %in% c('Nonmetro, adjacent', 'Nonmetro, nonadjacent'), metroname := 'Nonmetro']
mort[, metro_region := paste0(metroname, '_', regionname)]
mort[, metro_region := factor(metro_region, levels=c('Lg central metro_Pacific',unique(mort[metro_region!='Lg central metro_Pacific', metro_region])))]
## subset counties in the South, according to specified domain for this model run.
## https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf
## Texas, Oklahoma, Arkansas, Louisianna, Mississippi
## Alabama, Georgia, Tennessee, Kentucky, West Virginia, Florida
## South Carolina, North Carolina, Virginia, DC, Maryland, Delaware
south_states <- c('TX','OK','AR','LA','MS','AL','TN','KY','WV','VA','MD','DE','NC','SC','GA','FL')
south_fips <- metro_codes[state %in% south_states, fips]
if(domain=='south') {
message('Subsetting to Southern FIPS codes')
mort <- mort[fips %in% south_fips, ]
}
## Drop if missing any covariates and log. Combine nonmetro categories and create interaction variable.
# message('Counties before dropping missing values in any year: ', length(unique(mort[, fips])))
# drop_counties <- c()
# for(fe in covs) drop_counties <- c(drop_counties, mort[is.na(get(fe)), fips])
# drop_counties <- c(drop_counties, mort[is.na(metroname) | is.na(regionname), fips])
# mort <- mort[!(fips %in% unique(drop_counties)), ]
# message('Counties after dropping missing values in any year: ', length(unique(mort[, fips])))
## Create trend plots by metro (color) and region (facet)
if(plot_trends==TRUE) {
trends <- mort[, lapply(.SD, weighted.mean, w=total_pop, na.rm=TRUE), .SDcols=covs, by=c('metroname','regionname','year')]
cov_names <- get_clean_cov_names()
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/rwjf/covariates/trend_plots/', domain, '_', race, '_', sex_name, '_trends.pdf'), width = 12, height = 8)
for(c in covs) {
gg <- ggplot() +
geom_line(data=trends,
aes(x=year,
y=get(c),
color=metroname),
size=2) +
theme_minimal() +
scale_color_manual(values = brewer.pal(length(unique(collapsed[, cov_name])),'Dark2'), name='Metro') +
labs(x='Year',y=cov_names[fe==c, cov_name], title=cov_names[fe==c, cov_name]) +
facet_wrap(~regionname)
print(gg)
}
dev.off()
}
## Create a neighborhood file using Queens convention.
message('Creating spatial weight matrix...')
nb.FOQ <- poly2nb(counties, queen=TRUE)
## Create an INLA weight matrix.
lw.FOQ <- nb2INLA("FOQ_INLA",nb.FOQ)
an <- data.frame(1:length(counties),counties$fips)
o <- match(mort$fips,an$counties.fips)
ID <- an$X1.length.counties.[o]
mort[, ID := ID]
for(c in covs) {
mort[is.nan(get(c)), (c) := NA]
mort[is.na(get(c)), drop := 1]
}
mort <- mort[is.na(drop),]
## Fit INLA model, save coefficients for table.
## Make tables comparing coefficients
## 1. Metro * Regions, AR1 on year
## 2. Metro * Regions + Covariates, AR1 on year
## 3. Metro * Regions + Covariates, AR1 on year + Besag on county
message('Fitting INLA models...')
mort[, regionname := factor(regionname, levels = c('Pacific', unique(mort[!(regionname %in% 'Pacific'), regionname])))]
mort[, metroname := factor(metroname, levels = c('Lg central metro', unique(mort[!(metroname %in% 'Lg central metro'), metroname])))]
## Create broad age groups for interacting.
mort[agegrp <= 20, 0:20, broad_age := '0_24']
mort[agegrp %in% 25:40, broad_age := '25_44']
mort[agegrp %in% 45:60, broad_age := '45_64']
mort[agegrp >= 65, broad_age := '65_100']
broad_ages <- c('0_24','25_44','45_64','65_100')
inla_formulas <- c(paste0('deaths ~ as.factor(agegrp) + as.factor(metroname)'),
paste0('deaths ~ as.factor(agegrp) + as.factor(metroname) + as.factor(regionname) + year'),
paste0('deaths ~ ', paste(covs, collapse = ' + '), ' + as.factor(agegrp) + as.factor(metroname) + as.factor(regionname) + year'),
paste0('deaths ~ ', paste(covs, collapse = ' + '), ' + as.factor(agegrp) + as.factor(metroname) + as.factor(regionname) + year + f(ID,model="besag",graph="FOQ_INLA")'),
paste0('deaths ~ ', paste(covs, collapse = ' + '), ' + as.factor(agegrp) + as.factor(metro_region) + year + f(ID,model="besag",graph="FOQ_INLA")'),
paste0('deaths ~ ', paste(paste0(covs,'*as.factor(broad_age)'), collapse = ' + '), ' + as.factor(agegrp) + as.factor(metro_region) + year + f(ID,model="besag",graph="FOQ_INLA")'))
for(f in 1:4) {
inla_model = inla(as.formula(inla_formulas[f]),
family = "binomial",
data = mort,
Ntrials = mort[, total_pop],
verbose = FALSE,
control.compute=list(config = TRUE, dic = TRUE),
control.inla=list(int.strategy='eb', h = 1e-3, tolerance = 1e-6),
control.fixed=list(prec.intercept = 0,
prec = 1),
num.threads = 4)
assign(paste0('inla_model_', f), inla_model)
mort[, (paste0('inla_pred_', f)) := inla_model$summary.fitted.values$mean]
mort[, (paste0('inla_residual_', f)) := nmx - get(paste0('inla_pred_', f))]
}
all_tables <- lapply(1:4, make_coef_table)
all_coefs <- rbindlist(all_tables, fill=TRUE)
all_dic_morans <- data.table(model = rep(paste0('Model ', 1:4),3),
name = c(rep("Global Moran's I",4), rep("DIC",4), rep("RMSE",4)),
coef = rep(.00001,12))
for(m in 1:4) {
lisa <- plot_lisa(lisa_var = paste0('inla_residual_', m),
lisa_dt = mort[year==2010,],
lisa_sp = counties,
lisa_id = 'fips',
lisa_var_name = 'Residuals',
sig = 0.05)
all_dic_morans[model == paste0('Model ', m) & name == "Global Moran's I", coef := lisa[[3]]]
all_dic_morans[model == paste0('Model ', m) & name == "DIC", coef := ifelse(is.nan(get(paste0('inla_model_',m))$dic$dic) | is.infinite(get(paste0('inla_model_',m))$dic$dic),
get(paste0('inla_model_',m))$dic$deviance.mean,
get(paste0('inla_model_',m))$dic$dic)]
all_dic_morans[model == paste0('Model ', m) & name == 'RMSE', coef := mort[, sqrt(weighted.mean(get(paste0('inla_residual_', m))^2, w = total_pop))]]
}
all_coefs <- rbind(all_coefs, all_dic_morans, fill = TRUE)
saveRDS(all_coefs, file = paste0(out_dir, '/sex_', sex_option, '_race_', race_option, '_coef_table_just_inla.RDS'))
lisa <- plot_lisa(lisa_var = 'nmx',
lisa_dt = mort[year==2015,],
lisa_sp = counties,
lisa_id = 'fips',
lisa_var_name = 'nMx',
sig = 0.05)
global_morans <- lisa[[3]]
panel_a_title <- 'A)'
panel_b_title <- paste0('B)\nGlobal Morans I: ', global_morans)
title_a <- textGrob(
label = "A)",
x = unit(0, "lines"),
y = unit(0, "lines"),
hjust = 0, vjust = 0,
gp = gpar(fontsize = 16))
panel_a <- arrangeGrob(lisa[[2]] + theme(legend.position="none"), top = title_a)
panel_a$vp <- vplayout(1:6, 1:12)
title_b <- textGrob(
label = "B)",
x = unit(0, "lines"),
y = unit(0, "lines"),
hjust = 0, vjust = 0,
gp = gpar(fontsize = 16))
panel_b <- arrangeGrob(lisa[[1]] + theme(legend.position="none"), top = title_b)
panel_b$vp <- vplayout(7:14, 1:12)
png(paste0(out_dir, '/lisa_data_',m,'_sex', sex_option, '_race_', race_option, '.png'), width = 1200, height = 1400, res = 120)
grid.newpage()
pushViewport(viewport(layout = grid.layout(14, 12)))
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
#print(lisa[[2]] + ggtitle(panel_a_title) + theme(legend.position="none"), vp = vplayout(1:6, 1:12))
grid.draw(panel_a)
#print(lisa[[1]] + ggtitle(panel_b_title) + theme(legend.position="none"), vp = vplayout(7:14, 1:12))
grid.draw(panel_b)
dev.off()
## Do everything else with the full spatial model and metro*region interaction.
## With all-age covariates DIC = 1025807.50
## With broad-age-specific covariates DIC = 982275.99
full_mort <- copy(mort)
# mort <- copy(full_mort)
# mort <- mort[agegrp %in% 45:60, ]
# inla_model = inla(as.formula(inla_formulas[5]),
# family = "binomial",
# data = mort,
# Ntrials = mort[, total_pop],
# verbose = FALSE,
# control.compute=list(config = TRUE, dic = TRUE),
# control.inla=list(int.strategy='eb', h = 1e-3, tolerance = 1e-6),
# control.fixed=list(prec.intercept = 0,
# prec = 1),
# num.threads = 4)
## Make full prediction, full residual, and load posterior mean for all components.
# mort[, inla_pred := inla_model$summary.fitted.values$mean]
# mort[, inla_residual := logit(nmx) - logit(inla_pred)]
# mort[nmx==0, inla_residual := logit(nmx+0.000001) - logit(inla_pred)]
# coefs <- make_beta_table(inla_model, paste0(race,' ',sex_option,' ',domain))
#saveRDS(coefs, file = paste0(out_dir,'coefs_', output_name, '.RDS'))
## Plot fitted age curve
# age_curve <- copy(coefs[grep('agegrp',name),])
# age_curve[, odds := exp(coef + coefs[name=='(Intercept)', coef] + coefs[name=='year', coef * 2015])]
# age_curve[, coef := odds / (1 + odds)]
# age_curve[, age := as.numeric(gsub('as.factor\\(agegrp\\)','',name))]
# age_obs <- mort[, list(deaths=sum(deaths),total_pop=sum(total_pop)), by=c('agegrp')]
# age_obs[, nmx := deaths / total_pop]
# #pdf(paste0(out_dir,'/fitted_age_pattern.pdf'))
# ggplot() +
# geom_line(data=age_curve,
# aes(x=age,
# y=coef),
# color='red') +
# geom_point(data=age_obs,
# aes(x=agegrp,
# y=nmx),
# color='black') + theme_minimal()
#dev.off()
## Run Shapley decomposition on change over time in ASDR.
## Create permutations (2010-1990, 6 changes, total permutations = 2^6 = 64, 32 pairs)
## i.e. one pair for poverty is delta_m|PV=2013,IS=1990,CE=1990,FB=1990,time=1990,residual=1990 -
## delta_m|PV=1990,IS=1990,CE=1990,FB=1990,time=1990,residual=1990
# permutations <- make_permutations(fes = covs,
# start_year = start_year,
# end_year = end_year)
## Prep and reshape input data from model (all fixed effects of interest + geographic random effects +
## time + spatial random effects + intercept + residual, wide by year)
d <- copy(full_mort)
d <- merge(d, inla_model$summary.random$ID[c('ID','mean')], by='ID')
setnames(d, 'mean', 'spatial_effect')
d <- d[year %in% c(start_year,end_year), ]
d <- dcast(d, fips + agegrp ~ year, value.var = c(covs, 'inla_residual', 'inla_pred', 'total_pop', 'metro_region', 'nmx', 'spatial_effect'))
d[, (paste0('year_', start_year)) := start_year]
d[, (paste0('year_', end_year)) := end_year]
## Calculate contribution attributable to each time-varying component. By definition this adds up to total observed change in the outcome
## because of inclusion of the residual.
## Change decompositions occur at the county-age-level.
# all_contributions <- rbindlist(lapply(c(covs, 'year','residual'), calculate_contribution,
# fes=covs,
# start_year=start_year,
# end_year=end_year,
# d=d))
## Try running all age groups separately and binding together.
age_groups <- list(c(0,20), c(25,40), c(45,60), c(65,85))
all_contributions <- rbindlist(lapply(age_groups, shapley_ages,
data=full_mort, inla_f=inla_formulas[5],
coef_file=paste0('age_', ages[1], '_', ages[2], '_', sex_option,'_',cov_domain),
shapley=FALSE))
## Scatter total predicted change from decomp (sum of contributions) with observed change.
plot_metro_regions <- c('Lg central metro_Middle Atlantic',
'Nonmetro_East South Central')
decomp_change <- all_contributions[, list(decomp_change=sum(contribution_mort)), by=c('fips','agegrp')]
decomp_change <- merge(decomp_change, d[, c('fips','agegrp','nmx_2000','nmx_2015','total_pop_2015','metro_region_2015')], by=c('fips','agegrp'))
decomp_change[, obs_change := nmx_2015 - nmx_2000]
round(cor(decomp_change[, c('decomp_change','obs_change')], use='complete.obs')[1,2],2)
decomp_change <- decomp_change[, list(decomp_change=weighted.mean(decomp_change,total_pop_2015,na.rm=T)), by=c('agegrp','metro_region_2015')]
setnames(decomp_change, 'metro_region_2015', 'metro_region')
decomp_change <- decomp_change[metro_region %in% plot_metro_regions, ]
all_contributions_age <- merge(full_mort[year==2015, c('agegrp','fips','total_pop','metro_region')], all_contributions, by=c('agegrp','fips'))
all_contributions_age <- all_contributions_age[, list(contribution_mort=weighted.mean(contribution_mort, total_pop, na.rm=T)), by=c('fe','agegrp','metro_region')]
all_contributions_age <- all_contributions_age[metro_region %in% plot_metro_regions, ]
ggplot() +
geom_bar(data=all_contributions_age,
aes(x=as.factor(agegrp),
y=contribution_mort*100000,
fill=fe),
color='black',
stat='identity') +
geom_point(data=decomp_change,
aes(x=as.factor(agegrp),
y=decomp_change*100000),
size=3) +
labs(x = '', y = 'Change in mortality rate (per 100,000)', title = paste0('Change in mortality rate by age, 2000-2015.')) +
theme_minimal() +
facet_wrap(~metro_region) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 10)) +
scale_fill_manual(values = brewer.pal(length(unique(all_contributions[, fe])),'Spectral'), name='Component')
## Create age-standardized contributions over some age range using 2000 Census age structure.
age_start <- 45
age_end <- 60
pops_2000 <- full_mort[year==2000 & agegrp >= age_start & agegrp <= age_end, ]
age_wts_combined <- pops_2000[, list(pop=sum(total_pop)), by=c('agegrp','sex')]
totals <- pops_2000[, list(total=sum(total_pop)), by=c('sex')]
age_wts_combined <- age_wts_combined[!is.na(agegrp), ]
age_wts_combined <- merge(age_wts_combined, totals, by='sex')
age_wts_combined[, age_wt := pop/total]
age_wts_combined <- age_wts_combined[, c('age_wt','agegrp')]
collapsed <- merge(all_contributions, age_wts_combined, by='agegrp')
collapsed <- collapsed[, list(contribution_mort=weighted.mean(contribution_mort, age_wt)), by=c('fips','fe')]
## Collapse contributions to metro-region.
all_pops <- d[agegrp >= age_start & agegrp <= age_end, list(total_pop_2015=sum(total_pop_2015), total_pop_2000=sum(total_pop_2000)), by=c('fips','metro_region_2015')]
#collapsed <- merge(all_contributions, unique(all_pops[, c('fips','metro_region_2015','total_pop_2015','total_pop_2000')]), by='fips')
collapsed <- merge(collapsed, all_pops, by='fips')
collapsed <- collapsed[, list(contribution_mort=wtd.mean(contribution_mort, total_pop_2015, na.rm=TRUE),
total_pop_2015=sum(total_pop_2015, na.rm=TRUE)),
by=c('metro_region_2015','fe')]
collapsed <- merge(collapsed, collapsed[, list(total_change=sum(contribution_mort, na.rm=TRUE)), by='metro_region_2015'], by='metro_region_2015')
collapsed[, metro_region := factor(metro_region_2015, levels=unique(collapsed$metro_region_2015[order(collapsed[, total_change])]))]
## Calculate observed age-standardized change at the metro-region for comparison.
obs <- merge(d, age_wts_combined, by='agegrp')
obs <- obs[, list(nmx_2000=weighted.mean(nmx_2000, age_wt),
nmx_2015=weighted.mean(nmx_2015, age_wt)), by=c('fips')]
obs <- merge(obs, all_pops, by='fips')
obs <- obs[, list(nmx_2000=weighted.mean(nmx_2000, total_pop_2000),
nmx_2015=weighted.mean(nmx_2015, total_pop_2015)), by=c('metro_region_2015')]
obs[, nmx_change := nmx_2015 - nmx_2000]
## Format for plotting
cov_names <- get_clean_cov_names()
collapsed[, fe := factor(fe, levels=c(covs,"year","residual"))]
collapsed <- merge(collapsed, cov_names, by='fe')
collapsed[, cov_name := factor(cov_name, levels=cov_names[fe %in% collapsed[, fe], cov_name])]
## Plot results of decomposition by metro-region.
collapsed[, metro_region_clean := gsub('_',' - ',metro_region)]
collapsed[, metro_region_clean := factor(metro_region_clean, levels=unique(collapsed$metro_region_clean[order(collapsed[, total_change])]))]
#pdf(paste0(out_dir,'/decomp_', output_name, '.pdf'), width = 11, height = 11*(2/3))
ggplot() +
geom_bar(data=collapsed,
aes(x=metro_region_clean,
y=contribution_mort*100000,
fill=cov_name),
color='black',
stat='identity') +
geom_point(data=collapsed[, list(contribution_mort=sum(contribution_mort)), by=c('metro_region_clean')],
aes(x=metro_region_clean,
y=contribution_mort*100000),
size=3) +
labs(x = '', y = 'Change in ASDR', title = paste0('Change in ', output_name, ' ASDR, ',start_year,'-',end_year)) +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 10)) +
scale_fill_manual(values = brewer.pal(length(unique(collapsed[, cov_name])),'Spectral'), name='Component')
#dev.off()
## Plot maps of each contribution by county.
pdf(paste0(out_dir,'/maps_', output_name, '.pdf'), height=6, width=9)
all_contributions[, contribution_mort_per_100000 := contribution_mort * 100000]
top_code <- quantile(all_contributions[, contribution_mort_per_100000], probs=0.90, na.rm=T)
bottom_code <- quantile(all_contributions[, contribution_mort_per_100000], probs=0.10, na.rm=T)
for(c in c(covs)) {
m <- make_county_map(map_dt = all_contributions[fe==c, ],
map_sp = counties,
map_var = 'contribution_mort_per_100000',
legend_title = 'Contribution',
high_is_good = FALSE,
map_title = paste0('Contribution of ', cov_names[fe==c, cov_name], ' to change in ASDR, ', start_year,'-',end_year),
map_limits = c(-20,50),
diverge = TRUE)
m <- m + scale_fill_gradientn(guide = guide_legend(title = 'Contribution'),
limits = c(-20,50),
breaks = c(-20,-10,0,10,20,30,40,50),
colours=rev(brewer.pal(10,'Spectral')),
values=c(-20,0,50), na.value = "#000000", rescaler = function(x, ...) x, oob = identity)
print(m)
}
c <- 'residual'
top_code <- quantile(all_contributions[, contribution_mort], probs=0.99, na.rm=T)
bottom_code <- quantile(all_contributions[, contribution_mort], probs=0.1, na.rm=T)
m <- make_county_map(map_dt = all_contributions[fe==c, ],
map_sp = counties,
map_var = 'contribution_mort',
legend_title = 'Contribution',
high_is_good = FALSE,
map_title = paste0('Contribution of ', cov_names[fe==c, cov_name], ' to change in ASDR, ', start_year,'-',end_year),
map_limits = c(bottom_code,top_code),
diverge = TRUE)
print(m)
dev.off()
pdf(paste0(out_dir,'/female_transfers.pdf'), height=6, width=9)
c <- 'percent_transfers'
m <- make_county_map(map_dt = all_contributions[fe==c, ],
map_sp = counties,
map_var = 'contribution_mort',
legend_title = 'Contribution',
high_is_good = FALSE,
map_title = paste0('Contribution of ', cov_names[fe==c, cov_name], ' to change in ASDR, ', start_year,'-',end_year),
map_limits = c(bottom_code,0.0007),
diverge = TRUE)
print(m)
dev.off()
c <- 'fb'
m <- make_county_map(map_dt = all_contributions[fe==c, ],
map_sp = counties,
map_var = 'contribution_mort',
legend_title = 'Contribution',
high_is_good = FALSE,
map_title = paste0('Contribution of ', cov_names[fe==c, cov_name], ' to change in ASDR, ', start_year,'-',end_year),
map_limits = c(-0.0002,0.0007),
diverge = TRUE)
|
473611599907db895eeb9d1fb79e3539fdc575bf
|
74d94399398d71541b02452697500fb8bed86d6b
|
/docs/FunctionToAddLeadingZero.R
|
69aae69dd2d74f4fa57045e12a88f1df9306991e
|
[] |
no_license
|
xiehanding/Heart-Failure-Readmission
|
4be518abc625c5df487bda969144aa6d18a168d5
|
d7bd6384353d237f1b3d62a3ab34df5ba29f6dd1
|
refs/heads/master
| 2020-04-17T14:19:31.637680
| 2017-01-06T17:28:23
| 2017-01-06T17:28:23
| 67,823,660
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,500
|
r
|
FunctionToAddLeadingZero.R
|
###############################
# Now we wanna write a function to add zero to every code so that they have equal length, say five digits
# nchar function needs a character variable
is.character(pridgns)
st1 <- as.character(pridgns)
is.character(pridgns)
table(pridgns)
table(st1)
dim(pridgns); dim(st1)
nchar(st1)[1:3]
##################################
#Some tries
j= 5-nchar(st1)[1:3]; j;
paste(rep("0",5))
paste(rep("0",j[1:2]))
paste(rep("0",j[1]))
rm(j)
##################################
f1= function(x){
paste(rep("0",x),sep="")
}
f1(3)
rm(f1)
##################################
# f2 is the function we need to calculate how many zero to add to st1
f2= function(x){
if(x>0) paste(rep("0",x),collapse ="")
else ""
}
f2(3)
###################################
#This line is not working, coding format is wrong
# (f2(3) evaluates to a string, so sapply receives a character, not a function)
sapply(st1[1:20],f2(3))
sapply(st1,f2)[1:20]
st1
########################################################
# NOTE(review): `ff` is not defined anywhere in this file -- both gg()
# experiments below error when run; `f2` was probably intended. TODO confirm.
gg = function(x) { k = 5-nchar(x); paste(ff(k,x),sep="")}
gg(st1[1])
st1[1]
gg = function(x) { k = 5-nchar(x); paste(ff(k),x,sep="")}
gg(st1[1])
st2 = sapply(st1,gg)
#########################################################
# f3 is the function that we need to add several zero to st1
# Pad a code with leading zeros to a fixed width (default 5, as before).
#
# Improvements over the original:
#  * vectorized: f3(st1) now works directly, matching sapply(st1, f3);
#  * self-contained: no longer depends on the helper f2();
#  * generalized: target width is a parameter (default preserves behaviour).
# Codes already `width` characters or longer are returned unchanged, exactly
# as the original did (its negative pad count yielded an empty pad string).
#
# Args:
#   x:     vector of codes; coerced to character (e.g. "311" -> "00311").
#   width: target width in characters.
# Returns: character vector, same length as x.
f3 <- function(x, width = 5){
  x <- as.character(x)
  pad <- strrep("0", pmax(0L, width - nchar(x)))
  paste(pad, x, sep = "")
}
st1[1]
f3(st1[1])
##################################################
# Now we need to apply this f3 to every item in st1
# Let's test on the first 20 rows at first
sapply(st1,f3)[1:20]
# Recorded output of the line above (codes padded to 5 digits):
#   5920  5920 53541  4239 29623  2113  7804  7804  7211 53081 38630  8472   311 78791 41110 41110
# "05920" "05920" "53541" "04239" "29623" "02113" "07804" "07804" "07211" "53081" "38630" "08472" "00311" "78791" "41110" "41110"
#   41110 40391 41091 36613
# "41110" "40391" "41091" "36613"
# We can see the results are correct!
# Now let's apply the f3 to st1 to obtain st2
st2<- sapply(st1,f3)
head(st2)
is.character(st1)
is.character(st2)
class(pridgns)
# Drop the names sapply() attached, keeping just the padded codes.
pridgns5d <- as.character(st2)
pridgns5d
table(pridgns5d)
#################################################
# This pridgns5d is the character variable that we need.
#################################################
# Notice that here pridgns5d's level is different from pridgns and that pridgn5d have duplication at each level
# the problem is at the function of f3, you can see that st2's attribute is different from st1
# (st2 carries a names attribute from sapply; st1 does not)
head(st1)
head(st2)
dim(st1)
dim(st2)
####################################################
pridgns5d[1:100]
|
bc13ae5b13706bcd48e497f0eca9d540363c4b39
|
ab6350fe9c40847991ff657c67996a96517d58d5
|
/R/plot_results.R
|
7bfd7524f3f8b97eaab6c61f5ab8f565953f5a55
|
[] |
no_license
|
sonthuybacha/benchmarkme
|
544d02ec0e01bb8daba31bcea952a1401dacea2c
|
3b4674e49e773c56eba089a8ccd6f02b7fc5af07
|
refs/heads/master
| 2020-09-24T01:08:49.051130
| 2017-11-06T21:54:41
| 2017-11-06T21:54:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,974
|
r
|
plot_results.R
|
# Install a six-colour semi-transparent palette (alpha = 150/255) as the
# active graphics palette.  Returns, invisibly, the palette that was in
# effect before the call (the return value of palette()).
nice_palette <- function(){
  a <- 150
  base_cols <- rbind(
    c(85, 130, 169),
    c(200, 79, 178),
    c(105, 147, 45),
    c(204, 74, 83),
    c(183, 110, 39),
    c(131, 108, 192)
  )
  cols <- apply(base_cols, 1, function(ch) {
    rgb(ch[1], ch[2], ch[3], alpha = a, maxColorValue = 255)
  })
  palette(cols)
}
#' Compare results to past tests
#'
#' Plotting
#' @param x The output from a \code{benchmark_*} call.
#' @param test_group Default \code{unique(x$test_group)}.
#' The default behaviour is select the groups from your benchmark results.
#' @param byte_optimize The default behaviour is to compare your results with results that use the same
#' byte_optimized setting. To use all results, set to \code{NULL}.
#' @param blas_optimize Logical. Default The default behaviour
#' is to compare your results with results that use the same
#' blas_optimize setting. To use all results, set to \code{NULL}.
#' @param log By default the y axis is plotted on the log scale. To change, set the
#' the argument equal to the empty parameter string, \code{""}.
#' @param ... Arguments to be passed to other downstream methods.
#' @importFrom graphics abline grid par plot points text legend title
#' @importFrom grDevices palette rgb
#' @importFrom utils data
#' @importFrom stats aggregate
#' @importFrom benchmarkmeData select_results is_blas_optimize
#' @export
#' @examples
#' data(sample_results)
#' plot(sample_results)
#' plot(sample_results, byte_optimze=NULL)
# S3 plot method for benchmark results: draws one comparison plot per
# benchmark test group, pausing for user input between successive groups.
plot.ben_results <- function(x,
                             test_group=unique(x$test_group),
                             byte_optimize=get_byte_compiler(),
                             blas_optimize=is_blas_optimize(x),
                             log="y", ...) {
  n_groups <- length(test_group)
  for (idx in seq_len(n_groups)) {
    make_plot(x, test_group[idx], byte_optimize, blas_optimize, log, ...)
    # Wait before replacing the current plot, except after the last group.
    if (idx < n_groups) {
      readline("Press return to get next plot ")
    }
  }
}
# Draw a two-panel ranking plot for one benchmark test group: absolute
# timings (left) and timings relative to the fastest machine (right),
# marking where the user's machine ("You") ranks among past results.
# Modifies par() and the palette, restoring both via on.exit().
make_plot = function(x, test_group, byte_optimize, blas_optimize, log, ...){
  # Past results for this group, filtered by the optimisation settings.
  results = select_results(test_group,
                           byte_optimize = byte_optimize,
                           blas_optimize = blas_optimize)
  ## Manipulate new data
  x = x[x$test_group %in% test_group,]
  no_of_reps = length(x$test)/length(unique(x$test))
  # NOTE(review): assumes column 3 of x holds the timing values -- confirm
  # against the benchmark_* result layout.
  ben_sum = sum(x[,3])/no_of_reps
  # Rank = first past result slower than ours; NA means we are slowest.
  ben_rank = which(ben_sum < results$time)[1]
  if(is.na(ben_rank)) ben_rank = nrow(results) + 1
  message("You are ranked ", ben_rank, " out of ", nrow(results)+1, " machines.")
  ## Arrange plot colours and layout
  op = par(mar=c(3,3,2,1),
           mgp=c(2,0.4,0), tck=-.01,
           cex.axis=0.8, las=1, mfrow=c(1,2))
  old_pal = palette()
  # Restore graphics state no matter how we exit.
  on.exit({palette(old_pal); par(op)})
  nice_palette()
  ## Calculate adjustment for sensible "You" placement
  # Label sits right of the marker in the top half of the ranking, left otherwise.
  adj = ifelse(ben_rank < nrow(results)/2, -1.5, 1.5)
  ## Plot limits
  ymin = min(results$time, ben_sum)
  ymax = max(results$time, ben_sum)
  ## Standard timings
  plot(results$time, xlab="Rank", ylab="Total timing (secs)",
       ylim=c(ymin, ymax), xlim=c(0.5, nrow(results)+1),
       panel.first=grid(), cex=0.7, log=log, ...)
  points(ben_rank-1/2,ben_sum, bg=4, pch=21)
  abline(v=ben_rank-1/2, col=4, lty=3)
  text(ben_rank-1/2, ymin, "You", col=4, adj=adj)
  title(paste("Benchmark:", test_group), cex=0.9)
  ## Relative timings (scaled so the fastest machine is at 1)
  fastest = min(ben_sum, results$time)
  ymax= ymax/fastest
  plot(results$time/fastest, xlab="Rank", ylab="Relative timing",
       ylim=c(1, ymax), xlim=c(0.5, nrow(results)+1),
       panel.first=grid(), cex=0.7, log=log, ...)
  abline(h=1, lty=3)
  abline(v=ben_rank-1/2, col=4, lty=3)
  points(ben_rank-1/2,ben_sum/fastest, bg=4, pch=21)
  text(ben_rank-1/2, 1.2, "You", col=4, adj=adj)
  title(paste("Benchmark:", test_group), cex=0.9)
}
# Re-export benchmarkmeData::plot_past so users of this package can call
# plot_past() without loading benchmarkmeData explicitly.
#' @importFrom benchmarkmeData plot_past
#' @export
benchmarkmeData::plot_past
|
d57084e0e5dff462d008c6152f5b0f939695649d
|
1d6be29caa42ddd1c0087a1959db1a9f56fb2eed
|
/src/onh_variant_annotation.R
|
b5e0cf4e1c9323feaefbe6bb6d8e07aad82221b5
|
[] |
no_license
|
cobriniklab/onh_pipeline
|
2c332bfcbb882f82d40e301ae11c3db934f63ff2
|
afb8daf26b69d8c0f47d9283a8ee5d33611c706a
|
refs/heads/master
| 2021-07-14T21:12:04.086940
| 2017-10-19T19:12:29
| 2017-10-19T19:12:29
| 107,584,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,327
|
r
|
onh_variant_annotation.R
|
#!/usr/bin/Rsript
# NOTE(review): shebang misspells "Rscript"; it would fail if this file were
# executed directly rather than sourced.
# load required libraries -------------------------------------------------
library(VariantAnnotation)
library(biobroom)
library(BSgenome.Hsapiens.UCSC.hg19)
library(org.Hs.eg.db)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(tibble)
library(dplyr)
library(data.table)
library(purrr)
# load input --------------------------------------------------------------
# lli_filenames <- list.files(path="./laurali_output/ll_gno_anno_vcfs", pattern="*.vcf", full.names = TRUE)
# Paired per-sample VCFs: "*_anno" (annotated) and "*_genmod" (genetic models).
annotated_filenames <- list.files(path="./output/gatk", pattern="[[:digit:]]_anno.vcf.gz$", full.names = TRUE)
genmod_filenames <- list.files(path="./output/gatk", pattern="[[:digit:]]_genmod.vcf.gz$", full.names = TRUE)
# lli_names <- substr(lli_filenames,35,41)
# Sample IDs extracted by fixed character positions in the file path.
annotated_names <-substr(annotated_filenames,15,17) #format for haplocaller samples
genmod_names <-substr(genmod_filenames,15,17) #format for haplocaller samples
# read in vcfs ------------------------------------------------------------
# lli_list <- map(lli_filenames, readVcf, "hg19")
annotated_list <- lapply(annotated_filenames, function(x)readVcf(x, "hg19"))
genmod_list <- lapply(genmod_filenames, function(x)readVcf(x, "hg19"))
# names(lli_list) <- lli_names
names(annotated_list) <- annotated_names
names(genmod_list) <- genmod_names
# NOTE(review): hard-coded 21st sample used for interactive inspection below;
# errors if fewer than 21 files are found.
anno_vcf <- annotated_list[[21]]
genmod_vcf <- genmod_list[[21]]
# lli_vcf <- lli_list[[1]]
# lli_evcf <- S4Vectors::expand(lli_vcf)
#genmod_list <- genmod_list[1:2]
# define required functions -----------------------------------------------
# single_lli <- function(lli_vcf){
#
# lli_evcf <- S4Vectors::expand(lli_vcf)
#
# anno_df <- data.frame(rowRanges(lli_evcf),
# GT=geno(lli_evcf)$GT,
# DP=info(lli_evcf)$DP,
# AF=info(lli_evcf)$AF,
# FC=as.character(info(lli_evcf)$FC),
# GENEID=as.character(info(lli_evcf)$SGGI),
# TXID=as.character(info(lli_evcf)$SGTI),
# gnomad.AF=info(lli_evcf)$gno_af_all,
# gno_filter=info(lli_evcf)$gno_filter,
# gno_id=info(lli_evcf)$gno_id
# ) %>%
# dplyr::rename_all(~gsub('^.*.pjt$', 'GT', .))
#
# ###################################################
# ### code chunk number 30: predictCoding_frameshift
# ###################################################
# ## CONSEQUENCE is 'frameshift' where translation is not possible
#
#
# anno_df <- dplyr::filter(anno_df, !is.na(GENEID))
#
# }
# Tidy one annotated VCF plus its genmod counterpart into a variant table:
#  * expands multi-allelic records (S4Vectors::expand),
#  * flattens selected INFO/GENO fields into a data.frame,
#  * de-duplicates variants by genomic position,
#  * joins the genetic-model annotation and drops synonymous / NA-gene rows.
# NOTE(review): the result is returned invisibly because the final statement
# is an assignment (`join_df <- ...`).
single_tidy <- function(anno_vcf, genmod_vcf){
  anno_evcf <- S4Vectors::expand(anno_vcf)
  genmod_evcf <- S4Vectors::expand(genmod_vcf)
  # One row per expanded variant; list-valued INFO fields are collapsed
  # with unstrsplit() so they fit in data.frame columns.
  anno_df <- data.frame(rowRanges(anno_evcf),
                        snp_id=info(anno_evcf)$dbsnp_id,
                        GT=geno(anno_evcf)$GT,
                        AF=info(anno_evcf)$AF,
                        QD=info(anno_evcf)$QD,
                        AD=geno(anno_evcf)$AD,
                        DP=geno(anno_evcf)$DP,
                        FS=info(anno_evcf)$FS,
                        MQ=info(anno_evcf)$MQ,
                        CAF=unstrsplit(info(anno_evcf)[,"dbsnp_af"], " "),
                        VQSLOD=info(anno_evcf)$VQSLOD,
                        gnomad.wes.AF=info(anno_evcf)$gno_wes_af_all,
                        gnomad.wgs.AF=info(anno_evcf)$gno_wgs_af_all,
                        gno_filter=info(anno_evcf)$gno_filter,
                        gno_id=info(anno_evcf)$gno_id,
                        SOR=info(anno_evcf)$SOR,
                        MQRankSum=info(anno_evcf)$MQRankSum,
                        ReadPosRankSum=info(anno_evcf)$ReadPosRankSum,
                        VT=info(anno_evcf)$VariantType,
                        Func.refGene=info(anno_evcf)$Func.refGene,
                        Gene.refGene=unstrsplit(info(anno_evcf)$Gene.refGene),
                        GeneDetail.refGene=unstrsplit(info(anno_evcf)$GeneDetail.refGene),
                        ExonicFunc.refGene=unstrsplit(info(anno_evcf)$ExonicFunc.refGene),
                        AAChange.refGene=unstrsplit(info(anno_evcf)$AAChange.refGene),
                        hiDN=info(anno_evcf)$hiConfDeNovo,
                        loDN=info(anno_evcf)$loConfDeNovo
  ) %>%
    # Normalise sample-suffixed column names produced by data.frame().
    dplyr::rename_all(~gsub('\\.\\d+\\.', '\\.', .)) %>%
    dplyr::rename_all(~gsub('\\.\\d+_', '_pro_', .))
  # Keep the first row per genomic position.
  variants <- group_by(anno_df, seqnames, start, end) %>%
    filter(row_number() == 1) %>%
    ungroup()
  # Keep the second whitespace-separated token of CAF as a number --
  # presumably the alternate-allele frequency; TODO confirm field layout.
  variants$CAF <- sapply(strsplit(as.character(variants$CAF), " "), "[", 2)
  variants$CAF <- as.numeric(variants$CAF)
  ###################################################
  ### code chunk number 32: genetic models
  ###################################################
  # Genetic-model labels; the leading "<score>:" prefix is stripped.
  genmod_df <- data.frame(rowRanges(genmod_evcf),
                          GeneticModels=unlist(info(genmod_evcf)$GeneticModels)) %>%
    dplyr::select(seqnames, start, end, REF, ALT, GeneticModels) %>%
    mutate(GeneticModels = gsub("^.*:", "", GeneticModels))
  join_df <- left_join(variants, genmod_df, by=c("seqnames", "start", "end", "REF", "ALT")) %>%
    dplyr::filter(!is.na(Gene.refGene)) %>%
    dplyr::filter(ExonicFunc.refGene != "synonymous_SNV") %>%
    dplyr::group_by(snp_id) %>%
    dplyr::filter(row_number() == 1)
}
# test <- single_tidy(anno_vcf, genmod_vcf)
# Collate per-sample tidy variant tables into one table, attach rfPred
# pathogenicity scores, and derive per-member variant allele fractions.
#
# BUG FIX: the original body ignored its own arguments and read the global
# objects `annotated_list` / `genmod_list` instead, so calling it with any
# other lists silently processed the wrong data.  The parameters are now
# used.  The only call site in this script passes those same globals, so
# behaviour there is unchanged.
#
# Args:
#   anno_vcf_list:   named list of annotated VCF objects.
#   genmod_vcf_list: matching named list of genmod VCF objects.
# Returns: data.frame/data.table of variants with rfPred scores and
#   VAF.Dad / VAF.Mom / VAF.pro columns.
collate_vcfs <- function(anno_vcf_list, genmod_vcf_list){
  evcf_list <- mapply(single_tidy, anno_vcf_list, genmod_vcf_list, SIMPLIFY = FALSE)
  # idcol = "sample" records which input each row came from.
  tidy_vcfs <- data.table::rbindlist(evcf_list, idcol = "sample", fill = TRUE)
  # rfPred is loaded lazily here because it is only needed for the scoring
  # step (NOTE(review): consider moving to the top-level library() block).
  library(rfPred)
  # rfPred expects chromosome names without the "chr" prefix.
  rfp_input <- dplyr::select(data.frame(tidy_vcfs), chr = seqnames, pos = start, ref = REF, alt = ALT) %>%
    mutate(chr = gsub("chr", "", chr))
  rfp0 <- rfPred_scores(variant_list=rfp_input,
                        data="./bin/all_chr_rfPred.txtz",
                        index="./bin/all_chr_rfPred.txtz.tbi")
  tidy_vcfs <- mutate(tidy_vcfs, seqnames = gsub("chr","", seqnames))
  # VAF = alt-allele depth / total depth for each trio member.
  rfp <- left_join(tidy_vcfs, rfp0, by = c("seqnames" = "chromosome", "start" = "position_hg19", "REF" = "reference", "ALT" = "alteration")) %>%
    mutate(VAF.Dad = AD.Dad_1.2/DP.Dad_1) %>%
    mutate(VAF.Mom = AD.Mom_1.2/DP.Mom_1) %>%
    mutate(VAF.pro = AD_pro_1.2/DP_pro_1)
  rfp
}
# Build the collated table and checkpoint it to disk; the immediate reload
# lets later steps of the script be re-run from this point without
# recomputing, and verifies the file round-trips.
tidy_vcfs0 <- collate_vcfs(annotated_list, genmod_list)
saveRDS(tidy_vcfs0, "./results/tidy_vcfs0_20171004.rds")
tidy_vcfs0 <- readRDS("./results/tidy_vcfs0_20171004.rds")
# Filter and summarise the collated variant table into three views:
#   variants - per-variant rows passing QC / population-frequency filters,
#   genes    - per-gene recurrence summaries,
#   samples  - per-sample semicolon-joined gene lists.
#
# BUG FIX: the original body began with browser(), a leftover debugging call
# that dropped into the interactive debugger (or errored non-interactively)
# on every invocation; it has been removed.  The filtering logic itself is
# unchanged, and the result list is now returned visibly.
#
# Args:
#   my_vcfs: data.frame/data.table as produced by collate_vcfs().
# Returns: list with elements "variants", "genes" and "samples".
retidy_vcfs <- function(my_vcfs){
  variants <- dplyr::filter(my_vcfs, !is.na(Gene.refGene)) %>%
    dplyr::filter(ExonicFunc.refGene != "synonymous_SNV") %>%
    dplyr::group_by(sample, snp_id) %>%
    dplyr::filter(row_number() == 1) %>%
    dplyr::group_by(snp_id) %>%
    # Require at least one read supporting each trio member's genotype.
    dplyr::filter(AD_pro_1.1 != 0 | AD_pro_1.2 != 0) %>%
    dplyr::filter(AD.Dad_1.1 != 0 | AD.Dad_1.2 != 0) %>%
    dplyr::filter(AD.Mom_1.1 != 0 | AD.Mom_1.2 != 0) %>%
    filter(GT.Dad_1 != "0/0" & GT.Mom_1 != "0/0" & GT_pro_1 != "0/0") %>%
    # Drop common variants (population AF >= 10% in any reference set).
    dplyr::filter(gnomad.wes.AF < 0.10 | is.na(gnomad.wes.AF)) %>%
    dplyr::filter(gnomad.wgs.AF < 0.10 | is.na(gnomad.wgs.AF)) %>%
    dplyr::filter(CAF < 0.10 | is.na(CAF)) %>%
    dplyr::filter(!is.na(sample)) %>% # check up on significance of this threshold
    group_by(snp_id) %>%
    # Recurrence: which samples carry each variant, and how many.
    mutate(recurrence=paste(sample,collapse=';')) %>%
    mutate(counts = n()) %>%
    dplyr::arrange(desc(counts))
  genes <- variants %>%
    group_by(snp_id) %>%
    filter(row_number() == 1) %>%
    group_by(Gene.refGene) %>%
    mutate(gene_counts = sum(counts)) %>%
    mutate(gene_recurrence = paste(recurrence, collapse=";")) %>%
    mutate(gene_recurrence = map_chr(strsplit(as.character(gene_recurrence) ,";"), function(x) paste(unique(x), collapse=";"))) %>%
    mutate(gene_recurrence_counts = map_int(strsplit(as.character(gene_recurrence) ,";"), function(x) length(unique(x)))) %>%
    dplyr::arrange(desc(counts)) %>%
    ungroup()
  samples <- variants %>%
    dplyr::group_by(sample, snp_id) %>%
    dplyr::filter(row_number() == 1) %>%
    dplyr::ungroup() %>%
    dplyr::select(sample, Gene.refGene) %>%
    group_by(sample) %>%
    dplyr::summarise(Gene.refGene = paste(Gene.refGene, collapse=";")) %>%
    dplyr::arrange(desc(sample))
  list("variants" = variants, "genes" = genes, "samples" = samples)
}
tidy_vcfs <- retidy_vcfs(tidy_vcfs0)
# dplyr::select(-GT.394MOM_1, -AD.394MOM_1.1, -AD.394MOM_1.2, -DP.394MOM_1)
# Write the three views to dated CSVs, then read them back so downstream
# code works from the on-disk representation.
tidy_base_path = "./results/20171004_tidy_vcfs/"
dir.create(tidy_base_path)
variants_path <- paste(tidy_base_path, "variants", "_tidy_table.csv", sep = "")
genes_path <- paste(tidy_base_path, "genes", "_tidy_table.csv", sep = "")
samples_path <- paste(tidy_base_path, "samples", "_tidy_table.csv", sep = "")
write.table(tidy_vcfs$variants, variants_path, sep = ",", row.names = FALSE)
write.table(tidy_vcfs$genes, genes_path, sep = ",", row.names = FALSE)
write.table(tidy_vcfs$samples, samples_path, sep = ",", row.names = FALSE)
my_path <- (c(variants_path, genes_path, samples_path))
# NOTE(review): the CSV round-trip replaces the tibbles with plain
# data.frames and re-inferred column types -- confirm downstream code copes.
tidy_vcfs <- lapply(my_path, read.table, sep=",", header = TRUE)
tidy_vcfs <- setNames(tidy_vcfs, c("variants", "genes", "samples"))
|
541d0a00950170f18a8dba5d2ebb33da6d1f18d8
|
314a08065adb696b530306a41fdc65b9d49da1b3
|
/01_top_frequented_nearby.R
|
b14503cf07accba8adc793b2fb166f3380d1ec5b
|
[] |
no_license
|
DrPav/Kaggle-FBV
|
4b628082791f03003554f462e618b582d5581e20
|
0db959223b6eeccb0a9e43508ee609dedbd319d0
|
refs/heads/master
| 2016-09-14T19:11:39.514213
| 2016-06-09T00:39:22
| 2016-06-09T00:39:22
| 59,562,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,896
|
r
|
01_top_frequented_nearby.R
|
#Find the top 3 visited places in range of the xy co-ords
require(data.table)
require(dplyr)
require(magrittr)
require(bit64)
# Read place_id as character: it is an integer64 key and joining on it errors.
raw_train = fread("data/train.csv", integer64 = "character")
# Get errors later on when joining on placeID that is integer64.
#Use "Character" instead as it is only a key
#Use as.data.frame to get rid of errors when joining saying its not a dataframe
#For each place take the average co-ordinates and number of times visited
places = raw_train %>% group_by(place_id) %>%
  summarise(x = mean(x), y = mean(y), count = n()) %>%
  as.data.frame()
#Turn below into function (x_coord, y_coord, distance)
#testing
#Get the places in range
# Window half-widths; x and y are scaled differently (anisotropic space).
distance = 2
x_range = 0.77*distance
y_range = 0.03*distance
# Return the place_ids of the k most-visited places inside a rectangular
# window centred on (x_coord, y_coord).
#
# Generalised from the original: the candidate table and the window
# half-widths used to be read from the globals `places`, `x_range` and
# `y_range`; they are now parameters defaulting to those globals, so the
# existing apply() call sites behave exactly as before.
#
# Args:
#   x_coord, y_coord: centre of the search window.
#   candidates: data.frame with columns x, y, count, place_id.
#   dx, dy:     half-widths of the window in x and y.
#   k:          number of ids to return.  As in the original, indexing with
#               seq_len(k) pads with NA rows when fewer than k places fall
#               in the window, so the result always has length k (the
#               caller binds results into a fixed-width matrix).
# Returns: character vector of length k (possibly containing NA).
getTop3 <- function(x_coord, y_coord, candidates = places,
                    dx = x_range, dy = y_range, k = 3){
  in_range <- candidates %>% filter(x < x_coord + dx,
                                    x > x_coord - dx,
                                    y < y_coord + dy,
                                    y > y_coord - dy)
  top_k <- arrange(in_range, desc(count))[seq_len(k), ]
  top_k$place_id
}
# Time getTop3 over the first 10k test rows, then export chunks for Python.
test_set = fread("data/test.csv")
test_set_small = test_set[1:10000,] %>% select(row_id, x,y)
print("Start")
a = Sys.time()
print(a)
# apply() over the matrix rows; t() turns the 3-row result into 10000 x 3.
results = apply(test_set_small %>% as.matrix(), 1, function(r) getTop3(r['x'],r['y'])) %>% t()
results_named = cbind(select(test_set_small, row_id), results)
b = Sys.time()
print(b)
print("End 10000 done")
print(b-a)
# 1,000 takes 14 seconds
# 10,000 takes 149 seconds
# Estimate that all 8 Millio will take 33 hours - doing 250,000 an hour
#Very slow
#Export the data sets and try in Python
write.csv(places, "data/01_places.csv", row.names = F)
write.csv(test_set_small, "data/01_test_10kobs.csv", row.names = F)
write.csv(select(test_set, row_id, x, y), "data/01_test_860kobs.csv", row.names = F)
# NOTE(review): the chunk boundaries overlap (row 200000 is in both p1 and
# p2, etc.), duplicating one row at each seam -- confirm this is intended.
test_set1 = test_set[1:200000,] %>% select(row_id, x,y)
write.csv(select(test_set1, row_id, x, y), "data/01_test_p1.csv", row.names = F)
test_set2 = test_set[200000:400000,] %>% select(row_id, x,y)
write.csv(select(test_set2, row_id, x, y), "data/01_test_p2.csv", row.names = F)
test_set3 = test_set[400000:600000,] %>% select(row_id, x,y)
write.csv(select(test_set3, row_id, x, y), "data/01_test_p3.csv", row.names = F)
test_set4 = test_set[600000:8607230, ] %>% select(row_id, x,y)
write.csv(select(test_set4, row_id, x, y), "data/01_test_p4.csv", row.names = F)
# Create an artificial test grid of 100 x-values by 1000 y-values (100,000
# points) across the space.  Actual test points will later be mapped to
# the closest grid point.
x_points <- seq(from = 0, to = 10, length.out = 100)
y_points <- seq(from = 0, to = 10, length.out = 1000)
# Vectorised replacement for the original nested for loops, which grew x
# and y one element at a time (quadratic copying).  Ordering is identical:
# x varies slowest (repeated for every y), y cycles fastest.
x <- rep(x_points, each = length(y_points))
y <- rep(y_points, times = length(x_points))
test_artificial <- data.frame(id = seq_along(x), x, y)
write.csv(test_artificial, "data/01_test_grid.csv", row.names = F)
|
9bc0e6bba975a1663d17ecd41de65c2565a0eb19
|
17e0f15ea9dd36e747d9102d96e414bc5c6acbc8
|
/clase 02_04.R
|
067723f25121de7b75364784b871b6b178e2e06d
|
[] |
no_license
|
noelzap10/Programacion_Actuarial_lll
|
b1066c059cebbefddc1b72252663da5fbba4db9e
|
3b36367f1a676a29176870eaf23dce16ab2ef397
|
refs/heads/master
| 2021-09-14T23:12:45.457302
| 2018-05-22T01:22:38
| 2018-05-22T01:22:38
| 119,411,273
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
clase 02_04.R
|
# Scoping rules demo: masking a package function with a global binding.
# NOTE(review): setwd() in a script is fragile; kept because the original
# lesson relies on it.
setwd("~/GitHub/Programacion_Actuarial_lll")
# BUG FIX: a stray "^" token on its own line was removed -- it was a parse
# error that prevented this file from being sourced.
# 'lm' resolves to stats::lm along the search path...
lm
# ...until a binding in the global environment shadows it.
lm <- function(x){x*x}
lm
# Removing our binding un-shadows stats::lm again.
rm(lm)
lm
search() # shows the search (attach) list
library(swirl)
search()
# hacer.potencia ("make power") returns a closure that raises its argument
# to the power n captured in the closure's enclosing environment.
hacer.potencia <- function(n) {
  potencia <- function(x) {
    x^n
  }
  potencia
}
cubica <- hacer.potencia(3)   # cube function
cubica(3)
cuadrada <- hacer.potencia(2) # square function
cuadrada(2)
# BUG FIX: 'evironment' was a typo for 'environment'; the original ls()
# calls errored.  Each closure's environment holds its own n.
ls(environment(cubica))
get("n", environment(cubica))
ls(environment(cuadrada))
get("n", environment(cuadrada))
# Lexical scoping demo: g() is defined at top level, so its free variable y
# resolves to the global y (10), not f's local y (2).
# Hence f(3) = 2^2 + g(3) = 4 + 3*10 = 34.
y <- 10
f <- function(x) {
  y <- 2
  y^2 + g(x)
}
g <- function (x){
  x*y
}
f(3)
# Writing standards
# Dates and times
# Date objects are stored as days since the 1970-01-01 epoch.
x <- as.Date("1970-01-01")
x
unclass(x)
unclass(as.Date("1970-01-02"))
# MaryPaz 1998-06-19
inicio <- unclass(as.Date("1998-06-19"))
final <- unclass(as.Date("2018-04-02"))
final-inicio # days lived
weekdays(as.Date("1998-11-07"))
# POSIXct stores seconds since the epoch; POSIXlt is a broken-down list.
a <- as.POSIXct("1998-11-07")
b <- as.POSIXlt("1998-11-07")
|
fde72d3183a7baa1c6be940e5bcb441bd009cb22
|
50cf2f989dea08b97b43e80e9a334619e4909dc4
|
/scripts/loaddata.R
|
6cc909d4296f4d2e8ed2bea51e52961684f7c5b2
|
[] |
no_license
|
seanspicer/ExData_Plotting1
|
925325faa647f5d836b06772dd7d26bbf1d79195
|
c95282a350de154be8ff63a8c8389c5a9833865a
|
refs/heads/master
| 2020-04-10T17:21:39.320030
| 2015-07-11T13:20:14
| 2015-07-11T13:20:14
| 38,724,045
| 0
| 0
| null | 2015-07-08T01:18:11
| 2015-07-08T01:18:11
| null |
UTF-8
|
R
| false
| false
| 1,674
|
r
|
loaddata.R
|
#
# loaddata.R
#
# Author: Sean Spicer (sean.spicer@gmail.com)
# Date: 11-Jul-2015
#
#
# If the raw data has not been processed, download- unzip- and load- it.
# Save it to a native R object file for subsequent runs.
#
# Source Location: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
#
#
# Cache Downlaoded Data
#
# Cache flag: when TRUE, the processed subset is saved as an .rds so later
# runs skip the download/parse step.  (Fixed: use TRUE, not the
# reassignable shorthand T; use <- for assignment.)
CacheLoad <- TRUE

if (!file.exists('household_power_consumption.rds')) {
  # Download and unpack the raw semicolon-separated data file.
  download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip',
                destfile = 'household_power_consumption.zip', method = 'curl')
  unzip('household_power_consumption.zip')

  # Read the data; '?' marks missing values.  First two columns are the
  # Date and Time strings, the remaining seven are numeric measurements.
  data <- read.table('household_power_consumption.txt',
                     header = TRUE,
                     sep = ';',
                     na.strings = '?',
                     colClasses = c(rep('character', 2), rep('numeric', 7)))

  # Combine Date and Time into a single date-time column.
  data$Date.Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")

  # Keep only the two days of interest: 2007-02-01 and 2007-02-02.
  start <- as.Date(strptime("2007-02-01", "%Y-%m-%d"))
  end <- as.Date(strptime("2007-02-02", "%Y-%m-%d"))
  subdata <- subset(data, as.Date(Date.Time) >= start & as.Date(Date.Time) <= end)

  # Save the subset for subsequent runs.
  if (CacheLoad) {
    saveRDS(subdata, file = 'household_power_consumption.rds')
  }

  # Remove temporary data / downloads.
  rm(data, start, end)
  file.remove('household_power_consumption.txt')
  file.remove('household_power_consumption.zip')
} else {
  subdata <- readRDS('household_power_consumption.rds')
}
|
cb53b61e97d5e07d3d44e846faacae3dbde91753
|
864b08c705ff5cd6947cea43f06781f2793f0b83
|
/R/linear.R
|
6c9ecf03a2e77cdfaa56bfbf4dd0a2c1da03dae7
|
[] |
no_license
|
data-cleaning/errorlocate
|
4a90594c2f779cf3eff0e22e8d28c48ff92fe260
|
db111b170db30101586cf1b15521450952eed340
|
refs/heads/master
| 2022-07-09T21:16:40.482706
| 2022-06-29T07:32:52
| 2022-06-29T07:32:52
| 38,886,469
| 19
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,758
|
r
|
linear.R
|
# Names of log-like primitives that is_lin_()/lin_mip_rule_() may treat as
# linearizable transforms of a single variable (gated by the
# "errorlocate.allow_log" option).
LOGS <- c("log", "log1p", "log10", "log2")
# code is mainly copied from validate, but needed for linear sub expressions in
# conditional statements.
#' Check which rules are linear rules.
#'
#' Check which rules are linear rules.
#'
#' @note `errorlocate` supports linear,
#' categorical and conditional rules to be used in finding errors. Other rule types
#' are ignored during error finding.
#' @export
#' @param x [validator()] object containing data validation rules
#' @param ... not used
#' @return `logical` indicating which rules are (purely) linear.
#' @family rule type
is_linear <- function(x, ...){
  # An expression vector is checked element-wise.
  if (is.expression(x)){
    return(vapply(x, is_lin_, logical(1L)))
  }
  stopifnot(inherits(x, "validator"))
  # vapply (rather than sapply) guarantees a logical vector even when the
  # validator contains no rules; sapply would return an empty list() there,
  # breaking logical subsetting such as x[is_linear(x)].
  vapply(x$rules, function(rule){
    is_lin_(rule@expr)
  }, logical(1L))
}
# HACK
# Convert every (purely) linear rule of a validator into a rewritten
# mip_rule, using eps = 0 so equalities stay exact.  Non-linear rules are
# dropped by the is_linear() filter.
lin_as_mip_rules <- function(x, ...){
  lin_rules <- x[is_linear(x)]
  lapply(lin_rules$rules, function(rule){
    rewrite_mip_rule(lin_mip_rule_(rule@expr, name=rule@name), eps=0)
  })
}
# check if a (sub) expression is linear
# Recursive AST walk: at the top level the expression must be a comparison
# (or numeric range) whose two sides are linear sub-expressions; below the
# top level, numbers and symbols are linear, +/- of linear terms is linear,
# and * is linear only when one factor is a numeric constant.
is_lin_ <- function(expr, top=TRUE, ...){
  op <- op_to_s(expr)
  l <- consume(left(expr))
  r <- consume(right(expr))
  if (top){
    if (is_num_range(expr)){
      return(TRUE)
    }
    # Only (in)equality comparisons can form a linear rule.
    if (!(op %in% c("==", ">", ">=", "<=", "<"))){ return(FALSE) }
    return(is_lin_(l, FALSE) && is_lin_(r, FALSE))
  }
  if (is.atomic(expr)){
    # NOTE(review): since R 4.4 is.atomic(NULL) is FALSE, so the is.null()
    # alternative here may be unreachable on new R -- confirm intent.
    return(is.numeric(expr) || is.null(expr))
  }
  if (is.symbol(expr) || op == "var_group"){ return(TRUE) }
  if (op %in% c("+","-")){
    return( is_lin_(l, FALSE) && is_lin_(r, FALSE))
  }
  if (op == "*"){
    # Linear iff one side is a constant (possibly a negated literal, hence
    # the left(l)/left(r) checks).
    if (is.numeric(l) || is.numeric(left(l))){ return(is_lin_(r, FALSE)) }
    if (is.numeric(r) || is.numeric(left(r))){ return(is_lin_(l, FALSE)) }
  }
  # Optionally accept log-type transforms of a constant or a bare variable.
  if ( op %in% LOGS
    && isTRUE(getOption("errorlocate.allow_log"))
  ){
    if (is.numeric(l)){
      return(TRUE)
    }
    # this is a log transformed variable...
    if (is.symbol(l)){
      return(TRUE)
    }
    # TODO make this work for all linear subexpressions (takes more administration)
  }
  FALSE
}
#
# create a linear mip_rule from a linear expression.
# assumes that it is checked with is_lin_
# Recursively collects named coefficients (one per variable) plus the
# constant term under the reserved name ".b"; `sign` carries the +1/-1
# multiplier accumulated while descending through -, comparison sides, etc.
lin_mip_rule_ <- function(e, sign = 1, name, ...){
  # A bare variable contributes coefficient `sign`.
  if (is.symbol(e)){
    return(setNames(sign, deparse(e)))
  }
  # A numeric literal contributes to the constant term.
  if (is.numeric(e)){
    return(c(.b=sign*e))
  }
  if (is.null(e)){ # catches unary operators +-
    return(NULL)
  }
  op <- op_to_s(e)
  l <- consume(left(e))
  r <- consume(right(e))
  if (op %in% c("==", ">", ">=", "<=", "<")){
    # Move everything to the left-hand side: lhs - rhs (op) 0.
    coef <- c(lin_mip_rule_(l, sign), lin_mip_rule_(r, -sign), .b=0) # makes sure that .b exists
    coef <- tapply(coef, names(coef), sum) # sum up coefficients
    b <- names(coef) == ".b"
    return(mip_rule(coef[!b], op, -coef[b], rule = name))
  }
  if (op == '-'){
    if (is.null(r)){ # unary "-l"
      return(lin_mip_rule_(l, -sign))
    } # else binary "l-r"
    return(c(lin_mip_rule_(l, sign), lin_mip_rule_(r, -sign)))
  }
  if (op == '+'){
    if (is.null(r)){ # unary "+l"
      return(lin_mip_rule_(l, sign))
    } # else binary "l+r"
    return(c(lin_mip_rule_(l, sign), lin_mip_rule_(r, sign)))
  }
  if (op == '*'){
    # Fold the constant factor into `sign`; eval() reduces literal
    # sub-expressions such as (-2).
    if (is.numeric(left(l))){
      l <- eval(l) # to deal with negative coefficients
    }
    if (is.numeric(l)){ return(lin_mip_rule_(r, sign*l)) }
    if (is.numeric(left(r))){
      r <- eval(r) # to deal with negative coefficients
    }
    if (is.numeric(r)){ return(lin_mip_rule_(l, sign*r)) }
  }
  if (op %in% LOGS){
    # log of a constant: evaluate and treat as a constant term.
    if (is.numeric(l)){
      l <- eval(e)
      return(lin_mip_rule_(l, sign))
    }
    if (is.symbol(l)){ # derive a new variable <var>._<logfn>
      n <- paste0(deparse(l), "._", op)
      return(setNames(sign, n))
    }
    stop("to be implemented")
  }
  stop("Invalid linear statement")
}
|
fa04df4d897b609ab717707257f58671e6bd412a
|
2ccc18877d7cfcec3b35fe38f6cee88f4eb30ee0
|
/Lecture 3/Lecture3_looping_and_functions.R
|
6ced113be20dfefd6b4d3cd841cd2ef50298994a
|
[] |
no_license
|
srodrb/Visualization_MSE
|
442625f7fe272b5079e1953cf0a4630f5b509a90
|
08a27c88a200ca53d80c24895d81e55fea3d62aa
|
refs/heads/master
| 2020-12-24T15:05:33.118872
| 2014-10-08T14:48:25
| 2014-10-08T14:48:45
| 24,328,669
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,070
|
r
|
Lecture3_looping_and_functions.R
|
# The script works on the current directory
#
# Samuel Rodriguez Bernabeu
# Assignment: lecture 3
# deliver date: 06/10/14
#
#
# Exercise 1: For each of the following code sequences, predict the result.
# Then do the computation:
answer <- 0
for(j in 3:5) { answer <- j + answer }   # 3 + 4 + 5 = 12
cat("Answer ", answer)

# BUG FIX: the next two assignments were originally on one line with no
# separator ("answer <- 10 j <- 0"), which is a parse error; they are now
# split onto separate lines.  The loop skips j == 3 via `next`.
answer <- 10
j <- 0
while(j < 5) {
  j <- j + 1
  if (j == 3) {
    next
  } else {
    answer <- answer + j*answer
  }
}
cat("Answer ", answer)
# Exercise 2: add up the numbers from 1 to 100 in two different ways --
# with an explicit for loop and with sum().  (The loop iterates 0:100, as
# in the original; the extra 0 does not change the total.)
result_for <- 0
for (value in 0:100) {
  result_for <- result_for + value
}
result_sum <- sum(0:100)
cat("Result using for: ", result_for, " result using sum function ", result_sum)
# Exercise 3: create x <- seq(0, 1, 0.05); plot x versus x with type "l"
# and y-axis label "y", then overlay the curves x^j for j in 3..5.
x <- seq(0, 1, 0.05)
plot(x ~ x, type = 'l', ylab = "y")
for (expo in 3:5) {
  lines(x, x^expo)
}
|
7fbbcee0a67e21ca30b12ab3d0f80eb4829a8ce8
|
e4564823bda709beb5ac12295401be810372ed6d
|
/R/Hpoints.R
|
ecb23a7aba11c075a263dcaf3e9156196ab4699a
|
[] |
no_license
|
cran/Renext
|
ee3a3a53bd4ee7473c6d38c59726f0a3556473e8
|
c7d43fcd7c0aeff7bc632d72b247131b53fbb923
|
refs/heads/master
| 2022-12-05T08:20:08.238641
| 2022-11-24T14:20:02
| 2022-11-24T14:20:02
| 17,693,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
Hpoints.R
|
##' Plotting positions for exponential return level plots.
##'
##' The plotting positions are numeric values to use as the abscissae
##' corresponding to the order statistics in an exponential return
##' level plot. They range from \eqn{1/n}{1/n} to about
##' \eqn{\log n}{log(n)}. They can be related to the plotting positions
##' given by \code{\link{ppoints}}.
##'
##' @title Plotting positions for exponential return levels
##' @param n sample size; a vector of length > 1 stands for its length
##' (mirroring \code{\link{ppoints}}).
##' @return numeric vector of plotting positions with length
##' \code{n}.
##' @author Yves Deville
##' @seealso \code{\link{ppoints}}.
##' @details
##' The computed values are the cumulated sums
##' \deqn{H_{i} = \frac{1}{n} + \frac{1}{n-1} + \dots + \frac{1}{n + 1 - i}}{
##' H[i] = 1 / n + 1 / (n - 1) + ... + 1 / (n + 1 - i)}
##' (fixed: the roxygen tag was missing its backslash and the ASCII form
##' wrongly showed 1 / (n + 1) as the second term).
Hpoints <- function(n) {
    ## Mimic ppoints(): a vector argument stands for its length.
    if (length(n) > 1L) n <- length(n)
    ## Empty sample: no plotting positions.
    if (n == 0) return(numeric(0))
    ## seq_len(n) is the idiomatic, safe form of the original seq(1L:n).
    cumsum(1 / rev(seq_len(n)))
}
|
68ebef2bde0c9ed148d9b7b2937373f9c71a390c
|
d635576d8e4823765313f3015735ebe0017ba18a
|
/website/static/slides/05-data-wrangling/met-datatable.R
|
db99b67b91a9daca5ed27d566f4c968ed296ee66
|
[] |
no_license
|
USCbiostats/PM566
|
4590498e4e0ec495da689c7d38832fe41e00069c
|
5d6f69ade27326042965dde4317db7db012531d1
|
refs/heads/master
| 2023-08-18T00:39:21.873340
| 2023-08-07T17:47:52
| 2023-08-07T17:47:52
| 252,278,014
| 20
| 33
| null | 2022-08-04T19:17:12
| 2020-04-01T20:19:30
|
HTML
|
UTF-8
|
R
| false
| false
| 4,726
|
r
|
met-datatable.R
|
# Tailor to download a subset of the data for the lab
library(data.table)
# 1. Download the NOAA ISD station history table.
stations <- fread(
  "ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.csv",
  check.names = TRUE
)
# continental US only, remove AK, HI, territories and weather stations with missing WBAN numbers
st_us <- stations[
  CTRY=="US" &
    !(STATE %in% c("AK", "HI", "", "PR", "VI")) &
    WBAN < 99999 &
    USAF < 999999 # This comparison may be wrong b/c USAF is str.
]
#check which states are included
table(st_us$STATE)
# Remove Islands, platforms, and buoys.
# data.table's i-argument `!` negates the integer indices from grep(),
# i.e. keep all rows whose STATION.NAME does not match.
st_us <- st_us[!grep("BUOY|ISLAND|PLATFORM", STATION.NAME)]
# Extract year from station start and end date (dates are YYYYMMDD ints).
st_us[, BEGIN_YR := floor(BEGIN/1e4)]
st_us[, END_YR := floor(END/1e4)]
# If you check the main dataset, you will see that some USAF codes are not
# integers stations[which(stations[,grepl("[^0-9]", USAF)])] which is why
# data.table read it as a string.
st_us[, USAF := as.integer(USAF)]
# Only take stations that have current data (>=2019)
st_us <- st_us[END_YR >= 2019]
# Extract data from downloaded files
# Need to define column widths, see ftp://ftp.ncdc.noaa.gov/pub/data/noaa/ish-format-document.pdf
column_widths <- c(4, 6, 5, 4, 2, 2, 2, 2, 1, 6, 7, 5, 5, 5, 4, 3, 1, 1, 4, 1, 5, 1, 1, 1, 6, 1, 1, 1, 5, 1, 5, 1, 5, 1)
column_names <- c(
  "ID","USAFID", "WBAN", "year", "month","day", "hour", "min","srcflag", "lat",
  "lon", "typecode","elev","callid","qcname","wind.dir", "wind.dir.qc",
  "wind.type.code","wind.sp","wind.sp.qc", "ceiling.ht","ceiling.ht.qc",
  "ceiling.ht.method","sky.cond","vis.dist","vis.dist.qc","vis.var","vis.var.qc",
  "temp","temp.qc", "dew.point","dew.point.qc","atm.press","atm.press.qc"
)
met_all <- NULL
for (y in 2019){
y_list <- st_us[BEGIN_YR <= y & END_YR>=y]
# Ways to download/read fixed-width-files is a common task. Other solutions
# for R can be found here:
# https://stackoverflow.com/a/34190156/2097171
# Download, parse, and clean one NOAA ISD station file per row of `y_list`
# for year `y`, appending the August observations to `met_all`.
# NOTE(review): the loop starts at record 317, presumably resuming an earlier
# partial run -- confirm before running from scratch.
for (s in 317:nrow(y_list)) {
  # Building the URL from the station identifiers (USAF / WBAN) and the year
  uri <- sprintf(
    "ftp://ftp.ncdc.noaa.gov/pub/data/noaa/%i/%s-%05d-%1$i.gz",
    y, y_list$USAF[s], y_list$WBAN[s]
  )
  # Downloading the file; a failed download is reported as a warning, not an
  # error, so the loop continues with the next station.
  tmpf <- tempfile(fileext = ".gz")
  download_try <- tryCatch(
    download.file(uri, destfile = tmpf, quiet = TRUE),
    error = function(e) e
  )
  if (inherits(download_try, "error")) {
    warning(
      sprintf(
        "The file:\n %s\nWas not able to be downloaded (record: %i).",
        uri, s),
      immediate. = TRUE
    )
    # Skipping to the next
    next
  }
  # Reading the fixed-width ISD records
  # (fixed typo in the progress message: "recor" -> "record")
  message("Reading the data for record ", s, " in year ", y, "... ", appendLF = FALSE)
  tmpdat <- suppressMessages(readr::read_fwf(
    file = tmpf,
    col_positions = readr::fwf_widths(
      widths = column_widths,
      col_names = column_names
    ),
    progress = FALSE #,
    # col_types = "cciiiciicicciccicicccicccicicic"
  ))
  # Remove the temporary download so a long run does not fill the temp dir.
  unlink(tmpf)
  tmpdat <- data.table(tmpdat)
  # Right types
  tmpdat[,year := as.integer(year)]
  tmpdat[,month := as.integer(month)]
  tmpdat[,day := as.integer(day)]
  tmpdat[,hour := as.integer(hour)]
  tmpdat[,lat := as.integer(lat)]
  tmpdat[,lon := as.integer(lon)]
  tmpdat[,elev := as.integer(elev)]
  tmpdat[,wind.sp := as.integer(wind.sp)]
  tmpdat[,atm.press := as.integer(atm.press)]
  # change the ISD missing-value sentinels 999/9999/99999/999999 to NA
  tmpdat[,wind.dir := ifelse(as.integer(wind.dir) == 999, NA, wind.dir)]
  tmpdat[,wind.sp := ifelse(as.integer(wind.sp) == 9999, NA, wind.sp)]
  tmpdat[,ceiling.ht := ifelse(as.integer(ceiling.ht) == 99999, NA, ceiling.ht)]
  tmpdat[,vis.dist := ifelse(as.integer(vis.dist) == 999999, NA, vis.dist)]
  tmpdat[,temp := ifelse(as.integer(temp) == 9999, NA, temp)]
  tmpdat[,dew.point := ifelse(as.integer(dew.point) == 9999, NA, dew.point)]
  tmpdat[,atm.press := ifelse(as.integer(atm.press) == 99999, NA, atm.press)]
  # conversions and scaling factors (ISD stores scaled integers)
  tmpdat[,lat := lat/1000]
  tmpdat[,lon := lon/1000]
  tmpdat[,wind.sp := wind.sp/10]
  tmpdat[,temp := temp/10]
  tmpdat[,dew.point := dew.point/10]
  tmpdat[,atm.press := atm.press/10]
  # relative humidity approximated from temperature and dew point
  tmpdat[,rh := 100*((112-0.1*temp + dew.point)/(112+0.9 * temp))^8]
  # drop some variables not needed downstream
  tmpdat[,c("ID", "srcflag", "typecode", "callid", "qcname") := NULL]
  # keep august only for class example
  # NOTE(review): growing met_all with rbind() inside the loop is O(n^2);
  # collecting per-station tables in a list and rbindlist()-ing once at the
  # end would be faster, but is left unchanged here.
  met_all <- rbind(met_all, tmpdat[month == 8])
  message("Record ", s, "/", nrow(y_list)," for year ", y, " done.")
}
}
# Persist the accumulated records as a gzip-compressed CSV (data.table::fwrite).
fwrite(met_all, file = "met_all_dt.gz", compress = "gzip")
|
cf4f8a9e4f62f6aa973eb9cef51e59c39bc51102
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mtk/R/mtkParsor.R
|
2d882126b5a974e4f71a3a91c83e53a5864953ef
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,097
|
r
|
mtkParsor.R
|
# Mexico Toolkit
#
# version : 0.01
# date : 30 nov 2009
# MAJ : 10 dec 2009
# licence : GPL
# Author(s) : Juhui Wang, MIA-Jouy en Josas, INRA, 78352
# repository: https://mulcyber.toulouse.inra.fr/projects/baomexico/
# Web : http://www.reseau-mexico.fr/
#
#' $Rev:: 240 $: revision number of the last spread
#' $Author:: jwang $: author of the last spread
#' $Date:: 2011-11-10 11:16:2#$: date of the last spread
#---------------------------------------------------------------------------
##########################################################################
## mtkParsor class, definition and methods
# Module-wide flag: set to TRUE to print verbose parsing traces.
DEBUG.mtkParsor <- FALSE
#' The mtkParsor class parses the XML file and extracts the information about the factors
#' and processes used in a sensitivity analysis session.
#' @slot xmlPath the XML file's path and name
#' @exportClass mtkParsor
setClass(Class="mtkParsor",
    representation=representation(xmlPath="character"),
    # A validity method must return TRUE when the object is valid and a
    # character description of the problem otherwise (see ?setValidity).
    # The original called stop() directly, which short-circuits validObject()'s
    # normal validity-reporting mechanism.
    validity = function(object){
      if(!file.exists(object@xmlPath))
        return(paste0("The file ", object@xmlPath, " does not exist!"))
      TRUE
    }
)
###########################################################################
## Methods definition
###########################################################################
###########################################################################
#' The constructor
#' @param xmlPath the XML file's path and name.
#' @return an object of class \code{\linkS4class{mtkParsor}}
#' @export mtkParsor
mtkParsor <- function(xmlPath) {
  # Delegate directly to new(); validity checking happens in the class.
  new("mtkParsor", xmlPath = xmlPath)
}
###########################################################################
#' Sets the xml File and tests if it's openable.
#' @param this an object of class \code{\linkS4class{mtkParsor}}
#' @param context an object of class \code{\linkS4class{mtkExpWorkflow}}
#' @return invisible()
#' @exportMethod setXMLFilePath
setMethod(f="setXMLFilePath", signature=c(this="mtkParsor",xmlPath="character"),
    definition=function(this,xmlPath) {
      # Capture the caller's variable name so we can write the modified
      # object back into the calling frame (R passes arguments by value).
      nameThis <- deparse(substitute(this))
      # Only accept the new path if the file exists.
      # NOTE(review): a non-existent path is silently ignored here -- confirm
      # whether a warning or error is intended in that case.
      if(file.exists(xmlPath))
        this@xmlPath <- xmlPath
      # Emulate pass-by-reference: overwrite the caller's binding.
      assign(nameThis, this, envir=parent.frame())
      return(invisible())
    })
###########################################################################
#' Parses the xml file and creates the factors and processes from the XML file.
#'
#' Reads every \code{mxd:factor} node (building mtkFactor objects with their
#' domain, levels, distribution parameters and features) and every
#' \code{mxd:process} node (building designer/evaluator/analyser processes),
#' then writes the populated context back into the caller's frame.
#' @param this the underlying object of class \code{\linkS4class{mtkParsor}}
#' @param context an object of class \code{\linkS4class{mtkExpWorkflow}}
#' @return invisible()
#' @exportMethod run
#' @title The run method
setMethod(f="run", signature=c(this="mtkParsor", context="mtkExpWorkflow"),
  definition=function(this, context){
    # Capture the caller's variable name so the modified context can be
    # assigned back into the calling frame at the end (pass-by-reference
    # emulation, same idiom as setXMLFilePath).
    nameContext <- deparse(substitute(context))
    # Parse the file this@xmlPath, then create and modify the slots of the
    # context according to the data extracted from the XML file.
    doc <- xmlTreeParse(this@xmlPath,ignoreBlanks=TRUE, trim=TRUE, useInternalNodes=TRUE)

    ## ---- Factors ---------------------------------------------------------
    facteurs <- getNodeSet(xmlRoot(doc), "//mxd:factor")
    factors <- list()
    for (facteur in facteurs ) {
      if(DEBUG.mtkParsor)show(facteur)
      # xmlGetAttr returns NULL when the attribute is absent.
      facteur.id<-xmlGetAttr(facteur,"id",converter=str_trim)
      facteur.name <- xmlGetAttr(facteur,"name",converter=str_trim)
      facteur.unit <- xmlGetAttr(facteur,"unit",converter=str_trim)
      if(is.null(facteur.id)) facteur.id <- 'unknown'
      if(is.null(facteur.name)) facteur.name <- 'unknown'
      if(is.null(facteur.unit)) facteur.unit <- ''
      # The "domain" child node of the factor (NULL if absent).
      facteur.domaine <- facteur[["domain"]]
      domaine.nominalValue <- xmlGetAttr(facteur.domaine,"nominalValue",converter=str_trim)
      domaine.distributionName <- xmlGetAttr(facteur.domaine,"distributionName",converter=str_trim)
      domaine.valueType <- xmlGetAttr(facteur.domaine,"valueType",converter=str_trim) # str_trim: a function from the package stringr
      if(is.null(domaine.nominalValue)) domaine.nominalValue <- 0
      if(is.null(domaine.distributionName)) domaine.distributionName <- 'unknown'
      if(is.null(domaine.valueType)) domaine.valueType <- 'float'
      # dom <- mtkDomain(distributionName=domaine.distributionName,
      # nominalValType=xsOff(domaine.valueType),
      # nominalValString=domaine.nominalValue)
      ## TODO: handle the case where domaine.valueType contains no ":" (no namespace).
      domValType <- str_sub(domaine.valueType, str_locate(domaine.valueType,":")[1]+1 )
      # BUG FIX: the "string" and "integer" branches previously coerced the
      # undefined variable `value` (it is first assigned only later, inside
      # the levels loop); they must coerce domaine.nominalValue like the
      # other branches.
      domNominalVal <- switch(domValType,
        "character" = as.character(domaine.nominalValue),
        "string" = as.character(domaine.nominalValue),
        "double" = as.double(domaine.nominalValue),
        "float" = as.double(domaine.nominalValue),
        "logical" = as.logical (domaine.nominalValue),
        "integer" = as.integer (domaine.nominalValue)
      )
      dom <- mtkDomain( distributionName=domaine.distributionName, domNominalVal)
      ##if(DEBUG.mtkParsor) cat("domaine:*", domaine.distributionName, domaine.nominalValue,domaine.valueType, "*\n")
      if(DEBUG.mtkParsor) cat("domaine:*", domaine.distributionName, domNominalVal, domValType,"*\n")

      # Optional discrete levels (value/weight pairs) of the domain.
      levels <- list()
      weights <- list()
      domaine.levels <- facteur.domaine["level"]
      if(length(domaine.levels)!=0)
        for (level in domaine.levels) {
          if(DEBUG.mtkParsor)show(level) # TODO show plante
          value <- xmlGetAttr(level,"value",converter=str_trim)
          weight <- as.numeric(xmlGetAttr(level,"weight"),converter=str_trim)
          levels <- c(levels,value)
          weights <- c(weights, weight)
        }
      if(length(levels) != 0) {
        mtkL <- mtkLevels('categorical', levels, weights)
        setLevels(dom,mtkL)
      }

      # Optional distribution parameters of the domain.
      domaine.distributionParameters <- facteur.domaine["distributionParameter"]
      parameters<-list()
      if(length(domaine.distributionParameters)!=0)
        for (parameter in domaine.distributionParameters) {
          if(DEBUG.mtkParsor)show(parameter)
          name <- xmlGetAttr(parameter,"name",converter=str_trim)
          value <- xmlGetAttr(parameter,"value",converter=str_trim)
          valueType <- xmlGetAttr(parameter,"valueType",converter=str_trim)
          if(is.null(name)) name <- 'unknown'
          if(is.null(valueType)) valueType <- 'float'
          # parameters <- c(parameters, mtkParameter(valName=name,type=xsOff(valueType),valString=value))
          ## HR: refactored syntax of mtkValue, mtkParameter and mtkDomain.
          ## TODO: handle the case where valueType contains no ":" (no namespace).
          ## browser()
          goodValueType <- str_sub(valueType, str_locate(valueType,":")[1]+1 )
          goodValue <- switch(goodValueType,
            "character" = as.character(value),
            "string" = as.character(value),
            "double" = as.double(value),
            "float" = as.double(value),
            "logical" = as.logical (value),
            "integer" = as.integer (value)
          )
          ## HM, 12/10/2012
          ## assign(name,goodValue, pos=1)
          # Build the parameter by filling the slots directly.
          zzz <- mtkParameter()
          zzz@name <- name
          zzz@val <- goodValue
          zzz@type <- typeof(goodValue)
          if(DEBUG.mtkParsor) cat("parameter:*", name, value, goodValueType, "*\n")
          ## parameters <- c(parameters, mtkParameter(name))
          parameters <- c(parameters, zzz)
        }
      if(length(parameters) != 0)setDistributionParameters(dom,parameters)

      # Optional features attached directly to the factor.
      facteur.features <- facteur["feature"]
      features <- list()
      if(length(facteur.features)!=0)
        for (feature in facteur.features) {
          if(DEBUG.mtkParsor)show(feature)
          name <- xmlGetAttr(feature,"name",converter=str_trim)
          value <- xmlGetAttr(feature,"value",converter=str_trim)
          valueType <- xmlGetAttr(feature,"valueType", converter=str_trim)
          if(is.null(name)) name <- 'unknown'
          if(is.null(valueType)) valueType <- 'float'
          ## HR: refactored syntax of mtkValue, mtkParameter and mtkDomain.
          goodValueType <- str_sub(valueType, str_locate(valueType,":")[1]+1 )
          goodValue <- switch(goodValueType,
            "character" = as.character(value),
            "string" = as.character(value),
            "double" = as.double(value),
            "float" = as.double(value),
            "logical" = as.logical (value),
            "integer" = as.integer (value)
          )
          ## HM, 12/10/2012
          ## assign(name,goodValue, pos=1)
          zzz <- mtkFeature()
          zzz@name <- name
          zzz@val <- goodValue
          zzz@type <- typeof(goodValue)
          ## features <- c(features, mtkFeature(name))
          features <- c(features, zzz)
        }
      fac<-mtkFactor(name=facteur.name, id=facteur.id, unit=facteur.unit,domain=dom)
      if(length(features) != 0) setFeatures(fac,features)
      factors <- c(factors,fac)
    }
    context@expFactors <- mtkExpFactors(expFactorsList=factors)
    if(DEBUG.mtkParsor)show(context@expFactors)

    ## ---- Processes -------------------------------------------------------
    processes <- getNodeSet(xmlRoot(doc), "//mxd:process")
    for (process in processes ) {
      if(DEBUG.mtkParsor)show(process)
      stage <- xmlGetAttr(process,"stage")
      # The "call" node is "protocol://site/service"; split it apart.
      call <- xmlValue(process[["call"]])
      protocolSeparated <- strsplit(call, "://")
      protocol <- protocolSeparated[[1]][1]
      siteSeparated <- strsplit(protocolSeparated[[1]][2],"/")
      site <- siteSeparated[[1]][1]
      service <- siteSeparated[[1]][2]
      parameters <- process[["parameters"]]["parameter"]
      # NOTE(review): p starts as an empty raw vector and is then grown with
      # S4 objects; c() coerces the result to a list, but list() would be the
      # clearer starting point -- confirm before changing.
      p <- vector(mode="raw", length=0)
      if(length(parameters)!=0)
        for (parameter in parameters) {
          if(DEBUG.mtkParsor)show(parameter)
          name <- xmlGetAttr(parameter,"name",converter=str_trim)
          value <- xmlGetAttr(parameter,"value",converter=str_trim)
          valueType <- xmlGetAttr(parameter,"valueType",converter=str_trim)
          if(is.null(name)) name <- 'unknown'
          if(is.null(valueType)) valueType <- 'float'
          #browser()
          ## HR: refactored syntax of mtkValue, mtkParameter and mtkDomain.
          goodValueType <- str_sub(valueType, str_locate(valueType,":")[1]+1 )
          goodValue <- switch(goodValueType,
            "character" = as.character(value),
            "string" = as.character(value),
            "double" = as.double(value),
            "float" = as.double(value),
            "logical" = as.logical (value),
            "integer" = as.integer (value)
          )
          ## HM, 12/10/2012
          ## assign(name,goodValue, pos=1)
          zzz <- mtkParameter()
          zzz@name <- name
          zzz@val <- goodValue
          zzz@type <- typeof(goodValue)
          ## p <- c(p,mtkParameter(name))
          p <- c(p,zzz)
        }
      # Map the stage attribute onto the concrete process class.
      if(stage == "design") cService <- "mtkDesigner"
      if(stage == "evaluate") cService <- "mtkEvaluator"
      if(stage == "analyze") cService <- "mtkAnalyser"
      obj<-new(cService,name=stage, protocol=protocol, site=site, service=service,
        parameters=p,ready=TRUE, state=FALSE, result=NULL)
      setProcess(context,obj,stage)
    }
    # Write the populated context back into the caller's frame.
    assign(nameContext,context, envir=parent.frame())
    return(invisible())
  })
#' Extracts the sub-string B from a string of pattern A:B such as xs:integer.
#' @param str a string of pattern A:B such as xs:integer
#' @return the sub-string B of str
#' @title The xsOff function
xsOff <- function(str) {
  # Split on the first ":" and keep the part after it.
  strsplit(str, ":")[[1]][2]
}
|
186838a40fc3646dca967ca7ba36d161b236f5c5
|
4863435653da8649080259a7bb151c436782be6d
|
/s_gwet.R
|
4ddb8e11edd17c790f3ea521506f9680c5a595b1
|
[] |
no_license
|
Carnuntum/agree
|
842e5e8898deb3fae7775e03edcff7cb95aa0f5e
|
2ca81550cf8512075515c62d08b96da746d2f1ce
|
refs/heads/master
| 2023-05-30T04:00:05.889149
| 2021-06-23T18:15:08
| 2021-06-23T18:15:08
| 288,434,397
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,968
|
r
|
s_gwet.R
|
# Shiny UI definition for the "Gwet's Agreement Coefficient" tab.
# Layout: a title/documentation row, an upload + options row, and a results
# row with a hover drop-menu describing the output columns.
# NOTE(review): measure_title_style, gwet_docum_text, file_upload_text,
# file_struct_text, look_down, centerText and gwet_output_description are
# defined elsewhere in the app -- presumably in a shared UI-constants file.
kappa_gwet <- tabItem(
  tabName = 'kappa_gwet',
  # --- Title row with collapsible documentation box ---
  fluidRow(column(width = 10,
                  offset = 1,
                  style = 'padding-left: 0px; padding-right: -5px;',
                  box(
                    id = 'gwetDocum',
                    width = NULL,
                    style = measure_title_style,
                    h3("Gwet's Agreement Coefficient")
                  ),
                  # Documentation box, hidden until toggled (shinyjs::hidden).
                  hidden(
                    div(id = 'gwetDocumBox',
                        fluidRow(class = 'documRow',
                                 column(width = 12,
                                        offset = 0,
                                        box(title = gwet_docum_text,
                                            width = NULL,
                                            style = 'text-align:center; padding: 0;')
                                 )
                        )
                    )
                  )
  )
  ),
  # --- Input row: instructions (left) and upload/options controls (right) ---
  fluidRow(
    column(
      width = 5,
      offset = 1,
      fluidRow(
        box(
          width = NULL,
          height = '105px',
          p(file_upload_text),
          style = centerText
        )
      ),
      fluidRow(
        box(
          width = NULL,
          height = '105px',
          p(file_struct_text),
          look_down,
          style = centerText
        )
      )
    )
    ,
    column(
      width = 5,
      box(
        width = NULL,
        fileInput(inputId = 'gwetInput',
                  label = 'Browse for .csv files'),
        h5('Choose weighting method'),
        # Weighting scheme passed through to irrCAC::gwet.ac1.raw.
        div(class = 'selectInputStyle',
            selectInput(inputId = 'gwet_weight',
                        label = '',
                        choices = c('unweighted',
                                    'linear',
                                    'quadratic',
                                    'ordinal',
                                    'radical',
                                    'ratio',
                                    'circular',
                                    'bipolar')
            ),
            style = centerText),
        actionButton(
          inputId = 'gwetRun',
          label = 'calculate'
        ),
        style = centerText
      )
    )
  ),
  # --- Results row: rendered output (left) and value box with hover help ---
  fluidRow(
    class = 'tabStyle',
    column(
      width = 5,
      offset = 1,
      style = 'padding: 0px;',
      uiOutput('ui_gwet')
    ),
    column(width = 5,
           # Hovering the value box shows a table describing the outputs.
           shinyWidgets::dropMenu(
             div(id = 'gwetDrop',
                 fluidRow(class = 'style_valuebox_OUTPUT_cyan',
                          column(
                            width = 12,
                            valueBoxOutput(outputId = 'gwet', width = NULL)
                          )
                 )
             ),
             HTML(kableExtra::kable(t(gwet_output_description)) %>%
                    kableExtra::kable_styling('basic', font_size = 15, html_font = 'calibri')),
             trigger = 'mouseenter',
             theme = 'translucent',
             placement = 'left-start')
    )
  )
)
#-------------------------------------------------------------------------------
#'*------------- CALCULATED OUTPUT FOR GWET'S AGREEMENT COEFFICIENT -----------*
#-------------------------------------------------------------------------------
# Server-side handler: computes Gwet's AC from the uploaded ratings and renders
# the result into the 'gwet' value box. On any error/warning the output is
# replaced by an invalid-data placeholder via invalid_data().
# NOTE(review): `warning_handler`, `msg`, `invalid_data` and `centerText` are
# defined elsewhere in the app; `msg` appears to be a global side channel set
# by warning_handler -- confirm. `l_gwet` is deliberately written to the
# global environment (<<-), presumably read by the 'gwetFullDown' download
# handler.
gwetOut <- function(input, output, data) {
  tryCatch({
    choice <- input$gwet_weight
    # NOTE(review): the if/else below is passed as an *unnamed* argument;
    # `weights = choice` inside the braces is a local assignment whose value
    # is forwarded positionally. It happens to land on the `weights`
    # parameter of gwet.ac1.raw, but `weights = if (...) ...` would be
    # clearer and safer -- confirm before changing.
    vals_gwet <- list('vals' = warning_handler(irrCAC::gwet.ac1.raw(data,
                                                                    if(!is.null(choice)) {weights = choice}
                                                                    else {
                                                                      weights = 'unweighted'
                                                                    }
    )$est
    ),
    'warn' = msg)
    # Stash the full results globally for the "Full Results" download.
    l_gwet <<- lapply(vals_gwet$vals, as.data.frame)
    class(vals_gwet$vals) <- 'list'
    d_gwet <- t(as.data.frame(vals_gwet$vals))
    output$gwet <- renderValueBox({
      valueBox(
        subtitle = p(HTML(
          kableExtra::kable(d_gwet, format = 'html') %>%
            kableExtra::kable_styling('basic', font_size = 15, html_font = 'calibri'),
          # NOTE(review): the comma above leaves a trailing empty argument to
          # HTML() -- apparently tolerated, but worth removing.
        ),
        # Show a warning indicator button when the computation raised one.
        div(
          if(!is.null(msg)) {
            p(HTML(paste0(
              circleButton(inputId = 'warningButton',
                           icon = icon("exclamation"),
                           size = 's'),
              br()
            )))
          },
          style = centerText
        ),
        div(
          downloadButton(outputId = 'gwetFullDown',
                         label = 'Full Results'),
          style = centerText
        )),
        value = ''
      )
    })
  }, error = function(e) {
    invalid_data(output, 'gwet')
    print(e)
  }, warning = function(w) {
    invalid_data(output, 'gwet')
    print(w)
  })
}
|
7f6f0daf83eac46971fe9da1d06469740aeca10a
|
8bd435b8937f6b490a6eca74e03164dba64ec98f
|
/csubref.r
|
220f8eea14316ae36208e16123ef4f3c61c052e3
|
[] |
no_license
|
KnaveAndVarlet/ADASS2019
|
df2ad5840cafc55a3145a112ac36cbd9e7ee641f
|
58751de6942a2cdeeef5f860aedf40388859a438
|
refs/heads/master
| 2020-07-19T17:10:53.020159
| 2019-11-22T03:50:01
| 2019-11-22T03:50:01
| 206,484,934
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,878
|
r
|
csubref.r
|
#! /usr/bin/env Rscript
# c s u b r e f . r
#
# Summary:
# 2D array access test in R, using a reference class containing an array.
#
# Introduction:
# This is a test program written as part of a study into how well different
# languages handle accessing elements of 2D rectangular arrays - the sort of
# thing that are common in astronomy and similar scientific disciplines.
# This can also be used to see how efficient different ways of coding the
# same problem can be in the different languages, and to see what effect
# such things as compilation options - particularly optimisation options -
# have.
#
# The problem chosen is a trivial one: given an 2D array, add to each
# element the sum of its two indices and return the result in a second,
# similarly-sized array. This is harder to optimise away than, for example,
# simply doing an element by element copy of the array, but is generally
# easy to code. It isn't a perfect test (something brought out by the
# study), but it does produce some interesting results.
#
# This version:
# This version is for R. It isn't a good way of coding the problem in R, but
# it is something of an object lesson in how the way a problem is coded can
# affect the efficiency. It was written in good faith, hoping to get a small
# speed improvement over the straightforward R code in csub.r. Originally,
# csub.r created an output array in the main program and passed that to the
# csub() routine to receive the result. This is how a C or Fortran
# programmer (ie me) naturally thinks of implementing this. However, as R
# does not modify its arguments, this doesn't work. If you modify a passed
# array, R actually modifies a local copy, and if you want the calling
# routine to see the results, you have to pass it back as the function
# result. R does however provide the concept of reference classes, and if
# you pass one of these it is in fact passed 'by reference' and the
# subroutine can modify it in situ. It seemed this might speed up the
# process slightly, but avoiding generation of the local array. In fact, it
# slows things down hugely. See the programming notes at the end for more.
#
#   Structure:
#      Most test programs in this study code the basic array manipulation in a
#      single subroutine, then create the original input array, and pass that,
#      together with the dimensions of the array, to that subroutine, repeating
#      that call a large number of times in order to be able to get a reasonable
#      estimate of the time taken. Then the final result is checked against the
#      expected result.
#
# This code follows that structure, with both the main routine and the
# called subroutine in the same piece of code, as R doesn't optimise out
# the call in that case. In this case, the subroutine is passed an instance
# of a reference class that contains the output array to be modified by the
# subroutine. The input array is passed quite normally.
#
# Invocation:
# ./csubref irpt nx ny
#
# where irpt is the number of times the subroutine is called - default 1.
# nx is the number of columns in the array tested - default 2000.
# ny is the number of rows in the array tested - default 10.
#
# Note that R uses column-major order; arrays are stored in memory so that
# the first index varies fastest.
#
# Author(s): Keith Shortridge, Keith@KnaveAndVarlet.com.au
#
# History:
# 2nd Jul 2019. First properly commented version. KS.
#
# Copyright (c) 2019 Knave and Varlet
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
#
# The csub() subroutine.
#
# csub is a subroutine that is passed a two-dimensional floating point array
# ina, with dimensions nx by ny. It returns a two dimensional array of the same
# dimensions with each element set to the contents of the corresponding element
# in ina plus the sum of its two indices. The intent is to try to see how well
# any given language handles access to individual elements of a 2D array.
#
# This version is passed an instance of a reference class called 'out', which
# has an element called 'data' which is an array that is set to the result of
# the operation. This avoids dynamic creation of a local array that can be
# passed back to the calling routine as the return value of the subroutine, but
# it turns out to be horribly slow.
# csub: add to each element of `ina` (nx columns? no -- nx rows by ny columns,
# column-major) the sum of its two 1-based indices, writing the result into
# out$data in place. `out` is a reference object (a ref_matrix instance, or
# anything whose $data supports element assignment), so the caller observes
# the modification -- that is the whole point of this benchmark.
#
# Uses seq_len() rather than 1:n so that a zero-sized dimension yields an
# empty loop instead of the bogus sequence c(1, 0) (an out-of-bounds error
# with the original 1:nx / 1:ny form).
csub <- function(ina, nx, ny, out) {
  for (iy in seq_len(ny)) {
    for (ix in seq_len(nx)) {
      out$data[ix, iy] <- ina[ix, iy] + ix + iy
    }
  }
}
# -----------------------------------------------------------------------------
#                          The main routine.
# Get the command line arguments, and determine the array size and the number
# of times to repeat the subroutine call.
args <- commandArgs(TRUE)
# Defaults: 2000 columns x 10 rows, one repeat (the ref-class version is so
# slow that even one call takes a noticeable amount of time).
nx = 2000
ny = 10
nrpt = 1
if (length(args) > 0) {
   nrpt = as.integer(args[1])
   if (length(args) > 1) {
      nx = as.integer(args[2])
      if (length(args) > 2) {
         ny = as.integer(args[3])
      }
   }
}
cat ("Arrays have",ny,"rows of",nx,"columns, repeats = ",nrpt,"\n")
# Create the input array. We set the elements of the input array to some set
# of values - it doesn't matter what, just some values we can use to check the
# array manipulation on. This uses the sum of the row and column indices in
# descending order.
ina <- matrix(0.0,nx,ny)
for (iy in 1:ny) {
   for (ix in 1:nx) {
      ina[ix,iy] = nx - ix + ny - iy;
   }
}
# Create the output array as an element of a member of a reference class,
# so there should only be one instance of it that will be passed to csub()
# by reference.
ref_matrix <- setRefClass(
   "ref_matrix",fields = list(data = "matrix"),
   methods = list(
      # setData: element setter kept for experimentation; not used by the
      # benchmark itself (see the programming notes below).
      setData = function(ix,iy,value) {
         data[ix,iy] <<- value
      }
   )
)
out <- ref_matrix(data = matrix(0.0,nx,ny))
# Uncommenting this tracemem() call will show just how much copying actually
# does take place - and this scheme was intended to avoid the need to copy
# the out array.
#
# tracemem(out$data)
# Call the subroutine the specified number of times.
for (irpt in 1:nrpt) {
   csub(ina,nx,ny,out)
}
# Check the results: every element must equal the input plus the sum of its
# two indices; report and stop at the first mismatch.
error = FALSE
for (iy in 1:ny) {
   for (ix in 1:nx) {
      if (out$data[ix,iy] != ina[ix,iy] + ix + iy) {
         cat("Error",ix,iy,out$data[ix,iy],"\n")
         error = TRUE
         break
      }
   }
   if (error) { break }
}
# -----------------------------------------------------------------------------
# P r o g r a m m i n g N o t e s
#
# o I tried using access methods defined as part of the class to access
# the array elements, but that made no obvious improvement on the execution
# time. That's what the setData() method is doing in the class definition -
# it isn't actually used by this code and could be removed.
#
# o Note that the checking loop runs fast, whereas the loop in csub() - which
# modifies the elements of the array as opposed to just reading them - is
# very slow. I suspect that at some point the implementation is copying the
# whole matrix in order to modify one element and then copying it back!
# When I say slow, I mean really slow. I had to set nrpt = 1 to get this to
# run in a sensible amount of time. What's more, it goes up almost linearly
# if I make the out array bigger - eg using
# out <- ref_matrix(data = matrix(0.0,nx * 10,ny))
# but still only access the nx by ny subset of the array. It looks as if the
# WHOLE array is being copied each time a value is assigned to an element of
# the array! Adding the #tracemem(out$data) call actually shows this
# happening. I think this must be a candidate for 'slowest' way of
# implementing the csub function in any language.
#
# o The problem is to do with modifying the elements of the reference class,
# not with passing it to a subroutine. If I in-line the csub() call by hand,
# ie replacing it by:
# for (iy in 1:ny) {
# for (ix in 1:nx) {
# out$data[ix,iy] <- ina[ix,iy] + ix + iy
# }
# }
# it runs just as slowly.
#
# o There is something of an explanation here:
# https://r-devel.r-project.narkive.com/8KtYICjV/
# rd-copy-on-assignment-to-large-field-of-reference-class
# Note that I split up the URL so the line isn't too long.
#
# o I have a StackOverflow question relating to this behaviour, but so far
# nobody has suggested a way to bypassing the copying that takes place.
# https://stackoverflow.com/questions/56746989/
# how-to-speed-up-writing-to-a-matrix-in-a-reference-class-in-r
|
d818f624f711b01ed847f31ee4c9e31feb834cb6
|
6ca2a0b93c96b986a36cc0280844d0f21dffa4d7
|
/plots.R
|
a85005c4cf5635820f757155c05f2d24c4ae684e
|
[] |
no_license
|
emmalouiser/Data_Visualisation
|
4405f3c0424e4ac4dba75a7fe1af76f2266ff71a
|
a5019f20fedcaf6560c137e3a593d34f03e2db68
|
refs/heads/master
| 2020-04-04T17:02:36.467611
| 2018-12-01T22:58:26
| 2018-12-01T22:58:26
| 156,104,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,367
|
r
|
plots.R
|
# Script: builds an interactive plotly rendering of a small igraph network
# laid out on a circle, with edges drawn as line shapes.
# NOTE(review): this script has several latent defects that should be
# confirmed/fixed before running non-interactively:
#   - make_graph() (igraph) is called before library(igraph) is loaded at the
#     bottom of this section; it only works if igraph was already attached.
#   - `relations` is never defined here, so graph_from_data_frame(relations)
#     will error (and also discards the graph built on the previous line).
#   - library(plotly) is loaded twice; data(karate) is loaded but unused.
library(plotly)
library(quantmod)
## A simple example with a couple of actors
## The typical case is that these tables are read in from files....
actors <- c("Alice", "Bob", "Cecil", "David", "Esmeralda")
g <- make_graph(edges=c("Bob", "Cecil", "Cecil", "David","David", "Esmeralda", "Alice", "Bob", "Alice", "Alice", "Bob", "Alice"))
# NOTE(review): `relations` is undefined -- this overwrites g and will error.
g <- graph_from_data_frame(relations, directed=TRUE)
print(g, e=TRUE, v=TRUE)
## The opposite operation
as_data_frame(g, what="vertices")
as_data_frame(g, what="edges")
library(plotly)
library(igraph)
data(karate, package="igraphdata")
#g<-read_graph("graph.gml", format = c("gml"))
# Upgrade the (possibly old-format) graph and lay it out on a circle.
G <- upgrade_graph(g)
L <- layout.circle(G)
vs <- V(G)
es <- as.data.frame(get.edgelist(G))
Nv <- length(vs)
Ne <- length(es[1]$V1)
# Node coordinates from the layout matrix.
Xn <- L[,1]
Yn <- L[,2]
# Scatter of the nodes; hover text uses the `actors` vector.
# NOTE(review): `actors` has 5 entries -- confirm it matches the vertex count.
network <- plot_ly(x = ~Xn, y = ~Yn, mode = "markers", text = actors, hoverinfo = "text", type="scatter")
# Build one line shape per edge, connecting the endpoint coordinates.
edge_shapes <- list()
for(i in 1:Ne)
{
  v0 <- es[i,]$V1
  v1 <- es[i,]$V2
  edge_shape = list(
    type = "line",
    line = list(color = "#030303", width = 0.3),
    x0 = Xn[v0],
    y0 = Yn[v0],
    x1 = Xn[v1],
    y1 = Yn[v1]
  )
  edge_shapes[[i]] <- edge_shape
}
# Hide axes and attach the edge shapes to the plot layout.
axis <- list(title = "", showgrid = FALSE, showticklabels = FALSE, zeroline = FALSE)
p <- layout(
  network,
  title = 'Karate Network',
  shapes = edge_shapes,
  xaxis = axis,
  yaxis = axis
)
p
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.