blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
127fc75726d06dd7352c9e2cb0019ec654611eef
|
a132a67357612f1c214a469addb6f06da1b6f592
|
/PreTest_Files/pretest_vis.R
|
408d9e9ab8d320be7eb516d80c6c4d791f2845f9
|
[] |
no_license
|
ehiroyasu/KuniInvasives
|
3b8df7bc8eb7beda15b99ef96dacb285f7ae47c3
|
22523890c83dbf714571a0819bd05c1f24c2647f
|
refs/heads/master
| 2021-01-18T18:42:34.657035
| 2018-05-01T16:21:56
| 2018-05-01T16:21:56
| 86,873,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,999
|
r
|
pretest_vis.R
|
#kuni pretest visualization#
# NOTE(review): this script assumes `project_support`, `treat_cat`, and
# `pretest_df` already exist in the workspace (created by an earlier
# script) -- confirm before sourcing standalone.
library(dotwhisker)
#First, Bar Plots#
#CONTROL, ECON, ECOLOGICAL
# Mean support by frame family, pooling the gain/loss variants within the
# economic and ecological frames.
support_control = na.omit(project_support[treat_cat=="control"])
support_econ = na.omit(project_support[treat_cat=="econ_gain" | treat_cat=="econ_loss"])
support_eco = na.omit(project_support[treat_cat=="eco_gain" | treat_cat=="eco_loss"])
m = c(mean(support_control), mean(support_econ), mean(support_eco))
names(m) = c("Control", "Econ Frame", "Ecological Frame")
# Standard error of each group mean.
se = c(sd(support_control)/sqrt(length(support_control)), sd(support_econ)/sqrt(length(support_econ)), sd(support_eco)/sqrt(length(support_eco)))
bp = barplot(m, ylim=c(0,1.0), xpd=FALSE, col=c("gray", "lightsteelblue3","darkolivegreen2"))
box()
title(ylab = "Support for Pig Management Program", main="Framing Effects on Pig Management, 1", font.lab = 2)
# +/- 1 SE error bars with caps (x1 defaults to x0, so bars are vertical).
arrows(x0=bp, y0=m-se, y1=m+se, code=3, angle=90)
#ADD GAIN/LOSS FRAMES
# Same plot with gain/loss variants broken out (eco_loss is not plotted here).
support_control = na.omit(project_support[treat_cat=="control"])
support_econ_gain = na.omit(project_support[treat_cat=="econ_gain"])
support_econ_loss = na.omit(project_support[treat_cat=="econ_loss"])
support_eco_gain = na.omit(project_support[treat_cat=="eco_gain"])
m = c(mean(support_control), mean(support_econ_gain), mean(support_econ_loss), mean(support_eco_gain))
names(m) = c("Control", "Econ Gain", "Econ Loss", "Ecol Gain")
se = c(sd(support_control)/sqrt(length(support_control)), sd(support_econ_gain)/sqrt(length(support_econ_gain)),
sd(support_econ_loss)/sqrt(length(support_econ_loss)), sd(support_eco_gain)/sqrt(length(support_eco_gain)))
bp = barplot(m, ylim=c(0,1.0), xpd=FALSE, col=c("gray", "lightsteelblue3", "lightsteelblue4", "darkolivegreen2"))
box()
title(ylab = "Support for Pig Management Program", main="Framing Effects on Pig Management, 2", font.lab = 2)
arrows(x0=bp, y0=m-se, x1=bp, y1=m+se, code=3, angle=90)
#Party ID
# Mean support by party ID (1 = Republican, 2 = Democrat, 3/4 = Independent /
# unaffiliated); counts in trailing comments are from the original run.
summary(republican_mean <- mean(pretest_df$project_support[partyid==1], na.rm=T)) #0.675, N=163 (53 no, 110 yes)
summary(dem_mean <- mean(pretest_df$project_support[partyid==2], na.rm=T)) #0.553, N=372 (166 no, 205 yes)
summary(ind_mean <- mean(pretest_df$project_support[partyid==3 | partyid==4], na.rm=T)) #0.536, N=266 (123 no, 142 yes;include both independents and unaffiliated)
# Two-sample proportion test, Republicans vs Democrats (hard-coded counts
# taken from the comments above).
print(partyid_test <- prop.test(x = c(110, 205), n = c(163, 372), correct = FALSE)) #p<0.05. reps more likely to support than dems
# NOTE(review): `partyid` is used above BEFORE this assignment -- presumably
# an identical copy already exists in the workspace; confirm.
partyid<-pretest_df$partyid
support_rep = na.omit(project_support[partyid==1])
support_dem = na.omit(project_support[partyid==2])
support_ind = na.omit(project_support[partyid==3])
m = c(mean(support_rep), mean(support_dem), mean(support_ind))
names(m) = c("Republican", "Democrat", "Independent")
se = c(sd(support_rep)/sqrt(length(support_rep)), sd(support_dem)/sqrt(length(support_dem)),
sd(support_ind)/sqrt(length(support_ind)))
windows()
bp = barplot(m, ylim=c(0,1.0), xpd=FALSE)
box()
title(ylab = "Support for Pig Management Program", main="Support for Pig Management, By Party ID", font.lab = 2)
arrows(x0=bp, y0=m-se, y1=m+se, code=3, angle=90)
#Environmentalist
# Split respondents by self-reported environmental responsibility:
# 4-5 = environmentalist, 1-3 = non-environmentalist.
support_enviro = na.omit(project_support[enviro_responsibility==5 | enviro_responsibility==4])
support_nonenviro = na.omit(project_support[enviro_responsibility==3 | enviro_responsibility==2 | enviro_responsibility==1])
summary(as.factor(enviro_responsibility))
m = c(mean(support_enviro), mean(support_nonenviro))
names(m) = c("Environmentalist", "Non-Environmentalist")
se = c(sd(support_enviro)/sqrt(length(support_enviro)), sd(support_nonenviro)/sqrt(length(support_nonenviro)))
windows()
bp = barplot(m, ylim=c(0,1.0), xpd=FALSE)
box()
title(ylab = "Support for Pig Management Program", main="Support for Pig Management, Environmentalism", font.lab = 2)
arrows(x0=bp, y0=m-se, y1=m+se, code=3, angle=90)
# Per-frame mean support across the full sample (values in the trailing
# comments are from the original run).
summary(enviro_ecogain <- mean(pretest_df$project_support[treat_cat=="econ_gain"], na.rm=T)) #0.5641
summary(enviro_ecogain <- na.omit(pretest_df$project_support[treat_cat=="econ_gain"]))
summary(econloss_mean <- mean(pretest_df$project_support[treat_cat=="econ_loss"], na.rm=T)) #0.5432
summary(ecogain_mean <- mean(pretest_df$project_support[treat_cat=="eco_gain"], na.rm=T)) #0.7055
summary(ecoloss_mean <- mean(pretest_df$project_support[treat_cat=="eco_loss"], na.rm=T)) #0.5723
summary(control_mean <- mean(pretest_df$project_support[treat_cat=="control"], na.rm=T)) #0.472
#heterogeneous treatment effects
enviro_dummy <- as.numeric(enviro_responsibility==5 | enviro_responsibility==4) #743 of these
nonenviro_dummy <- as.numeric(enviro_responsibility==3 | enviro_responsibility==2 | enviro_responsibility==1) #59 of these
enviro_dummy2 <- as.numeric(enviro_responsibility==5) #543 of these, 268 of 1 through 4
#among environmentalists (only enviro_responsibility==5)
# BUG FIX: the original stored group MEANS (length-1 scalars) in most of the
# variables below; sd() of a length-1 vector is NA, so every standard error
# for the two barplots that follow was NA and the error bars silently never
# drew. Store the na.omit()ed support vectors instead (the pattern already
# used for enviro_econgain) and take mean() at the point of use.
enviro_econgain <- na.omit(pretest_df$project_support[treat_cat=="econ_gain" & enviro_dummy2==1]) #mean 0.551
enviro_econloss <- na.omit(pretest_df$project_support[treat_cat=="econ_loss" & enviro_dummy2==1]) #mean 0.5
enviro_ecogain <- na.omit(pretest_df$project_support[treat_cat=="eco_gain" & enviro_dummy2==1]) #mean 0.6481
enviro_control <- na.omit(pretest_df$project_support[treat_cat=="control" & enviro_dummy2==1]) #mean 0.3894
non_enviro_econgain <- na.omit(pretest_df$project_support[treat_cat=="econ_gain" & nonenviro_dummy==1]) #mean 0.5789
non_enviro_econloss <- na.omit(pretest_df$project_support[treat_cat=="econ_loss" & nonenviro_dummy==1]) #mean 0.6346
non_enviro_ecogain <- na.omit(pretest_df$project_support[treat_cat=="eco_gain" & nonenviro_dummy==1]) #mean 0.8182
non_enviro_control <- na.omit(pretest_df$project_support[treat_cat=="control" & nonenviro_dummy==1]) #mean 0.6667
summary(as.factor(enviro_responsibility))
#non-enviro
m = c(mean(non_enviro_control), mean(non_enviro_econgain), mean(non_enviro_econloss), mean(non_enviro_ecogain))
names(m) = c("Control", "Econ Gain", "Econ Loss", "Ecol Gain")
se = c(sd(non_enviro_control)/sqrt(length(non_enviro_control)), sd(non_enviro_econgain)/sqrt(length(non_enviro_econgain)),
sd(non_enviro_econloss)/sqrt(length(non_enviro_econloss)), sd(non_enviro_ecogain)/sqrt(length(non_enviro_ecogain)))
windows()
bp = barplot(m, ylim=c(0,1), xpd=FALSE)
box()
title(ylab = "Support for Pig Management Program", main="Framing Effects Among Non-Environmentalists", font.lab = 2)
arrows(x0=bp, y0=m-se, x1=bp, y1=m+se, code=3, angle=90)
#enviros
m = c(mean(enviro_control), mean(enviro_econgain), mean(enviro_econloss), mean(enviro_ecogain))
names(m) = c("Control", "Econ Gain", "Econ Loss", "Ecol Gain")
se = c(sd(enviro_control)/sqrt(length(enviro_control)), sd(enviro_econgain)/sqrt(length(enviro_econgain)),
sd(enviro_econloss)/sqrt(length(enviro_econloss)), sd(enviro_ecogain)/sqrt(length(enviro_ecogain)))
windows()
bp = barplot(m, ylim=c(0,1), xpd=FALSE)
box()
title(ylab = "Support for Pig Management Program", main="Framing Effects, Among Strong Environmentalists", font.lab = 2)
# FIX: added code=3, angle=90 so the error bars get caps like every other
# arrows() call in this script.
arrows(x0=bp, y0=m-se, x1=bp, y1=m+se, code=3, angle=90)
#Party ID
# Repeat of the earlier party-ID section, re-plotted with a tighter y-axis.
summary(republican_mean <- mean(pretest_df$project_support[partyid==1], na.rm=T)) #0.675, N=163 (53 no, 110 yes)
summary(dem_mean <- mean(pretest_df$project_support[partyid==2], na.rm=T)) #0.553, N=372 (166 no, 205 yes)
summary(ind_mean <- mean(pretest_df$project_support[partyid==3 | partyid==4], na.rm=T)) #0.536, N=266 (123 no, 142 yes;include both independents and unaffiliated)
print(partyid_test <- prop.test(x = c(110, 205), n = c(163, 372), correct = FALSE)) #p<0.05. reps more likely to support than dems
support_rep = na.omit(project_support[partyid==1])
support_dem = na.omit(project_support[partyid==2])
support_ind = na.omit(project_support[partyid==3])
summary(as.factor(partyid)) #163 republicans, 372 dems, 231 independent
m = c(mean(support_rep), mean(support_dem), mean(support_ind))
names(m) = c("Republican", "Democrat", "Independent")
se = c(sd(support_rep)/sqrt(length(support_rep)), sd(support_dem)/sqrt(length(support_dem)),
sd(support_ind)/sqrt(length(support_ind)))
windows()
bp = barplot(m, ylim=c(.25,.75), xpd=FALSE)
box()
title(ylab = "Support for Pig Management Program", main="Support for Pig Management, By Party ID", font.lab = 2)
arrows(x0=bp, y0=m-se, y1=m+se, code=3, angle=90)
# BUG FIX: the original stored group MEANS (length-1 scalars) in dem_* /
# rep_*; sd() of a length-1 vector is NA, so the standard errors below were
# all NA and no error bars drew. Keep the na.omit()ed support vectors and
# take mean() where the scalar is needed.
#dems
dem_econgain <- na.omit(pretest_df$project_support[treat_cat=="econ_gain" & partyid==2]) #mean 0.5574
dem_econloss <- na.omit(pretest_df$project_support[treat_cat=="econ_loss" & partyid==2]) #mean 0.5797
dem_ecogain <- na.omit(pretest_df$project_support[treat_cat=="eco_gain" & partyid==2]) #mean 0.6875
dem_control <- na.omit(pretest_df$project_support[treat_cat=="control" & partyid==2]) #mean 0.3816
#reps
rep_econgain <- na.omit(pretest_df$project_support[treat_cat=="econ_gain" & partyid==1]) #mean 0.5526
rep_econloss <- na.omit(pretest_df$project_support[treat_cat=="econ_loss" & partyid==1]) #mean 0.6857
rep_ecogain <- na.omit(pretest_df$project_support[treat_cat=="eco_gain" & partyid==1]) #mean 0.8276
rep_control <- na.omit(pretest_df$project_support[treat_cat=="control" & partyid==1]) #mean 0.6552
#dems
m = c(mean(dem_control), mean(dem_econgain), mean(dem_econloss), mean(dem_ecogain))
names(m) = c("Control", "Econ Gain", "Econ Loss", "Ecol Gain")
se = c(sd(dem_control)/sqrt(length(dem_control)), sd(dem_econgain)/sqrt(length(dem_econgain)),
sd(dem_econloss)/sqrt(length(dem_econloss)), sd(dem_ecogain)/sqrt(length(dem_ecogain)))
windows()
bp = barplot(m, ylim=c(0,1.0), xpd=FALSE)
box()
title(ylab = "Support for Pig Management Program", main="Framing Effects Among Democrats", font.lab = 2)
arrows(x0=bp, y0=m-se, x1=bp, y1=m+se, code=3, angle=90)
#reps
m1 = c(mean(rep_control), mean(rep_econgain), mean(rep_econloss), mean(rep_ecogain))
names(m1) = c("Control", "Econ Gain", "Econ Loss", "Ecol Gain") # FIX: was names(m), leaving m1 unlabeled
se = c(sd(rep_control)/sqrt(length(rep_control)), sd(rep_econgain)/sqrt(length(rep_econgain)),
sd(rep_econloss)/sqrt(length(rep_econloss)), sd(rep_ecogain)/sqrt(length(rep_ecogain)))
windows()
bp1 = barplot(m1, ylim=c(0,1.0), xpd=FALSE)
box()
title(ylab = "Support for Pig Management Program", main="Framing Effects Among Republicans", font.lab = 2)
# FIX: error bars were computed from m (the Democrat means), not m1.
arrows(x0=bp1, y0=m1-se, x1=bp1, y1=m1+se, code=3, angle=90)
#gender
summary(male_mean <- mean(pretest_df$project_support[gender==1], na.rm=T)) #0.630, N=350 (165 no, 281 yes)
summary(female_mean <- mean(pretest_df$project_support[gender==0], na.rm=T)) #0.499, N=351 (176 no, 175 yes)
summary(anova_1 <- aov(project_support ~ gender, data=pretest_df)) #sig difference
##ggplot2 to make it prettier##
library(ggplot2)
library(gridExtra)
#Bar Plots#
# NOTE(review): econ_gain_support / econ_loss_support / eco_gain_support /
# eco_loss_support / control_support are assumed to exist in the workspace
# (not created in this file) -- confirm before running.
# BUG FIX (two): (1) in the original, the closing parenthesis of c(...) for
# `sd` sat after the FIRST term, so the remaining four standard errors were
# passed to data.frame() as extra unnamed columns. All five terms now live
# inside one c(). (2) levels(as.factor(treat_cat)) sorts alphabetically
# (control, eco_gain, eco_loss, econ_gain, econ_loss), which mislabeled the
# rows since `support` is ordered econ_gain-first; spell the order out.
summary.project_support <- data.frame(
treat=c("econ_gain", "econ_loss", "eco_gain", "eco_loss", "control"),
support=c(mean(na.omit(econ_gain_support)), mean(econ_loss_support), mean(eco_gain_support), mean(na.omit(eco_loss_support)),
mean(control_support)),
sd=c(sd(na.omit(econ_gain_support))/sqrt(length(na.omit(econ_gain_support))), sd(econ_loss_support)/sqrt(length(econ_loss_support)),
sd(eco_gain_support)/sqrt(length(eco_gain_support)), sd(na.omit(eco_loss_support))/sqrt(length(na.omit(eco_loss_support))),
sd(control_support)/sqrt(length(control_support))))
summary.project_support
mean(na.omit(eco_loss_support))
# NOTE(review): the next plot references `summary.vote15` (columns `treat`,
# `vote`), which is not created in this file -- presumably carried over from
# another analysis script; confirm, otherwise this call fails.
ggplot(data = summary.vote15, aes(x = factor(treat), y = vote)) +
geom_point(position = position_dodge(width = 0.2)) +
geom_errorbar(aes(ymin=vote-sd, ymax=vote+sd), width=0.1, col="darkblue") +
coord_flip() +
geom_text(aes(y=vote, ymax=vote, label=round(vote,2)), position= position_dodge(width=0.9), vjust=-2, color="black") +
scale_y_continuous("Probability of Voting", limits=c(0.15,.25),breaks=seq(0.15, .25, .05)) +
scale_x_discrete("Control vs. Two Years of Treatment")
# Treatment-effect version: each group mean minus the overall vote15 mean.
summary.vote15b <- data.frame(
treat=levels(as.factor(ooc_data$treatyear)),
vote=c(mean(vote_control) - mean(vote15), mean(vote_treatment15) - mean(vote15), mean(vote_treatboth) - mean(vote15)),
sd=c(sd(vote_control)/sqrt(length(vote_control)), sd(vote_treatment15)/sqrt(length(vote_treatment15)),
sd(vote_treatboth)/sqrt(length(vote_treatboth))))
summary.vote15b
ggplot(data = summary.vote15b, aes(x = factor(treat), y = vote)) +
geom_point(position = position_dodge(width = 0.2)) +
geom_errorbar(aes(ymin=vote-sd, ymax=vote+sd), width=0.1, col="darkblue") +
coord_flip() +
geom_text(aes(y=vote, ymax=vote, label=round(vote,2)), position= position_dodge(width=0.9), vjust=-2, color="black") +
scale_y_continuous("Increased Probability of Voting", limits=c(-0.05,.05),
breaks=seq(-0.05, .05, .05)) +
scale_x_discrete("Control vs. Two Years of Treatment")
#Just comparing control to treatment in 2015
summary.vote <- data.frame(
treat=levels(as.factor(ooc_data$treatment15)),
vote=tapply(ooc_data$vote15, ooc_data$treatment15, mean),
sd=tapply(ooc_data$vote15, ooc_data$treatment15, sd))
summary.vote
summary(vote15)
|
d1700940054190aa880e7b05377fe14fb17dd26c
|
f9483bcbd9a14a2afe2b3b95c5591addac313e68
|
/man/nsink_prep_lakemorpho.Rd
|
ee2b080714a3331303df9e1de42f9b5bd481f00c
|
[
"MIT"
] |
permissive
|
jhollist/nsink
|
4851e82a06d251c1c3e1fae74d11665abd116451
|
8a61adaf5fa2b3d3bbfd122d6c1399661e126a1f
|
refs/heads/main
| 2023-05-13T19:57:52.497188
| 2023-04-27T19:32:34
| 2023-04-27T19:32:34
| 190,607,176
| 5
| 1
|
NOASSERTION
| 2021-03-02T21:41:24
| 2019-06-06T15:40:51
|
R
|
UTF-8
|
R
| false
| true
| 542
|
rd
|
nsink_prep_lakemorpho.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nsink_prep_data.R
\name{nsink_prep_lakemorpho}
\alias{nsink_prep_lakemorpho}
\title{Prepare lake morphology data for N-Sink}
\usage{
nsink_prep_lakemorpho(data_dir)
}
\arguments{
\item{data_dir}{Base directory that contains N-Sink data folders. Data may
be downloaded with the \code{\link{nsink_get_data}} function.}
}
\value{
A tibble of the lake morphology data
}
\description{
Standardizes lake morphology from the lake morphology tables.
}
\keyword{internal}
|
30f43720ad6a99493cd4900ff56a775b23470580
|
94d74785fe89e2200ac5fc2f61c2933931e4f7c5
|
/Authentication V2.R
|
b8a66c3b1371c8cfe6295165036be95281f62930
|
[] |
no_license
|
GowriVijay/Compilation
|
7f2e4108615dc421ba811747a893ea953c20509c
|
fa95600ec6cdcbcc2b695c0d5419f3bd6139075f
|
refs/heads/master
| 2021-01-11T17:52:26.817272
| 2017-01-23T22:36:55
| 2017-01-23T22:36:55
| 79,855,073
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,877
|
r
|
Authentication V2.R
|
# install.packages("foreign")
# install.packages("sqldf")
library(foreign)
library(sqldf)
library(tcltk)
# Use the Keyboard shortcut Ctrl+shift+H for changing the working directory
# Use the Keyboard shortcut Ctrl+shift+C for Commenting / Uncommenting codes
# Loads the SPSS survey export and, for the "Current / Virtual / Physical"
# question-order arm (All_1), builds outlier-trimmed virtual-authentication
# (VA) and physical-authentication (PA) subsets, then tabulates respondent
# counts (FUN = length) by gender / vertical-gender / item condition.
setwd("C:\\Users\\vthangamuthu\\Documents\\Vijay\\Sam\\Authentication Survey\\Authentication Survey - V2")
rm(list = ls())
# load("Attitudinal_Segment_Workspace.RData")
All <- read.spss("Authentication Survey - V2 - Aug 11th 9 PM.sav",to.data.frame = T)
# US <- subset(All,All$vco == c("us"))
# UK <- subset(All,All$vco == c("uk"))
# DE <- subset(All,All$vco == c("de"))
All_1 <-subset(All,All$Q2_Order == c("Current / Virtual / Physical"))
# Keep an untouched backup of the arm before filtering.
Bk_up_1 <- All_1
# VA subset: difference score Q5A_Q5B in [0, 3), unlikely on Q2A (1-3) and
# likely on Q2B (4-5).
All_1_VA_Outlier_removed <- All_1[ which((All_1$Q5A_Q5B < 3 & All_1$Q5A_Q5B >= 0) &
(All_1$Q2A == c("Extremely unlikely 1") | All_1$Q2A == c("2") | All_1$Q2A == c("3")) &
(All_1$Q2B == c("Extremely likely 5") | All_1$Q2B == c("4"))), ] #I've added the outlier piece for All_1$Q5A_Q5B
# PA subset: same shape but on Q5A_Q5C / Q2C.
All_1_PA_Outlier_removed <- All_1[ which((All_1$Q5A_Q5C < 3 & All_1$Q5A_Q5C >= 0) &
(All_1$Q2A == c("Extremely unlikely 1") | All_1$Q2A == c("2") | All_1$Q2A == c("3")) &
(All_1$Q2C == c("Extremely likely 5") | All_1$Q2C == c("4"))), ] #I've added the outlier piece for All_1$Q5A_Q5C
#(All_1$Q5A_Q5B < 3 & All_1$Q5A_Q5B >= 0) &
#(All_1$Q5A_Q5C < 3 & All_1$Q5A_Q5C >= 0) &
#All_1$Q5A_Q5B < 3 & All_1$Q5A_Q5B >= 0 &
#All_1$Q5A_Q5C < 3 & All_1$Q5A_Q5C >= 0 &
# & (All_1$Q2A == c("Extremely likely 5") | All_1$Q2A == c("4"))
# & (All_1$Q2A == c("Extremely likely 5") | All_1$Q2A == c("4"))
#Doing the next line just because the dataframe was named as All_1 and I'm too lazy to rename them
All_1_VA <- All_1_VA_Outlier_removed
All_1_PA <- All_1_PA_Outlier_removed
# Calculating Avg % increase for VA
A1 <- aggregate(All_1_VA$Q5A_Q5B, by = list(Gender = All_1_VA$Gender), FUN = length)
B1 <- aggregate(All_1_VA$Q5A_Q5B, by = list(Vertical_Gender = All_1_VA$Vrtcl_Gndr,Item_Condition = All_1_VA$vchrt), FUN = length)
C1 <- aggregate(All_1_VA$Q5A_Q5B, by = list(Vertical_Gender = All_1_VA$Vrtcl_Gndr),FUN = length)
D1 <- aggregate(All_1_VA$Q5A_Q5B, by = list(Gender = All_1_VA$Gender,Item_Condition = All_1_VA$vchrt),FUN = length)
# Calculating Avg % increase for PA
W1 <- aggregate(All_1_PA$Q5A_Q5C, by = list(Gender = All_1_PA$Gender), FUN = length) #to calculate average replace length with mean
X1 <- aggregate(All_1_PA$Q5A_Q5C, by = list(Vertical_Gender = All_1_PA$Vrtcl_Gndr,Item_Condition = All_1_PA$vchrt), FUN = length)
Y1 <- aggregate(All_1_PA$Q5A_Q5C, by = list(Vertical_Gender = All_1_PA$Vrtcl_Gndr), FUN = length)
Z1 <- aggregate(All_1_PA$Q5A_Q5C, by = list(Gender = All_1_PA$Gender,Item_Condition = All_1_PA$vchrt), FUN = length)
# Same pipeline as above, but for the "Current / Physical / Virtual"
# question-order arm (All_2).
All_2 <-subset(All,All$Q2_Order == c("Current / Physical / Virtual"))
Bk_up_2 <- All_2
All_2_VA_Outlier_removed <- All_2[ which((All_2$Q5A_Q5B < 3 & All_2$Q5A_Q5B >= 0) & (All_2$Q2A == c("Extremely unlikely 1") | All_2$Q2A == c("2") | All_2$Q2A == c("3")) & (All_2$Q2B == c("Extremely likely 5") | All_2$Q2B == c("4"))), ] #I've added the outlier piece for All_2$Q5A_Q5B
All_2_PA_Outlier_removed <- All_2[ which((All_2$Q5A_Q5C < 3 & All_2$Q5A_Q5C >= 0) & (All_2$Q2A == c("Extremely unlikely 1") | All_2$Q2A == c("2") | All_2$Q2A == c("3")) & (All_2$Q2C == c("Extremely likely 5") | All_2$Q2C == c("4"))), ] #I've added the outlier piece for All_2$Q5A_Q5C
#All_2$Q5A_Q5B < 3 & All_2$Q5A_Q5B >= 0 &
#All_2$Q5A_Q5C < 3 & All_2$Q5A_Q5C >= 0 &
# & (All_2$Q2A == c("Extremely likely 5") | All_2$Q2A == c("4"))
# & (All_2$Q2A == c("Extremely likely 5") | All_2$Q2A == c("4"))
#Doing the next line just because the dataframe was named as All_2 and I'm too lazy to rename them
All_2_VA <- All_2_VA_Outlier_removed
All_2_PA <- All_2_PA_Outlier_removed
# Calculating Avg % increase for PA
W2 <- aggregate(All_2_PA$Q5A_Q5C, by = list(Gender = All_2_PA$Gender), FUN = length)
X2 <- aggregate(All_2_PA$Q5A_Q5C, by = list(Vertical_Gender = All_2_PA$Vrtcl_Gndr,Item_Condition = All_2_PA$vchrt), FUN = length)
Y2 <- aggregate(All_2_PA$Q5A_Q5C, by = list(Vertical_Gender = All_2_PA$Vrtcl_Gndr), FUN = length)
Z2 <- aggregate(All_2_PA$Q5A_Q5C, by = list(Gender = All_2_PA$Gender,Item_Condition = All_2_PA$vchrt), FUN = length)
# Calculating Avg % increase for VA
A2 <- aggregate(All_2_VA$Q5A_Q5B, by = list(Gender = All_2_VA$Gender), FUN = length)
B2 <- aggregate(All_2_VA$Q5A_Q5B, by = list(Vertical_Gender = All_2_VA$Vrtcl_Gndr,Item_Condition = All_2_VA$vchrt), FUN = length)
C2 <- aggregate(All_2_VA$Q5A_Q5B, by = list(Vertical_Gender = All_2_VA$Vrtcl_Gndr), FUN = length)
D2 <- aggregate(All_2_VA$Q5A_Q5B, by = list(Gender = All_2_VA$Gender,Item_Condition = All_2_VA$vchrt), FUN = length)
# Doing the next set of work for replacing the Segments with missing information
# Build scaffold frames holding EVERY Vertical_Gender x Item_Condition combo
# (bs) and every Vertical_Gender (cs); left-joining the aggregates onto these
# via sqldf fills segments with no respondents, and their NA counts become 0.
# NOTE(review): rep(..., 3) / rep(..., each = 10) implies 10 vertical-gender
# levels x 3 item conditions -- confirm against the survey data.
bs <- data.frame(Vertical_Gender = rep(sort(unique(All$Vrtcl_Gndr)),3),Item_Condition = rep(sort(unique(All$vchrt)),each = 10))
cs <- data.frame(Vertical_Gender = sort(unique(trimws(as.character(All$Vrtcl_Gndr),"right"))))
# Composite join key: trimmed vertical-gender + item condition.
bs$New <- as.factor(paste(trimws(as.character(bs$Vertical_Gender),"right"),bs$Item_Condition))
# Replacing missing info for Accessories - Handbags - Male for VA1
B1$New <- as.factor(paste(trimws(as.character(B1$Vertical_Gender),"right"),B1$Item_Condition))
C1$Vertical_Gender <- trimws(as.character(C1$Vertical_Gender),"right")
B1 <- sqldf("SELECT bs.Vertical_Gender, bs.Item_Condition, B1.x
FROM bs
LEFT JOIN B1
ON bs.New = B1.New")
B1$x[is.na(B1$x)] <- 0
C1 <- sqldf("SELECT cs.Vertical_Gender, C1.x
FROM cs
LEFT JOIN C1
ON cs.Vertical_Gender = C1.Vertical_Gender")
C1$x[is.na(C1$x)] <- 0
# Replacing missing info for Accessories - Handbags - Male for PA1
X1$New <- as.factor(paste(trimws(as.character(X1$Vertical_Gender),"right"),X1$Item_Condition))
Y1$Vertical_Gender <- trimws(as.character(Y1$Vertical_Gender),"right")
X1 <- sqldf("SELECT bs.Vertical_Gender, bs.Item_Condition, X1.x
FROM bs
LEFT JOIN X1
ON bs.New = X1.New")
X1$x[is.na(X1$x)] <- 0
Y1 <- sqldf("SELECT cs.Vertical_Gender, Y1.x
FROM cs
LEFT JOIN Y1
ON cs.Vertical_Gender = Y1.Vertical_Gender")
Y1$x[is.na(Y1$x)] <- 0
# Replacing missing info for Accessories - Handbags - Male for PA2
X2$New <- as.factor(paste(trimws(as.character(X2$Vertical_Gender),"right"),X2$Item_Condition))
Y2$Vertical_Gender <- trimws(as.character(Y2$Vertical_Gender),"right")
X2 <- sqldf("SELECT bs.Vertical_Gender, bs.Item_Condition, X2.x
FROM bs
LEFT JOIN X2
ON bs.New = X2.New")
X2$x[is.na(X2$x)] <- 0
Y2 <- sqldf("SELECT cs.Vertical_Gender, Y2.x
FROM cs
LEFT JOIN Y2
ON cs.Vertical_Gender = Y2.Vertical_Gender")
Y2$x[is.na(Y2$x)] <- 0
# Replacing missing info for Accessories - Handbags - Male for VA2
B2$New <- as.factor(paste(trimws(as.character(B2$Vertical_Gender),"right"),B2$Item_Condition))
C2$Vertical_Gender <- trimws(as.character(C2$Vertical_Gender),"right")
B2 <- sqldf("SELECT bs.Vertical_Gender, bs.Item_Condition, B2.x
FROM bs
LEFT JOIN B2
ON bs.New = B2.New")
B2$x[is.na(B2$x)] <- 0
C2 <- sqldf("SELECT cs.Vertical_Gender, C2.x
FROM cs
LEFT JOIN C2
ON cs.Vertical_Gender = C2.Vertical_Gender")
C2$x[is.na(C2$x)] <- 0
# Export: note c() on data frames concatenates their COLUMNS side by side
# (recycled to the longest), not stacked rows -- presumably intentional for
# the downstream spreadsheet; confirm.
setwd("C:\\Users\\vthangamuthu\\Documents\\Vijay\\Sam\\Authentication Survey\\Authentication Survey - V2\\Version 4\\All")
write.csv(c(A1,D1,C1,B1),"All_VA_1.csv")
write.csv(c(W1,Z1,Y1,X1),"All_PA_1.csv")
write.csv(c(A2,D2,C2,B2),"All_VA_2.csv")
write.csv(c(W2,Z2,Y2,X2),"All_PA_2.csv")
|
1b7530020a708d690252915089d2057133870964
|
c319dbc7602b7e923548fa45e06aa675580427f8
|
/man/isOnCourt.Rd
|
3c56f68615635189d462aad0f2e14a0cf94dc2cb
|
[
"MIT"
] |
permissive
|
solmos/eurolig
|
cb229cdae020d33bd1cd26e1e833038fd5606dc8
|
5a6e10ca793649a570b76db813f8d9c533cb3904
|
refs/heads/master
| 2023-05-11T04:39:19.757976
| 2020-02-09T13:25:18
| 2020-02-09T13:25:18
| 171,119,131
| 23
| 8
|
NOASSERTION
| 2023-05-04T17:16:47
| 2019-02-17T12:24:34
|
R
|
UTF-8
|
R
| false
| true
| 700
|
rd
|
isOnCourt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isOnCourt.R
\name{isOnCourt}
\alias{isOnCourt}
\title{Find if players were on the court}
\usage{
isOnCourt(pbp, players)
}
\arguments{
\item{pbp}{A play-by-play data frame}
\item{players}{A character vector with the names of the players}
}
\value{
Logical vector indicating whether the specified players were
on the court at each event of the play-by-play data frame.
}
\description{
\code{isOnCourt} finds if a player or a set of players is on the court
for each event of play-by-play data with lineup information.
}
\examples{
data("samplepbp")
isOnCourt(samplepbp, players = c("FERNANDEZ, RUDY", "AYON, GUSTAVO"))
}
|
e3a98927466cdf7b903c3f6d9529b803c0a430d6
|
92e597e4ffc9b52cfb6b512734fb10c255543d26
|
/tests/testthat/test_string.R
|
4e014d6b097eb5c0e24d38a8b0d0fee8bfb67bd5
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.utils
|
3b978dba2a86a01d3c11fee1fbcb965dd15a710d
|
0930eaeb9303cd9359892c1403226a73060eed5b
|
refs/heads/master
| 2023-05-12T15:26:14.529039
| 2023-04-21T04:28:29
| 2023-04-21T04:28:29
| 60,531,844
| 9
| 1
|
MIT
| 2023-04-21T04:28:30
| 2016-06-06T13:52:43
|
R
|
UTF-8
|
R
| false
| false
| 29
|
r
|
test_string.R
|
# testthat context label grouping the string-helper tests in this file.
context("string functions")
|
f9a65f419a1c0ea58cca0e5d69387c4f38463a19
|
070bd25468e5522c8ccfe3cd2bd64f39eef3dd76
|
/Functions/Paper_Plots/Fig8_Network.R
|
03bd9c19cea2a0631c8a1032eee0f57ce5ee5f8d
|
[] |
no_license
|
paleovar/iHadCM3LastMill
|
eaed0b3206aa33b2aea2a9995ba6c82db2e51687
|
14ee42abb6028c5ad05122edc0d65d1470f803bf
|
refs/heads/master
| 2023-03-07T04:18:43.390854
| 2021-02-19T19:20:49
| 2021-02-19T19:20:49
| 292,620,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,812
|
r
|
Fig8_Network.R
|
#################################################
## Paper Figure 8 ###############################
#################################################
## Network
#################################################
# Builds the pairwise great-circle distance matrix between cave sites for the
# correlation-network figure below.
# NOTE(review): `DATA_past1000` and `mask_spec` must already be in the
# workspace (loaded by the project's main script) -- confirm before sourcing.
library(plyr)
library(dplyr)
library(tidyverse)
library(zoo)
library(nest)
library(PaleoSpec)
lats = DATA_past1000$CAVES$entity_info$latitude[mask_spec]
longs = DATA_past1000$CAVES$entity_info$longitude[mask_spec]
# Great-circle distances (km) between all site pairs.
dist<-fossil::earth.dist(cbind(lats,longs),dist=TRUE)
dist_matrix <- as.matrix(dist)
#################################################
source("Functions/networkmap_simple3.R")
# One 2x2 figure per variant ("full" / "down"): (a) simulation network map,
# (b) simulation correlation vs distance, (c) record network map,
# (d) record correlation vs distance.
# NOTE(review): `NETWORK`, `run`, `dist_matrix_e`, `c_ensemble`, and
# `PLOTTING_VARIABLES` are not defined in this file -- presumably created by
# an earlier script; confirm before sourcing.
for(plot in c("full", "down")){
# Keep only the upper triangle so each site pair is counted once.
plot_dist = dist_matrix
plot_dist[lower.tri(dist_matrix)] = NA
plot_dist_e = dist_matrix_e
plot_dist_e[lower.tri(dist_matrix_e)] = NA
link_density = 0.05
C_SIM = NETWORK[[paste0("sim_", plot, "_", run)]]
C_REC = NETWORK$record
# Keep only the strongest 5% of links by |correlation| (NAs sort first),
# then additionally drop links with p > 0.1.
o_sim = order(abs(C_SIM), na.last = F)
o_rec = order(abs(C_REC), na.last = F)
C_SIM[o_sim[1:floor((length(o_sim)-link_density*length(o_sim)))]] = NA
C_REC[o_rec[1:floor((length(o_rec)-link_density*length(o_rec)))]] = NA
C_SIM[NETWORK[[paste0("sim_", plot, "_", run, "_p")]]>0.1] = NA
C_REC[NETWORK$record_p>0.1] = NA
# Unfiltered upper-triangle correlations for the scatter panels.
plot_c_sim = NETWORK[[paste0("sim_", plot, "_", run)]]
plot_c_sim[lower.tri(NETWORK[[paste0("sim_", plot, "_", run)]], diag = FALSE)] = NA
plot_c_rec = NETWORK$record
plot_c_rec[lower.tri(NETWORK$record)] = NA
plot_c_rec_max_e <- c_ensemble
plot_c_rec_max_e[lower.tri(c_ensemble)] = NA
# Bin correlations into 1000 km distance bands for the boxplots.
boxes_sim <- list()
boxes_rec <- list()
for(ii in 1:20){
boxes_sim[[paste0(ii*1000)]] <- na.omit(as.numeric(plot_c_sim[plot_dist<ii*1000 & plot_dist>(ii-1)*1000]))
boxes_rec[[paste0(ii*1000)]] <- na.omit(as.numeric(plot_c_rec[plot_dist<ii*1000 & plot_dist>(ii-1)*1000]))
}
# Layout/scaling knobs for the 2x2 figure.
all_scale = 1.4
scaling = 1.5*all_scale
spacing = 0.7*all_scale
namcex = 1*all_scale
colorbar_length = 0.7
# maybe first pick the strongest links and THEN throw out the insignificant ones, so that the same number of links is present!
rbPal <- colorRampPalette(c("#2166ac", "grey", "#b2182b"))
COLZ <- array(rbPal(9))
pdf(file = paste0("Paper_Plots/Fig8_Network_",plot, "_", run,".pdf"), height= PLOTTING_VARIABLES$HEIGHT, width = PLOTTING_VARIABLES$WIDTH)
#png(file = paste0("Plots/Paper_Plot_6_Network_a_ds_xnap",run,".png"), height= 100*PLOTTING_VARIABLES$HEIGHT, width = 100*PLOTTING_VARIABLES$WIDTH)
par(mfrow=c(2,2), mai = c(rep(spacing, 4)), mar = c(1.5,2.5,0,0.5), oma = c(2,0.5,0.5,0.5))
#SIM MAP
networkmap_simple3(CMAT = C_SIM,
lat = lats,
lon = longs,
title = "",
thresh = 0.1)
fields::colorbar.plot(x = 190,y = 10, col = rev(COLZ),
strip = c(1,0.75, 0.5, 0.25,0,-0.25, -0.5, -0.75,-1), horizontal = F, strip.length = colorbar_length, strip.width = 0.04)
axis(4,at=seq(80,-60,by=-2*140/8),labels=FALSE)
mtext("HadCM3@SISALv2", side = 1, cex = namcex, line = -2, font = 2)
mtext("(a)", side = 3, adj = 0, cex = namcex, line = -1.5, at = -185)
#SIM Cor-Dist
# Scatter of correlation vs distance (every 2nd point to reduce overplot),
# points colored by correlation band, plus per-band boxplots and a loess fit.
plot(plot_dist[seq(1,length(plot_dist), by = 2)], plot_c_sim[seq(1,length(plot_c_sim), by = 2)],
ylim = c(-1,1),xlim = c(0,20000),
ylab = "", xlab = "",
cex = 1, lwd = 0.5, pch = 23,
panel.first = grid(), col = adjustcolor("grey", alpha.f = 0.3), xaxt = "n")
limits = c(1, 0.875, 0.625, 0.375, 0.125, -0.125, -0.375, -0.625, -0.875)
for(ii in 1:(length(limits)-1)){
points(dist_matrix[C_SIM>limits[ii+1] & C_SIM<limits[ii]], C_SIM[C_SIM>limits[ii+1] & C_SIM<limits[ii]], pch = 23,
col = adjustcolor(rev(COLZ)[ii], alpha.f = 0.6))
}
points(dist_matrix[C_SIM<limits[9]], C_SIM[C_SIM<limits[9]], pch = 23, col = adjustcolor(rev(COLZ)[9], alpha.f = 0.7))
abline(h=0)
for(ii in 1:20){
boxplot(boxes_sim[[paste0(ii*1000)]], add = TRUE, at = c(ii*1000-500),boxwex = 1000, names = "n", axes = F, outline = F)
}
lo <- loess(plot_c_sim[order(plot_dist)] ~ plot_dist[order(plot_dist)], span = 0.2)
lines(lo$x, lo$fitted, lwd = 4, col = "#B2182B")
#lines(lowess(lowess_dist_sorted,lowess_c_sim_sorted, f=0.1), lwd = 4, col = "#B2182B")
mtext("(b)", side = 3, adj = 0, cex = namcex, line = -1.5, at = 1000)
mtext("HadCM3@SISALv2", side = 1, cex = namcex, line = -2, font = 2)
#SISAL MAP
networkmap_simple3(CMAT = C_REC,
lat = lats,
lon = longs,
title = "",
thresh = 0.1)
fields::colorbar.plot(x = 190,y = 10, col = rev(COLZ),
strip = c(1,0.75, 0.5, 0.25,0,-0.25, -0.5, -0.75,-1), horizontal = F, strip.length = colorbar_length, strip.width = 0.04)
axis(4,at=seq(80,-60,by=-2*140/8),labels=FALSE)
mtext("SISALv2", side = 1, cex = namcex, line = -2, font = 2)
mtext("(c)", side = 3, adj = 0, cex = namcex, line = -1.5, at = -185)
#SISAL Cor-Dist
plot(plot_dist[seq(1,length(plot_dist), by = 2)], plot_c_rec[seq(1,length(plot_c_rec), by = 2)],
ylim = c(-1,1),
xlim = c(0,20000),
ylab = "",
xlab = "",
cex.axis = all_scale,
lwd = 1, pch = 24,
panel.first = grid(), col = adjustcolor("grey", alpha.f = 0.3))
limits = c(1, 0.875, 0.625, 0.375, 0.125, -0.125, -0.375, -0.625, -0.875)
for(ii in 1:(length(limits)-1)){
points(dist_matrix[C_REC>limits[ii+1] & C_REC<=limits[ii]], C_REC[C_REC>limits[ii+1] & C_REC<=limits[ii]], pch = 24,
col = adjustcolor(rev(COLZ)[ii], alpha.f = 0.6))
}
points(dist_matrix[C_REC<limits[9]], C_REC[C_REC<limits[9]], pch = 24, col = adjustcolor(rev(COLZ)[9], alpha.f = 0.7))
abline(h=0)
for(ii in 1:20){
boxplot(boxes_rec[[paste0(ii*1000)]], add = TRUE, at = c(ii*1000-500),boxwex = 1000, names = "n", axes = F, outline = F)
}
# Loess fits: original chronology (red) and SISAL ensemble (blue).
lo <- loess(plot_c_rec[order(plot_dist)] ~ plot_dist[order(plot_dist)], span = 0.2)
lines(lo$x, lo$fitted, lwd = 4, col = "#B2182B")
lo <- loess(plot_c_rec_max_e[order(plot_dist_e)] ~ plot_dist_e[order(plot_dist_e)], span = 0.2)
lines(lo$x, lo$fitted, lwd = 4, col = "#0068C4")
mtext("SISALv2", side = 1, cex = namcex, line = -2, font = 2)
mtext("Distance between pairs (km)", side= 1, line = 2)
mtext("(d)", side = 3, adj = 0, cex = namcex, line = -1.5, at = 0)
text(20000, 0.9, "original chron.", col = "#B2182B", adj = 1, cex = all_scale)
text(20000, 0.77, "sisal ensemble", col = "#0068C4", adj = 1, cex = all_scale)
dev.off()
}
# Clean up loop-local objects from the workspace.
rm(boxes_rec, boxes_sim, COLZ, lo, plot_c_rec, plot_dist, ii, lats, link_density,longs, namcex, o_rec, o_sim, plot, scaling, spacing,plot_c_sim,
C_REC, C_SIM, plot_c_rec_max_e, plot_dist_e, colorbar_length, limits, all_scale, dist, run)
|
a192371a9b009365518aab8702fca267f20ec777
|
a585391bba19fa78f05969ef2bd7695448b3d689
|
/scripts/create_tables.R
|
3e9bdb80ff92dd53967748416e17edd29c4ceebd
|
[] |
no_license
|
spcanelon/geospatial-analysis-pregnancy-outcomes
|
826a289757a22f3d6721547b843064b99f2e55b0
|
4ee6a54b381cfee435ae60ea00fe827ed9d5009a
|
refs/heads/master
| 2023-04-22T20:44:46.486308
| 2021-05-07T10:22:30
| 2021-05-07T10:22:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,867
|
r
|
create_tables.R
|
library(xtable)
# Builds LaTeX tables 2 and 3 of the paper from fitted-model .RData files.
# Each load() below is expected to populate the global environment with
# per-replication metric vectors (auc, DIC, rmse, *_shifted, misclass, sens,
# spec, WAIC1, WAIC2, ...) that are averaged into one summary row per model.
wdstr <- "results/"
##########################################################
############ Model comparison table (table 2) ############
##########################################################
# File-name fragments selecting the covariate configuration
# (models fit without the non-Hispanic-White indicator).
add_str <- "noNH_WHITE_"; add_str2 <- "";
####### ALLDATA & noSMOTE
### PRETERM
# Spatial CAR random effects
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_",add_str,"newcov_PRETERM_CAR",add_str2,".RData"))
noSMOTE_CAR <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec), mean(WAIC1), mean(WAIC1_shifted), mean(WAIC2), mean(WAIC2_shifted))
# Independent (non-spatial) random effects
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_",add_str,"newcov_PRETERM_independent",add_str2,".RData"))
noSMOTE_ind <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec), mean(WAIC1), mean(WAIC1_shifted), mean(WAIC2), mean(WAIC2_shifted))
# No neighborhood random effects at all
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_",add_str,"noRE_newcov_PRETERM_independent",add_str2,".RData"))
noSMOTE_noRE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec), mean(WAIC1), mean(WAIC1_shifted), mean(WAIC2), mean(WAIC2_shifted))
res_pret_alld_noSMOTE <- rbind(noSMOTE_CAR,noSMOTE_ind, noSMOTE_noRE)
colnames(res_pret_alld_noSMOTE) <- c("AUC", "DIC","rmse","DIC*","rmse*","misclass", "sens", "spec","WAIC1","WAIC1*","WAIC2","WAIC2*")
# Table 2 reports only the information criteria
res_pret_alld_noSMOTE <- res_pret_alld_noSMOTE[,c("DIC", "WAIC1", "WAIC2")]
### STILLBIRTH (same three model variants as above)
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_",add_str,"newcov_STILLBIRTH_CAR",add_str2,".RData"))
noSMOTE_CAR <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec), mean(WAIC1), mean(WAIC1_shifted), mean(WAIC2), mean(WAIC2_shifted))
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_",add_str,"newcov_STILLBIRTH_independent",add_str2,".RData"))
noSMOTE_ind <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec), mean(WAIC1), mean(WAIC1_shifted), mean(WAIC2), mean(WAIC2_shifted))
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_",add_str,"noRE_newcov_STILLBIRTH_independent",add_str2,".RData"))
noSMOTE_noRE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec), mean(WAIC1), mean(WAIC1_shifted), mean(WAIC2), mean(WAIC2_shifted))
res_still_alld_noSMOTE <- rbind(noSMOTE_CAR,noSMOTE_ind, noSMOTE_noRE)
colnames(res_still_alld_noSMOTE) <- c("AUC", "DIC","rmse","DIC*","rmse*","misclass", "sens", "spec","WAIC1","WAIC1*","WAIC2","WAIC2*")
res_still_alld_noSMOTE <- res_still_alld_noSMOTE[,c("DIC", "WAIC1", "WAIC2")]
# Stillbirth rows on top, preterm below; transpose so criteria are rows
res <- t(rbind(res_still_alld_noSMOTE,res_pret_alld_noSMOTE))
xtable(res, digits = 1)
#############################################################################
############ Predictive performance, SMOTE vs ORIGINAL (table 3) ############
#############################################################################
method <- "independent"
####### LOO - PRETERM (leave-one-out cross-validated metrics)
tmp <-load(paste0(wdstr, "output_LOO_nogamma_rho_nointeractions_noNH_WHITE_newcov_PRETERM_",method,".RData"))
noSMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
tmp <- load(paste0(wdstr,"output_LOO_nogamma_rho_SMOTE_nointeractions_noNH_WHITE_newcov_PRETERM_",method,".RData"))
SMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
res_pret_loo <- rbind(SMOTE,noSMOTE)
colnames(res_pret_loo) <- c("AUC", "DIC","rmse","DIC*","rmse*","misclass", "sens", "spec")
# Drop the information-criterion columns; table 3 is about prediction
res_pret_loo <- res_pret_loo[,-(2:5)]
####### ALLDATA - PRETERM (in-sample metrics)
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_noNH_WHITE_newcov_PRETERM_",method,".RData"))
noSMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_SMOTE_nointeractions_noNH_WHITE_newcov_PRETERM_",method,".RData"))
SMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
res_pret_all <- rbind(SMOTE,noSMOTE)
colnames(res_pret_all) <- c("AUC", "DIC","rmse","DIC*","rmse*","misclass", "sens", "spec")
res_pret_all <- res_pret_all[,-(2:5)]
res_pret <- rbind(res_pret_loo, res_pret_all)
####### LOO - STILLBIRTH (stillbirth models use the CAR variant)
tmp <-load(paste0(wdstr,"output_LOO_nogamma_rho_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData"))
noSMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
tmp <- load(paste0(wdstr,"output_LOO_nogamma_rho_SMOTE_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData"))
SMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
res_still_loo <- rbind(SMOTE,noSMOTE)
colnames(res_still_loo) <- c("AUC", "DIC","rmse","DIC*","rmse*","misclass", "sens", "spec")
res_still_loo <- res_still_loo[,-(2:5)]
####### ALLDATA - STILLBIRTH
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData"))
noSMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_SMOTE_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData"))
SMOTE <- c(mean(auc), mean(DIC), mean(rmse), mean(DIC_shifted), mean(rmse_shifted), mean(misclass), mean(sens), mean(spec))
res_still_all <- rbind(SMOTE,noSMOTE)
colnames(res_still_all) <- c("AUC", "DIC","rmse","DIC*","rmse*","misclass", "sens", "spec")
res_still_all <- res_still_all[,-(2:5)]
res_still <- rbind(res_still_loo, res_still_all)
# round(t(res_pret),3)
# round(t(res_still),3)
res <- t(rbind(res_still, res_pret))
xtable(res, digits = 3)
#############################################################################
######################## Odds ratio table (table 4) #########################
#############################################################################
# TRUE: models were fit without the non-Hispanic-White indicator, so "White"
# is dropped from the individual-level covariate lists defined below.
noNH_WHITE_bool <- TRUE
bayesp <- function(x){max(mean(x>0),mean(x<0))}
# Table 4: posterior odds ratios (with 95% CI, Bayesian p and VIF) for the
# PRETERM model, original vs SMOTE-balanced data. Neighborhood covariates
# were standardized before fitting, so their coefficients are rescaled back
# by the stored scale_sds before exponentiation.
var_neighborhood <- c("prop_Asian", "prop_Hispanic", "prop_Black", # "Prop_White",
                      "prop_women_15_to_50", "prop_women_below_poverty",
                      "prop_women_public_assistance", "prop_women_labor_force",
                      "prop_birth_last_12_months", "prop_women_HS_grad",
                      "prop_women_college_grad", "log_occupied_housing", "log_housing_violation",
                      "log_violent_crime", "log_nonviolent_crime")
if(noNH_WHITE_bool){
  var_individual <- c("Hispanic","Black", "Asian", "multiple_birth", "age")
} else {
  var_individual <- c("Hispanic","White","Black", "Asian", "multiple_birth", "age")
}
p1 <- length(var_individual)
p2 <- length(var_neighborhood)
# Rows of beta to rescale back to the original covariate units.
# NOTE(review): p1:(p1+p2) spans p2+1 rows and includes the last individual
# covariate ("age") -- confirm this matches how X was standardized when fit.
col_to_be_scaled <- p1:(p1+p2)
# MCMC post-processing: drop burn-in, then thin the chains
burnin <- 500
thin <- 10
mcmc_niter <- 500*thin+burnin
index_thinning <- seq(burnin, mcmc_niter, by = thin)
# Row order used for the printed table
order_covariates <- c("age","Black","Hispanic","Asian","multiple_birth",
                      "prop_Asian","prop_Hispanic","prop_Black","prop_women_15_to_50",
                      "prop_women_below_poverty","prop_women_public_assistance",
                      "prop_women_labor_force","prop_birth_last_12_months",
                      "prop_women_HS_grad","prop_women_college_grad",
                      "log_occupied_housing","log_housing_violation",
                      "log_violent_crime","log_nonviolent_crime")
index <- match(order_covariates,c(var_individual, var_neighborhood))
### For PRETERM. Uncomment the lines with the stillbirth code to make the plot for STILLBIRTH.
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_noNH_WHITE_newcov_PRETERM_independent.RData"))
# tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData"))
# Pool the two MCMC chains (rows = coefficients, columns = retained draws)
year <- 1; output1 <- output_list1[[year]]; output2 <- output_list2[[year]]
beta_tr <- cbind(output1$beta[,index_thinning, drop = FALSE],
                 output2$beta[,index_thinning, drop = FALSE])
scale_sds <- scale_sds_list[[1]]
# Undo covariate standardization so odds ratios are per original unit
for(i in col_to_be_scaled){
  beta_tr[i,] <- beta_tr[i,] / scale_sds[i]
}
XtX <- XtX_list[[year]]
XtXinv <- solve(XtX)
VIF <- diag(XtX) * diag(XtXinv)
# NOTE(review): beta_tr[,index] subsets MCMC *columns* by 'index', which holds
# covariate (row) positions -- this keeps only ~19 of the retained draws.
# Presumably beta_tr (all columns) or beta_tr[index,] was intended, since the
# final table is reordered with res_noVIF[index,] below -- TODO confirm.
beta_OR <- rbind(exp(rowMeans(beta_tr[,index])),
                 exp(apply(beta_tr[,index], MARGIN = 1, FUN = quantile, probs = c(0.025, 0.975))),
                 apply(beta_tr[,index], MARGIN = 1, FUN = bayesp),
                 VIF
)
colnames(beta_OR) <- c(var_individual, var_neighborhood)
rownames(beta_OR) <- c("Odds Ratio", "CI_LB", "CI_UB", "bayes_p", "VIF")
coeff_OR <- t(beta_OR)
noSMOTE_OR <- coeff_OR[,c("Odds Ratio","bayes_p", "VIF")]
# Same computation for the SMOTE-balanced fit
tmp <- load(paste0(wdstr,"output_alldata_nogamma_rho_SMOTE_nointeractions_noNH_WHITE_newcov_PRETERM_independent.RData"))
# tmp <- load(paste0(wdstr,"output_alldata_nogamma_rho_SMOTE_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData"))
year <- 1; output1 <- output_list1[[year]]; output2 <- output_list2[[year]]
beta_tr <- cbind(output1$beta[,index_thinning, drop = FALSE],
                 output2$beta[,index_thinning, drop = FALSE])
scale_sds <- scale_sds_list[[1]]
for(i in col_to_be_scaled){
  beta_tr[i,] <- beta_tr[i,] / scale_sds[i]
}
XtX <- XtX_list[[year]]
XtXinv <- solve(XtX)
VIF <- diag(XtX) * diag(XtXinv)
beta_OR <- rbind(exp(rowMeans(beta_tr[,index])),
                 exp(apply(beta_tr[,index], MARGIN = 1, FUN = quantile, probs = c(0.025, 0.975))),
                 apply(beta_tr[,index], MARGIN = 1, FUN = bayesp),
                 VIF
)
colnames(beta_OR) <- c(var_individual, var_neighborhood)
rownames(beta_OR) <- c("Odds Ratio", "CI_LB", "CI_UB", "bayes_p", "VIF")
coeff_OR <- t(beta_OR)
SMOTE_OR <- coeff_OR[,c("Odds Ratio","bayes_p", "VIF")]
# Align SMOTE rows to the original-fit row order before binding side by side
rnames_noSM <- rownames(noSMOTE_OR)
rnames_SM <- rownames(SMOTE_OR)
res <- cbind(noSMOTE_OR, SMOTE_OR[match(rnames_noSM, rnames_SM),])
# Drop the two VIF columns, then print rows in the paper's covariate order
res_noVIF <- res[,-c(3,6)]
xtable(res_noVIF[index,])
#############################################################################
##################### Neighborhood comparison (in appendix) #################
#############################################################################
# Log-odds transform: maps a probability x in (0, 1) onto the real line.
logit <- function(x) {
  odds <- x / (1 - x)
  log(odds)
}
# Appendix table: contrast the most- and least-extreme census tracts on four
# neighborhood covariates, reporting each tract's fitted neighborhood-level
# probability (phat), its odds, and the relative odds between the two tracts,
# for both the stillbirth and preterm models.
var_neighborhood <- c("prop_Asian", "prop_Hispanic", "prop_Black", # "Prop_White",
                      "prop_women_15_to_50", "prop_women_below_poverty",
                      "prop_women_public_assistance", "prop_women_labor_force",
                      "prop_birth_last_12_months", "prop_women_HS_grad",
                      "prop_women_college_grad", "log_occupied_housing", "log_housing_violation",
                      "log_violent_crime", "log_nonviolent_crime")
# Original (raw-data) column names, position-matched to var_neighborhood above
var_neighborhood_old <- c("Prop_Asian", "Prop_Hispanic_Latino", "Prop_Black", #"Prop_White",
                          "Prop_women15to50", "Prop_Below100percPoverty_women15to50",
                          "Prop_ReceivedPublicAssistanceIncome_women15to50", "Prop_InLaborForce_women16to50",
                          "Prop_BirthsInPast12Months_women15to50", "Prop_HighschoolGrad_women15to50",
                          "Prop_BachelorsDegree_women15to50", "log.occupied.housing", "log.violation",
                          "log.violent", "log.nonviolent")
if(noNH_WHITE_bool){
  var_individual <- c("Hispanic","Black", "Asian", "multiple_birth", "age")
} else {
  var_individual <- c("Hispanic","White","Black", "Asian", "multiple_birth", "age")
}
p1 <- length(var_individual)
p2 <- length(var_neighborhood)
col_to_be_scaled <- p1:(p1+p2)
# MCMC post-processing settings (burn-in / thinning), as in table 4 above
burnin <- 500
thin <- 10
mcmc_niter <- 500*thin+burnin
index_thinning <- seq(burnin, mcmc_niter, by = thin)
order_covariates <- c("age","Black","Hispanic","Asian","multiple_birth",
                      "prop_Asian","prop_Hispanic","prop_Black","prop_women_15_to_50",
                      "prop_women_below_poverty","prop_women_public_assistance",
                      "prop_women_labor_force","prop_birth_last_12_months",
                      "prop_women_HS_grad","prop_women_college_grad",
                      "log_occupied_housing","log_housing_violation",
                      "log_violent_crime","log_nonviolent_crime")
index <- match(order_covariates,c(var_individual, var_neighborhood))
# NOTE(review): philly_all_covariates is assumed to already be in the
# workspace (it is not created or load()ed before this point in the script)
# -- TODO confirm which upstream file provides it.
# asinh(x/2) is used as a log-like transform that tolerates zero counts
philly_all_covariates$log.occupied.housing <- asinh(0.5*philly_all_covariates$Total_Occupied_Housing_Units)
philly_all_covariates$log.violation <- asinh(0.5*philly_all_covariates$violation)
philly_all_covariates$log.violent <- asinh(0.5*philly_all_covariates$violent)
philly_all_covariates$log.nonviolent <- asinh(0.5*philly_all_covariates$nonviolent)
# select only the covariates we use
philly_all_covariates <- (philly_all_covariates[,c(1,2,match(var_neighborhood_old, colnames(philly_all_covariates)))])
### we need this only for tractID_unique (tracts used in the analysis)
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData")) # STILL
tractID_unique <- tractID_unique_list[[1]] # used neighborhoods
# select only the neighborhoods we use
philly_all_covariates <- philly_all_covariates[which(philly_all_covariates$tractID %in% tractID_unique),]
### find neighborhoods
# IS*/IBS* hold row indices of the min (1) / max (2) tract for each covariate:
# IS* index into philly_all_covariates, IBS* into tractID_unique (model order)
IS1 <- c(); IS2 <- c(); IBS1 <- c(); IBS2 <- c()
####### Prop_black #########
# indeces for philly_all_covariates
i1 <- which.min(philly_all_covariates$Prop_Black); IS1 <- c(IS1,i1)
i2 <- which.max(philly_all_covariates$Prop_Black); IS2 <- c(IS2,i2)
# indeces for tractID_unique
IBS1 <- c(IBS1,match(philly_all_covariates$tractID[i1],tractID_unique))
IBS2 <- c(IBS2,match(philly_all_covariates$tractID[i2],tractID_unique))
####### Prop_InLaborForce_women16to50 #######
# indeces for philly_all_covariates
i1 <- which.min(philly_all_covariates$Prop_InLaborForce_women16to50); IS1 <- c(IS1,i1)
i2 <- which.max(philly_all_covariates$Prop_InLaborForce_women16to50); IS2 <- c(IS2,i2)
# indeces for tractID_unique
IBS1 <- c(IBS1,match(philly_all_covariates$tractID[i1],tractID_unique))
IBS2 <- c(IBS2,match(philly_all_covariates$tractID[i2],tractID_unique))
########## Prop_BachelorsDegree_women15to50 #########
# indeces for philly_all_covariates
i1 <- which.min(philly_all_covariates$Prop_BachelorsDegree_women15to50); IS1 <- c(IS1,i1)
i2 <- which.max(philly_all_covariates$Prop_BachelorsDegree_women15to50); IS2 <- c(IS2,i2)
# indeces for tractID_unique
IBS1 <- c(IBS1,match(philly_all_covariates$tractID[i1],tractID_unique))
IBS2 <- c(IBS2,match(philly_all_covariates$tractID[i2],tractID_unique))
########### log.violent ###########
# indeces for philly_all_covariates
i1 <- which.min(philly_all_covariates$log.violent); IS1 <- c(IS1,i1)
i2 <- which.max(philly_all_covariates$log.violent); IS2 <- c(IS2,i2)
# indeces for tractID_unique
IBS1 <- c(IBS1,match(philly_all_covariates$tractID[i1],tractID_unique))
IBS2 <- c(IBS2,match(philly_all_covariates$tractID[i2],tractID_unique))
# noSMOTE
# Stillbirth model: fill phat/OR/RelOR for each min/max tract pair.
# exp(logit(p)) is the odds p/(1-p); RelOR is the odds ratio between the
# min- and max-covariate tract in each pair (stored on the pair's first row).
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_noNH_WHITE_newcov_STILLBIRTH_CAR.RData")) # STILL
phat_neig <- phat_neig_list[[1]]
data <- data.frame()
# First pair is added outside the loop so data gains its phat/OR/RelOR columns
i <- 1
i1 <- IS1[i]; i2 <- IS2[i]
iB1 <- IBS1[i]; iB2 <- IBS2[i]
data <- rbind(data, philly_all_covariates[c(i1,i2),])
data$phat[1:2] <- phat_neig[c(iB1,iB2)]
data$OR[1:2] <- exp(logit(phat_neig[c(iB1,iB2)]) )
data$RelOR[1:2] <- c(NA,exp(sum( logit(phat_neig[c(iB1,iB2)]) * c(1,-1) )))
for(i in 2:4){
  i1 <- IS1[i]; i2 <- IS2[i]
  iB1 <- IBS1[i]; iB2 <- IBS2[i]
  data <- rbind(data, cbind(philly_all_covariates[c(i1,i2),],
                            phat = phat_neig[c(iB1,iB2)],
                            OR = exp(logit(phat_neig[c(iB1,iB2)]) ),
                            RelOR = c(NA, exp(sum( logit(phat_neig[c(iB1,iB2)]) * c(1,-1) )))))
}
# Preterm model: same quantities stored in the *2 columns
data$phat2 <- NA
data$OR2 <- NA
data$RelOR2 <- NA
tmp <-load(paste0(wdstr,"output_alldata_nogamma_rho_nointeractions_noNH_WHITE_newcov_PRETERM_independent.RData")) # PRE
phat_neig <- phat_neig_list[[1]]
for(i in 1:4){
  i1 <- IS1[i]; i2 <- IS2[i]
  iB1 <- IBS1[i]; iB2 <- IBS2[i]
  data$phat2[2*(i-1)+1:2] = phat_neig[c(iB1,iB2)]
  data$OR2[2*(i-1)+1:2] = exp(logit(phat_neig[c(iB1,iB2)]) )
  data$RelOR2[2*(i-1)+1:2] = c(NA, exp(sum( logit(phat_neig[c(iB1,iB2)]) * c(1,-1) )))
}
# Pretty-print: rename to paper covariate names, round, transpose for LaTeX
data_print <- data
data_print$tractID <- as.character(data_print$tractID)
colnames(data_print)[match(var_neighborhood_old,colnames(data_print))] <- var_neighborhood
data_print[, var_neighborhood] <- round(data_print[, var_neighborhood],2)
data_print[, c("phat","OR","RelOR","phat2","OR2","RelOR2")] <- round(data_print[, c("phat","OR","RelOR","phat2","OR2","RelOR2")],3)
xtable(t(data_print))
|
45ee18ee324b17b07cb58b36ed1959a52f207e25
|
80de789a2b26a51899062294b459b18bbcb20624
|
/SALES/man/coef.ernet.Rd
|
78be71a58086002b38e9ed44e166c3a5a7bffbcb
|
[] |
no_license
|
knightgu/SALES
|
675ffbb2b84cadb7b9e73363874615ec35c33928
|
58130b8c7b50a60b95f0568ca2a96af83180b4c4
|
refs/heads/main
| 2023-01-14T13:21:01.297409
| 2022-08-15T15:48:03
| 2022-08-15T15:48:03
| 62,416,019
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,942
|
rd
|
coef.ernet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef.ernet.R
\name{coef.ernet}
\alias{coef.ernet}
\alias{coef.alspath}
\title{Get coefficients from an ernet object}
\usage{
\method{coef}{ernet}(object, s = NULL, type = c("coefficients", "nonzero"), ...)
}
\arguments{
\item{object}{fitted \code{\link{ernet}} model object.}
\item{s}{value(s) of the penalty parameter \code{lambda} at which predictions
are to be made. Default is the entire sequence used to create the model.}
\item{type}{type \code{"coefficients"} computes coefficients at the requested
values for \code{s}. Type \code{"nonzero"} returns a list of the indices of
nonzero coefficients for each value of \code{s}. Default is
\code{"coefficients"}.}
\item{\dots}{not used. Other arguments to predict.}
}
\value{
The object returned depends on type.
}
\description{
Computes the coefficients or returns a list of the indices of the nonzero
coefficients at the requested values for \code{lambda} from a fitted ernet
object.
}
\details{
\code{s} is the new vector at which predictions are requested. If
\code{s} is not in the lambda sequence used for fitting the model, the
\code{coef} function will use linear interpolation to make predictions. The
new values are interpolated using a fraction of coefficients from both left
and right \code{lambda} indices.
}
\examples{
set.seed(1)
n <- 100
p <- 400
x <- matrix(rnorm(n * p), n, p)
y <- rnorm(n)
tau <- 0.90
pf <- abs(rnorm(p))
pf2 <- abs(rnorm(p))
lambda2 <- 1
m1 <- ernet(y = y, x = x, tau = tau, eps = 1e-8, pf = pf,
pf2 = pf2, standardize = FALSE, intercept = FALSE,
lambda2 = lambda2)
as.vector(coef(m1, s = m1$lambda[5]))
}
\seealso{
\code{\link{ernet}}, \code{\link{predict.ernet}},
\code{\link{print.ernet}}, \code{\link{plot.ernet}}
}
\author{
Yuwen Gu and Hui Zou\cr
Maintainer: Yuwen Gu <yuwen.gu@uconn.edu>
}
\keyword{models}
\keyword{regression}
|
5dc23340ab8c46d0ea54adeee55852c05da997d3
|
fecc9ee435b6450d1ccea9a782eabf740f8b79f6
|
/plot5.R
|
2e778f00b2962c75ca091d0db5c0aa8cbc98442d
|
[] |
no_license
|
izaakjephsonbw/ExploratoryDataAnalysisWeek4
|
76e3e8c474e2214c71bd29d0bff57b6637106db9
|
32f0d28e398aa4154e616250ee17cc751f093456
|
refs/heads/master
| 2020-09-20T07:52:01.393806
| 2019-12-04T09:50:59
| 2019-12-04T09:50:59
| 224,415,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
r
|
plot5.R
|
## Plot 5: total motor-vehicle-related PM2.5 emissions in Baltimore City
## (fips == "24510") by year, written to plot5.png.

## Import libraries
library(tidyverse)

## Import data (skip re-reading the large .rds files if already in memory)
if(!exists("NEI")) {NEI <- readRDS("summarySCC_PM25.rds")}
if(!exists("SCC")) {SCC <- readRDS("Source_Classification_Code.rds")}

## Rename NEI's second column so it does not clash with the SCC lookup table,
## then filter to Baltimore City records whose source code matches any SCC
## short name containing "Vehicle"/"vehicle"
colnames(NEI)[2] <- "SCCode"
baltimore <- NEI %>% filter(fips == "24510")
baltimoremotor <- baltimore %>% filter(SCCode %in% as.vector(SCC$SCC[str_which(SCC$Short.Name,"Vehicle|vehicle")]))

## Group and summarise data by total emissions per year
bmmotortotal <- baltimoremotor %>% group_by(year)%>% summarise(sum(Emissions))
colnames(bmmotortotal)[2] <- "bmtotal"

## Create plot 5 to show total motor emissions in Baltimore over time.
## print() is required so the ggplot object is actually rendered to the png
## device when this script is source()d (top-level auto-printing does not
## happen inside source(), which previously left plot5.png blank).
png(file = "plot5.png")
g <- ggplot(data = bmmotortotal, mapping = aes(year,bmtotal))
print(g + geom_line() + geom_point() + ylab("baltimore motor vehicle emissions"))
dev.off()
|
50cb1bc8a09ac184b9e4a0a8abe58f7523b4088e
|
032d5b16e6d9afa5d08cae5e4121b4f2e5e161f6
|
/FCM_tyson.R
|
d2f47c87760f38dd721aad460f4e1cdabc07e9c8
|
[] |
no_license
|
tysonwepprich/photovoltinism
|
93d3dca661f97a3bb559c791b3bb7a90670eef67
|
7c951f4983570bbf9f7c894f887ce57ea2e096a1
|
refs/heads/master
| 2022-11-21T21:26:12.152840
| 2022-11-03T21:54:03
| 2022-11-03T21:54:03
| 100,308,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,471
|
r
|
FCM_tyson.R
|
# Package loading: library() stops immediately if a dependency is missing,
# whereas require() only warns and returns FALSE, letting the script fail
# later with a confusing error.
library(sp)
library(rgdal)
library(raster)
# setwd("F:/PRISM/2014") # remove this
# Root directory holding the daily PRISM .bil temperature grids for the year.
prism_path <- "/data/PRISM/2014"
# Raster-friendly conditional, mimicking ArcGIS's Con(): returns trueValue
# where condition is TRUE (1) and falseValue where it is FALSE (0).
# Implemented arithmetically so it works elementwise on Raster* objects as
# well as plain logical/numeric vectors, where ifelse() would not.
#
# Args:
#   condition:  logical (or 0/1 numeric) vector or Raster*.
#   trueValue:  value(s) used where condition is TRUE.
#   falseValue: value(s) used where condition is FALSE.
# Returns: condition * trueValue + (!condition) * falseValue, elementwise.
# (Fixes: function was defined with `=` instead of `<-`; redundant return().)
Con <- function(condition, trueValue, falseValue) {
  condition * trueValue + (!condition) * falseValue
}
source('CDL_funcs.R')
#Daily PRISM Statistics (degree days and thresholds) for pest development models
#### The old NAPPFAST model parameters:
#Lower Base temp = 12 degrees C, Upper Base Temp = 40 degrees C (tmean)
#growing degree days threshold = 450 DD
####Pest Specific, Multiple Life Stage Phenology Model Parameters:
#LDT = lower development threshold, temp at which growth = 0 (using PRISM tmean)
#DD = degree days, number of cumulative heat units to complete that lifestage
# NOTE(review): the main loop below references eggDD, larvaeDD, pupaeDD and
# adultDD, which are never defined in this script -- presumably they are
# supplied by CDL_funcs.R or were meant to be drawn from Cohort_Mean() using
# the mu/sig pairs below; TODO confirm before running.
eggLDT = 11.93
larvaeLDT = 11.6
pupaeLDT = 11.9
adultLDT = 12.2 #for oviposition
# Mean/sd of the degree-day requirement for each stage (Gaussian estimates)
eggDDmu = 69.3
eggDDsig = 10
larvaeDDmu = 156
larvaeDDsig = 10
pupaeDDmu = 174 #females
pupaeDDsig = 10
adultDDmu = 79.2 #time to 50% eggs laid
adultDDsig = 5
# choose 'substages' for individual variation
# NOTE(review): 'cohorts' is not used anywhere below in this script, and
# Cohort_Mean() is never called -- looks like work in progress.
cohorts <- 7
# simulate parameters for lognormal distribution from estimated Gaussian distributions
# Split a stage's development-time distribution into 'ngroups' cohort means.
#
# Draws 'nsim' samples from Normal(mu, sig), fits a lognormal to those draws,
# re-simulates from the fitted lognormal, cuts the result into 'ngroups'
# equal-sized quantile groups, and returns each group's mean. These values
# represent substage ("cohort") degree-day requirements, capturing
# individual variation in development time.
#
# Args:
#   mu, sig: mean and sd of the Gaussian estimate of the stage requirement.
#   ngroups: number of cohorts (substages) to create.
#   nsim:    number of Monte Carlo draws.
# Returns: numeric vector of length 'ngroups' (lognormal group means).
#
# NOTE(review): MASS::fitdistr(., "lognormal") errors if any Normal draw is
# <= 0, which can happen when mu is only a few sigmas above zero -- TODO
# confirm inputs. Result is stochastic; no set.seed() is taken here.
Cohort_Mean <- function(mu, sig, ngroups, nsim){
  # draws from the Gaussian description of the trait
  testdist <- rnorm(nsim, mu, sig)
  # refit as lognormal; estimate[1]/[2] are meanlog/sdlog
  lndistrib <- MASS::fitdistr(testdist, "lognormal")
  # quantile-cut fresh lognormal draws into ngroups groups labeled by group mean
  cuts <- Hmisc::cut2(rlnorm(nsim, lndistrib$estimate[1], lndistrib$estimate[2]),
                      g = ngroups,
                      levels.mean = TRUE)
  # factor levels are the group means; convert back to numeric
  as.numeric(as.character(levels(cuts)))
}
# Spatial region to model; used by the switch() below to pick a crop extent.
region_param <- "OR"
#Search pattern for PRISM daily temperature grids. Load them for processing.
pattern = paste("(PRISM_tmin_)(.*)(_bil.bil)$", sep="") # changed this to min, mean not on GRUB?
files <- list.files(path = prism_path, pattern=pattern, all.files=FALSE, full.names=TRUE)
#Check that there are enough files for the year
length(files)
# Extract the date token from each PRISM filename.
# NOTE(review): strsplit(...)[5] assumes the date is always the 5th
# underscore-separated token of the full path -- TODO confirm this holds for
# both stable and provisional PRISM file names.
numlist <- vector()
for (file in files) {
  num = strsplit(file,split="_")[[1]][5]
  numlist <-c(numlist,num)
}
#Order by date starting with first downloaded date. Up to user to download date range
#in line with species' biofix. Assumes Jan 01 biofix
#Sorting is necessary in most recent year with provisional data and different filename structure
sortedlist <- sort(numlist)
# NOTE(review): filelist is initialized but never used below.
filelist <- vector()
#Initialize Tracking Rasters
#Read in first raster as a template and intialize tracking rasters
template <- raster(files[1])
# Crop extent (lon min/max, lat min/max) for the chosen region
REGION <- switch(region_param,
                 "CONUS" = extent(-125.0,-66.5,24.0,50.0),
                 "NORTHWEST" = extent(-125.1,-103.8,40.6,49.2),
                 "OR" = extent(-124.7294, -116.2949, 41.7150, 46.4612))
template <- crop(raster(files[1]),REGION)
# Zero out all land cells; NA cells (outside PRISM coverage) stay NA
template[!is.na(template)] <- 0
#Initialize all tracking rasters as zero with the template
DDaccum <- template
Lifestage <- template
NumGen <- template
#rm(template)
#Lifestage: [0] = egg, [1] = larvae, [2] = pupae, [3] = adult
# Accumulate degree days and reclass cells to NA with temperature exclusion
# NA for exclusion means that DD accum cannot occur anymore with incomplete
# generation development and no oviposition to next generation. This may
# be tested for sensitivity by allowing exlusion cells to reset to zero instead.
###For testing only
# Restrict the run to days 151-200 of the sorted date list (testing subset)
sublist <- sortedlist[151:200]
## for (d in sublist) {
# Main daily loop: for each date d, read that day's PRISM tmin/tmax, derive
# tmean, accumulate stage-specific degree days in DDaccum, advance cells
# through lifestages (0 egg -> 1 larvae -> 2 pupae -> 3 adult) when their
# stage threshold is met, and increment NumGen when adults complete.
#
# NOTE(review): eggDD, larvaeDD, pupaeDD and adultDD are referenced below but
# never defined in this script (only *DDmu/*DDsig are) -- TODO confirm they
# come from CDL_funcs.R, otherwise this loop errors on first use.
# NOTE(review): the lifestage/exclusion masks on dd0, dd1 and dd3 are
# commented out, so those daily degree days are added to DDaccum for EVERY
# cell regardless of its current lifestage (only the pupae stage masks with
# LS2) -- confirm this is intended.
for (d in sublist) {
  print(d)
  if (.Platform$OS.type == "windows") flush.console()
  Sys.sleep(1)
  #Read in that day's PRISM raster files
  # pattern = paste("(PRISM_tmean_)(.*)(",d,")(_bil.bil)$", sep="")
  # temp <- list.files(pattern=pattern,all.files=FALSE, full.names=TRUE)
  # tmean <- raster(temp)
  pattern = paste("(PRISM_tmin_)(.*)(",d,")(_bil.bil)$", sep="")
  temp <- list.files(path = prism_path, pattern=pattern,all.files=FALSE, full.names=TRUE)
  tmin <- crop(raster(temp), REGION)
  pattern = paste("(PRISM_tmax_)(.*)(",d,")(_bil.bil)$", sep="")
  temp <- list.files(path = prism_path, pattern=pattern,all.files=FALSE, full.names=TRUE)
  tmax <- crop(raster(temp), REGION)
  rm(pattern,temp)
  # tmean not in GRUB PRISM data, use approximation
  tmean <- (tmax + tmin) / 2
  #Create stage specific lifestage binary rasters
  #Limits operations to a mask for cells that are in that lifestage
  #This is what allows for pixel by pixel tracking of what lifestage
  # that cell is in
  LS0 <- Lifestage == 0
  LS1 <- Lifestage == 1
  LS2 <- Lifestage == 2
  LS3 <- Lifestage == 3
  # writeRaster(Lifestage,paste("Lifestage_",d,sep=""), format="GTiff",overwrite=TRUE)
  # Progress plot every 28 days of the run
  doy <- which(sublist == d)
  if (doy %% 28 == 0){
    plot(Lifestage)
  }
  # One pass per lifestage: i = 1 egg, 2 larvae, 3 pupae, 4 adult
  for (i in 1:4) {
    if (i==1){
      #developmental degree days (zero values for temps below LDT)
      dd0 <- ((tmean > eggLDT) * (tmean - eggLDT))
      # #Calculate lower lethal threshold and exclusion mask
      # eggmin <- tmin > eggLLT
      # eggmin[eggmin==0] <- NA
      # writeRaster(eggmin,paste("EggMin_",d,sep=""), format="GTiff",overwrite=TRUE)
      # #Calculate upper lethal threshold and exclusion mask
      # eggmax <- tmax < eggULT
      # eggmax[eggmax==0] <- NA
      # writeRaster(eggmax,paste("EggMax_",d,sep=""), format="GTiff",overwrite=TRUE)
      # #Apply exclusions and lifestage mask to daily degree day surface
      # dd0 <- dd0 * eggmin * eggmax * LS0
      #Accumulate degree days, if dd0 > 0 otherwise exclusion masks get applied to this generation.
      #count cells with dd>0
      dd.stat <- cellStats(dd0,stat='max',na.rm=TRUE)
      if (dd.stat > 0) {
        DDaccum <- DDaccum + dd0
        #Calculate lifestage progression: for dd accum in correct lifestage, is it >= egg DD threshold?
        progress0 <- (DDaccum * LS0) >= eggDD
        # writeRaster(progress0,paste("Progress0_",d,sep=""), format="GTiff",overwrite=TRUE)
        Lifestage <- Con(LS0 == 1 & progress0 == 1, 1, Lifestage)
        #Reset the DDaccum cells to zero for cells that progressed to next lifestage
        DDaccum <- Con(progress0 == 0, DDaccum, 0)
      }
    } else if (i == 2) {
      #developmental degree days
      dd1 <- ((tmean > larvaeLDT) * (tmean - larvaeLDT))
      # #Calculate lower lethal threshold and exclusion mask
      # larmin <- tmin > larvaeLLT
      # larmin[larmin==0] <- NA
      # writeRaster(larmin,paste("Larmin_",d,sep=""), format="GTiff",overwrite=TRUE)
      # #Calculate upper lethal threshold and exclusion mask
      # larmax <- tmax < larvaeULT
      # larmax[larmax == 0] <- NA
      # writeRaster(larmax,paste("Larmax_",d,sep=""), format="GTiff",overwrite=TRUE)
      # #Apply exclusions and lifestage mask to daily degree day surface and limit to correct stage
      # dd1 <- dd1 * larmin * larmax * LS1
      #Accumulate degree days, if dd0 > 0 otherwise exclusion masks get applied to this generation.
      dd.stat <- cellStats(dd1,stat='max',na.rm=TRUE)
      if (dd.stat > 0) {
        DDaccum <- DDaccum + dd1
        #Calculate lifestage progression: for dd accum in correct lifestage, is it >= larvae DD threshold?
        progress1 <- (DDaccum * LS1) >= larvaeDD
        # writeRaster(progress1,paste("Progress1_",d,sep=""), format="GTiff",overwrite=TRUE)
        Lifestage <- Con(LS1 == 1 & progress1 == 1, 2, Lifestage)
        #Reset the DDaccum cells to zero for cells that progressed to next lifestage
        DDaccum <- Con(progress1 == 0, DDaccum, 0)
      }
    } else if (i == 3) {
      #developmental degree days
      dd2 <- ((tmean > pupaeLDT) *(tmean - pupaeLDT))
      #Apply exclusions (none for pupae stage) and lifestage mask to daily degree day surface and limit to correct stage
      dd2 <- dd2 * LS2
      #Accumulate degree days, if dd0 > 0 otherwise exclusion masks get applied to this generation.
      dd.stat <- cellStats(dd2,stat='max',na.rm=TRUE)
      if (dd.stat > 0) {
        DDaccum <- DDaccum + dd2
        #Calculate lifestage progression: for dd accum in correct lifestage, is it >= pupae DD threshold?
        progress2 <- (DDaccum * LS2) >= pupaeDD
        # writeRaster(progress2,paste("Progress2_",d,sep=""), format="GTiff",overwrite=TRUE)
        Lifestage <- Con(LS2 == 1 & progress2 == 1, 3, Lifestage)
        #Reset the DDaccum cells to zero for cells that progressed to next lifestage
        DDaccum <- Con(progress2 == 0, DDaccum, 0)
      }
    } else { #adult stage, or time to 50% oviposition
      #developmental degree days
      dd3 <- ((tmean > adultLDT) * (tmean - adultLDT))
      # #Calculate lower lethal threshold and exclusion mask
      # admin <- tmin > adultLLT
      # admin[admin==0] <- NA
      # writeRaster(admin,paste("Admin_",d,sep=""), format="GTiff",overwrite=TRUE)
      # #Apply exclusions and lifestage mask to daily degree day surface
      # dd3 <- dd3 * admin * LS3
      #Accumulate degree days, if dd0 > 0 otherwise exclusion masks get applied to this generation.
      dd.stat <- cellStats(dd3,stat='max',na.rm=TRUE)
      if (dd.stat > 0) {
        DDaccum <- DDaccum + dd3
        #Calculate lifestage progression: for dd accum in correct lifestage, is it >= adult DD threshold?
        progress3 <- (DDaccum * LS3) >= adultDD
        # writeRaster(progress3,paste("Progress3_",d,sep=""), format="GTiff",overwrite=TRUE)
        #Reset the DDaccum cells to zero for cells that progressed to next lifestage
        DDaccum <- Con(progress3 == 1,0, DDaccum)
        #Remove masking effect from progress counter so it doesn't perpetuate through NumGen raster
        progress3[is.na(progress3)] <- template[is.na(progress3)]
        #Reset Lifestage raster with new generation. By replacing NA with template zero values, we remove generation dependency
        Lifestage <- Con(LS3 == 1 & progress3 == 1,0, Lifestage)
        Lifestage[is.na(Lifestage)] <- template[is.na(Lifestage)]
        #Increment NumGen + 1
        NumGen <- NumGen + progress3
        # writeRaster(NumGen,paste("NumGen",d,sep=""), format="GTiff",overwrite=TRUE)
      }
    }
  }
}
# writeRaster(NumGen,"FCM_NumGenerations.tif", format="GTiff",overwrite=TRUE)
plot(NumGen)
#Possibility to calculate any number of outputs. This example was for 2014
#data only, but will want to look at multi-year calculations and how we
#can express uncertainty (annual variability) for more static risk maps.
|
5a1c9a8f9cba44d1f0515da0ee5b75aa2152ad63
|
e98b541d19a1893cf0507389888a14b4097607e4
|
/tests/testthat/test-FIND.R
|
764761eaacf7d666b39e9c10073834bf3d330ef2
|
[] |
no_license
|
njbultman/xlsxFunctions
|
6cd647aacd140a45207949fe01b596f00bd0490a
|
0326efb9af26be1efc7f6e65b782f3a6e30bebcf
|
refs/heads/master
| 2023-07-06T00:22:15.572100
| 2021-08-08T14:26:17
| 2021-08-08T14:26:17
| 294,152,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
test-FIND.R
|
test_that("Works with a string", {
expect_equal(FIND("l", "hello"), 3)
})
test_that("Works with a character vector", {
expect_equal(FIND("l", c("hello", "hello")), c(3, 3))
})
test_that("Inserts NA when character not found in string", {
expect_equal(FIND("l", c("hi", "hello")), c(NA, 3))
})
test_that("Works when last character in string is the target character", {
expect_equal(FIND("l", "hel"), 3)
})
|
b4c23c670bb7df4b38315b97b7f50034b8280187
|
c555c086b271eaca27472410f3aa5c97709958d9
|
/man/is.local.Rd
|
f0bcddcc41c6cdc792f4a2e78673d582a0d36a09
|
[
"MIT"
] |
permissive
|
filipezabala/embedr
|
c3e9e299bc1ed9a8e7f811a96c7e53b60fe53b73
|
64eee3d975392c20f2242c9663f607e3790a322e
|
refs/heads/master
| 2023-03-20T14:37:29.654223
| 2020-07-09T07:54:50
| 2020-07-09T07:54:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 363
|
rd
|
is.local.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is.local}
\alias{is.local}
\title{Return strings without a URL scheme}
\usage{
is.local(x)
}
\arguments{
\item{x}{A character vector.}
}
\description{
Given a character vector, returns a logical indicating whether the
paths in the vector point to existing local files.
}
|
fa7ce7c5819acc19e867fb7cfc5986dade6da99e
|
64f5a277a159ce9e8489fffc0affa08d4c90b8bc
|
/A2-Econometria.R
|
c9a875eb907696546f8fc9d2305d807757af6bec
|
[] |
no_license
|
JuliaNavas/2-Series_Temporais
|
470c7a00d9a6d2a91a507c6f965700557c740015
|
edb6aff5a26c6608c145afa92990de92ca0e8bd2
|
refs/heads/master
| 2020-03-28T04:55:30.387867
| 2018-10-29T22:34:58
| 2018-10-29T22:34:58
| 147,745,098
| 0
| 0
| null | 2018-09-06T23:32:44
| 2018-09-06T23:32:43
| null |
UTF-8
|
R
| false
| false
| 1,405
|
r
|
A2-Econometria.R
|
# Econometria Avançada Aula 2
# Noções Básicas
x <- 1:10 #cria objeto x com elementos de 1:10
y <- rep(3,10) #cria objeto y com 10 repetições do número 3
ls() #lista todos os objetos que estão no global environment
rm(list="x","y") #apaga os objetos x e y
setwd("G:/USJT/Econometria") #altera a pasta de trabalho do R
getwd() #mostra a pasta de trabalho atual do R
# Instalar Pacotes
install.packages("pwt8") #instala o pacote pwt8
library("pwt8") #roda o pacote pwt8
data("pwt8.0") #carrega os dados pwt8.0 disponíveis no pacote
View (pwt8.0) #vizualiza a tabela pwt8.0
# Extrair dados da tabela
br <- subset(pwt8.0, country=="Brazil", select = c("rgdpna","avh","xr")) #cria a tabela br onde é escolhido o país Brasil e as variáveis
colnames(br) <- c("PIB","Trabalho","Câmbio") #altera o nome das variáveis
# Criar Gráficos
plot(br$PIB) #cria um gráfico dos valores de PIB na tabela br
plot(br$PIB, type = "l") #transforma os valores em forma de linha
dados <- ts(br, start = 1950, frequency = 1) #cria a tabela dados com uma série temporal que começa em 1950 e a frequência é anual
plot(dados) #cria um gráfico da tabela dados
plot(dados, col="blue", main="Dados Brasileiros", xlab="Ano") #muda o título e a cor das linhas do gráfico para azul
#Salvar
write.csv(br, file="G:/USJT/Econometria/A2/Dados-Brasileiros.csv") #salva a tabela
|
f958b2ec069b72643ef322cabea4a730c83eb19f
|
a407e7551a05b15a26085369bc4aeeadc80fee87
|
/man/readTIFF2.Rd
|
22af830919f0cce0cf22430aa02f3a2cda02c79e
|
[
"MIT"
] |
permissive
|
tkatsuki/dipr
|
8ec80f0aaaec481e6a477f2108365760964e7511
|
2d9adf558a893cd3525ee54c099695c9becae0a4
|
refs/heads/master
| 2021-06-06T19:54:00.071143
| 2021-04-18T06:04:38
| 2021-04-18T06:04:38
| 63,497,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 846
|
rd
|
readTIFF2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readTIFF2.R
\name{readTIFF2}
\alias{readTIFF2}
\title{A readTIFF2 Function}
\usage{
readTIFF2(filename, start = 1, end = 0, crop = c(0, 0, 0, 0),
frames = NULL, getFrames = F, intensity = F)
}
\arguments{
\item{start}{An integer of the start frame. Default = 1.}
\item{end}{An integer of the end frame. Default = 0 (last frame of the file).}
\item{crop}{Not implemented yet.}
\item{frames}{A vector specifying which frames to be loaded.}
\item{getFrames}{Return number of frames. Default = False.}
\item{intensity}{Whether or not return mean intensity of each frame. Default = False.}
\item{filepath}{A caracter string of the path to the file. Required.}
}
\description{
This function allows you to load a TIFF file.
}
\examples{
readTIFF2()
}
\keyword{tiff}
|
831975f98f940728e41ef965266fe52f885b7052
|
7b51d31509b41bdb46feec97e8fe723fee46ceed
|
/projects/kaggle_challenges/orange_customer_satisfaction_competition/code/models/rf/basic.rf.model.r
|
9b518efcc44205e95d5f6304185a2c3482e8657a
|
[] |
no_license
|
FredGH/ProjectPortfolio_2.0
|
7c2a90e1aba821bd388d2d364288d46c46311914
|
c45ac47cc958a67dab137afd57a80d883462486b
|
refs/heads/master
| 2021-07-05T03:32:14.700525
| 2020-08-20T06:30:31
| 2020-08-20T06:30:31
| 65,825,815
| 1
| 1
| null | 2020-08-19T06:12:36
| 2016-08-16T14:12:14
|
Roff
|
UTF-8
|
R
| false
| false
| 1,302
|
r
|
basic.rf.model.r
|
debug.train <- TRUE
debug.test <- TRUE
train.ntree <- 50
train.BasicRF <- function(formula, data) {
print("training..")
targetColumnName <- get_target_column_name(formula)
if (debug.train) {
print(sprintf("targetColumnName is %s", targetColumnName))
}
levels(data[, targetColumnName]) <- c("NO", "YES")
# Using sampling = "smote" in trainControl doesn't always seem to work
if (debug.train) {
print("applying SMOTE")
}
data <- SMOTE(form = formula, data = data)
if (debug.train) {
print("fitting model")
}
caretModel <- train(
formula,
data,
method = "rf",
metric = "ROC",
ntree = train.ntree,
trControl = trainControl(
method = "repeatedcv",
number = 5,
classProbs = TRUE,
summaryFunction = twoClassSummary
)
)
model <- structure(list(
model = caretModel),
class = "BasicRF")
if (debug.train) {
print(model)
print("finished")
}
return(model)
}
predict.BasicRF <- function(model, data) {
print("testing..")
model <- model$model
predictions <- predict(model, data)
levels(predictions) <- c(-1, 1)
if (debug.test) {
print(sprintf("predictions: %s", paste(predictions, collapse=",")))
print("finished")
}
return(predictions)
}
|
cf0817ca92fa18c28a179dc185569f2abe5056b8
|
b26909940752cd8322f4f0e8fd5078c61f69c9cc
|
/man/hw04pDmando-package.Rd
|
cb2b55e034f174279ad584aa65b030ea400103cc
|
[
"MIT"
] |
permissive
|
STAT-413-613-21S/hw04pDmando
|
89618199bdeae4f288a9dacc0d826ca71b9c830b
|
14d2324b47f5109659970a49ef4cdd121326f932
|
refs/heads/master
| 2023-03-14T05:31:13.153322
| 2021-03-04T16:02:02
| 2021-03-04T16:02:02
| 344,530,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 485
|
rd
|
hw04pDmando-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hw04pDmando-package.R
\docType{package}
\name{hw04pDmando-package}
\alias{hw04pDmando}
\alias{hw04pDmando-package}
\title{hw04pDmando: Two Functions for HW4}
\description{
If this package works it will help me get an A in this class and it has two key functions.
}
\author{
\strong{Maintainer}: First Last \email{first.last@example.com} (\href{https://orcid.org/YOUR-ORCID-ID}{ORCID})
}
\keyword{internal}
|
dbba5806278a573057d33a0c96761e371eb4ced6
|
05c8ef8706b84a0c4abd5886cf98b246495fed36
|
/TP 1/Ejercicio 3/kfold_spiral_test.R
|
c8e7c2ec12edaf1fc4abcb554e6417e703cf0691
|
[] |
no_license
|
Tomasfdel/Trabajos-DM
|
dbc53ff4bd544b5f09da2b7ed9209c552a33855e
|
bbf40027d7853aa0f187d7453d5c216be027982a
|
refs/heads/main
| 2023-02-27T17:02:19.241945
| 2021-01-31T07:15:31
| 2021-01-31T07:15:31
| 334,008,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,915
|
r
|
kfold_spiral_test.R
|
library(class)
library(rpart)
source("../Ejercicio 1/spirals.R")
getClassMembers = function(class, dataset, classIndex){
# Devuelve todos los valores de dataset que pertenezcan a la clase class.
dataset[dataset[classIndex] == class, ]
}
shuffleClassMembers = function(classMembers){
#Reordena los elementos del dataframe de entrada.
classMembers[sample(nrow(classMembers)),]
}
splitClasses = function(dataset, foldAmount){
#~ Esta función devuelve una lista en la que cada elemento tiene un dataframe con todos los
#~ valores que pertenecen a la misma clase.
classIndex = dim(dataset)[2]
dataByClass = lapply(unique(dataset[[classIndex]]), getClassMembers, dataset, classIndex)
lapply(dataByClass, shuffleClassMembers)
}
getValuesOutOfFold = function(dataset, foldIndex, foldAmount){
#~ Toma los elementos de una clase, calcula el tamaño que debe tener un fold y devuelve
#~ todos los elementos que no pertenezcan al foldIndex correspondiente.
foldSize = dim(dataset)[1] / foldAmount
dataset[- (((foldIndex - 1) * foldSize + 1) : (foldIndex * foldSize)) ,]
}
getValuesInFold = function(dataset, foldIndex, foldAmount){
#~ Toma los elementos de una clase, calcula el tamaño que debe tener un fold y devuelve
#~ todos los elementos que pertenezcan al foldIndex correspondiente.
foldSize = dim(dataset)[1] / foldAmount
dataset[((foldIndex - 1) * foldSize + 1) : (foldIndex * foldSize) ,]
}
getFoldedData = function(dataset, foldIndex, foldAmount){
#~ Dado un un dataset dividido por clases y un foldIndex, devuelve una lista con la combinación de
#~ los puntos de todas las clases que no pertenecen al fold indicado, y por otro lado los que sí pertenecen.
trainList = lapply(dataset, getValuesOutOfFold, foldIndex, foldAmount)
testList = lapply(dataset, getValuesInFold, foldIndex, foldAmount)
#~ Una vez que se consiguieron los datos que pertenecen al fold y los que no, divididos por clase,
#~ se los combina para obtener el set de entrenamiento y el de test.
list(do.call(rbind.data.frame, trainList), do.call(rbind.data.frame, testList))
}
args = commandArgs(trailingOnly = TRUE)
if(length(args) != 3){
cat("ERROR. Modo de uso: Rscript kfold_spiral_test.R setSize KNN_K foldAmount\n")
} else {
setSize = as.integer(args[1])
K = as.integer(args[2])
foldAmount = as.integer(args[3])
spiralDataset = generateSpiralsData(setSize)
splitDataset = splitClasses(spiralDataset)
knnErrors = c()
treeErrors = c()
for(foldIndex in 1:foldAmount){
#~ Teniendo los datos divididos por clase, armaremos el n-ésimo fold y lo devolveremos como set de test.
#~ Todos los demás los uniremos para armar el set de entrenamiento. Cada fold se armará con una proporción
#~ de puntos de cada clase igual a la proporción en el dataset completo.
#~ Más allá de eso, el entrenamiento funciona igual que en el regular_test, salvo que guardamos los errores de
#~ cada iteración para luego promediarlos.
foldedDataset = getFoldedData(splitDataset, foldIndex, foldAmount)
spiralTrain = foldedDataset[[1]]
spiralTest = foldedDataset[[2]]
spiralKnn = knn(spiralTrain[,-3],
test = spiralTest[,-3],
cl = spiralTrain[,3],
k = K)
spiralTree = rpart("X3~.", data = spiralTrain, method = "class")
spiralTreePred = predict(spiralTree, spiralTest)
knnErrors[foldIndex] = mean(spiralTest[,3] == spiralKnn)
treeErrors[foldIndex] = mean(spiralTest[,3] == max.col(spiralTreePred, "first") - 1)
}
cat(sprintf("Error Espiral KNN: %f\n", 1 - mean(knnErrors)))
cat(sprintf("Error Espiral Tree: %f\n", 1 - mean(treeErrors)))
}
#~ Resultados de una ejecución:
#~ setSize = 200
#~ KNN_K = 3
#~ foldAmount = 5
#~ Error Espiral KNN: 0.250000
#~ Error Espiral Tree: 0.380000
#~ Para este caso, los resultados son realmente más pesimistas por un margen significativo,
#~ a diferencia de en el caso diagonal.
|
f0b4aa3796059cb18606f27d7b1c20ca9d87bf6c
|
19fa6ad75dad84e3cdf55b3e70f2a793b3d97a1a
|
/data cleaning.R
|
530df908953dd6266d46e5b58be0b1b586fb52c0
|
[] |
no_license
|
OmarLaz97/BigData_HR
|
57821a31f2ebea01ae10a8827048850f56cd52f5
|
05bc2babffd58f7cd105390ce849935baf0a4277
|
refs/heads/master
| 2022-05-24T07:05:32.028352
| 2020-05-01T19:52:08
| 2020-05-01T19:52:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,247
|
r
|
data cleaning.R
|
rm(list=ls())
library(lubridate)
#Set the working directory to the folder that contains the dataset files
inTime <- read.table("in_time.csv", header=TRUE, sep=",")
outTime <- read.table("out_time.csv", header=TRUE, sep=",")
generalData <- read.table("general_data.csv", header=TRUE, sep=",")
employeeSurvey <- read.table("employee_survey_data.csv", header=TRUE, sep=",")
managerSurvey <- read.table("manager_survey_data.csv", header=TRUE, sep=",")
#average working hours per day for each employee
inTime<-inTime[-c(1,2)]
outTime<-outTime[-c(1,2)]
timediff <- function(u,v) as.numeric(difftime(ymd_hms(u),ymd_hms(v),units=c("hours")), units="hours")
hours<-mapply(timediff, outTime, inTime)
hours<-as.data.frame(hours)
hours[is.na(hours)] <- 0
avgHrs<-rowMeans(hours)
#removing unnecessary columns
generalData<-generalData[ , !(names(generalData) %in% c("Over18", "StandardHours","EmployeeCount"))]
#Merge tables
employeeData<-merge(generalData,employeeSurvey,by="EmployeeID")
employeeData<-merge(employeeData,managerSurvey, by="EmployeeID")
#Add average working hours
employeeData$AverageWorkingHours<-avgHrs
#Remove NAs
employeeData<-employeeData[complete.cases(employeeData), ]
#Export to csv file
write.csv(employeeData,"employee_data.csv")
|
3b15dbd175a37a66138cb000822b6bd70c2e3e75
|
229c0dd9ed28a2c5887957856e8b9ddd439597e2
|
/initialisation/set_CAF2021_Turrialba_trees_E_AC.R
|
507659e03d2b5f4050688b32151b5eefb34c5f50
|
[] |
no_license
|
MarcelVanOijen/CAF2021
|
7e6e68ac4e17f33badb2071d8dc74d1390d907df
|
e752e2024c420cb1f4518fcfc29010fa65670afd
|
refs/heads/main
| 2023-04-11T18:29:56.318506
| 2022-01-17T09:07:46
| 2022-01-17T09:07:46
| 349,493,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
set_CAF2021_Turrialba_trees_E_AC.R
|
## set_CAF2021_Turrialba_trees_E_AC.R ##
## MvO, 2021-06-04
params <- set_par( "TREEDENS0(1)", 0.07 )
calendar_prunT[ 1, 1 , ] <- c( 2001, 266, 0.8 )
calendar_prunT[ 1, 2:33, 1 ] <- rep( 2002:2017, each=2 )
calendar_prunT[ 1, 2:33, 2 ] <- c( 140, 350 )
calendar_prunT[ 1, 2:33, 3 ] <- 0.6
calendar_thinT[ 1, 1 , ] <- c( 2007, 124, 0.004 )
calendar_thinT[ 1, 2 , ] <- c( 2010, 124, 0.49 )
|
b3d928c7646f6217d1670546186a3b46b7664251
|
d149292e3981f86ae0d216ea44d7170cc53d0599
|
/cachematrix.R
|
1548c063c5fd667e3be103292bf8b4ee441eb205
|
[] |
no_license
|
RonanCon/ProgrammingAssignment2
|
e6eb4f9c683e5d48b02e6cb8d59188d17c1ee5c4
|
fa5f11dc552c5a13d1477083edad702a7bd96405
|
refs/heads/master
| 2021-01-21T19:07:15.130723
| 2014-09-21T22:32:24
| 2014-09-21T22:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,809
|
r
|
cachematrix.R
|
## Creates a matrix object that can calculate its own inverse (a)
## and caches that value which can be recalled until the input
## matrix is changed. Based on the sample functions provided in the ##assignment
## This function creates the object which holds the input matrix
## and defines the variables and functions required to cache the
##inverse of the input matrix. If the input matrix changes this
##function resets the cached value
makeCacheMatrix <- function(x = matrix()) {
a <- NULL ## sets the inverse value to NULL
set <- function(y) { ## substitutes input matrix y for the original x
x <<- y
a <<- NULL ## resets the inverse to NULL if input matrix changes
}
get <- function() x ## retrieves the input matrix
setinverse <- function(solve) a <<- solve ##writes the inverse to the cache
getinverse <- function() a ## retrieves the inverse from the cache
list(set = set, get = get, ##creates list holding the functions
setinverse = setinverse,
getinverse = getinverse)
}
## This function calls the value of the inverse from the cache.
##If its a non-NULL value it prints it and if its NULL it calculates
##the inverse of the input value
cacheSolve <- function(x, ...) {
a <- x$getinverse() ## checks the cache for the inverse
if(!is.null(a)) { ## prints the value of inverse in cache if its not NULL
message("getting cached data")
return(a)
}
data <- x$get() ## creates a variable data which contains the input matirx
a <- solve(data, ...) ##calcaulates the inverse
x$setinverse(a) ## writes to the cache
a ##prints the inverse
}
|
34c61bc62cf6a1b7c2b15bceeb6f2ee10c6d5fa1
|
b0cc45f7079c36065479cbc55cd314a7f12f3958
|
/Lecture/03-matrices-arrays-lists-data_frames.R
|
b7b8dbf41e15f80e836b28c49b3582e2dd3e2661
|
[] |
no_license
|
aaronlai1027/R-for-Data-Science
|
03f6d4521d16a1e197bb5fed2673d89d14a9c0e9
|
d6e1044ade0d3e94c00fe68236e50f1ca15f72c2
|
refs/heads/master
| 2022-11-11T10:14:04.114263
| 2020-07-01T06:59:47
| 2020-07-01T06:59:47
| 276,236,772
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,630
|
r
|
03-matrices-arrays-lists-data_frames.R
|
#### Class 3: Tuesday September 5
## Matrices and arrays.
## Lists.
## Data frames.
##**** Matrices and Arrays ****##
## Matrices and arrays are vectors with a ``dim''
## attribute set to a vector of 2 (for matrices) or
## more (for arrays) elements (each element adds a
## dimension) associated to it.
## A matrix is a particular case of an array
## (it is an array of dimension 2),
## but it is such an important particular case
## that it will have a separate treatment.
## We have already created matrices (and arrays)
## by assigning a dimension vector
## to its ``dim'' attribute.
## Using the matrix function
matrix(1:8, nrow = 2)
matrix(1:8, ncol = 4)
## It assigns by column by default
## If you want to assign by row
matrix(1:8, nrow = 2, byrow = TRUE)
##**** Elements of Matrices ****##
(A <- matrix(10:19, nrow = 2))
A[7] ## Remember that matrices are still vectors
A[1, 4] ## Row and Column desired [explore negative indexes]
##**** Index Matrices ****##
I <- matrix(c(1, 3, ## Index Matrix
1, 4,
2, 2), ncol = 2, byrow = TRUE)
A[I]
A[I] <- 0
A
##**** Rows and Columns from Matrices ****##
A <- matrix(1:16, nrow = 4)
A[,]
A[, 2]
A[2,]
A[c(1, 3), c(2, 4)]
##**** Transpose of a Matrix ****##
(A <- matrix(10:19, nrow = 2))
t(A)
t(t(A))
##**** Different uses of function diag() ****##
diag(3) ## 3x3 Identity matrix
diag(c(3, -1, 5)) ## 3x3 Square matrix with this diagonal
A
diag(A) ## Diagonal of a matrix
sum(diag(A)) ## Calculate the trace of A
##**** Element by Element Product of Matrices ****##
(A <- matrix(1:4, nrow = 2))
(B <- matrix(2:5, nrow = 2))
A * B ## Element by element product
(D <- matrix(1:6, nrow = 2))
A * D ## Matrices need to have same number of rows
## and columns
##**** Matrix Product ****##
A %*% B ## 2x2 * 2x2 = OK
A %*% D ## 2x2 * 2x3 = OK
D %*% A ## 2x3 * 2x2 = No!
##**** Concatenation of Matrices ****##
A
B
rbind(A,B) # Vertically
cbind(A,B) # Horizontally
##**** Operations on Rows and Columns of Matrices ****##
B
rowMeans(B)
rowSums(B)
colMeans(B)
colSums(B)
##**** Solving Linear Equations ****##
## Suppose we have the system A %*% x = b
A <- matrix(1:4, nrow = 2) ## Matrix of coefficients
b <- c(3, -7) ## Vector of independent terms
(x <- solve(A,b)) ## System solution
A %*% x ## = b. Verification
##**** Inverse of a square matrix ****##
## solve() is also used to invert a square matrix
solve(A) ## Matrix inversion
A %*% solve(A) ## Verification
## Your turn:
## Investigate matrix functions eigen, svd, qr, ...
## Your turn:
## Draw 1000 random samples of size 20 each from a
## standard normal distribution
## (remember you can use "rnorm" for that):
## 1) save all values in a 1000x20 matrix.
## 2) print the first 10 random samples (10x20 matrix)
## 3) calculate the 1000 sample means xbar
## 4) Verify numerically that E(xbar) = mu (approx solution)
## 5) Verify numerically that SD(xbar) = sigma/sqrt(n)
## 6) Plot the empirical distribution (use function "hist")
## of xbar
## Begin solution
## End solution
##**** Lists ****##
## A list is an object containing as elements objects of
## any type, including other lists.
L <- list(c(1, 3, 2),
c("Two", "Words"),
list(c(TRUE, FALSE, FALSE),
"something"))
L
class(L)
str(L)
##**** Lists with Named Components ****##
## You can assign names to the elements of the list.
L <- list(number = c(1, 3, 2),
char = c("Two", "Words"),
other_list=list(logical = c(TRUE,FALSE,FALSE),
morechar = "something"))
L
class(L)
str(L)
##**** Accessing a Given List Component ****##
L[1] ## Be careful about this way
str(L[1])
L[[1]] ## Use this way
str(L[[1]])
L$number ## or this way
str(L$number)
L[["number"]]
##**** Getting a Sub-List ****##
L[2:3] ## This is right
L[[2:3]] ## This does not work
L[c("number", "other_list")] ## This is right
L[[c("number", "other_list")]] ## This does not work
##**** Accessing Components of List Components ****##
L[[1]][2]
L$number[2]
L[[3]][[1]][2]
L$other_list$logical[2]
L[["other_list"]][["logical"]][2]
##**** Empty Lists and Adding Elements to Lists ****##
L <- list() ## Empty list
L[[3]] <- c(2, -1)
L$morelogical <- c(FALSE, TRUE)
L
##**** Concatenating Lists ****##
L1 <- list(c(2,3), TRUE)
L2 <- list(NA, c("a", "v"))
c(L1, L2)
##**** Data Frames ****##
## Similar to Lists, but with a matrix-like structure.
df <- data.frame(num = c(3, 4, 2, -1),
char = c("a", "b", "b", "a"),
lgc = c(T, T, F, T))
df
str(df)
## NOTE: the character vector was converted to factor.
## To avoid set stringsAsFactors=FALSE.
df <- data.frame(num=c(3,4,2,-1),
char=c("a","b","b","a"),
lgc=c(T,T,F,T), stringsAsFactors = FALSE)
df
str(df)
## To set it as a global default
## (in general not recommended) run
## options(stringsAsFactors = FALSE)
##**** Dimension of Data Frames ****##
dim(df)
nrow(df)
ncol(df)
##**** Accessing Components of Data Frames ****##
df[1]
str(df[1])
df[[1]]
df[, 1]
df$num
## Matrix-Like Components
df[2, 1]
df[3, "lgc"]
df["3", "lgc"]
rownames(df) <- c("first", "second", "third", "fourth")
df
df["third", "lgc"]
df["3", "lgc"]
##**** Accessing R datasets ****##
## For a list of datasets
library(help = "datasets")
## Load CO2 dataset
data(CO2)
head(CO2)
str(CO2)
##**** attach() and detach() ****##
CO2$uptake[1:10]
uptake[1:10]
attach(CO2)
CO2$uptake[1:10]
uptake[1:10]
detach()
CO2$uptake[1:10]
uptake[1:10]
attach(CO2)
CO2$uptake[1:10]
uptake[1:10]
uptake[1:10] <- 0
detach()
uptake[1:10]
CO2$uptake[1:10]
## attach() and detach() can be used both with lists
## and dataframes.
##**** Writing/Reading Data to/from Files ****##
## Saving and Loading Human Readable Data
write.table(CO2, "../data/CO2.data")
write.csv(CO2, "../data/CO2.csv")
CO2.table <- read.table("../data/CO2.data")
str(CO2.table)
CO2.csv <- read.csv("../data/CO2.csv")
str(CO2.csv)
## Saving and Loading Binary Data
save(CO2, file="../data/CO2.RData")
CO2 <- NA
CO2
load(file="../data/CO2.RData")
head(CO2)
## Your turn:
## Make sure you have the library ggplot2 installed
## If not, install it by running the uncommented code below
## install.packages("ggplot2")
## Load the library
library(ggplot2)
## Activate the mpg data.frame provided by ggplot2
data(mpg)
## New versions of ggplot have all character variables, while
## older had factors. Transform manufacturer to factor
## to show how most data.frames treat character variables
mpg$manufacturer <- factor(mpg$manufacturer)
str(mpg)
## 1) Inspect the structure of mpg data.frame.
## Note the kind of data provided
## 2) Run the summary function to learn more
## about the variables
## 3) Get a subset of the data.frame including all
## cars that are not ford nor audi
## 4) Determine if the manufacturer variable (that is a factor)
## in your subset has or not dropped the now removed
## manufacurers audi and ford
## 5) Devise a strategy to assure that the above factor has dropped
## the levels that have no elements
## 6) Further subset the data making sure that only front drive
## midsize cars model 2008 with at least 20 highway
## or city miles per gallon are included.
## 7) Determine how many cars per manufacturer satisfy
## these restrains. Only show manufacturers with
## at least one vehicle.
## 8) Only show the case(s) with more cars
## Note: there might be ties...
## Begin solution
## End solution
|
85fabc324f34819f7c9df056df4151691698110c
|
de33d107690c2a6e119af35f1496ad7a9a22a0ae
|
/man/treestack-package.Rd
|
0a920f0aadea0cbcf77a62d11386f6ee2e6274ee
|
[] |
no_license
|
nverno/treestack
|
e493f68f15a5c9211b3deaf7e84c3363ff5c44d8
|
dca6c6f859500046297952075ffb65d843f20fa7
|
refs/heads/master
| 2021-01-10T05:54:01.927512
| 2015-11-09T04:51:05
| 2015-11-09T04:51:05
| 45,757,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 573
|
rd
|
treestack-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkg.R
\docType{package}
\name{treestack-package}
\alias{treestack-package}
\title{Stacked Treemap}
\description{
\tabular{ll}{
Package: \tab treestack\cr
Type: \tab Package\cr
License: \tab GPL-(>=2)\cr
LazyLoad: \tab yes\cr
}
}
\details{
Visualize data iteratively aggregated by categories using stacked barplots. The color and size of the bars can be determined by continuous variables.
}
\author{
Noah Peart \email{noah.v.peart@gmail.com}
}
\keyword{graph}
\keyword{tree}
\keyword{treemap}
|
30a11607c3fd9288d04a33cebb4d7889bc1e9c06
|
4ddb88f2583737faebf066b5efdfdc3d9e04a891
|
/libraries/old/la-county-calcs.R
|
57d8b4daf9de57733d89aef49f7694aa6804b0b8
|
[] |
no_license
|
ryanvmenezes/census-queries
|
67ecb97a6826cf2c469612c7544a6ad57841221d
|
4567b352d8f401537550a666e926704485ca8db7
|
refs/heads/master
| 2023-03-08T11:00:26.297720
| 2021-02-24T21:01:20
| 2021-02-24T21:01:20
| 165,991,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,557
|
r
|
la-county-calcs.R
|
library(tidyverse)
lang.raw = read_csv('old/languages-la-county.csv')
lang.raw
lang.by.year = lang.raw %>%
pivot_longer(`1980`:`2018`, names_to = 'year', values_to = 'popcount') %>%
mutate(
year = parse_integer(year),
popcount = popcount %>%
str_split('\n') %>%
map_chr(`[[`, 3) %>%
parse_number(),
LANGUAGE = language,
language = language %>%
str_replace('^\\d+:\\s', '') %>%
word() %>%
str_replace_all(',', '')
) %>%
select(LANGUAGE, everything()) %>%
group_by(year) %>%
mutate(pop.pct = popcount / sum(popcount)) %>%
ungroup() %>%
arrange(-popcount)
lang.by.year
lang.xwalk = lang.by.year %>%
group_by(language) %>%
summarise(n = sum(popcount)) %>%
arrange(-n) %>%
mutate(
n = case_when(
str_starts(language, 'N/A') ~ 0,
TRUE ~ n
)
) %>%
mutate(
languagef = fct_lump(language, n = 10, w = n),
languagef = fct_reorder(languagef, n)
)
lang.xwalk %>% head(15)
lang.xwalk$languagef
plot.all = lang.by.year %>%
left_join(lang.xwalk %>% select(-n)) %>%
filter(languagef != 'Other') %>%
ggplot(aes(languagef, pop.pct * 100, fill = languagef)) +
geom_bar(stat = 'identity', position = 'dodge') +
facet_wrap(year ~ ., nrow = 1) +
scale_fill_brewer(palette = "Paired", name = 'Language') +
ylab('Percent of L.A. County population') +
ggtitle('Most-spoken languages in L.A. County') +
theme_minimal() +
theme(axis.text.x = element_blank(), axis.title.x = element_blank())
plot.all
ggsave('top-languages.pdf', device = 'pdf', width = 10, height = 8, units = 'in')
plot.no.eng.no.spa = lang.by.year %>%
left_join(lang.xwalk %>% select(-n)) %>%
filter(languagef != 'Other') %>%
group_by(year) %>%
mutate(rankinyear = rank(-pop.pct)) %>%
filter(!(languagef %in% c('English','Spanish'))) %>%
ggplot(aes(languagef, pop.pct * 100, fill = languagef)) +
geom_bar(stat = 'identity', position = 'dodge') +
facet_wrap(year ~ ., nrow = 1) +
# geom_text(aes(languagef, pop.pct * 100 + 0.18, label = formatC(popcount, format="d", big.mark=",")), size = 3, angle = 90) +
scale_fill_brewer(palette = "Paired", name = 'Language') +
ylab('Percent of L.A. County population') +
ggtitle('Most-spoken languages in L.A. County, excluding English and Spanish') +
theme_minimal() +
theme(axis.text.x = element_blank(), axis.title.x = element_blank())
plot.no.eng.no.spa
ggsave('top-languages-no-eng-no-spa.pdf', device = 'pdf', width = 10, height = 8, units = 'in')
lang.by.year %>%
left_join(lang.xwalk %>% select(-n)) %>%
group_by(year, languagef) %>%
summarise(pop.pct = sum(pop.pct)) %>%
ggplot(aes(year, pop.pct * 100)) +
geom_line() +
facet_wrap(languagef ~ ., scales = 'free') +
theme_minimal()
top10.by.year = lang.by.year %>%
filter(!str_detect(language, 'N/A')) %>%
group_by(year) %>%
mutate(rankinyear = rank(desc(popcount))) %>%
filter(rankinyear <= 10) %>%
ungroup()
top10.by.year
lang.by.year %>%
mutate(
language = if_else(language %in% c('English','Spanish'), language, 'Other')
) %>%
group_by(language, year) %>%
summarise(popcount = sum(popcount), pop.pct = sum(pop.pct)) %>%
ggplot(aes(year, pop.pct, color = language)) +
geom_line() +
geom_point() +
theme_minimal()
eng.spa.other.by.year = lang.by.year %>%
filter(language %in% c('English', 'Spanish')) %>%
select(language, year, pop.pct)
# group_by(language, year) %>%
# summarise(popcount = sum(popcount), pop.pct = sum(pop.pct))
# select(-popcount) %>%
# pivot_wider(names_from = 'year', values_from = 'pop.pct')
eng.spa.other.by.year
eng.spa.other.by.year %>% write_csv('la-county-eng-spa-other-by-year.csv')
t10.plot = top10.by.year %>%
ggplot(aes(x = year, y = rankinyear, group = language)) +
geom_point(aes(size = popcount), color = 'grey') +
geom_line(color = 'grey') +
geom_point(
data = . %>% filter(rankinyear > 4),
aes(color = language, size = popcount)
) +
geom_line(
data = . %>% filter(rankinyear > 4),
aes(color = language)
) +
# names at end
geom_text(
data = . %>% group_by(language) %>% filter(year == max(year)),
aes(year + 0.6, rankinyear, label = language),
hjust = 'left',
size = 3.5
) +
scale_x_continuous(
limits = c(1980, 2021),
breaks = c(1980, 1990, 2000, 2010, 2018),
) +
# # names at beginning
# geom_text(
# data = . %>% group_by(language) %>% filter(year == min(year)),
# aes(year - 0.7, rankinyear, label = language),
# hjust = 'right',
# size = 3.5
# ) +
# scale_x_continuous(
# limits = c(1978, 2018),
# breaks = c(1980, 1990, 2000, 2010, 2018),
# ) +
scale_colour_brewer(palette = 'Paired') +
scale_y_reverse(
breaks = 1:10
) +
theme_minimal() +
theme(legend.position = 'none') +
ylab('Rank in year') +
xlab('') +
ggtitle('Top 10 languages spoken in L.A. County')
t10.plot
ggsave('top-10.pdf', device = 'pdf', width = 10, height = 8, units = 'in')
t10.lang = top10.by.year %>%
select(year, rankinyear, language) %>%
pivot_wider(names_from = year, values_from = language)
t10.pct = top10.by.year %>%
select(year, pop.pct, rankinyear) %>%
pivot_wider(names_from = year, values_from = pop.pct)
t10.counts = top10.by.year %>%
select(year, popcount, rankinyear) %>%
pivot_wider(names_from = year, values_from = popcount)
t10.lang
t10.pct
t10.counts
t10.master = t10.lang %>%
left_join(
t10.counts,
by = 'rankinyear',
suffix = c('_lang', '_count')
) %>%
left_join(
t10.pct %>%
rename_at(vars(-rankinyear), ~str_c(., '_pct')),
by = 'rankinyear',
) %>%
select(rankinyear, starts_with('1980'), starts_with('1990'), starts_with('2000'), starts_with('2010'), starts_with('2018'))
t10.master
t10.master %>% write_csv('top-10-la-county-by-year.csv')
top10.by.year %>% write_csv('top-10-la-county-by-year-long.csv')
top10.by.year %>% distinct(language)
#
#
# lang.by.year %>%
# left_join(lang.xwalk %>% select(-n)) %>%
# group_by(year, language) %>%
# summarise(pop.pct = sum(pop.pct)) %>%
# ggplot(aes(year, pop.pct * 100)) +
# geom_line() +
# facet_wrap(language ~ ., scales = 'free') +
# ylab('Percent of L.A. County population') +
# theme_minimal()
top13 = top10.by.year %>%
distinct(language) %>%
left_join(lang.by.year) %>%
filter(year %in% c(1980, 2018)) %>%
select(language, year, pop.pct) %>%
pivot_wider(names_from = 'year', values_from = 'pop.pct')
top13 %>% write_csv('top13.csv')
|
b6fb4cd0cb11e563c871482506640b7c812db830
|
1ae54e124af1de72ac9b42c827f3a29cd14f2b99
|
/program/Scer_n157_nonMosaic_Spar_stat_SNPs.R
|
ffb7e687b1bfda43059481e521ce0379763e7fc8
|
[] |
no_license
|
eternal-bug/pars
|
43114444e19d53752df0068ec90320521d64e746
|
4959b439c367bc4b998cdc234402edbe40ab4afe
|
refs/heads/master
| 2020-03-21T15:23:10.794853
| 2018-06-23T08:07:47
| 2018-06-23T08:07:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,505
|
r
|
Scer_n157_nonMosaic_Spar_stat_SNPs.R
|
# Tabulate SNP counts and affected-gene counts for the
# Scer_n157_nonMosaic_Spar alignment across genomic categories:
# all / intergenic / PARS transcripts / PARS mRNA / CDS / UTR /
# synonymous / nonsynonymous. Writes per-category gene lists, SNP tables,
# and the two summary count tables (dd_SNPs, dd_gene) as CSV files.
name <- "Scer_n157_nonMosaic_Spar"
path <- paste0("~/data/mrna-structure/result/", name, collapse = NULL)
setwd(path)
library(ggplot2)
library(scales)
library(reshape)
library(pander)
library(gridExtra)
library(plyr)
library(dplyr)
library(proto)
library(gsubfn)
library(RSQLite)
library(sqldf)
# Count all SNPs
file_SNPs_all <- paste0("~/data/mrna-structure/xlsx/", name, ".mvar.gene_list.csv", collapse = NULL)
data_SNPs_all <- read.csv(file_SNPs_all, header = TRUE, sep = ",")
# dd_SNPs accumulates one row of SNP counts per category
dd_SNPs <- data.frame(name = c("all"),SNPs=c(nrow(data_SNPs_all)))
rownames(data_SNPs_all) <- NULL # suppress rownames
# Count intergenic SNPs
file_SNPs_intergenic <- paste0("~/data/mrna-structure/process/", name, ".snp.intergenic.pos.txt", collapse = NULL)
data_intergenic <- read.csv(file_SNPs_intergenic, header = FALSE, sep = "\t")
names(data_intergenic) =c("name")
data_intergenic <- merge(data_SNPs_all, data_intergenic, by="name")
dd_SNPs <- rbind(dd_SNPs, data.frame(name="intergenic", SNPs=nrow(data_intergenic) ))
rownames(data_intergenic) <- NULL # suppress rownames
# Count SNPs in transcripts that have PARS data
file_SNPs_PARS_transcripts <- paste0("~/data/mrna-structure/process/", name, ".gene_variation.var_pars.tsv", collapse = NULL)
data_SNPs_PARS_transcripts <- read.csv(file_SNPs_PARS_transcripts, header = TRUE, sep = "\t")
# Merge data_SNPs_all into data_SNPs_PARS_transcripts (inner join on SNP name)
data_SNPs_PARS_transcripts <- merge(data_SNPs_PARS_transcripts, data_SNPs_all, by="name")
dd_SNPs <- rbind(dd_SNPs, data.frame(name="PARS_transcripts", SNPs=nrow(data_SNPs_PARS_transcripts) ))
# Per-transcript fold information (stem/loop length, GC content, etc.)
# for every transcript with PARS data
file_fold_class <- paste0("~/data/mrna-structure/result/",name,"/",name, ".gene_variation.fold_class.csv", collapse = NULL)
data_fold_class <- read.csv(file_fold_class, header = TRUE, sep = ",")
# Get the list of protein-coding genes
file_protein_coding_list <- "~/data/mrna-structure/phylogeny/protein_coding_list.csv"
data_protein_coding_list <- read.csv(file_protein_coding_list, header = FALSE, sep = ",")
colnames(data_protein_coding_list) <- c("gene")
# Keep only genes whose CDS alignment proportion = 1
file_proporation_1_gene <- paste0("~/data/mrna-structure/phylogeny/",name,"_distance_processed_pro_1.csv", collapse = NULL)
data_proporation_1_gene <- read.csv(file_proporation_1_gene, header = TRUE, sep = ",")
data_proporation_1_gene <- subset(data_proporation_1_gene,select = gene)
data_protein_coding_list <- merge(data_protein_coding_list,data_proporation_1_gene,by="gene")
# Split the PARS transcripts into mRNA and non-mRNA
data_fold_class_mRNA <- merge(data_fold_class, data_protein_coding_list , by="gene")
data_fold_class_non_mRNA <- sqldf('SELECT * FROM [data_fold_class] EXCEPT SELECT * FROM [data_fold_class_mRNA]') # kept for later use
# Get the SNPs of the PARS mRNA and non-mRNA transcripts
data_SNPs_PARS_mRNA <- merge(data_SNPs_PARS_transcripts , data_fold_class_mRNA , by="gene")
data_SNPs_PARS_non_mRNA <- merge(data_SNPs_PARS_transcripts , data_fold_class_non_mRNA , by="gene") # kept for later use
# Count SNPs in PARS mRNAs
dd_SNPs <- rbind(dd_SNPs, data.frame(name="PARS_mRNA", SNPs=nrow(data_SNPs_PARS_mRNA) ))
# Count the distinct PARS mRNA genes
data_gene_process <- data_SNPs_PARS_mRNA["gene"]
data_gene_process <- unique(data_gene_process,fromLast=TRUE)
colnames(data_gene_process) <- c("PARS_mRNA_gene")
write.csv(data_gene_process,file=paste0(path,"/PARS_mRNA_gene.csv",collapse = NULL),row.names = FALSE)
# dd_gene accumulates one row of gene counts per category
dd_gene <- data.frame(name = c("PARS_mRNA"),gene=c(nrow(data_gene_process)))
# Drop "Complex" mutations from the PARS mRNA SNPs; recount SNPs and genes
data_SNPs_PARS_mRNA <- subset(data_SNPs_PARS_mRNA, data_SNPs_PARS_mRNA$mutant_to != "Complex")
dd_SNPs <- rbind(dd_SNPs, data.frame(name="PARS_mRNA_non_complex", SNPs=nrow(data_SNPs_PARS_mRNA) ))
data_gene_process <- data_SNPs_PARS_mRNA["gene"]
data_gene_process <- unique(data_gene_process,fromLast=TRUE)
colnames(data_gene_process) <- c("PARS_mRNA_non_complex_gene")
write.csv(data_gene_process,file=paste0(path,"/PARS_mRNA_non_complex_gene.csv",collapse = NULL),row.names = FALSE)
dd_gene <- rbind(dd_gene, data.frame(name="PARS_mRNA_non_complex", gene=nrow(data_gene_process) ))
# Extract CDS SNPs within the mRNAs; count SNPs and genes
file_SNPs_cds <- paste0("~/data/mrna-structure/process/", name, ".snp.cds.pos.txt", collapse = NULL)
data_cds <- read.csv(file_SNPs_cds, header = FALSE, sep = "\t")
names(data_cds) =c("name")
data_cds <- merge(data_SNPs_PARS_mRNA, data_cds, by="name")
dd_SNPs <- rbind(dd_SNPs, data.frame(name="PARS_cds", SNPs=nrow(data_cds) ))
rownames(data_cds) <- NULL # suppress rownames
data_gene_process <- data_cds["gene"]
data_gene_process <- unique(data_gene_process,fromLast=TRUE)
colnames(data_gene_process) <- c("PARS_cds_gene")
write.csv(data_gene_process,file=paste0(path,"/PARS_cds_gene.csv",collapse = NULL),row.names = FALSE)
dd_gene <- rbind(dd_gene, data.frame(name="PARS_cds", gene=nrow(data_gene_process) ))
write.csv(data_cds,file=paste0(path,"/data_SNPs_PARS_cds.csv",collapse = NULL),row.names = FALSE)
# Extract UTR SNPs within the mRNAs; count SNPs and genes
file_SNPs_utr <- paste0("~/data/mrna-structure/process/", name, ".snp.utr.pos.txt", collapse = NULL)
data_utr <- read.csv(file_SNPs_utr, header = FALSE, sep = "\t")
names(data_utr) =c("name")
data_utr <- merge(data_SNPs_PARS_mRNA, data_utr, by="name")
dd_SNPs <- rbind(dd_SNPs, data.frame(name="PARS_utr", SNPs=nrow(data_utr) ))
rownames(data_utr) <- NULL # suppress rownames
data_gene_process <- data_utr["gene"]
data_gene_process <- unique(data_gene_process,fromLast=TRUE)
colnames(data_gene_process) <- c("PARS_utr_gene")
write.csv(data_gene_process,file=paste0(path,"/PARS_utr_gene.csv",collapse = NULL),row.names = FALSE)
dd_gene <- rbind(dd_gene, data.frame(name="PARS_utr", gene=nrow(data_gene_process) ))
write.csv(data_utr,file=paste0(path,"/data_SNPs_PARS_utr.csv",collapse = NULL),row.names = FALSE)
# Extract purely synonymous SNPs (syn > 0, nsy == 0); count SNPs and genes
data_SNPs_PARS_syn <- sqldf('SELECT * FROM [data_SNPs_PARS_mRNA] where syn > 0 AND nsy == 0' )
dd_SNPs <- rbind(dd_SNPs, data.frame(name="PARS_syn", SNPs=nrow(data_SNPs_PARS_syn) ))
data_gene_process <- data_SNPs_PARS_syn["gene"]
data_gene_process <- unique(data_gene_process,fromLast=TRUE)
colnames(data_gene_process) <- c("PARS_syn_gene")
write.csv(data_gene_process,file=paste0(path,"/PARS_syn_gene.csv",collapse = NULL),row.names = FALSE)
dd_gene <- rbind(dd_gene, data.frame(name="PARS_syn", gene=nrow(data_gene_process) ))
write.csv(data_SNPs_PARS_syn,file=paste0(path,"/data_SNPs_PARS_syn.csv",collapse = NULL),row.names = FALSE)
# Extract purely nonsynonymous SNPs (nsy > 0, syn == 0); count SNPs and genes
data_SNPs_PARS_nsy <- sqldf('SELECT * FROM [data_SNPs_PARS_mRNA] where nsy > 0 AND syn == 0' )
dd_SNPs <- rbind(dd_SNPs, data.frame(name="PARS_nsy", SNPs=nrow(data_SNPs_PARS_nsy) ))
data_gene_process <- data_SNPs_PARS_nsy["gene"]
data_gene_process <- unique(data_gene_process,fromLast=TRUE)
colnames(data_gene_process) <- c("PARS_nsy_gene")
write.csv(data_gene_process,file=paste0(path,"/PARS_nsy_gene.csv",collapse = NULL),row.names = FALSE)
dd_gene <- rbind(dd_gene, data.frame(name="PARS_nsy", gene=nrow(data_gene_process) ))
write.csv(data_SNPs_PARS_nsy,file=paste0(path,"/data_SNPs_PARS_nsy.csv",collapse = NULL),row.names = FALSE)
# Write the summary count tables
write.csv(dd_SNPs,file=paste0(path,"/dd_SNPs.csv",collapse = NULL),row.names = FALSE)
write.csv(dd_gene,file=paste0(path,"/dd_gene.csv",collapse = NULL),row.names = FALSE)
|
63473b1b7dd85e6cc8ca41267a6ca4c1250fc389
|
c66763a1b19ccacffe8326cfc375b3bb8f569f12
|
/man/rt_sleep_timer.Rd
|
b56240d5e3e0b8e22ff804d802363a80e1eca93a
|
[
"MIT"
] |
permissive
|
junduck/tswbench
|
8273bc5e618ebf54226693708e01c3c4b6bfd407
|
88c28b4348b0d7ea835d1eb280e24e7cb1c87b49
|
refs/heads/master
| 2023-05-14T11:36:25.328939
| 2021-06-09T16:55:43
| 2021-06-09T16:55:43
| 225,979,888
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 342
|
rd
|
rt_sleep_timer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/realtime_loop.R
\name{rt_sleep_timer}
\alias{rt_sleep_timer}
\title{Default sleep timer for A-share market}
\usage{
rt_sleep_timer(t_now)
}
\arguments{
\item{t_now}{ITime}
}
\value{
time in seconds to sleep
}
\description{
Default sleep timer for A-share market
}
|
ffc6287b7ca5385b5df9a28ca5f0fe31283ed328
|
57f54656b24fd75ee6b7d403bb30f74657bedc41
|
/archive/AGU_analysis/plots.r
|
706a183c648bf601fefc2f99a445ea0d74542e93
|
[] |
no_license
|
kroppheather/synthesis_database
|
95a78338270fea260ee364f81813e63126858de3
|
b34246d99970152456cbc0fda62b8b4df2e0f08d
|
refs/heads/master
| 2021-01-09T20:36:11.505384
| 2020-09-15T20:33:11
| 2020-09-15T20:33:11
| 60,531,994
| 1
| 0
| null | 2017-05-08T14:57:16
| 2016-06-06T13:54:44
|
R
|
UTF-8
|
R
| false
| false
| 10,756
|
r
|
plots.r
|
###################################################################
#########plot the results from the Nvege model for AGU ###########
###################################################################
# Sequential base-graphics script: reads posterior summaries from an
# "Nvege" model run, checks model fit against the observed n factors,
# then draws bar panels for each regression slope (winter vs summer),
# year random effects, and overall n-factor box plots by biome.
#read in datafiles
#quantiles
datQ<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\model_nvege_quant.csv")
#means
datS<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\model_nvege_stats.csv")
#data
datSN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\organized_vegesN_for_model.csv")
datWN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\organized_vegewN_for_model.csv")
# regex matching the bracketed index suffix of BUGS/JAGS-style parameter
# names, e.g. "beta[12]" -> "beta" after gsub below
dexps<-"\\[*[[:digit:]]*\\]"
datS$parms<-gsub(dexps,"",datS$X)
# combined table: parameter name, posterior mean, 2.5% and 97.5% quantiles
datA<-data.frame(parms=datS$parms, means=datS$Mean,ci2.5=datQ[,2], ci97.5=datQ[,6])
#get a list of parameter names
parmS<-unique(datA$parms)
#look at model fit
plot(datWN$n, datA$means[datA$parms=="rep.nW"], pch=19 )
abline(0,1)
fitwn<-lm(datA$means[datA$parms=="rep.nW"]~datWN$n)
summary(fitwn)
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 0.125126 0.009876 12.67 <2e-16 ***
#datWN$n 0.721801 0.020013 36.07 <2e-16 ***
#Residual standard error: 0.08717 on 462 degrees of freedom
#Multiple R-squared: 0.7379, Adjusted R-squared: 0.7374
plot(datSN$n, datA$means[datA$parms=="rep.nS"], pch=19 )
abline(0,1)
fitsn<-lm(datA$means[datA$parms=="rep.nS"]~datSN$n)
summary(fitsn)
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 0.51530 0.02382 21.64 <2e-16 ***
#datSN$n 0.45373 0.02466 18.40 <2e-16 ***
#Residual standard error: 0.1009 on 388 degrees of freedom
#Multiple R-squared: 0.4659, Adjusted R-squared: 0.4645
#now look at regression parameters
#make a panel for the regressions
#where the winter and summer
#parameters are shown for each region in
#a panel
#each season parameter gets a plot
a<-layout(matrix(c(1,2), ncol=2), widths=c(lcm(15),lcm(15)),heights=c(lcm(15),lcm(15)))
layout.show(a)
#now set up sequence for bars
# bar x-positions (3 regions) and shared axis limits for the EVI panels
xW<-c(1,3.5,6)
xS<-c(1,3.5,6)
xu<-7.5
xl<--.5
yuE<-1
ylE<--12
#regions are as follows
#1=interior AK
#2= western CA
#3= Greenland + svalbard
layout.show(a)
#plot slope EVI
#winter
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylE,yuE), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
# one bar per region, with 95% credible interval whiskers drawn by arrows()
for(i in 1:3){
polygon(c(xW[i]-1,xW[i]-1,xW[i]+1,xW[i]+1),
c(0,datA$means[datA$parms=="nbeta2W"][i],datA$means[datA$parms=="nbeta2W"][i],0),
col="deepskyblue4")
}
arrows(xW,datA$ci2.5[datA$parms=="nbeta2W"],xW,datA$ci97.5[datA$parms=="nbeta2W"],code=0)
axis(2,seq(-12,10, by=2), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Winter EVI slope parameter", side=2, line=5, cex=2)
#summer
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylE,yuE), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
for(i in 1:3){
polygon(c(xS[i]-1,xS[i]-1,xS[i]+1,xS[i]+1),
c(0,datA$means[datA$parms=="nbeta2S"][i],datA$means[datA$parms=="nbeta2S"][i],0),
col="mediumseagreen")
}
arrows(xS,datA$ci2.5[datA$parms=="nbeta2S"],xS,datA$ci97.5[datA$parms=="nbeta2S"],code=0)
axis(4,seq(-12,10, by=2), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Summer EVI slope parameter", side=4, line=5, cex=2)
#plot slope OLT
# organic layer thickness slope panels (nbeta3W / nbeta3S)
ylO<--.1
yuO<-.1
layout.show(a)
#winter
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylO,yuO), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
for(i in 1:3){
polygon(c(xW[i]-1,xW[i]-1,xW[i]+1,xW[i]+1),
c(0,datA$means[datA$parms=="nbeta3W"][i],datA$means[datA$parms=="nbeta3W"][i],0),
col="deepskyblue4")
}
arrows(xW,datA$ci2.5[datA$parms=="nbeta3W"],xW,datA$ci97.5[datA$parms=="nbeta3W"],code=0)
axis(2,seq(-.1,.1, by=.01), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Winter organic layer slope parameter", side=2, line=5, cex=2)
#summer
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylO,yuO), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
for(i in 1:3){
polygon(c(xS[i]-1,xS[i]-1,xS[i]+1,xS[i]+1),
c(0,datA$means[datA$parms=="nbeta3S"][i],datA$means[datA$parms=="nbeta3S"][i],0),
col="mediumseagreen")
}
arrows(xS,datA$ci2.5[datA$parms=="nbeta3S"],xS,datA$ci97.5[datA$parms=="nbeta3S"],code=0)
axis(4,seq(-.1,.1, by=.01), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Summer organic layer slope parameter", side=4, line=5, cex=2)
#plot slope Shrub
# % shrub cover slope panels (nbeta4W / nbeta4S)
ylS<--.1
yuS<-.1
layout.show(a)
#winter
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylS,yuS), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
for(i in 1:3){
polygon(c(xW[i]-1,xW[i]-1,xW[i]+1,xW[i]+1),
c(0,datA$means[datA$parms=="nbeta4W"][i],datA$means[datA$parms=="nbeta4W"][i],0),
col="deepskyblue4")
}
arrows(xW,datA$ci2.5[datA$parms=="nbeta4W"],xW,datA$ci97.5[datA$parms=="nbeta4W"],code=0)
axis(2,seq(-.1,.1, by=.01), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Winter % shrub cover slope parameter", side=2, line=5, cex=2)
#summer
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylS,yuS), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
for(i in 1:3){
polygon(c(xS[i]-1,xS[i]-1,xS[i]+1,xS[i]+1),
c(0,datA$means[datA$parms=="nbeta4S"][i],datA$means[datA$parms=="nbeta4S"][i],0),
col="mediumseagreen")
}
arrows(xS,datA$ci2.5[datA$parms=="nbeta4S"],xS,datA$ci97.5[datA$parms=="nbeta4S"],code=0)
axis(4,seq(-.1,.1, by=.01), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Summer % shrub cover slope parameter", side=4, line=5, cex=2)
#plot slope moss
# % moss cover slope panels (nbeta5W / nbeta5S)
ylM<--.05
yuM<-.05
layout.show(a)
#winter
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylM,yuM), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
for(i in 1:3){
polygon(c(xW[i]-1,xW[i]-1,xW[i]+1,xW[i]+1),
c(0,datA$means[datA$parms=="nbeta5W"][i],datA$means[datA$parms=="nbeta5W"][i],0),
col="deepskyblue4")
}
arrows(xW,datA$ci2.5[datA$parms=="nbeta5W"],xW,datA$ci97.5[datA$parms=="nbeta5W"],code=0)
axis(2,seq(-.1,.1, by=.01), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Winter % moss cover slope parameter", side=2, line=5, cex=2)
#summer
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(xl,xu),ylim=c(ylM,yuM), xlab=" ", ylab=" ", xaxs="i", yaxs="i",
axes=FALSE)
abline(h=0)
for(i in 1:3){
polygon(c(xS[i]-1,xS[i]-1,xS[i]+1,xS[i]+1),
c(0,datA$means[datA$parms=="nbeta5S"][i],datA$means[datA$parms=="nbeta5S"][i],0),
col="mediumseagreen")
}
arrows(xS,datA$ci2.5[datA$parms=="nbeta5S"],xS,datA$ci97.5[datA$parms=="nbeta5S"],code=0)
axis(4,seq(-.1,.1, by=.01), cex.axis=1.5,las=2)
axis(1, xW, c("Alaska","Western Canada", "Greenland"), cex.axis=1.5)
box(which="plot")
mtext("Summer % moss cover slope parameter", side=4, line=5, cex=2)
###############################################################################
###############################################################################
#make a plot for year random effects in the model
b<-layout(matrix(c(1,2),ncol=1), widths=c(lcm(33),lcm(33)), height=c(lcm(8), lcm(8)))
layout.show(b)
# map year index 1..26 to calendar years 1991..2016
Syear<-data.frame(yearID=seq(1,26), year=seq(1991,2016))
Wyear<-Syear
#plot the random effect for year
# summer random effects (eps.star) on top, winter (epsW.star) below
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(0,27),ylim=c(-.15,.15), axes=FALSE, xlab=" ", ylab=" ", xaxs="i",
yaxs="i")
for(i in 1:dim(Syear)[1]){
polygon(c(Syear$yearID[i]-.5,Syear$yearID[i]-.5,Syear$yearID[i]+.5,Syear$yearID[i]+.5),
c(0,datA$means[datA$parms=="eps.star"][i],datA$means[datA$parms=="eps.star"][i],0),
col="mediumseagreen")
}
arrows(Syear$yearID,datA$ci2.5[datA$parms=="eps.star"],Syear$yearID,datA$ci97.5[datA$parms=="eps.star"],code=0)
box(which="plot")
axis(2, seq(-.12,.12,by=.04),las=2,cex.axis=1.25)
mtext("Random effect", side=2,cex=2,line=5)
text(3,-.13, "Summer", cex=3)
#now for winter
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", xlim=c(0,27),ylim=c(-.25,.25), axes=FALSE, xlab=" ", ylab=" ", xaxs="i",
yaxs="i")
for(i in 1:dim(Wyear)[1]){
polygon(c(Wyear$yearID[i]-.5,Wyear$yearID[i]-.5,Wyear$yearID[i]+.5,Wyear$yearID[i]+.5),
c(0,datA$means[datA$parms=="epsW.star"][i],datA$means[datA$parms=="epsW.star"][i],0),
col="deepskyblue4")
}
arrows(Wyear$yearID,datA$ci2.5[datA$parms=="epsW.star"],Wyear$yearID,datA$ci97.5[datA$parms=="epsW.star"],code=0)
axis(1, Wyear$yearID, Wyear$year, cex.axis=1.25)
axis(2, seq(-.2,.2,by=.05),las=2,cex.axis=1.25)
box(which="plot")
mtext("Year", side=1,cex=2,line=3)
mtext("Random effect", side=2,cex=2,line=5)
text(3,-.21, "Winter", cex=3)
##################################################################################
##########Now plot overall N factors #############################################
##################################################################################
#read in summer and winter data
# NOTE(review): datSN/datWN are re-read here from different files, shadowing
# the model-input tables loaded at the top of the script
datSN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\SummerNvege.csv")
datWN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\winterNvege.csv")
#exclude site with impossible air temperature
datSN<-datSN[datSN$n<3,]
#exclude all depths greater than 10
datSN<-datSN[datSN$depth<10,]
datWN<-datWN[datWN$depth<10,]
#get the number of observations
nSn<-aggregate(datSN$n, by=list(datSN$Snew_vA), FUN="length")
nWn<-aggregate(datWN$n, by=list(datWN$new_vA), FUN="length")
#make box plots
par(mfrow=c(1,2))
par(mai=c(1,1,.5,.5))
plot(datWN$new_vA,datWN$n, col="deepskyblue4", axes=FALSE, ylim=c(0,1.6), pch=19, yaxs="i")
axis(1, c(-1,1,2,5),c(NA,"boreal","tundra", NA),cex.axis=1.5)
axis(2, seq(-.3,1.8, by=.3), las=2,cex.axis=1.5 )
mtext("Winter n factor", side=2, line=3, cex=2.5)
mtext("Biome", side=1, line=3, cex=2.5)
text((seq(1,2)+.3),rep(1.4,2),paste("n =",nWn$x), cex=2, font=3)
par(mai=c(1,1,.5,.5))
plot(datSN$Snew_vA,datSN$n, col="mediumseagreen",axes=FALSE, ylim=c(0,1.6), pch=19, yaxs="i")
axis(1, c(-1,1,2,5),c(NA,"boreal","tundra", NA),cex.axis=1.5)
axis(2, seq(-.3,1.8, by=.3), las=2,cex.axis=1.5 )
mtext("Summer n factor", side=2, line=3, cex=2.5)
mtext("Biome", side=1, line=3, cex=2.5)
text((seq(1,2)+.3),rep(1.4,2),paste("n =",nSn$x), cex=2, font=3)
|
1c5c858c79278b3ed9d7773fc3be33b5cfd3eae6
|
36c215bb36f3a2e7c610a64ff3a327df0adf777b
|
/slice.R
|
98b83f4e4962a89b33c7600b3cd8df006a9e3ac3
|
[] |
no_license
|
mcewenkhundi/miscellaneous
|
c575d111e0d1eadbe4add61fac768bc9452f6f0a
|
32651dc1729d8ce3f1765c34f4c9322c8038d47d
|
refs/heads/master
| 2022-08-29T22:25:14.105053
| 2022-08-19T08:56:40
| 2022-08-19T08:56:40
| 151,368,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,760
|
r
|
slice.R
|
# Worked examples of dplyr's slice() family: positional row selection,
# head/tail/min/max/sample helpers, grouped behavior, and the filter()
# equivalents. Intended to be run interactively line by line.
#Subset rows using their positions
#https://dplyr.tidyverse.org/reference/slice.html
library(tidyverse)
# materialise the built-in dataset as an object in the global environment
mtcars <- mtcars
head(mtcars)
mtcars %>% slice(1L)
tail(mtcars)
mtcars %>% slice(n())
mtcars %>%
slice(5:n())
#Rows can be dropped with negative indices
slice(mtcars, -(1:4))
# First and last rows based on existing order
mtcars %>% slice_head(n = 5)
mtcars %>% slice_tail(n = 5)
# Rows with minimum and maximum values of a variable
mtcars %>% slice_min(mpg, n = 5)
mtcars %>% slice_max(mpg, n = 5)
# slice_min() and slice_max() may return more rows than requested
# in the presence of ties. Use with_ties = FALSE to suppress
mtcars %>% slice_min(cyl, n = 1)
mtcars %>% slice_min(cyl, n = 1, with_ties = FALSE)
# slice_sample() allows you to random select with or without replacement
mtcars %>% slice_sample(n = 5)
mtcars %>% slice_sample(n = 5, replace = TRUE)
# Group wise operation ----------------------------------------
df <- tibble(
group = rep(c("a", "b", "c"), c(1, 2, 4)),
x = runif(7)
)
# All slice helpers operate per group, silently truncating to the group
# size, so the following code works without error
df %>% group_by(group) %>% slice_head(n = 2)
# When specifying the proportion of rows to include non-integer sizes
# are rounded down, so group a gets 0 rows
df %>% group_by(group) %>% slice_head(prop = 0.5)
# Filter equivalents --------------------------------------------
# slice() expressions can often be written to use `filter()` and
# `row_number()`, which can also be translated to SQL. For many databases,
# you'll need to supply an explicit variable to use to compute the row number.
filter(mtcars, row_number() == 1L)
filter(mtcars, row_number() == n())
filter(mtcars, between(row_number(), 5, n()))
|
f534e5315083fd40ef41356ee31022dbc2089365
|
9a43ed375f522bd9a696c4e83c00d9dce5ac0965
|
/ai/TimeseriesR.R
|
6269ea52b150c895bced00461200b6f924ae981e
|
[
"MIT"
] |
permissive
|
flyingabove/RobinhoodTrader
|
3dbbf9966595f40a1c39944e0233cb5c42af76ad
|
a85cc6c69dd644331e05253421cf5b3cb7f86b81
|
refs/heads/master
| 2020-03-10T20:02:12.404262
| 2018-04-14T23:17:42
| 2018-04-14T23:17:42
| 129,561,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,639
|
r
|
TimeseriesR.R
|
###################
#Christian Gao Hw2#
###################
# Time-series homework script: seasonal decomposition and ACF analysis of
# chocolate production and global temperature series, plus simulated
# ARMA processes. Each "Problem" section is self-contained and intended
# to be run top to bottom.
###########
#Problem 1#
###########
# loads pre-saved workspace objects; assumes hw1.Rdata is in the
# working directory (its contents are not referenced below -- TODO confirm)
load("hw1.Rdata")
#a) Annual Plot, Seasonal Boxplot
chocolate<- read.delim("cbe.dat")[1]
choc.ts <- ts(chocolate, start = 1958, freq = 12)
par(mfrow=c(3,1))
plot(choc.ts, main = "Chocolate")
plot(aggregate(choc.ts), main = "Aggregated Annual Series")
boxplot(choc.ts ~ cycle(choc.ts),main = "Seasonal Boxplot")
#b) Seasonal Decomp Multiplicative
# NOTE(review): decompose() defaults to additive here, although the
# heading says multiplicative -- the multiplicative fit is done in (c)
plot(decompose(choc.ts))
#c) ACF of decomposed elements
chocolate.decom = decompose(choc.ts, "multiplicative")
choc.trend.ts = chocolate.decom$trend
choc.seasonal.ts = chocolate.decom$seasonal
choc.random.ts = chocolate.decom$random
par(mfrow=c(2,2))
acf(choc.ts, na.action = na.pass, main = "original acf")
acf(ts(choc.trend.ts), na.action = na.pass, main = "trend acf")
acf(choc.seasonal.ts, na.action = na.pass, main = "seasonal acf")
acf(choc.random.ts, na.action = na.pass, main = "random acf")
#d)
###########
#Problem 2#
###########
#a.) plot the two vineyard series side by side
par(mfrow=c(2,1))
wine.ts.1 = ts(c(39, 35, 16, 18, 7, 22, 13, 18, 20, 9, -12, -11, -19, -9, -2, 16))
plot(wine.ts.1, ylab = "volume", main = "Serendipity Vineyard")
wine.ts.2 = ts(c(47, -26, 42, -10, 27, -8, 16, 6, -1, 25, 11, 1, 25, 7, -5, 3))
plot(wine.ts.2, ylab = "volume", main = "Cagey Vineyard")
#b.) lag plot: x_t against x_{t+1} (16 observations, so drop one end)
par(mfrow=c(1,1))
plot(wine.ts.1[-16], wine.ts.1[-1], main = "Wine 1 Lag 1 scatter", xlab = "x_t", ylab = "x_t+1")
plot(wine.ts.2[-16], wine.ts.2[-1], main = "Wine 2 Lag 1 scatter", xlab = "x_t", ylab = "x_t+1")
#c.)
acf(wine.ts.1, na.action = na.pass, main = "wine 1 acf")
acf(wine.ts.2, na.action = na.pass, main = "wine 2 acf")
#d.)
#I would trust Cagey Chardonnay. Cagey Chardonnay is decreasing in variation overtime and more predictable.
###########
#Problem 3#
###########
#a.)
par(mfrow=c(1,1))
global<- read.delim("global.dat")
global.ts <- ts(global, start = 1856,fr = 12)
global.decomposed <- decompose(global.ts, "multiplicative")
plot(global.decomposed)
# compare variability with and without the trend removed
sd(global.ts - global.decomposed$trend, na.rm = TRUE)
sd(global.decomposed$random, na.rm = TRUE)
plot(global.decomposed$trend)
boxplot(global.ts ~ cycle(global.ts),main = "Seasonal Boxplot")
#Seasonal Adjustment effective
#b.)
acf(global.decomposed$random, na.action = na.pass, main = "Global")
#c.) first-difference the series and decompose again
global.diff = ts(global.ts[-1] - global.ts[-length(global.ts)],start = 1856,fr = 12)
global.decomposed.diff<-decompose(global.diff)
plot(global.decomposed.diff)
# Yes -- differencing appears to remove the trend
###########
#Problem 4#
###########
#a.)
ksi.ts = ts(read.csv("ksi.txt"), start = c(1981, 1), end = c(1984, 12), fr = 12)
ksi.decomposed <- decompose(ksi.ts, "multiplicative")
plot(ksi.decomposed$trend, main = "KSI", ylab = "accidents")
#Yes seems like it helps
#b.)
# NOTE(review): loaded but never used below -- part (b) looks unfinished
ksi.ts.ds = read.csv("ksi2.txt")
###########
#Problem 6#
###########
# Simulated ARMA processes: for each spec, plot the series with its
# sample ACF and PACF. The same seed is reset before each simulation
# so the underlying innovations are comparable across models.
#a.) AR(1), phi = 0.1
set.seed(123456)
y = arima.sim(n=100, list(ar=0.1),innov=rnorm(100))
op=par(no.readonly=TRUE)
layout(matrix(c(1,1,2,3),2,2,byrow=TRUE))
plot.ts(y,ylab=" ", main="model formula")
acf(y,main="Autocorrelations",ylab="",ylim=c(-1,1))
pacf(y, main="Partial Autocorrelations",ylab="",ylim=c(-1,1))
#b.) AR(1), phi = -0.4
set.seed(123456)
y = arima.sim(n=100, list(ar=-0.4),innov=rnorm(100))
op=par(no.readonly=TRUE)
layout(matrix(c(1,1,2,3),2,2,byrow=TRUE))
plot.ts(y,ylab=" ", main="model formula")
acf(y,main="Autocorrelations",ylab="",ylim=c(-1,1))
pacf(y, main="Partial Autocorrelations",ylab="",ylim=c(-1,1))
#c.) AR(1), phi = 0.4
set.seed(123456)
y = arima.sim(n=100, list(ar=0.4),innov=rnorm(100))
op=par(no.readonly=TRUE)
layout(matrix(c(1,1,2,3),2,2,byrow=TRUE))
plot.ts(y,ylab=" ", main="model formula")
acf(y,main="Autocorrelations",ylab="",ylim=c(-1,1))
pacf(y, main="Partial Autocorrelations",ylab="",ylim=c(-1,1))
#d.) AR(1), phi = 0.9 (near unit root, slow ACF decay)
set.seed(123456)
y = arima.sim(n=100, list(ar=0.9),innov=rnorm(100))
op=par(no.readonly=TRUE)
layout(matrix(c(1,1,2,3),2,2,byrow=TRUE))
plot.ts(y,ylab=" ", main="model formula")
acf(y,main="Autocorrelations",ylab="",ylim=c(-1,1))
pacf(y, main="Partial Autocorrelations",ylab="",ylim=c(-1,1))
#e.) ARMA(1,1), phi = 0.4, theta = -0.3
set.seed(123456)
y = arima.sim(n=100,list(ar=0.4, ma=-0.3),innov=rnorm(100))
op=par(no.readonly=TRUE)
layout(matrix(c(1,1,2,3),2,2,byrow=TRUE))
plot.ts(y,ylab=" ", main="model formula")
acf(y,main="Autocorrelations",ylab="",ylim=c(-1,1))
pacf(y, main="Partial Autocorrelations",ylab="",ylim=c(-1,1))
#f.) ARMA(2,2)
set.seed(123456)
y = arima.sim(n=100, list(ar=c(0.3,-0.4),ma=c(-0.3,0.7)),innov=rnorm(100))
op=par(no.readonly=TRUE)
layout(matrix(c(1,1,2,3),2,2,byrow=TRUE))
plot.ts(y,ylab=" ", main="model formula")
acf(y,main="Autocorrelations",ylab="",ylim=c(-1,1))
pacf(y, main="Partial Autocorrelations",ylab="",ylim=c(-1,1))
|
22dbe3716548e23f3c757c4eecddeb053ddb5191
|
e41bbd233fd878a14a6e2e8e6556a6f25d9d92ab
|
/man/diffExamples.Rd
|
ed6d00c7a977763d7c52f110e53f3e26f528ca0f
|
[] |
no_license
|
cran/RatingScaleReduction
|
a8a806f3d4d1bf1379d530c86efb55d3089f2b66
|
e34c7642fb50180918344e05e508762d5e4df353
|
refs/heads/master
| 2021-06-14T09:03:11.637982
| 2021-01-21T08:30:02
| 2021-01-21T08:30:02
| 81,642,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,251
|
rd
|
diffExamples.Rd
|
\name{diffExamples}
\alias{diffExamples}
\title{The number of different (unique) examples in a dataset}
\description{Datasets often contain replicated examples. In the extreme,
a single example may be replicated n times, where n is the total
number of examples, so that the dataset contains no other examples.
Such a situation would distort computations and should be detected early.
Ideally, no example should be replicated, but if the replication rate is small,
we can proceed to computing AUC.}
\usage{
diffExamples(attribute)
}
\arguments{
\item{attribute}{ a matrix or data.frame containing attributes}}
\value{
\item{total.examples}{a number of examples in a data}
\item{diff.examples}{a number of different examples in a data}
\item{dup.exapmles}{a number of duplicate examples in a data}
}
\author{Waldemar W. Koczkodaj, Feng Li,Alicja Wolny-Dominiak
}
\examples{
#creating the matrix of attributes and the decision vector
#must be as.numeric()
data(aSAH)
attach(aSAH)
is.numeric(aSAH)
attribute <-data.frame(as.numeric(gender),
as.numeric(age), as.numeric(wfns), as.numeric(s100b), as.numeric(ndka))
colnames(attribute) <-c("a1", "a2", "a3", "a4", "a5")
#show the number of different examples
diffExamples(attribute)
}
|
0d13714d4b627de6953fa12228b9e663431a0989
|
efe5e7be31569208c415bbaceeb653de1d1e2464
|
/weatherMal/server.R
|
3af5c5a5a013fddd98a512dc3fd3b8995d7f8180
|
[] |
no_license
|
ck2136/malaria2k17
|
2543d14472c53fec56c71d3704f3574526184bb9
|
36b5e31be72b7b411b46ebb77e9ea2c6e7357db5
|
refs/heads/master
| 2021-08-24T03:35:31.379969
| 2017-12-07T22:28:58
| 2017-12-07T22:28:58
| 111,337,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,664
|
r
|
server.R
|
library(shiny)
library(lattice) # for graphics
library(RColorBrewer)
library(sp)
library(maptools)
library(latticeExtra)
library(rgdal)
library(tmap)
library(ggplot2)
library(dplyr)
library(ggpubr)
library(plotly)

# Load the pre-aggregated malaria dataset once at app start-up.
# NOTE(review): setwd() hard-codes the deployment path; paths relative to
# the app directory would make the app portable.
setwd("/srv/shiny-server/malaria_2k17")
final <- readRDS("./data/final.rds")

# Server logic: plots smoothed malaria incidence against the selected
# weather variable at lags of 0, 2, 4 and 8 weeks.
shinyServer(function(input, output) {

  # Human-readable axis label for the selected weather variable.
  # switch() yields NULL for an unknown value, matching the original
  # fall-through behaviour (y was initialised to c(), i.e. NULL).
  lab <- reactive({
    switch(input$vars,
           tavg    = "Daily Average Temp (C)",
           raintot = "Daily Rainfall (mm)",
           rh      = "Relative Humidity (%)",
           sd      = "Saturation Vapor\n Pressure Deficit (mmHg)",
           psfc    = "Surface Barometric\n Pressure (hPa)")
  })

  # Incidence-per-1000 vs. the chosen variable, one smoother per lag.
  # The lagged columns follow the naming scheme "<var>_lag_<weeks>".
  output$distPlot <- renderPlotly({
    theGraph <- ggplot(final) +
      geom_smooth(aes(x = get(input$vars), y = inc1k, colour = "No Lag")) +
      geom_smooth(aes(x = get(paste0(input$vars, "_lag_2")), y = inc1k, colour = "Lag 2wk")) +
      geom_smooth(aes(x = get(paste0(input$vars, "_lag_4")), y = inc1k, colour = "Lag 4wk")) +
      geom_smooth(aes(x = get(paste0(input$vars, "_lag_8")), y = inc1k, colour = "Lag 8wk")) +
      # scale_colour_manual(name=lab(), values=c("blue", "red", "green", "yellow")) +
      xlab(lab()) +
      ylab("Incidence per 1000")
    # Horizontal legend centred below the plot area.
    print(ggplotly(theGraph) %>%
            layout(legend = list(orientation = 'h',
                                 xanchor = "center",
                                 yanchor = "bottom",
                                 x = 0.5)))
  })
})
|
5df83b7cd1c4b4827266e6a77f34e1de9d76c135
|
c8b538781e646c517518691c425ee72e7d1aef0e
|
/System Simulation Projects/Data-Analysis-R/auto-correlation.R
|
a0d46ef79a6b6b3288e2b84f2c2bd882bea29219
|
[] |
no_license
|
alprnbg/cs-course-projects
|
532dbff5fe1cc4c9d07a577305bd7f8c61f8ed00
|
3d5bdd01d089eed649e914462e2418c8d15d35cb
|
refs/heads/master
| 2023-01-18T17:06:09.804720
| 2020-06-23T09:55:21
| 2020-06-23T09:55:21
| 244,027,566
| 0
| 0
| null | 2022-12-22T03:38:38
| 2020-02-29T19:18:50
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 249
|
r
|
auto-correlation.R
|
# Autocorrelation of the two daily series stored in an Excel workbook.
library(readxl)  # provides read_xls(); was never attached, so read_xls() failed

# TODO: set to the path of the input .xls file before running
file_path <- ""
data <- read_xls(file_path)

# Autocorrelation up to lag 2 for each day: once plotted, once printed
acf(data$`Day 1`, lag.max = 2, plot = TRUE)
acf(data$`Day 1`, lag.max = 2, plot = FALSE)
acf(data$`Day 2`, lag.max = 2, plot = TRUE)
acf(data$`Day 2`, lag.max = 2, plot = FALSE)
|
1333eec7c47924d90b63c7745aa0ea3b21e7cd0c
|
839a85ac4cd9b38017ace1ffaa5c0a294d759c08
|
/Aug 25, 2021 R Document.R
|
978ce8c2bd5c2e67e46f297c58a66d9d6c227c24
|
[] |
no_license
|
JhordynJones/Aug-23-2021-R-Document
|
35d21eb896ff648b4efb11ebb4cb72d9affe3b2b
|
84386d1a2003d24602622a65cff457e0e35cb000
|
refs/heads/main
| 2023-08-17T10:36:28.400655
| 2021-09-27T16:43:33
| 2021-09-27T16:43:33
| 402,130,739
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
Aug 25, 2021 R Document.R
|
# Author: Jhordyn Jones, Date: Aug 23, 2021
# Purpose: Calculating Pearson correlation coefficient value

# Install "ggpubr" from GitHub (via devtools), installing devtools first if
# it is absent. The original crammed both statements onto one line, which is
# a syntax error in R.
if (!require(devtools)) {
  install.packages("devtools")
}
devtools::install_github("kassambara/ggpubr")

# Loading this library
library("ggpubr")
|
9fa1ebf3847f7371bbb4ad284821789809ac96ac
|
a7635ddc182075b0b9e195b26ac7ffa4264a79cc
|
/tests/testthat/test_get_results_error.R
|
46db5468c69496ea6681c659d00952b876d7f40b
|
[] |
no_license
|
BiostatQian/ASAFE
|
e55924885db0c8be2895fa0e6a71ae49174491ca
|
c6826e4ce9f81da82702f9213d224a6b0f8ff4ad
|
refs/heads/master
| 2021-01-24T21:42:07.655843
| 2018-03-27T17:31:15
| 2018-03-27T17:31:15
| 47,431,269
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
r
|
test_get_results_error.R
|
# Smoke test for get_results_error(): builds a small fixture
# (3 individuals x 2 markers) and prints the function's output at the two
# error-rate extremes (0 and 1). No expect_*() assertions are made, so this
# only verifies that the calls run without error.
test_that("TESTING get_results_error()", {
# 3 individuals typed at 2 markers
n_ind <- 3
n_markers <- 2
print(paste("n_ind = ", n_ind))
print(paste("n_markers = ", n_markers))
# Allele frequencies by ancestry: rows of 1, 0 and 0.5
# (presumably one row per ancestry -- confirm against get_results_error())
anc_spec_freqs <- matrix(c(1, 1, 1,
0, 0, 0,
0.5, 0.5, 0.5), nrow = 3, ncol = 3, byrow = TRUE)
print("anc_spec_freqs = ")
print(anc_spec_freqs)
# 2 rows per individual (2 * n_ind = 6), one column per marker;
# ancestry codes 0, 1, 2
ancestries_matrix <- matrix(rep(c(0,0,1,1,2,2), times = 2),
nrow = 2 * n_ind, ncol = n_markers)
print("ancestries_matrix = ")
print(ancestries_matrix)
# Zero error rate
print("get_results_error(error_rate = 0,
anc_spec_freqs, ancestries_matrix_true) = ")
print(get_results_error(error_rate = 0,
anc_spec_freqs,
ancestries_matrix_true = ancestries_matrix))
# Maximal error rate
print("get_results_error(error_rate = 1,
anc_spec_freqs, ancestries_matrix_true) = ")
print(get_results_error(error_rate = 1,
anc_spec_freqs,
ancestries_matrix_true = ancestries_matrix))
})
|
eba7c822e5f067cfffe30af6cda8961816529592
|
cf606e7a3f06c0666e0ca38e32247fef9f090778
|
/test/integration/example-models/misc/multi-logit/fit.R
|
30523f65e272fed92afdbf8349ad2eef527a88f4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
nhuurre/stanc3
|
32599a71d5f82c759fd6768b8b699fb5f2b2d072
|
5612b357c1cd5a08cf2a57db97ce0e789bb87018
|
refs/heads/master
| 2023-07-05T02:27:08.083259
| 2020-11-12T15:37:42
| 2020-11-12T15:37:42
| 222,684,189
| 0
| 0
|
BSD-3-Clause
| 2019-11-19T11:50:39
| 2019-11-19T11:50:38
| null |
UTF-8
|
R
| false
| false
| 110
|
r
|
fit.R
|
# Fit the multinomial logit Stan model: 4 chains, 500 iterations each.
library(rstan)

# Names of the objects (in the calling environment) passed as Stan data.
stan_data <- c("K", "D", "N", "x", "y")
fit <- stan(file = "multi_logit.stan",
            data = stan_data,
            chains = 4,
            iter = 500)
|
299ec1746897939ea6728d96bb1fe85c60ffa5d9
|
b06ada71be740e288545cd95da40eb213a958987
|
/yahoo_stock_transaction.R
|
8c96bbe30e55b05eaac0f0a23d9de628c219ad4f
|
[] |
no_license
|
KuiMing/RCrawler101-201605
|
96f40a399469423e68079df3c269480fef83906d
|
75addd132c5c1aa6da5faa7ed0e1d0d1aa18abbe
|
refs/heads/master
| 2021-01-21T15:12:29.639323
| 2018-11-21T13:29:26
| 2018-11-21T13:29:26
| 58,893,337
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
r
|
yahoo_stock_transaction.R
|
library(httr)
library(rvest)
library(magrittr)
library(dplyr)  # mutate() is used below but dplyr was never attached

# Scrape the intraday transaction table for stock 3008 from Yahoo Taiwan.
url <- 'https://tw.stock.yahoo.com/q/ts?s=3008&t=50'
transaction <- GET(url) %>%
  content("text", encoding = "big5") %>%  # the page is Big5-encoded
  read_html() %>%
  html_table(fill = TRUE) %>%
  # NOTE(review): relying on the 8th table and dropping its first row is
  # fragile against page-layout changes -- verify if scraping breaks.
  .[[8]] %>%
  .[-1, ] %>%
  `colnames<-`(c("time", "buy", "sell", "deal",
                 "change", 'volume')) %>%
  mutate(deal = as.numeric(deal),
         volume = as.numeric(volume))
|
b97aa792e4f5eb349362450f903fe24633745485
|
d4599d2a5faeaa5e40994b8486e6becc59141fe1
|
/man/acum.Rd
|
d4986c72bdb89f07dacc8751d79c49edb242c8cb
|
[] |
no_license
|
Allisterh/forecast-pimfc
|
6a61c568768a792babc49ba1f27cc89997c63cfa
|
928986ec4932e7247fff857da58e53733ee00cd4
|
refs/heads/master
| 2023-03-16T23:02:45.366133
| 2017-03-13T23:35:45
| 2017-03-13T23:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 429
|
rd
|
acum.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/di.R
\name{acum}
\alias{acum}
\title{Acumula series em percentual ao mes em m meses}
\usage{
acum(x, m = 12)
}
\arguments{
\item{x}{A time series univariate}
\item{m}{number of months}
}
\value{
A time series univariate
}
\description{
Transforma uma serie mensal dada em percentual ao mes
em uma serie mensal com percentual nos ultimos m meses
}
|
dbcc6c09f6c1905f24c57cc5bbf5151f9c322962
|
ab4da977e6ffde05be2559e81ce4ad3032041dff
|
/man/lt_attgt.Rd
|
c60972866662687d79e3be2d4e6e73ae72f1a858
|
[] |
no_license
|
bcallaway11/ife
|
59a32983c03c69079666a1c0d598e21b8ef30778
|
c3d1177bcf69a6b92ab162435553930be796c80f
|
refs/heads/master
| 2023-08-28T13:15:55.338660
| 2023-08-15T21:38:05
| 2023-08-15T21:38:05
| 387,966,129
| 16
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 898
|
rd
|
lt_attgt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lt_attgt.R
\name{lt_attgt}
\alias{lt_attgt}
\title{lt_attgt}
\usage{
lt_attgt(gt_data, xformla = ~1, anticipation = 0, ...)
}
\arguments{
\item{gt_data}{data frame that is local to the specific groups
and times for which we'll be computing a treatment effect
estimate.}
\item{xformla}{Formula for which covariates to include in the model. Default is ~1.}
\item{anticipation}{Number of periods that treatment is anticipated. Default
is 0. This is in ``periods''; e.g., the code will work if time periods are
equally spaced but 2 years apart. In this case, to allow for treatment
anticipation of 2 years (<=> 1 period), set \code{anticipation = 1}.}
\item{...}{extra arguments}
}
\value{
\code{attgt_if} object
}
\description{
Computes estimates of ATT(g,t) using a linear trends model
for untreated potential outcomes
}
|
943248834327c971203f8b588c6a3b909f9fefc9
|
370da857b20dc8c00e78f5b24de5070246069cae
|
/01 BasicAnswers.R
|
a038c84ce10e9d46bf4819d6b0ac91b6c2511b37
|
[] |
no_license
|
CodyStumpo/MLB-Extra-Inning-Rule-Change
|
2f45d68fb9f4430e15a7d84554cd2f616b7008b9
|
2f7053f67613bfab7746759f322f3d15a78fe166
|
refs/heads/master
| 2021-01-22T06:48:51.612886
| 2017-02-13T06:14:23
| 2017-02-13T06:14:23
| 81,790,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,723
|
r
|
01 BasicAnswers.R
|
# Analysis of the proposed MLB extra-inning rule (runner starts on 2nd):
# loads 2014-2016 pitch-level data, characterises current extra-inning
# behaviour, then simulates game length under the current and proposed rules.
# NOTE(review): this script reuses short names (t, b, i, innings) across
# sections and `t` shadows base::t() -- run top to bottom only.
setwd("~/Documents/Code/mlb-hackathon-2017/")
library(readr)
#rawData = read.csv("2016-WS.csv")
rawData2016 = read_csv("2016.csv")
rawData2015 = read_csv("2015.csv")
rawData2014 = read_csv("2014.csv")
rawData=rbind(rawData2014,rawData2015,rawData2016)
rm(rawData2014, rawData2015, rawData2016)
library("dplyr")
library(ggplot2)
#here's an idea, what if last out from previous inning starts on second base in extra innings.
#expectation of outcomes & time saved. simulate it out. not just expectations.
# how does home team wpct compare with this rule to actual?
# Keep only the columns needed for the analysis.
rawData = rawData %>% select(gameString,inning, side, balls, strikes, outs, manOnFirst, manOnSecond, manOnThird,
runsHome, visitingTeamFinalRuns, homeTeamFinalRuns)
## what happened in extra inning games?
xinning= unique(rawData %>%
filter(inning>9) %>%
select (gameString)
)
totalGames=nrow(unique(rawData %>% select(gameString)))
howOftenExtra = nrow(xinning)/totalGames #8.7%
#get last record of each xinning game, record # of innings & winner
# NOTE(review): row-by-row filter over all games; slow but correct.
# Prefer seq_len(nrow(xinning)) over 1:nrow(xinning) as a safer idiom.
xinning$innings=9
xinning$winner='H'
for(i in 1:nrow(xinning)){
game=xinning[i,]
record=rawData %>% filter(gameString==game$gameString) %>% tail(n=1)
xinning$innings[i]=record$inning
if(record$visitingTeamFinalRuns>record$homeTeamFinalRuns) xinning$winner[i]='V'
}
#### 1. are extra innings like other innings? similar distribution of runs?
# Runs scored per half-inning.
runTable=rawData %>% select(gameString, inning, side, runsHome) %>%
group_by(gameString, inning, side) %>% summarise(runs=sum(runsHome, na.rm=TRUE))
numRegInnings=nrow(subset(runTable, inning<10))
numExtraInnings=nrow(subset(runTable, inning>9))
whatPctInningsExtra=numExtraInnings/(numExtraInnings+numRegInnings) #2.2%
# Normalised run distributions: regulation vs. extra innings (tops only).
r=table(subset(runTable, inning<10)$runs)
x=table(subset(runTable, inning>9 & side=='T')$runs) #ignore B because walkoffs
r=r/sum(r)
x=x/sum(x)
runDist=data.frame(regulation=r[1:6]*100, extra=x[1:6]*100) #yes, they are quite similar
#### 2. what happens now in extra innings games
empiricalXInningDist=100*table(xinning$innings)/nrow(xinning)
#### 1-alt. what is the distribution of runs in full innings (now)
fullRunDist=100*table(runTable$runs)/nrow(runTable)
### 3. try to simulate today's extra inning length
# Draw random half-inning run totals for top (t) and bottom (b); a tie
# extends the game by one inning, otherwise the game ends.
simlength=10000
indexT=sample(1:nrow(runTable), simlength, replace=TRUE)
indexB=sample(1:nrow(runTable), simlength, replace=TRUE)
t=runTable[indexT,"runs"]
b=runTable[indexB,"runs"]
v=sum(t>b)
h=sum(b>t)
i=1
# NOTE(review): inningsVector grows with append() inside the loop (O(n^2)
# copies), and `while(i < simlength)` skips the final simulated inning.
inningsVector=as.numeric()
innings=1
while(i < simlength){
if(b[i,]==t[i,]) innings=innings+1
else {inningsVector=append(inningsVector,innings); innings=1}
i=i+1
}
SimXInningDist=100*table(inningsVector+9)/length(inningsVector)
# Compare simulated vs. empirical game-length CDFs under current rules.
plot(cumsum(empiricalXInningDist), col="blue", type = "l",
xlab="Extra Innings", ylab="Chance Game Over After X Extra Innings", main="Extra Inning Game Length Under Current Rules")
lines(cumsum(SimXInningDist), col="red")
legend("bottomright",
c("Empirical","Simulated"), # puts text in the legend
lty=c(1,1), # gives the legend appropriate symbols (lines)
lwd=c(2.5,2.5),col=c("blue","red"))
### 4. what happens with man on second, no outs?
##pull similar innings to simulate from
situations = unique(rawData %>%
filter(manOnFirst=='false' & manOnSecond=='true' & manOnThird=='false' &
balls==0 & strikes==0 & outs==0) %>%
select (gameString, inning, side)
)
situations$runs=0
# For each matching half-inning, sum runs scored from the first occurrence
# of the "runner on 2nd, 0-0 count, no outs" state onward.
for (i in 1:nrow(situations)){
gameInning=situations[i,]
records=rawData %>% filter(gameString==gameInning$gameString & inning==gameInning$inning & side==gameInning$side )
# find first instance of situation in this inning, then sum runs scored from that pitch on
countFromHere = min(which(records$manOnFirst=='false' & records$manOnSecond=='true' & records$manOnThird=='false' &
records$balls==0 & records$strikes==0 & records$outs==0 ))
recordsToCount=records[countFromHere:nrow(records),]
runsFromThere=sum(recordsToCount$runsHome, na.rm=TRUE)
situations$runs[i]=runsFromThere
}
#this takes ~8 minutes on my laptop, write to disk
write_csv(situations, "situations.csv")
manOn2RunDist=100*table(situations$runs)/nrow(situations)
# Same tie-extends-game simulation as section 3, now sampling from the
# runner-on-second run distribution (t, b, i, innings are reused here).
simlength=10000
t=situations %>% filter(side=='T') %>% select(runs)
b=situations %>% filter(side=='B') %>% select(runs)
indexT=sample(1:nrow(t), simlength, replace=TRUE)
indexB=sample(1:nrow(b), simlength, replace=TRUE)
t=t[indexT,]
b=b[indexB,]
v=sum(t>b)
h=sum(b>t)
i=1
newInningsVector=as.numeric()
innings=1
while(i < simlength){
if(b[i,]==t[i,]) innings=innings+1
else {newInningsVector=append(newInningsVector,innings); innings=1}
i=i+1
}
NewSimXInningDist=100*table(newInningsVector+9)/length(newInningsVector)
plot(cumsum(empiricalXInningDist), col="blue", type = "l",
xlab="Extra Innings", ylab="Chance Game Over After X Extra Innings", main="Impact of Rule Change on Extra Inning Game Length")
lines(cumsum(NewSimXInningDist), col="red")
legend("bottomright",
c("Current","Proposed"), # puts text in the legend
lty=c(1,1), # gives the legend appropriate symbols (lines)
lwd=c(2.5,2.5),col=c("blue","red"))
## expected extra innings?
# Expected value over innings 1..8 beyond the 9th, for each scenario.
expectedSimXtraInnings = crossprod(SimXInningDist[1:8]/100, seq(8))
expectedNewSimXtraInnings = crossprod(NewSimXInningDist[1:8]/100, seq(8))
expectedishXtraInnings = crossprod(empiricalXInningDist[1:8]/100, seq(8))
expectedXtraInnings = numExtraInnings / nrow(xinning) /2
#what percent of innings would now be extra?
numNewExtraInnings = nrow(xinning) * expectedNewSimXtraInnings * 2
whatPctInningsNowExtra=numNewExtraInnings/(numNewExtraInnings+numRegInnings) #1.4%
|
c797f526acf762893f2b92f92d738d8ae1337e82
|
33657200e7557bde9ef28fc172e12463183ef3ab
|
/walmart.r
|
a696e860b8143352684bff898c2e302811bc1834
|
[] |
no_license
|
kalyaniram88/walmart-gif
|
49e787dce1834097a146bd74b93985c436d1b93a
|
137d4fa7c89f9029d29db16f2e9040c37349a19a
|
refs/heads/master
| 2016-09-06T01:45:36.781751
| 2015-09-02T22:22:44
| 2015-09-02T22:22:44
| 20,395,573
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,116
|
r
|
walmart.r
|
# Animated map of Walmart store openings in the continental US:
# one PNG frame per month, stitched into a GIF with ImageMagick.
# NOTE(review): the original was pasted with "smart" (curly) quotes
# throughout, which do not parse in R; all quotes are normalised here.
library(maps)
library(fields) # for US() map, which is pretty straightforward than map() as its used minimally

# Store openings: rename columns, then split the open date ("m/d/y")
# into a numeric month/day/year matrix.
d <- read.csv("data/store_openings.csv")
names(d) <- c("store.number", "opendate", "date.super", "conversion",
              "fips_state", "county", "street.address", "store.city",
              "store.state", "zipcode", "type.of.store")
opendate_d <- t(simplify2array(strsplit(as.character(d$opendate), "/")))
opendate_d <- array(as.integer(opendate_d), dim = c(nrow(opendate_d), 3))

# Zipcode lookup used to geolocate stores; keep the continental US only.
zips <- read.csv("data/zipcodes/zipcode.csv")
zips <- zips[zips$state != "HI", ]
zips <- zips[zips$state != "PR", ]
zips <- zips[zips$state != "AS", ]
zips <- zips[zips$state != "VI", ]
zips <- zips[zips$state != "AK", ]

# For every month in the data range, count new openings and accumulate the
# row indices of all stores open so far.
new_stores_in_months <- numeric(diff(range(opendate_d[, 3])) * 12)
mcount <- 2
stores_list <- as.list(rep(1, diff(range(opendate_d[, 3])) * 12 + 1))
stores_list[[1]] <- NA
for (year in min(opendate_d[, 3]):max(opendate_d[, 3])) {
  for (month in 1:12) {
    new_stores_in_months[mcount - 1] <- sum(opendate_d[, 1] == month & opendate_d[, 3] == year)
    # was: stores_list[[mcount + 1]] <- c(stores[[mcount]], ...) --
    # `stores` does not exist and the index overran the preallocation;
    # accumulate onto the previous month's indices instead
    # (TODO confirm this matches the author's intent).
    stores_list[[mcount]] <- c(stores_list[[mcount - 1]],
                               which(opendate_d[, 1] == month & opendate_d[, 3] == year))
    mcount <- mcount + 1
  }
}

# Map each accumulated store (as of slot 542) to its row in `zips`.
# NOTE(review): index_zip is indexed by store row number, which can exceed
# its preallocated length; R extends it silently.
index_zip <- numeric(length(stores_list[[542]]))
for (i in stores_list[[542]]) {
  if (d$zipcode[i] %in% zips$zip)
    index_zip[i] <- which(zips$zip == d$zipcode[i])
  else
    index_zip[i] <- NA
}

# One PNG frame per month: existing stores in green, new openings in blue.
total.scores <- 1
i <- 1
for (year in min(opendate_d[, 3]):max(opendate_d[, 3])) {
  for (month in 1:12) {
    png(paste(year, "_", 100 + month, ".png", sep = ""), width = 750, height = 500)
    US(main = "new stores of walmart", col = "red", bg = "grey", namesonly = TRUE)
    points(zips$longitude[index_zip[1:(total.scores + new_stores_in_months[i])]],
           zips$latitude[index_zip[1:(total.scores + new_stores_in_months[i])]],
           pch = 19, col = "green")
    points(zips$longitude[index_zip[total.scores:(total.scores + new_stores_in_months[i])]],
           zips$latitude[index_zip[total.scores:(total.scores + new_stores_in_months[i])]],
           col = "blue",
           pch = 19, lwd = 1.5)
    text(-120, 29, paste("Year: ", year), col = "red", cex = 1)
    text(-120, 28, paste("Openings: ", new_stores_in_months[i] + 1), col = "red", cex = 1)
    text(-120, 27, paste("Total stores: ", total.scores), col = "red", cex = 1)
    total.scores <- total.scores + new_stores_in_months[i]
    i <- i + 1
    dev.off()
  }
}

# after installing imagemagick on your computer, convert the png to gif by using the system()
# when working on linux will help overcome the posix issues.
# mencoder or ffmpeg are the alternatives if working on linux than imagemagick
system('"E:/Program/ImageMagick-6.8.9-Q16/convert.exe" *.png example.gif', intern = TRUE)
# Clean up the intermediate frames (the original was missing a closing paren).
logfile <- file.remove(list.files(pattern = ".png"))
|
651cf9554679ef3251447309e98f65f32929ab72
|
45e9081e8ed8a0d117331e020e63f4cc7b7a1020
|
/open_click_models/data_wrangling_open_click.R
|
2ccd0372893960fb8f735c35d939bb284a33e948
|
[] |
no_license
|
NCSU-STATCOM/activate-good-emails
|
009f7f2cedef77778c6af8d0172740ac08b68a22
|
d6100aeeee9d540d6398805655b98f84866c3f79
|
refs/heads/main
| 2023-07-02T15:24:26.908348
| 2021-08-06T13:37:30
| 2021-08-06T13:37:30
| 320,062,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,458
|
r
|
data_wrangling_open_click.R
|
# Builds the two modelling data frames for the newsletter analysis:
# `subscriber_open` (open model) and `subscriber_clicks` (click model),
# merging subject-line covariates and body-content covariates by send date.
# Inputs/outputs are project .RData/.csv files relative to the repo root.
library(lubridate)
# load weeklies1 to merge in the number of characters in subject
load("initial_report/weeklies1.RData")
subscriber_agg <- read.csv("newsletters/csv/intermediate/all_files_week_agg.csv", stringsAsFactors = FALSE)
subscriber_agg$date_sent <- as.POSIXct(subscriber_agg$date_sent, tz = "America/New_York", format = "%Y-%m-%d %H:%M:%S")
# include number of characters in the subject, as well as other covariates from
# subject_summary_stats.csv
# in order to merge weeklies1$subject_length into subscriber_agg, I need a shared variable
# i.e. the date without the timestamp
weeklies1$date <- format(weeklies1$datetime, format = "%Y-%m-%d")
subscriber_agg$date <- format(subscriber_agg$date_sent, format = "%Y-%m-%d")
sub_agg_merged <- merge(subscriber_agg, subset(weeklies1,
select = c("subject", "covid", "season", "mins_since_midnight",
"subject_length", "date")),
by = "date")
# construct 2 dataframes, one for the open model, the other for the click model
# first, for the open model:
subscriber_open <- subset(sub_agg_merged, select = c("date_sent", "subscriberid", "covid", "season",
"mins_since_midnight", "subject_length",
"week_open", "clicks"))
# Treat an unsubscribe in the same week as "not opened".
subscriber_open$week_open[sub_agg_merged$unsubscribes == 1] <- 0
# make week_open, subscriberid, season and covid into factors
subscriber_open$week_open <- as.factor(subscriber_open$week_open)
subscriber_open$subscriberid <- as.factor(subscriber_open$subscriberid)
subscriber_open$covid <- factor(subscriber_open$covid, levels = c("Before", "After"))
subscriber_open$month <- month(subscriber_open$date_sent)
subscriber_open$season <- factor(subscriber_open$season, levels = c("Winter", "Spring",
"Summer", "Fall"))
subscriber_open$season_num <- as.numeric(subscriber_open$season)
# making days_since_start variable because the gamm function needs numeric rather than date
# NOTE(review): POSIXct subtraction yields a difftime whose unit depends on
# the magnitude; here the values are presumably days -- confirm downstream.
study_start <- as.POSIXct("2019-01-01", tz = "America/New_York", format = "%Y-%m-%d")
subscriber_open$days_since_start <- as.numeric(subscriber_open$date_sent - study_start)
save(subscriber_open, file = "open_click_models/subscriber_open.RData")
# now, for the click model:
load("open_click_models/subscriber_open.RData")
# load pt_and_html_df to merge in plain-text and html info
load("newsletters/pt_and_html_df.RData")
# in order to merge pt_and_html_df into subscriber_open, I need a shared variable
# i.e. the date without the timestamp
# The date is embedded in doc_id at characters 11-20 in m-d-Y form.
pt_and_html_df$date <- as.character(as.POSIXct(substr(pt_and_html_df$doc_id, start = 11, stop = 20),
format = "%m-%d-%Y"))
subscriber_open$date <- format(subscriber_open$date_sent, format = "%Y-%m-%d")
sub_agg_merged2 <- merge(subscriber_open, subset(pt_and_html_df,
select = c("num_words", "num_links", "num_clickable_pics",
"num_unclickable_pics", "date")),
by = "date")
subscriber_clicks <- subset(sub_agg_merged2, select = c("date_sent", "subscriberid", "covid",
"mins_since_midnight", "subject_length",
"clicks", "days_since_start", "num_words",
"num_links", "num_clickable_pics",
"num_unclickable_pics"))
save(subscriber_clicks, file = "open_click_models/subscriber_clicks.RData")
# exploration and checking
all.equal(sort(weeklies1$date), sort(unique(subscriber_agg$date)))
# week_open and opened are actually similar anyways
table(sub_agg_merged$neither, sub_agg_merged$week_open)
# small proportion of bounces, and can still open even if bounced, so I decided
# to ignore bounces
table(sub_agg_merged$bounces, sub_agg_merged$week_open)
mean(sub_agg_merged$bounces)
# Seems like a significant amount of unsubscribes do occur. Thus, I will set week_open as 0 if there's
# an unsubscribe.
table(sub_agg_merged$unsubscribes, sub_agg_merged$week_open)
# NOTE(review): 879 is a hard-coded unsubscribe count from one run of the
# table above; it will go stale if the data changes.
879 / nrow(sub_agg_merged)
# EDA
cor(subscriber_agg[, c(4:8)])
|
9f98f1c173acd98b7cf6c963e5314a7adf0912de
|
0b05920b9b5a2371f510812e0a34a172cfa65562
|
/ASAR/SVR_ASAR_smap_grid.R
|
7250c08f482a083e338ead6f2d25f3c911595e0e
|
[] |
no_license
|
felixgreifeneder/EURAC_R
|
962d77f0579cae1257b29aabda09b74070e17b70
|
0256893e67e3ea3c34247b88472af40b6ef2e64d
|
refs/heads/master
| 2021-01-10T11:26:53.843753
| 2017-02-09T09:26:57
| 2017-02-09T09:26:57
| 47,684,976
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,765
|
r
|
SVR_ASAR_smap_grid.R
|
# SVR estimation of soil moisture (SMC) from ASAR backscatter and terrain
# predictors, comparing models trained on unscaled vs. scaled data.
# Initialisation
# --------------------------------------------------------------------------
# loading required packages
library("e1071")
library("caret")
library("ggplot2")
library("reshape2")
library("hexbin")
library("RColorBrewer")
library("gridExtra")

# Load the two training sets (same stations, unscaled and scaled variants)
training_noScl <- read.table("X:/Workspaces/GrF/Processing/SMAP/svr/training_stations_sig0lcmask.txt",
                             header = TRUE,
                             sep = ",")
training_Scld <- read.table("X:/Workspaces/GrF/Processing/SMAP/svr/training_scaled_sig0lcmask.txt",
                            header = TRUE,
                            sep = ",")

# --------------------------------------------------------------------------
# Building the SVR model (package e1071)
# 5-fold cross-validation controls for hyper-parameter tuning
SVRtuningsettings <- tune.control(sampling = "cross",
                                  sampling.aggregate = mean,
                                  sampling.dispersion = sd,
                                  cross = 5,
                                  best.model = TRUE,
                                  performances = TRUE)

# Grid search over epsilon / gamma / cost for the RBF kernel (unscaled data)
SVRtuning <- tune(svm, SMC ~ SIG0 + height + aspect + slope + lc,
                  data = training_noScl,
                  kernel = "radial",
                  ranges = list(epsilon = 10^seq(-2, -0.5, len = 3),
                                gamma = 10^seq(-2, 1, len = 3),
                                cost = 10^seq(-2, 2, len = 3)),
                  tunecontrol = SVRtuningsettings)
tunedModel_noScl <- SVRtuning$best.model

# Same grid search on the scaled data
SVRtuning <- tune(svm, SMC ~ SIG0 + height + aspect + slope + lc,
                  data = training_Scld,
                  kernel = "radial",
                  ranges = list(epsilon = 10^seq(-2, -0.5, len = 3),
                                gamma = 10^seq(-2, 1, len = 3),
                                cost = 10^seq(-2, 2, len = 3)),
                  tunecontrol = SVRtuningsettings)
# was `SVRtuning$best.mode`, which only worked via `$` partial matching
tunedModel_Scld <- SVRtuning$best.model

# --------------------------------------------------------------------------
# Estimating SMC: RMSE and R^2 on the training data for both models
print("Overall performance (no scaling) based on training:")
SMCpredicted_noScl <- predict(tunedModel_noScl, training_noScl)
error_noScl <- sqrt(mean((training_noScl$SMC - SMCpredicted_noScl)^2))
r2_noScl <- cor(training_noScl$SMC, y = SMCpredicted_noScl, method = "pearson")^2
print(paste("Error:", error_noScl, "R2:", r2_noScl))

print("Overall performance (scaled) based on training:")
SMCpredicted_Scld <- predict(tunedModel_Scld, training_Scld)
error_Scld <- sqrt(mean((training_Scld$SMC - SMCpredicted_Scld)^2))
r2_Scld <- cor(training_Scld$SMC, y = SMCpredicted_Scld, method = "pearson")^2
print(paste("Error:", error_Scld, "R2:", r2_Scld))

# Plot true vs estimated SMC, faceted by scaling variant
tmp <- data.frame(x = c(training_noScl$SMC, training_Scld$SMC),
                  y = c(SMCpredicted_noScl, SMCpredicted_Scld),
                  Scaling = c(rep("no", length(training_noScl$SMC)),
                              rep("yes", length(training_Scld$SMC))),
                  stringsAsFactors = FALSE)
rf <- colorRampPalette(rev(brewer.pal(11, "Spectral")))
r <- rf(32)
ggplot(tmp, aes(x = x, y = y, colour = Scaling)) +
  geom_point(shape = 1, size = 3) +
  theme(aspect.ratio = 1) +
  scale_x_continuous(name = "\nTRUE SMC [m3m-3]", limits = c(0.1, 0.5)) +
  scale_y_continuous(name = "PREDICTED SMC [m3m-3]\n", limits = c(0.1, 0.5)) +
  scale_colour_hue(l = 50, guide = FALSE) +
  facet_grid(. ~ Scaling) +
  theme(axis.title.x = element_text(face = "bold", size = 18),
        axis.text.x = element_text(size = 14),
        axis.title.y = element_text(face = "bold", size = 18),
        axis.text.y = element_text(size = 14),
        strip.text.x = element_text(size = 14, face = "bold")) +
  geom_smooth(method = glm, se = FALSE, fullrange = TRUE, linetype = "dashed", size = 1)
ggsave("X:/Workspaces/GrF/02_Documents/VZJ_paper/fig_for_upload/fig8_new.tiff", dpi = 300)
|
5c08da71a81665117cdea2fff23367d0f0dc030a
|
9d308bfa1dd2081d5cb5b2d1b69270827d77acff
|
/1. First R functions.R
|
ca6d73b8c4ea82a3798f28015e1e559865e16f59
|
[] |
no_license
|
Rachitahuja20/R-Language
|
34f70e6d348e7d5d0baf8a7c80573461b8f401b6
|
b589a2f89fa3c922b5c1d63f6efbbd89a3dbc2e2
|
refs/heads/master
| 2020-03-20T19:43:02.871298
| 2018-06-18T17:51:35
| 2018-06-18T17:51:35
| 137,650,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 828
|
r
|
1. First R functions.R
|
# Introductory R exercises: dice-rolling functions and basic qplot graphics.

# Die faces
die <- 1:6

# Roll two fair dice with replacement; returns both faces.
# (The original assigned the sample to a local variable and returned it
# invisibly; returning the expression directly makes the result visible.)
Roll2 <- function() {
  sample(die, size = 2, replace = TRUE)
}
# Calling the created function
Roll2()

# Weighted dice: face 6 is three times as likely as each other face;
# returns the sum of two rolls. NOTE: the prob vector has length 6, so
# `Big` must supply exactly 6 faces.
Roll <- function(Big = 1:6) {
  win <- sample(Big, size = 2, replace = TRUE,
                prob = c(1/8, 1/8, 1/8, 1/8, 1/8, 3/8))
  sum(win)
}
# Calling the function with an explicit argument.
# (Was `Rolls(1:20)`: the function is named `Roll`, and a 20-face die
# would not match the 6-element prob vector above.)
Roll(1:6)

# Calling a package from library
library("ggplot2")

# Values for a simple scatter plot of y = -x
x <- c(-8, -6, -4, -2, 0, 2, 4, 6, 8)
y <- -x
qplot(x, y)

# Histogram of a small vector
z <- c(1, 2, 2, 3, 5, 6, 7, 7, 7, 2, 2)
qplot(z, binwidth = 0.5)

# replicate() repeats an expression; histogram of 10,000 weighted rolls
replicate(10, Roll())
rolls <- replicate(10000, Roll())
qplot(rolls, binwidth = 1)
|
45d157a61df3c78c220e4f7bb78e774dd3b5c316
|
aba010bbf8165acc06349b629ddf4593628325de
|
/NEED_cluster_analysis.R
|
96ab482a43ab893513e33da79601d83b7a998c8a
|
[] |
no_license
|
karafede/Energy
|
f2557d0e7e6b5ca1141177c836b2f2f2dc610355
|
9b3f8a0b344dd01fb9944ac85e326d28e1fa615e
|
refs/heads/master
| 2021-01-10T09:40:27.074294
| 2016-03-22T11:32:35
| 2016-03-22T11:32:35
| 54,454,862
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,592
|
r
|
NEED_cluster_analysis.R
|
# Set-up ---------------------------
# Load packages
library(threadr)
library(ggplot2)
library(dplyr)
library(tidyr)
library (devtools)
library(readxl)
library(Hmisc)
# devtools::install_github("skgrange/threadr")
# Set global options
options(stringsAsFactors = FALSE)
# Set working directory
setwd("C:/NEED")
# setwd("C:/RICARDO-AEA/NEED")
# Clear all objects
rm(list = ls(all = TRUE))
# Energy NEED consumption data ---------------------------
# Load data
# data_energy <- read.csv("university_of_warwick_energy_data.csv.bz2")
data_energy <- read.csv("need_public_use_file_2014.csv")
#names(data_energy) <- str_to_lower(names(data_energy))
data_energy_gas <- data_energy %>%
filter(!is.na(IMD_ENG)) %>%
filter(MAIN_HEAT_FUEL != 2) %>% ## keep only gas data (1)
filter(REGION != "W99999999") %>% ## remove Wales region
filter(grepl("V", Gcons2012Valid, ignore.case = TRUE)) %>%
filter(grepl("V", Econs2012Valid, ignore.case = TRUE))
#### Rename regions ##############################################################
# Map ONS region codes and numeric property-type codes to readable labels in a
# single pass with named lookup vectors. Values that match no known code are
# left unchanged, mirroring the original ifelse()/grepl() chain.
region_labels <- c(
  "E12000001" = "North East",
  "E12000002" = "North West",
  "E12000003" = "Yorkshire and The Humber",
  "E12000004" = "East Midlands",
  "E12000005" = "West Midlands",
  "E12000006" = "East of England",
  "E12000007" = "London",
  "E12000008" = "South East",
  "E12000009" = "South West"
)
mapped_region <- unname(region_labels[as.character(data_energy_gas$REGION)])
data_energy_gas$REGION <- ifelse(is.na(mapped_region),
                                 data_energy_gas$REGION,
                                 mapped_region)

prop_type_labels <- c(
  "101" = "Detached house",
  "102" = "Semi-detached house",
  "103" = "End terrace house",
  "104" = "Mid terrace house",
  "105" = "Bungalow",
  "106" = "Flat (inc. maisonette)"
)
mapped_prop <- unname(prop_type_labels[as.character(data_energy_gas$PROP_TYPE)])
data_energy_gas$PROP_TYPE <- ifelse(is.na(mapped_prop),
                                    data_energy_gas$PROP_TYPE,
                                    mapped_prop)
# data_energy_gas$inv_FLOOR_AREA_BAND <- 1/data_energy_gas$FLOOR_AREA_BAND
data_energy_gas$LOFT_DEPTH[data_energy_gas$LOFT_DEPTH==99] <- NA
data_select_gas <- data_energy_gas %>%
select(-IMD_WALES,
-Gcons2005, -Gcons2005Valid,
-Gcons2006, -Gcons2006Valid,
-Gcons2007, -Gcons2007Valid,
-Gcons2008, -Gcons2008Valid,
-Gcons2009, -Gcons2009Valid,
-Gcons2010, -Gcons2010Valid,
-Gcons2011, -Gcons2011Valid,
-Econs2005, -Econs2005Valid,
-Econs2006, -Econs2006Valid,
-Econs2007, -Econs2007Valid,
-Econs2008, -Econs2008Valid,
-Econs2009, -Econs2009Valid,
-Econs2010, -Econs2010Valid,
-Econs2011, -Econs2011Valid)
# data_energy_gas <- sample_n(data_energy_gas, 20000)
data_select_gas$inv_IMD_ENG <- 1/(data_select_gas$IMD_ENG)
data_select_gas$energy <- (data_select_gas$Gcons2012)+(data_select_gas$Econs2012)
data_select_gas$gas_floor_area <- (data_select_gas$Gcons2012)/(data_select_gas$FLOOR_AREA_BAND)
data_select_gas$electricity_floor_area <- (data_select_gas$Econs2012)/(data_select_gas$FLOOR_AREA_BAND)
data_select_gas$energy_inv_IMD_ENG <- (data_select_gas$energy)/(1/(data_select_gas$IMD_ENG))
data_select_gas$energy_IMD_ENG <- (data_select_gas$energy)/(data_select_gas$IMD_ENG)
data_select_gas$energy_EE_BAND <- (data_select_gas$energy)/(data_select_gas$EE_BAND)
data_select <- data_select_gas %>%
select(HH_ID,
REGION,
# Gcons2012,
# Econs2012,
FLOOR_AREA_BAND,
PROP_TYPE,
EE_BAND,
inv_IMD_ENG) %>%
na.omit()
# Make REGION a factor too
# Encode REGION as its 1-based position in `region_order` via match(): this
# replaces the original 10-line ifelse() chain. Labels not in the list become
# NA, which matches the original behaviour (as.numeric() on an unmatched
# character label also produced NA — plus a coercion warning).
region_order <- c("North East", "North West", "Yorkshire and The Humber",
                  "East Midlands", "West Midlands", "East of England",
                  "London", "South East", "South West")
data_select$REGION <- as.numeric(match(data_select$REGION, region_order))

# Make PROP_TYPE a factor too — same single-pass encoding as REGION above.
prop_type_order <- c("Detached house", "Semi-detached house",
                     "End terrace house", "Mid terrace house",
                     "Bungalow", "Flat (inc. maisonette)")
data_select$PROP_TYPE <- as.numeric(match(data_select$PROP_TYPE, prop_type_order))
# Plots
summary_REGION <- data_select_gas %>%
group_by(REGION) %>%
summarise(gas = mean(Gcons2012, na.rm = TRUE),
electricity = mean(Econs2012, na.rm = TRUE),
energy = mean(energy, na.rm = TRUE))
jpeg('C:/NEED/plots/Gas_summary.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
summary_REGION %>%
ggplot(aes(REGION, gas, fill = REGION)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Gas Consumption (2012)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Electricity_summary.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
summary_REGION %>%
ggplot(aes(REGION, electricity, fill = REGION)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Electricity Consumption (2012)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Energy_summary.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
summary_REGION %>%
ggplot(aes(REGION, energy, fill = REGION)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Total Energy Consumption (2012)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
par(oldpar)
dev.off()
########## Ranking ###########################################
data_select_gas <- data_select_gas %>%
subset(PROP_TYPE =="Flat (inc. maisonette)")
RANKING_NEED_ENERGY <- data_select_gas %>%
# group_by(cluster) %>%
mutate(octile_energy = ntile(energy, 8)) %>%
mutate(octileBounds_energy = cut2(energy, g=8))%>%
ungroup()
# Arrange by numeric values
RANKING_NEED_ENERGY <- RANKING_NEED_ENERGY %>%
arrange(octile_energy,octileBounds_energy, energy)
RANKING_NEED_ENERGY_IMD <- data_select_gas %>%
# group_by(cluster) %>%
mutate(octile_IMD = ntile(energy_IMD_ENG, 8)) %>%
mutate(octileBounds_IMD = cut2(energy_IMD_ENG, g=8))%>%
ungroup()
# Arrange by numeric values
RANKING_NEED_ENERGY_IMD <- RANKING_NEED_ENERGY_IMD %>%
arrange(octile_IMD,octileBounds_IMD, energy_IMD_ENG)
select_energy <- RANKING_NEED_ENERGY %>%
select(HH_ID,
energy,
octile_energy,
octileBounds_energy)
select_energy_IMD <- RANKING_NEED_ENERGY_IMD %>%
select(HH_ID,
energy_IMD_ENG,
octile_IMD,
octileBounds_IMD)
# colnames(select_energy_IMD) <- c("HH_ID_IMD", "energy_IMD_ENG",
# "octile_IMD","octileBounds_IMD")
# select_data <- cbind(select_energy, select_energy_IMD)
select_energy_IMD <- select_energy %>%
inner_join(select_energy_IMD, "HH_ID") ## "HH-ID" is the common field to join
select_energy_octile <- select_energy_IMD %>%
subset(octile_energy == 7)
select_energy_octile %>%
ggplot(aes(energy, fill = as.factor(octile_energy),
colour = as.factor(octile_energy))) +
geom_density(alpha = 0.5) +
ggtitle("Energy 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
select_energy_octile %>%
ggplot(aes(energy, fill=octile_energy))+
geom_histogram(binwidth = 100)
select_energy_octile %>%
ggplot(aes(energy_IMD_ENG, fill = as.factor(octile_energy),
colour = as.factor(octile_energy))) +
geom_density(alpha = 0.5) +
ggtitle("Energy / IMD (2012)") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
select_energy_octile %>%
ggplot(aes(energy_IMD_ENG, fill=octile_energy))+
geom_histogram(binwidth = 100)
RANKING_NEED_ENERGY %>% ### without clustering
ggplot(aes(energy, fill = as.factor(octile_energy),
colour = as.factor(octile_energy))) +
geom_density(alpha = .5) +
ggtitle("Energy 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
# write.csv(DATA_FINAL_ALL, file = "DATA_FINAL_ALL.csv", row.names=TRUE)
# Counts of properties per energy octile.
# BUG FIX: the ranking above creates a column named `octile_energy`; the
# original referred to a non-existent `octile` column throughout this section,
# so the grouping and relabelling silently operated on nothing.
RANKING_NEED_ENERGY %>%
  group_by(octile_energy) %>%
  summarise(n = n())

# ##Get HH_ID with high consumption for their cluster
# data_high <- RANKING_NEED_ENERGY %>%
#   filter(octile_energy == 8)
# View(a[, 50:101])

# Relabel the numeric octiles 1..8 with low/medium/high codes in one pass
# (integer indexing into the code vector replaces the eight assignments).
octile_codes <- c("LLL", "LL", "L", "M", "MM", "H", "HH", "HHH")
RANKING_NEED_ENERGY$octile_energy <- octile_codes[RANKING_NEED_ENERGY$octile_energy]

### subset....
#### filter...
write.csv(RANKING_NEED_ENERGY, file = "RANKING_NEED_ENERGY.csv", row.names=TRUE)
Summary_Energy <- RANKING_NEED_ENERGY %>%
  group_by(octile_energy) %>%
  summarise(n = n())
write.csv(Summary_Energy, file = "Summary_Energy.csv", row.names=TRUE)
###############################################################################
# data_select %>%
# ggplot(aes(HH_ID, Gcons2012)) +
# geom_point(colour = "darkred", size = 4) +
# geom_smooth(method = "lm", se=FALSE, color="black", aes(group=1))
# Store unique identifiers
rownames <- data_select$HH_ID
# Give data frame unique row names
row.names(data_select) <- rownames
data_select$HH_ID <- NULL
# Standardise variables (rescale data based on the meand and Standard deviation)
data_select_standard <- standardise(data_select)
# data_select_standard <- data.frame(scale(data_select))
# data_select_standard$HH_ID <- data_select_gas$HH_ID
# data_select_standard %>%
# ggplot(aes(HH_ID, Gcons2012)) +
# geom_point(colour = "darkred", size = 4) +
# geom_smooth(method = "lm", se=FALSE, color="black", aes(group=1))
############### PRINCIPAL COMPONENT ANALYSIS ################
NEED.pca <- prcomp(data_select_standard,
center = TRUE,
scale. = TRUE)
plot(NEED.pca, type = "l")
summary(NEED.pca) ### cumulative
plot(NEED.pca)
Prop_Var <- as.data.frame(NEED.pca$sdev^2/sum(NEED.pca$sdev^2)*100)
jpeg('C:/NEED/plots/PCA_NEED.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
plot(NEED.pca)
par(oldpar)
dev.off()
################ CLUSTER ANALYSIS #############################
# ### Sample_data
# x <- rbind(matrix(rnorm(100, sd = 0.3), ncol = 2),
# matrix(rnorm(100, mean = 1, sd = 0.3), ncol = 2))
# colnames(x) <- c("x", "y")
# (cl <- kmeans(x, 2))
# cl <- kmeans(x, 4)
# plot(x, col = cl$cluster)
# points(cl$centers, col = 1:2, pch = 8, cex = 2)
kms <- kmeans(data_select_standard, centers = 5) ### number of cluster = 4
class(kms)
cluster <- kms$cluster
# Give observations cluster variable
data_post_cluster <- data_select %>%
mutate(HH_ID = rownames, ### add flat name (new column)
cluster = unname(cluster)) %>% ### add cluste column
arrange(cluster)
# Join cluster group to data
# Select
data_post_cluster <- data_post_cluster %>%
select(HH_ID, cluster)
# Join
data_post_cluster <- data_select_gas %>%
inner_join(data_post_cluster, "HH_ID") ## "HH-ID" is the common field to join
write.csv(data_post_cluster, file = "data_post_cluster.csv", row.names=TRUE)
cluster_1 <- subset(data_post_cluster, cluster == 1)
cluster_2 <- subset(data_post_cluster, cluster == 2)
cluster_3 <- subset(data_post_cluster, cluster == 3)
cluster_4 <- subset(data_post_cluster, cluster == 4)
cluster_5 <- subset(data_post_cluster, cluster == 5)
######## Rename Cluster identification ##################################
data_post_cluster_NAMES <- data_post_cluster
data_post_cluster_NAMES$PROP_AGE <- ifelse(grepl("101", data_post_cluster_NAMES$PROP_AGE, ignore.case = TRUE),
"before 1930", data_post_cluster_NAMES$PROP_AGE)
data_post_cluster_NAMES$PROP_AGE <- ifelse(grepl("102", data_post_cluster_NAMES$PROP_AGE, ignore.case = TRUE),
"1930-1949", data_post_cluster_NAMES$PROP_AGE)
data_post_cluster_NAMES$PROP_AGE <- ifelse(grepl("103", data_post_cluster_NAMES$PROP_AGE, ignore.case = TRUE),
"1950-1966", data_post_cluster_NAMES$PROP_AGE)
data_post_cluster_NAMES$PROP_AGE <- ifelse(grepl("104", data_post_cluster_NAMES$PROP_AGE, ignore.case = TRUE),
"1967-1982", data_post_cluster_NAMES$PROP_AGE)
data_post_cluster_NAMES$PROP_AGE <- ifelse(grepl("105", data_post_cluster_NAMES$PROP_AGE, ignore.case = TRUE),
"1983-1995", data_post_cluster_NAMES$PROP_AGE)
data_post_cluster_NAMES$PROP_AGE <- ifelse(grepl("106", data_post_cluster_NAMES$PROP_AGE, ignore.case = TRUE),
"1996 onwards", data_post_cluster_NAMES$PROP_AGE)
# Relabel FLOOR_AREA_BAND codes 1..4 with readable band names.
# BUG FIX: the original chain of sequential grepl()/ifelse() replacements
# corrupted the data — band 1 was first relabelled "1 to 50 m2", and because
# that string contains the digit "2" (in "m2"), the next grepl("2", ...) pass
# matched it again and overwrote it with "51-100 m2". A single-pass lookup
# never re-matches already-replaced labels. Unknown codes are left unchanged.
floor_area_labels <- c("1" = "1 to 50 m2",
                       "2" = "51-100 m2",
                       "3" = "101-150 m2",
                       "4" = "Over 151 m2")
mapped_band <- unname(floor_area_labels[as.character(data_post_cluster_NAMES$FLOOR_AREA_BAND)])
data_post_cluster_NAMES$FLOOR_AREA_BAND <- ifelse(is.na(mapped_band),
                                                  data_post_cluster_NAMES$FLOOR_AREA_BAND,
                                                  mapped_band)
data_post_cluster_NAMES$EE_BAND <- ifelse(grepl("1", data_post_cluster_NAMES$EE_BAND, ignore.case = TRUE),
"Band A or B", data_post_cluster_NAMES$EE_BAND)
data_post_cluster_NAMES$EE_BAND <- ifelse(grepl("2", data_post_cluster_NAMES$EE_BAND, ignore.case = TRUE),
"Band C", data_post_cluster_NAMES$EE_BAND)
data_post_cluster_NAMES$EE_BAND <- ifelse(grepl("3", data_post_cluster_NAMES$EE_BAND, ignore.case = TRUE),
"Band D", data_post_cluster_NAMES$EE_BAND)
data_post_cluster_NAMES$EE_BAND <- ifelse(grepl("4", data_post_cluster_NAMES$EE_BAND, ignore.case = TRUE),
"Band E", data_post_cluster_NAMES$EE_BAND)
data_post_cluster_NAMES$EE_BAND <- ifelse(grepl("5", data_post_cluster_NAMES$EE_BAND, ignore.case = TRUE),
"Band F", data_post_cluster_NAMES$EE_BAND)
data_post_cluster_NAMES$EE_BAND <- ifelse(grepl("6", data_post_cluster_NAMES$EE_BAND, ignore.case = TRUE),
"Band G", data_post_cluster_NAMES$EE_BAND)
data_post_cluster_NAMES$LOFT_DEPTH <- ifelse(grepl("1", data_post_cluster_NAMES$LOFT_DEPTH, ignore.case = TRUE),
"Less than 150mm", data_post_cluster_NAMES$LOFT_DEPTH)
data_post_cluster_NAMES$LOFT_DEPTH <- ifelse(grepl("2", data_post_cluster_NAMES$LOFT_DEPTH, ignore.case = TRUE),
"Greater than or equal to 150mm", data_post_cluster_NAMES$LOFT_DEPTH)
data_post_cluster_NAMES$LOFT_DEPTH <- replace(data_post_cluster_NAMES$LOFT_DEPTH,
is.na(data_post_cluster_NAMES$LOFT_DEPTH), "no information")
data_post_cluster_NAMES$WALL_CONS <- ifelse(grepl("1", data_post_cluster_NAMES$WALL_CONS, ignore.case = TRUE),
"Cavity wall", data_post_cluster_NAMES$WALL_CONS)
data_post_cluster_NAMES$WALL_CONS <- ifelse(grepl("2", data_post_cluster_NAMES$WALL_CONS, ignore.case = TRUE),
"Other", data_post_cluster_NAMES$WALL_CONS)
data_post_cluster_NAMES$CWI <- ifelse(grepl("1", data_post_cluster_NAMES$CWI, ignore.case = TRUE),
"CWI_government", data_post_cluster_NAMES$CWI)
data_post_cluster_NAMES$CWI <- replace(data_post_cluster_NAMES$CWI,
is.na(data_post_cluster_NAMES$CWI), "no cavity record")
data_post_cluster_NAMES$BOILER <- ifelse(grepl("1", data_post_cluster_NAMES$BOILER, ignore.case = TRUE),
"New boiler installed", data_post_cluster_NAMES$BOILER)
data_post_cluster_NAMES$BOILER <- replace(data_post_cluster_NAMES$BOILER,
is.na(data_post_cluster_NAMES$BOILER), "no boiler record")
data_post_cluster %>%
ggplot(aes(HH_ID, energy, colour = as.factor(cluster))) +
geom_point(size = 4) + facet_wrap(~cluster)
data_post_cluster %>%
ggplot(aes(HH_ID, Econs2012, colour = as.factor(cluster))) +
geom_point(size = 4) + facet_wrap(~cluster)
data_post_cluster %>%
ggplot(aes(HH_ID, Gcons2012, colour = as.factor(cluster))) +
geom_point(size = 4) + facet_wrap(~cluster)
summary_Consumption <- data_post_cluster %>%
group_by(cluster) %>%
summarise(energy = mean(energy, na.rm = TRUE),
electricity = mean(Econs2012, na.rm = TRUE),
gas = mean(Gcons2012, na.rm = TRUE))
jpeg('C:/NEED/plots/Energy_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
summary_Consumption %>%
ggplot(aes(cluster, energy, fill = cluster)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=14,face="bold", colour = "black")) +
ggtitle("Total Energy") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
summary_Consumption %>%
ggplot(aes(cluster, electricity, fill = cluster)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
ggtitle("Summary Electricity (2012)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
summary_Consumption %>%
ggplot(aes(cluster, gas, fill = cluster)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
ggtitle("Summary Gas (2012)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
IMD <- data_post_cluster_NAMES %>%
group_by(cluster,inv_IMD_ENG) %>%
summarise(n = n())
jpeg('C:/NEED/plots/inv_IMD_ENG_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
IMD %>%
ggplot(aes(cluster, n, fill = inv_IMD_ENG)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) + geom_bar(stat = "identity") +
theme(axis.text=element_text(size=14,face="bold", colour = "black")) +
ggtitle("1/IMD (deprivation)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
TYPE <- data_post_cluster %>%
group_by(cluster,PROP_TYPE) %>%
summarise(n = n())
jpeg('C:/NEED/plots/prop_type_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
TYPE %>%
ggplot(aes(cluster, n, fill = PROP_TYPE)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) + geom_bar(stat = "identity") +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Property Type") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
AREA <- data_post_cluster_NAMES %>%
group_by(cluster,FLOOR_AREA_BAND) %>%
summarise(n = n())
jpeg('C:/NEED/plots/area_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
AREA %>%
ggplot(aes(cluster, n, fill = FLOOR_AREA_BAND)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) + geom_bar(stat = "identity") +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Floor Area Band") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
EE <- data_post_cluster_NAMES %>%
group_by(cluster,EE_BAND) %>%
summarise(n = n())
jpeg('C:/NEED/plots/EE_Band_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
EE %>%
ggplot(aes(cluster, n, fill = EE_BAND)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) + geom_bar(stat = "identity") +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("EE Band") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
#######################################################################
DEPTH <- data_post_cluster_NAMES %>%
group_by(cluster,LOFT_DEPTH) %>%
summarise(n = n())
jpeg('C:/NEED/plots/loft_depth_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
DEPTH %>%
ggplot(aes(cluster, n, fill = LOFT_DEPTH)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Loft Depth") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
CAVITY <- data_post_cluster_NAMES %>%
group_by(cluster,WALL_CONS) %>%
summarise(n = n())
jpeg('C:/NEED/plots/cavity_wall_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
CAVITY %>%
ggplot(aes(cluster, n, fill = WALL_CONS)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Wall Construction") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
CWI_gov <- data_post_cluster_NAMES %>%
group_by(cluster,CWI) %>%
summarise(n = n())
jpeg('C:/NEED/plots/goverment_CWI_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
CWI_gov %>%
ggplot(aes(cluster, n, fill = CWI)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Cavity Wall Insulated") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
#AAA <- data_post_cluster %>%
# subset(BOILER==1 & BOILER_YEAR<2012)
BOILER_records <- data_post_cluster_NAMES %>%
group_by(cluster,BOILER) %>%
summarise(n = n())
jpeg('C:/NEED/plots/boiler_5_clusters.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
BOILER_records %>%
ggplot(aes(cluster, n, fill = BOILER)) + geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text=element_text(size=12,face="bold", colour = "black")) +
ggtitle("Boiler") +
theme(plot.title = element_text(lineheight=.8, face="bold", size=14))
par(oldpar)
dev.off()
####### Calculate quartiles within clusters #####################
TOT_ENERGY <- data_post_cluster_NAMES %>%
group_by(cluster) %>%
mutate(quartile = ntile(energy, 4)) %>%
ungroup()
# Arrange by numeric values
DATA_FINAL_ALL <- TOT_ENERGY %>%
arrange(cluster, quartile, energy)
# write.csv(DATA_FINAL_ALL, file = "DATA_FINAL_ALL.csv", row.names=TRUE)
# Counts of flats per cluster quartile
TOT_ENERGY %>%
group_by(cluster, quartile) %>%
summarise(n = n())
# # Get HH_ID with high consumption for their cluster
# data_high <- TOT_ENERGY %>%
# filter(quartile == 4)
# View(a[, 50:101])
DATA_ENERGY <- TOT_ENERGY %>%
arrange(cluster, quartile, energy)
# %>%
#select(HH_ID, cluster,quartile, energy)
DATA_ENERGY$quartile[DATA_ENERGY$quartile==1] <- "LL"
DATA_ENERGY$quartile[DATA_ENERGY$quartile==2] <- "L"
DATA_ENERGY$quartile[DATA_ENERGY$quartile==3] <- "H"
DATA_ENERGY$quartile[DATA_ENERGY$quartile==4] <- "HH"
write.csv(DATA_ENERGY, file = "Ranking_and_Clustering_DATA_ENERGY.csv", row.names=TRUE)
Summary_Energy <- TOT_ENERGY %>%
group_by(cluster,quartile) %>%
summarise(n = n())
Summary_Energy$quartile[Summary_Energy$quartile==1] <- "LL"
Summary_Energy$quartile[Summary_Energy$quartile==2] <- "L"
Summary_Energy$quartile[Summary_Energy$quartile==3] <- "H"
Summary_Energy$quartile[Summary_Energy$quartile==4] <- "HH"
#
# Summary_Energy %>%
# ggplot(aes(cluster, n, fill = cluster)) + geom_bar(stat = "identity") +
# theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
# ggtitle("Summary n.properties") +
# theme(plot.title = element_text(lineheight=.8, face="bold"))
#
#
# Summary_Energy %>%
# ggplot(aes(cluster, n, fill = quartile)) + geom_bar(stat = "identity") +
# theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))+
# ggtitle("Summary n. properties") +
# theme(plot.title = element_text(lineheight=.8, face="bold"))
# write.csv(Summary_Energy, file = "Summary_Energy.csv", row.names=TRUE)
##############################################################
#### Gas #####################################################
jpeg('C:/NEED/plots/Gas_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(Gcons2012)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Gas 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
#data_energy_gas <- data_energy %>%
# filter(!is.na(IMD_ENG)) %>%
BOILER_NULL <- data_post_cluster %>%
subset(is.na(BOILER))
BOILER_OK <- data_post_cluster %>%
subset(BOILER ==1)
BOILER_Before2012 <- data_post_cluster %>%
subset(BOILER == 1 & BOILER_YEAR < 2012)
BOILER_2012 <- data_post_cluster %>%
subset(BOILER ==1 & BOILER_YEAR == 2012)
Data_Cluster_1 <- data_post_cluster %>%
subset(cluster == 1)
Data_Cluster_2 <- data_post_cluster %>%
subset(cluster == 2)
Data_Cluster_3 <- data_post_cluster %>%
subset(cluster == 3)
Data_Cluster_4 <- data_post_cluster %>%
subset(cluster == 4)
jpeg('C:/NEED/plots/Gas_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(Gcons2012, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Gas 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
###### Electricity #########
jpeg('C:/NEED/plots/Electricity_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(Econs2012)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Electricity 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Electriciy_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(Econs2012, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Electricity 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
####### Total Energy #####################################
jpeg('C:/NEED/plots/Energy_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(energy)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Total Energy 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Energy_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(energy, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Total Energy 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
####### Gas vs floor area ###########################################
jpeg('C:/NEED/plots/Gas_vs_area_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(gas_floor_area)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Gas/Floor Area Band 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Gas_vs_area_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(gas_floor_area, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Gas/Floor Area Band 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
######### Electricity ve floor area ########
jpeg('C:/NEED/plots/Electricity_vs_area_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(electricity_floor_area)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Electricity/Floor Area Band 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Electricity_vs_area_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(electricity_floor_area, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Electricity/Floor Area Band 2012") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
###### Total Energy vs 1/IMD #########
jpeg('C:/NEED/plots/Energy_vs_inv_IMD_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(energy_inv_IMD_ENG)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Energy/ (1/IMD)") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Energy_vs_inv_IMD_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(energy_inv_IMD_ENG, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Energy/ (1/IMD)") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
data_post_cluster <- read.csv("data_post_cluster.csv")
data_post_cluster$energy_IMD_ENG <- (data_post_cluster$energy)/(data_post_cluster$IMD_ENG)
write.csv(data_post_cluster, file = "data_post_cluster.csv", row.names=TRUE)
###### Total Energy vs IMD #########
jpeg('C:/NEED/plots/Energy_vs_IMD_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(energy_IMD_ENG)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Energy / IMD") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Energy_vs_IMD_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(energy_IMD_ENG, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Energy / IMD") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
###### Total Energy vs EE band #########
jpeg('C:/NEED/plots/Energy_vs_EE_band_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>% ### without clustering
ggplot(aes(energy_EE_BAND)) +
geom_density(fill = "dodgerblue", alpha = .5) +
ggtitle("Energy/EE band") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
jpeg('C:/NEED/plots/Energy_vs_EE_band_cluster_density.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
data_post_cluster %>%
ggplot(aes(energy_EE_BAND, fill = as.factor(cluster),
colour = as.factor(cluster))) +
geom_density(alpha = 0.5) +
ggtitle("Energy/EE band") +
theme(plot.title = element_text(lineheight=.8, face="bold",
size=18, colour = "black"))
par(oldpar)
dev.off()
|
d10d928e6875c748c37a9051bf3f966cf894039d
|
2077c16a184d041a8b36a511bd47266e4e74bf7a
|
/R/mod_SiteStats.R
|
fd09dc2db86761bdfef5ed6d66724c3d023ecd58
|
[
"MIT"
] |
permissive
|
genobobeno/buildaflame
|
aa4325858144a41360ac3e62e21719013383311d
|
ebc3197792cf37377bc7922821d2e6ec3d91b708
|
refs/heads/master
| 2022-11-23T12:17:56.845574
| 2020-07-10T04:26:52
| 2020-07-10T04:26:52
| 263,195,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
mod_SiteStats.R
|
#' SiteStats UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_SiteStats_ui <- function(id){
  # Namespacing function for this module instance; wrap any input/output ids
  # in ns() when widgets are added to this (currently empty) skeleton.
  ns <- NS(id)
  tagList(
  )
}
#' SiteStats Server Function
#'
#' @noRd
mod_SiteStats_server <- function(input, output, session){
  # Module server skeleton: capture the session namespace for use by any
  # renderers/observers added later. No reactive logic is implemented yet.
  ns <- session$ns
}
## To be copied in the UI
# mod_SiteStats_ui("SiteStats_ui_1")
## To be copied in the server
# callModule(mod_SiteStats_server, "SiteStats_ui_1")
|
7d5b4974423651e9e32bb9625642c3b58b135e12
|
6b0a7dad237b597261591f95aaf495f7019c27d9
|
/shiny_msconvert/app.R
|
3805724c9b8745025fef238cff384b5ab74b9193
|
[] |
no_license
|
danielbraas/ShinyDB
|
8f5abccbaf60779ec40a7867edd527e65d4148ce
|
c8d9f6e8efdf30123b91b50add9966ea1f492d13
|
refs/heads/master
| 2022-04-04T15:43:37.327413
| 2019-12-24T04:53:27
| 2019-12-24T04:53:27
| 115,495,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,271
|
r
|
app.R
|
library(tidyverse)
library(shiny)
library(shinyFiles)
#' Convert .raw mass-spec files to centroided mzXML via the msconvert CLI.
#'
#' @param files     Shiny reactive returning the .raw file paths to convert.
#' @param pol       Optional polarity filter string passed to msconvert
#'                  (e.g. a "--filter polarity positive" fragment); NULL skips it.
#' @param msconvert Shiny reactive returning the path to MSConvert.exe.
#' @param out       Optional shiny reactive returning the output directory.
#' @return Invisibly NULL; called for its side effects (shell conversion,
#'         optional file moves).
make_mzXML <- function(files, pol = NULL, msconvert, out=NULL){
  # `files` and `msconvert` are reactives; resolve them to concrete values.
  files <- files()
  msconvert <- msconvert()
  out_flag <- NULL
  if (!is.null(out)){
    out_flag <- paste('-o', out())
    print(out_flag)
  }
  # Convert the files one by one. paste() silently drops NULL components
  # (pol / out_flag), so optional arguments simply vanish from the command.
  for (i in seq_along(files)){
    shell(paste(msconvert,
                files[i],
                '--mzXML --filter "peakPicking true 1-"',
                pol,
                out_flag))
  }
  # Move converted positive-mode files into a 'pos' subfolder.
  # BUG FIXES vs the original:
  #  - the function body was never closed (missing "}"), which swallowed the
  #    rest of the file into this function;
  #  - grepl('pos', NULL) returns logical(0), so a bare if () crashed when
  #    pol was left at its default — guard with !is.null(pol);
  #  - file.rename() was called with no arguments, which always errors.
  #    NOTE(review): assumes converted files land in the working directory
  #    when no output dir is given — confirm against msconvert behaviour.
  if (!is.null(pol) && grepl('pos', pol)){
    dir.create('pos', showWarnings = FALSE)
    converted <- list.files(pattern = "\\.mzXML$")
    file.rename(converted, file.path('pos', converted))
  }
  invisible(NULL)
}
# Define the UI: pickers for msconvert.exe, the .raw input files, polarity
# and the output directory, plus echo panels showing the current selections.
ui <- fluidPage(
# Application title
titlePanel("Convert .raw files to mzXML"),
# Sidebar with the file/directory pickers and the conversion trigger
sidebarLayout(
sidebarPanel(
shinyFilesButton(id = 'msconvert',
label = 'MSConvert.exe',
title = 'Please point to the MSConvert.exe file',
multiple = F),
hr(),
shinyFilesButton(id = 'files',
label = 'Choose files',
title = 'Please files to be converted',
multiple = T),
hr(),
selectInput(inputId = 'pol',
label = 'Polarity',
choices = c('Negative', 'Positive', 'Both', 'Default')),
shinyDirButton(id = 'out',
label = 'Target directory',
title = 'Please chose directory for files to be converted to'),
actionButton(inputId = 'go',
label = 'Start')
),
# Main panel simply echoes the selected paths back to the user.
mainPanel(
p("MSConvert.exe:"),
verbatimTextOutput('MSConvert'),
p('Selected files:'),
verbatimTextOutput('Files'),
p('Target directory:'),
verbatimTextOutput('OUT')
)
)
)
# Server logic: wire up the shinyFiles pickers, expose the chosen paths as
# reactives, and run the conversion when the Start button is pressed.
server <- function(input, output, session) {
#define what hard drive(s) can be accessed
# NOTE(review): hard-coded machine-specific roots -- portability issue.
roots = c(wd = 'C:/Users/Daniel/Dropbox/R_projects/test',
msconvert = 'C:/pwiz')
# Picker for the msconvert executable (.exe only).
shinyFileChoose(input = input,
id = 'msconvert',
roots = roots,
filetypes = 'exe')
msconvert <- reactive({
req(input$msconvert)
parseFilePaths(roots = roots, input$msconvert)$datapath
})
output$MSConvert <- renderPrint({
req(input$msconvert)
parseFilePaths(roots = roots, input$msconvert)$datapath
})
# Picker for the .raw input files (multiple allowed, see UI).
shinyFileChoose(input = input,
id = 'files',
roots = roots,
filetypes = 'raw')
files <- reactive({
req(input$files)
parseFilePaths(roots = roots, input$files)$datapath
})
output$Files <- renderPrint({
req(input$files)
parseFilePaths(roots = roots, input$files)$datapath
})
# Picker for the output directory.
shinyDirChoose(input = input,
id = 'out',
roots = roots)
out <- reactive({
req(input$out)
parseDirPath(roots = roots, input$out)
})
output$OUT <- renderPrint({
req(input$out)
parseDirPath(roots = roots, input$out)
})
# Start button: dispatch on the chosen polarity. The reactives themselves
# (not their values) are passed to make_mzXML, which calls them internally.
# 'Both' runs the conversion twice, once per polarity filter.
observeEvent(input$go, {
req(input$files, input$msconvert, input$pol)
switch(input$pol,
Negative = make_mzXML(files, pol=' --filter \"polarity negative\"', msconvert, out),
Positive = make_mzXML(files, pol=' --filter \"polarity positive\"', msconvert, out),
Both = {make_mzXML(files, pol=' --filter \"polarity negative\"', msconvert, out);
make_mzXML(files, pol=' --filter \"polarity positive\"', msconvert, out)},
Default = make_mzXML(files, pol = NULL, msconvert = msconvert, out=out)
)
})
}
# Run the application
shinyApp(ui = ui, server = server)
## To start this app from the R command line type:
## shell("R -e \"shiny::runApp(\'load_table5.R')\"")
|
db4aa58c68273cfde30acfb025fbf481a0ae5614
|
c84337f414140e2fd23b5392d01e557b3fd95528
|
/R/zzz.R
|
3f24f62eea8cd3dd6dee0a71b2b62288018c49af
|
[] |
no_license
|
cran/maxent
|
15ac2bcc6b47281c2d4880d28ea9b10f926c3ff7
|
9d46c6aad27a1f41a78907b170ddd9a586192be9
|
refs/heads/master
| 2020-05-31T16:03:29.572899
| 2013-04-06T00:00:00
| 2013-04-06T00:00:00
| 17,697,316
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 258
|
r
|
zzz.R
|
# Namespace load hook for the maxent package: runs once when the namespace
# is loaded, before the package is attached.
.onLoad <-
function(libname, pkgname) {
# Load the package's compiled shared library.
library.dynam("maxent",pkgname,libname);
# Expose the Rcpp module in the base namespace so it is reachable from
# anywhere. NOTE(review): assigning into .BaseNamespaceEnv is a legacy
# workaround and would not pass current CRAN checks -- confirm before reuse.
assign("maximumentropy",Module("maximumentropy",PACKAGE="maxent"),envir=.BaseNamespaceEnv);
# Register the S4 class holding a fitted model and its weights at load time.
setClass("maxent", representation(model = "character", weights = "data.frame"));
}
|
fb29a189375a309aae07e234a2c0a09f3a31aadd
|
93d426e1a913d462a7969c84feae89cf59fef34e
|
/man/data_process.1_core.Rd
|
62cf831bb000bcd68b630e771a1e60828b446e2b
|
[] |
no_license
|
cran/polypharmacy
|
2c14ad772d1ccd01118b24d602388a2366fc7057
|
0f9cc8c7ffe7849356d176ece3f09b8f30c9b1b1
|
refs/heads/master
| 2023-06-21T22:59:22.184540
| 2021-07-12T08:30:02
| 2021-07-12T08:30:02
| 385,299,314
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,234
|
rd
|
data_process.1_core.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_process.R
\encoding{UTF-8}
\name{data_process.1_core}
\alias{data_process.1_core}
\title{Data Process}
\usage{
data_process.1_core(
Rx_deliv,
Rx_id,
Rx_drug_code,
Rx_drug_deliv,
Rx_deliv_dur,
Cohort = NULL,
Cohort_id = NULL,
Hosp_stays = NULL,
Hosp_id = NULL,
Hosp_admis = NULL,
Hosp_discharge = NULL,
study_start = NULL,
study_end = NULL,
grace_fctr = 0.5,
grace_cst = 0,
max_reserve = NULL
)
}
\arguments{
\item{Rx_deliv}{Name of the table listing all prescription drugs deliveries including the run-in period. See \emph{Details}.}
\item{Rx_id}{Column name of \code{Rx_deliv} containing individual unique identifier (any format).}
\item{Rx_drug_code}{Column name of \code{Rx_deliv} that contains the drug unique identifier (any format).}
\item{Rx_drug_deliv}{Column name of \code{Rx_deliv} that contains the dates of the drug delivery (Date format, see \emph{Details}).}
\item{Rx_deliv_dur}{Column name of \code{Rx_deliv} that contains the duration of the delivery (integer number).}
\item{Cohort}{Name of the table providing the unique identifiers of the study cohort. Only the ids listed in both the \code{Cohort} and the \code{Rx_deliv} tables will be returned. if \code{Cohort = NULL}, all ids of the \code{Rx_deliv} table will be returned.}
\item{Cohort_id}{Column name of \code{Cohort} containing individual’s unique identifiers (same format as \code{Rx_id}). If \code{Cohort} is not \code{NULL} and \code{Cohort_id} is \code{NULL}, \code{Cohort_id} will take the same value as \code{Rx_id}.}
\item{Hosp_stays}{Name of the table listing all hospital stays. (see \emph{Details} for possible format).}
\item{Hosp_id}{Column name of \code{Hosp_stays} containing individual’s unique identifier (same format as \code{Rx_id}). If \code{Hosp_stays} is not \code{NULL} and \code{Hosp_id} is \code{NULL}, \code{Hosp_id} will take the same value as \code{Rx_id}.}
\item{Hosp_admis}{Column name of \code{Hosp_stays} that contains the date of admission in hospital (Date format, see \emph{Details}).}
\item{Hosp_discharge}{Column name of \code{Hosp_stays} that contains the date of discharge from hospital (Date format, see \emph{Details}).}
\item{study_start}{Defines the first and last day of the study period for which the polypharmacy indicator(s) need to be calculated. All treatment periods prior to \code{study_start} and past \code{study_end} are not transcribed into the result table (Date format, see \emph{Details}).}
\item{study_end}{Defines the first and last day of the study period for which the polypharmacy indicator(s) need to be calculated. All treatment periods prior to \code{study_start} and past \code{study_end} are not transcribed into the result table (Date format, see \emph{Details}).}
\item{grace_fctr}{Numbers \eqn{\ge} 0. Two types of grace periods can be applied. One is proportional to the treatment duration of the latest delivery (\code{grace_fctr}) and the other is a constant number of days (\code{grace_cst}).}
\item{grace_cst}{Numbers \eqn{\ge} 0. Two types of grace periods can be applied. One is proportional to the treatment duration of the latest delivery (\code{grace_fctr}) and the other is a constant number of days (\code{grace_cst}).}
\item{max_reserve}{An integer number \eqn{\ge} 0 or \code{NULL}. Longest treatment duration, in days, that can be stored from successive overlapping deliveries. When \code{max_reserve = NULL} no limit is applied. When \code{max_reserve = 0} no accumulation of extra treatment duration is accounted for.}
}
\value{
\code{data.table} with four (4) variables:
\itemize{
\item The individual unique identifier which name is defined by \code{Rx_id}.
\item The drug unique identifier which name is defined by \code{Rx_drug_code}.
\item \code{tx_start}: The date of initiation of the reconstructed continued treatment (format as date).
\item \code{tx_end}: The date of the last day of the reconstructed continued treatment (format as date).
}
}
\description{
\code{\link{data_process}} but with only 1 core. To use in the multicores process.
}
\keyword{internal}
|
b77e4fa4c0653e5b20758c751759a0b043e192de
|
11311aea4618398dc7a217241809e6fc48fca8e0
|
/man/progress_ring_percent_construct.Rd
|
c20936426aa63c161f66c57d1cc1d03d0998233a
|
[] |
no_license
|
suharoschi/switchboard
|
81a8a6b4f3dd9698ed207cd5ec122a0c7db7edd4
|
9fc25eb9e0facba3d8da971dff742bc9701aec0b
|
refs/heads/main
| 2023-08-06T18:44:19.906946
| 2021-10-08T19:07:35
| 2021-10-08T19:07:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 466
|
rd
|
progress_ring_percent_construct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/progress_ring_percent.R
\name{progress_ring_percent_construct}
\alias{progress_ring_percent_construct}
\title{helper function that constructs canvas items of a progress_ring_percent widget}
\usage{
progress_ring_percent_construct(.switchboard, label = " ", size = 1, ...)
}
\description{
helper function that constructs canvas items of a progress_ring_percent widget
}
\keyword{internal}
|
5724fda64e51249227004d05d8232bcba324f61a
|
68fb75bc1bf3bde79d6b3c3ad2cdcf697a713762
|
/man/orthogproj.Rd
|
c9e98df9127bdd414a7bd1027129158fdad625b2
|
[] |
no_license
|
baddstats/globe
|
993aa9b056fb3bbedf4506fbb115619e51a3a453
|
f2af47e917376daa13d4ba478de79bbde4a8dcf4
|
refs/heads/master
| 2021-01-13T06:10:18.820841
| 2017-05-12T05:22:46
| 2017-05-12T05:22:46
| 49,102,940
| 4
| 0
| null | 2017-02-11T06:33:40
| 2016-01-06T01:09:12
|
R
|
UTF-8
|
R
| false
| false
| 1,403
|
rd
|
orthogproj.Rd
|
\name{orthogproj}
\alias{orthogproj}
\title{
Orthogonal Projection
}
\description{
Project points from the unit sphere onto a plane orthogonal
to the viewing direction.
}
\usage{
orthogproj(eye, top, loc)
}
\arguments{
\item{eye}{
Viewpoint. A vector of length 3 (or a \code{list(lon,lat)})
determining a position in space.
}
\item{top}{
Top point. A location which will be projected onto the \eqn{y} axis.
Vector of length 3 (or a \code{list(lon,lat)})
determining a position in space.
}
\item{loc}{
Vector of length 3, or matrix with 3 columns,
or \code{list(lon,lat)}. The points on the sphere to be projected.
}
}
\details{
This function is used to obtain orthogonal projections of
points on the sphere, for use in plotting 3D views of the sphere.
Each point of \code{loc} is expressed
in an orthonormal coordinate system determined by the
arguments \code{eye} and \code{top}. The coordinate system is
such that the third (\eqn{z}) axis
passes through the eye of the viewer and the
centre of the sphere.
}
\value{
A vector (or matrix) giving the transformed coordinates of each point,
with the first two coordinates giving the orthogonal projection.
}
\author{
Adrian Baddeley and Tom Lawrence
}
\seealso{
\code{\link{globeearth}}
}
\examples{
orthogproj(place("newyorkcity"), place("northpole"), c(1,0,0))
}
\keyword{spatial}
|
51f4b11abfab4cb36339875836b0df2923c1da4c
|
27b622ba3d99a98cd886c75fa321592c387b42ef
|
/Materialy/L8.R
|
5f3bb4261fd33bf6b253c9163f69c6faaa9b9fa5
|
[] |
no_license
|
Kaketo/2020Z-ProgramowanieWR
|
76c560d06b1705a6ba8ab904bbab7fafba035d99
|
d4c3b8654666716ac93f7c55c841e0f79fc9cd01
|
refs/heads/master
| 2020-08-05T15:44:34.804707
| 2020-01-30T07:13:43
| 2020-01-30T07:13:43
| 212,601,214
| 1
| 0
| null | 2020-01-30T07:09:58
| 2019-10-03T14:30:35
|
HTML
|
UTF-8
|
R
| false
| false
| 2,388
|
r
|
L8.R
|
# Demo script: retrieving archived ggplot objects with {archivist}, then
# building reproducible pipelines with {drake} on the iris data set.
library(archivist)
# for demonstration purposes only
cacheRepo <- tempfile()
createLocalRepo(cacheRepo)
# https://github.com/ropensci/git2r
library(ggplot2)
# objects of class ggplot for which the session_info was archived
# https://github.com/pbiecek/graphGallery
md5plots <- searchInRemoteRepo(
pattern = c("class:ggplot", "session_info"),
intersect = TRUE, repo = "graphGallery",
user = "pbiecek", fixed = FALSE
)
# Download each archived plot and stamp it with its own md5 hash as title.
plots <- lapply(md5plots, function(pl) {
loadFromRemoteRepo(
md5hash = pl,
repo = "graphGallery",
user = "pbiecek",
value = TRUE
) +
ggtitle(pl)
})
aread('pbiecek/graphGallery/5e9558aed86ab3d6657f52441d0f9b5a')
library(drake)
library(mlr)
library(kernlab)
# Manual (non-drake) version of the workflow, for comparison.
dat <- read.csv("https://raw.githubusercontent.com/mini-pw/2020Z-ProgramowanieWR/master/Wyjsciowki/W2/gr1/SawickiJan/ShinyIris/iris.csv")
task <- makeClassifTask(id = "drake_test", data = dat, target = "variety")
bench <- benchmark(learners = makeLearner("classif.ksvm"), tasks = task)
preds <- data.frame(getBMRPredictions(bench))
# Same workflow as a drake plan (note: randomForest learner here, not ksvm).
plan <- drake_plan(
dat = read.csv("https://raw.githubusercontent.com/mini-pw/2020Z-ProgramowanieWR/master/Wyjsciowki/W2/gr1/SawickiJan/ShinyIris/iris.csv"),
task = makeClassifTask(id = "drake_test", data = dat, target = "variety"),
bench = benchmark(learners = makeLearner("classif.randomForest"), tasks = task),
preds = data.frame(getBMRPredictions(bench))
)
make(plan)
readd(bench)
# ls()
# mean(x <- 1L:5)
# ls()
# Two further plans: the first also persists the benchmark to disk; the
# second only renames the data target (dat1) to show cache invalidation.
my_first_plan <- drake_plan(
dat = read.csv("https://raw.githubusercontent.com/mini-pw/2020Z-ProgramowanieWR/master/Wyjsciowki/W2/gr1/SawickiJan/ShinyIris/iris.csv"),
task = makeClassifTask(id = "drake_test", data = dat, target = "variety"),
bench = benchmark(learners = makeLearner("classif.ksvm"), tasks = task),
preds = data.frame(getBMRPredictions(bench)),
save_bench = save(bench, file = "bench.RData")
)
my_second_plan <- drake_plan(
dat1 = read.csv("https://raw.githubusercontent.com/mini-pw/2020Z-ProgramowanieWR/master/Wyjsciowki/W2/gr1/SawickiJan/ShinyIris/iris.csv"),
task = makeClassifTask(id = "drake_test", data = dat1, target = "variety"),
bench = benchmark(learners = makeLearner("classif.ksvm"), tasks = task),
preds = data.frame(getBMRPredictions(bench))
)
(my_first_plan)
readd("bench")
make(my_first_plan)
make(my_second_plan)
vis_drake_graph(drake_config(my_first_plan))
|
8b0d47b4f05aad8dd96532d518a1ac28a72b8fca
|
82a2ceaa30010b57a5632a7d9a0b06e06cfa1efb
|
/NJW algrithom.R
|
4e4e653cf94701484eeefa98b6a2c18755f08211
|
[] |
no_license
|
xinz19/SpectralClustering
|
01cfe4c400826b884c6999a91284c94f15c89b5c
|
ebbcc800eeef3a3ae717dbcd4ad3184be481f7ee
|
refs/heads/main
| 2023-06-09T13:29:08.994978
| 2021-07-02T17:46:45
| 2021-07-02T17:46:45
| 382,422,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,423
|
r
|
NJW algrithom.R
|
#NJW Algorithm
#toy data simulation
#load library for multivariate normal random data
library(MASS)
#specify the means
# Two well-separated 2-D cluster centres.
mu1 <- c(35,10)
mu4 <- c(10,55)
#specify the covariance matrix (multivariate version of variance)
# Identity covariance -> spherical, unit-variance clusters.
sigma.matrix <- matrix(c(1,0,0,1),2,2,byrow = FALSE)
#check if the covariance matrix is positive definite
#we want all of the eigenvalues to be positive
eigen(sigma.matrix)
#make the data randomly from 2 populations
set.seed(1000)
gaussian1 <- mvrnorm(n = 200, mu=mu1, Sigma = sigma.matrix)
gaussian4 <- mvrnorm(n = 200, mu=mu4, Sigma = sigma.matrix)
# Stack the two samples into one 400 x 2 data matrix (rows = points).
my.gaussian.data <- rbind(gaussian1,gaussian4) #400*2
head(my.gaussian.data)
plot(my.gaussian.data)
#go to line 124 for scalable NJW
# Scale every row of x to unit Euclidean norm (wordspace::normalize.rows),
# so that tcrossprod of the result gives cosine similarities.
L2.Normalization <- function(x) {
  normalize.rows(x)
}
#step1: compute W matrix W=A %*% t(A)
# Dense similarity matrix W = x x' (cosine similarity when the rows of x
# are L2-normalised). Negative entries are clamped to zero so W is a valid
# non-negative affinity matrix for spectral clustering.
W.matrix <- function(x) {
  w <- tcrossprod(x)
  # Idiom fix: direct logical-mask assignment; which() was redundant here.
  w[w < 0] <- 0
  w
}
#step2: compute D
# Row degrees of the affinity matrix x; optionally drops the fraction
# `alpha` of rows with the lowest degree (treated as outliers).
# Returns list(D = kept degrees, A = kept rows of x, label = keep mask).
D.matrix.original <- function(x, alpha) {
  n.obs <- nrow(x)
  degree <- rowSums(x)
  keep <- rep(TRUE, n.obs)
  if (alpha > 0) {
    # NOTE(review): 1:round(...) misbehaves when round(n.obs * alpha) == 0;
    # kept as-is to preserve the original behaviour exactly.
    drop.idx <- order(degree)[1:round(n.obs * alpha)]
    keep[drop.idx] <- FALSE
    list(D = degree[-drop.idx], A = x[-drop.idx, ], label = keep)
  } else {
    list(D = degree, A = x, label = keep)
  }
}
#setp3: compute W_tilda
# Symmetrically normalised affinity D^{-1/2} W D^{-1/2} (the matrix whose
# top eigenvectors NJW clusters). Uses Matrix::Diagonal for sparse scaling.
W_tilda <- function(W, D) {
  inv.sqrt.deg <- as.numeric(D^(-0.5))
  Diagonal(x = inv.sqrt.deg) %*% W %*% Diagonal(x = inv.sqrt.deg)
}
#step4:eigen decomposition to w_tilda compute U
# Top-k eigenvectors (largest magnitude) of the normalised affinity,
# via RSpectra::eigs; columns are the spectral embedding coordinates.
U_matrix <- function(x, k) {
  eigs(x, k, which = "LM")$vectors
}
#step5: Uk row normalizations
#L2 normalization
# NOTE(review): dead REPL leftovers -- `U` is not defined at top level here,
# and `L2.normalization` (lower-case n) does not match the helper defined
# above (`L2.Normalization`); this chunk would error if executed as-is.
k=2
new.U=U[,1:k]
v=L2.normalization(new.U)
#original NJW function
# Classic NJW spectral clustering on the dense similarity matrix:
# normalise rows, form W and its symmetric normalisation, embed with the
# top-k eigenvectors, renormalise, then k-means the embedding.
# Returns the integer cluster assignment per row of A.
original.NJW <- function(A, alpha, k) {
  A.nor <- L2.Normalization(A)
  W <- W.matrix(A.nor)
  D <- D.matrix.original(W, alpha)$D
  # NOTE(review): when alpha > 0, D has fewer entries than nrow(W), which
  # makes the normalisation below non-conformable -- presumably intended
  # for alpha = 0 only; confirm with the author.
  embedding <- U_matrix(W_tilda(W, D), k)
  V <- L2.Normalization(embedding)
  kmeans(x = V, centers = k, iter.max = 10, nstart = 10)$cluster
}
# Time and score the dense NJW variant on the USPS digits (10 classes).
# NOTE(review): depends on usps_data/usps_labels loaded further down the
# script -- the file is an exploratory transcript, not top-to-bottom runnable.
t1=proc.time()
a<-original.NJW(usps_data,0,10)
proc.time()-t1
accuracy(usps_labels,a)
###Scalable NJW########
#step0: normalize A
library(Matrix)
library(wordspace)
# Earlier hand-rolled row normalisation, superseded by normalize.rows():
#L2.Normalization=function(x){
#  row.norm=apply(x,1,norm,type="2")
#  matrix.normalization=x/row.norm
#  return(matrix.normalization)
#}
# NOTE(review): usps_data is loaded later in the script (readMat section).
x=usps_data
# Duplicate of the L2.Normalization defined earlier in this file.
L2.Normalization=function(x){
matrix.normalization=normalize.rows(x)
return(matrix.normalization)
}
q<-L2.Normalization(x)
# Degree vector computed without materialising the n x n similarity matrix:
# deg = x (x' 1) - 1 (the -1 removes each point's self-similarity, which is
# 1 for L2-normalised rows). Optionally drops the alpha fraction of rows
# with lowest degree. Returns list(D, A = kept rows, label = keep mask).
D.matrix <- function(x, alpha) {
  n.obs <- nrow(x)
  ones <- matrix(1, nrow = n.obs)
  degree <- x %*% (t(x) %*% ones) - ones
  keep <- rep(TRUE, n.obs)
  if (alpha > 0) {
    # NOTE(review): 1:round(...) quirk kept to match the original exactly.
    drop.idx <- order(degree)[1:round(n.obs * alpha)]
    keep[drop.idx] <- FALSE
    list(D = degree[-drop.idx], A = x[-drop.idx, ], label = keep)
  } else {
    list(D = degree, A = x, label = keep)
  }
}
# Inspect the inverse-degree distribution to choose an outlier cut-off.
D<-D.matrix(q,0.01)$D
# NOTE(review): `a` is used in this plot before being assigned on the next
# line -- works in an interactive session run twice, errors in a fresh one.
plot(sort(1/D,decreasing=T),col=ifelse(a>0.00028,'red','blue'),xlab='',ylab='',xaxs='i')
axis(1,at=seq(0,9000,by=1000))
a=sort(1/D,decreasing=T)
summary(a)
A<-D.matrix(q,0)$A
a<-D.matrix(usps_data,0)$D
a<-a^(-1)
plot(sort(a))
#Step 2: calculate A_tilda
# Scale the rows of A by inverse square-root degree: A~ = D^{-1/2} A, so
# that A~ A~' equals the symmetrically normalised affinity (never formed).
A.tilda <- function(D, A) {
  row.scale <- as.numeric(D^(-0.5))
  Diagonal(x = row.scale) %*% A
}
#a<-A.tilda(D,A)
#install.packages("RSpectra")
library(RSpectra)
#step 3: Apply SVD on A
# Left singular vectors of A~ from a truncated SVD (RSpectra::svds); these
# equal the top-k eigenvectors of A~ A~', i.e. the spectral embedding.
A.svd <- function(A, k) {
  svds(A, k)$u
}
#accurarcy: from group2
library(clue)
# Clustering accuracy: finds the best one-to-one mapping between predicted
# cluster ids and true labels (minimum-weight bipartite matching solved with
# clue::solve_LSAP, i.e. the Hungarian algorithm), then scores the relabelled
# predictions. Requires the same number of distinct ids on both sides.
# Returns overall accuracy, per-class accuracy, the confusion table, the
# id mapping, and the relabelled predictions.
accuracy <- function(truelabels, clusters) {
#Hungarian Algorithm
# labels from cluster A will be matched on the labels from cluster B
minWeightBipartiteMatching <- function(clusteringA, clusteringB) {
require(clue)
idsA <- unique(clusteringA) # distinct cluster ids in a
idsB <- unique(clusteringB) # distinct cluster ids in b
nA <- length(clusteringA) # number of instances in a
nB <- length(clusteringB) # number of instances in b
if (length(idsA) != length(idsB) || nA != nB) {
stop("number of clusters do not match")
}
nC <- length(idsA)
tupel <- c(1:nA)
# computing the assignment matrix
# Cost[i, j] = number of points in cluster i of A or cluster j of B but
# not in their intersection (symmetric-difference size).
assignmentMatrix <- matrix(rep(-1, nC * nC), nrow = nC)
for (i in 1:nC) {
tupelClusterI <- tupel[clusteringA == i]
solRowI <- sapply(1:nC, function(i, clusterIDsB, tupelA_I) {
nA_I <- length(tupelA_I) # number of elements in cluster I
tupelB_I <- tupel[clusterIDsB == i]
nB_I <- length(tupelB_I)
nTupelIntersect <- length(intersect(tupelA_I, tupelB_I))
return((nA_I - nTupelIntersect) + (nB_I - nTupelIntersect))
}, clusteringB, tupelClusterI)
assignmentMatrix[i, ] <- solRowI
}
# optimization
result <- solve_LSAP(assignmentMatrix, maximum = FALSE)
attr(result, "assignmentMatrix") <- assignmentMatrix
return(result)
}
test <- minWeightBipartiteMatching(clusters, truelabels)
predicted = NULL;
# Relabel each predicted cluster id with its matched true-label id.
predicted <- rep(NA, length(clusters))
for(i in 1:length(test)) {
predicted[which(clusters == i)] <- test[i]
}
table <- table(predicted, truelabels)
# Overall accuracy: fraction of points on the confusion-table diagonal.
accuracy <- (sum(diag(table))/length(truelabels))
classaccuracy <- vector()
colsums <- colSums(table)
# Per-class accuracy: diagonal count over the class's true total.
for(i in 1:length(test)) {
classaccuracy[i] <- table[i,i]/colsums[i]
}
return(list("accuracy" = accuracy, "classaccuracy" = classaccuracy, "table" = table,
"mapping" = test, "mappedlabels" = predicted))
}
#combine everything together
# Scalable NJW spectral clustering: never forms the n x n similarity
# matrix. Rows are L2-normalised, low-degree outliers optionally dropped
# (fraction alpha), the embedding is taken from a truncated SVD of
# D^{-1/2} A, renormalised, and clustered with k-means.
# Returns the integer cluster assignment for each kept row.
sca.NJW <- function(A, alpha, k) {
  A.nor <- L2.Normalization(A)
  filtered <- D.matrix(A.nor, alpha)
  Atilda <- A.tilda(filtered$D, filtered$A)
  V <- L2.Normalization(A.svd(Atilda, k))
  # Fixed seed keeps the k-means result reproducible across runs.
  set.seed(1000)
  kmeans(x = V, centers = k, iter.max = 100, nstart = 10)$cluster
}
#obtaining outlier label
# Keep-mask for outlier removal: TRUE for rows that survive dropping the
# alpha fraction of lowest-degree rows (same filtering as sca.NJW).
outlier.function <- function(data, alpha) {
  D.matrix(L2.Normalization(data), alpha)$label
}
#check results on toy data
predicted_cluster=sca.NJW(my.gaussian.data,0,2)
true_label=c(rep(1,200),rep(2,200))
accuracy(predicted_cluster,true_label)
plot(my.gaussian.data,col=true_label+1,pch=true_label+1)
#simulation of more data
# Build progressively larger data sets for the timing benchmark.
# NOTE(review): rep() flattens each 200x2 matrix to a length-400n vector, so
# cbind() yields a 400n x 2 matrix whose COLUMNS are the two clusters --
# fine for timing, but not the same layout as my.gaussian.data. Confirm
# this is intentional before reusing for accuracy experiments.
gaussian.data<-function(n){
new.gaussian1<-rep(gaussian1,n)
new.gaussian4<-rep(gaussian4,n)
my.gaussian.data <- cbind(new.gaussian1,new.gaussian4)
return(my.gaussian.data)
} #400*2
#head(gaussian.data(2))
#dim(gaussian.data(2))
# data.list[[i]] holds the i-th (400*i rows) benchmark data set.
i=1
data.list<-list()
while(i<=10){
data.list[[i]]<-gaussian.data(i)
i=i+1
}
str(data.list)
# Wall-clock running time of sca.NJW on the first `x` simulated data sets
# from data.list (2 clusters, no outlier removal). Returns a numeric
# vector of elapsed seconds, one per data set.
running.time <- function(x) {
  # Preallocate instead of growing a NULL vector in the loop.
  elapse.time <- numeric(x)
  for (i in seq_len(x)) {
    # Bug fix: system.time(...)[2] is sys.self (CPU time in system calls,
    # usually ~0); the variable name says elapsed time, so take the
    # "elapsed" component instead.
    elapse.time[i] <- system.time(sca.NJW(data.list[[i]], 0, 2))[["elapsed"]]
  }
  elapse.time
}
#try with 400-4000
# Plot running time against sample size (400, 800, ..., 4000 rows).
time.vector=running.time(10)
n=400*c(1:10)
plot(n,time.vector,type="o",xlab="sample size",ylab="time",
main="Running time")
#usps data without remove outliers
#install.packages("R.matlab")
# NOTE(review): machine-specific setwd() -- not portable; prefer full paths.
setwd("~/Dropbox/sjsu/SPRING 2018/MATH 203/data")
library(R.matlab)
# USPS digits: fea = feature matrix, gnd = ground-truth digit labels.
usps_mat <- readMat("usps.mat")
usps_data <- usps_mat$fea
dim(usps_data)
usps_labels <- usps_mat$gnd
length(usps_labels)
table(usps_labels)
#without removing outliers
predicted_cluster.original<-sca.NJW(usps_data,0,10)
accuracy(usps_labels,predicted_cluster.original)
#69.26%
#usps with 10% outlier removed
data.nor=L2.Normalization(usps_data)
data.outlier.removed=D.matrix(data.nor,0.02)$A
label=D.matrix(data.nor,0.02)$label
#obtaining outlier label
# NOTE(review): duplicate of the outlier.function defined earlier in this file.
outlier.function<-function(data,alpha){
data.nor=L2.Normalization(data)
label=D.matrix(data.nor,alpha)$label
return(label)
}
outlier.label<-outlier.function(usps_data,0.02)
table(outlier.label)
predicted_clusters.10per=sca.NJW(usps_data,0.02,10)
# Score only the rows that survived outlier removal.
accuracy(usps_labels[outlier.label],predicted_clusters.10per)
#accuracy=0.7247
# Sweep outlier fraction 0%..10% and record accuracy at each alpha.
# NOTE(review): `a` is preallocated with length 10 but the loop writes 11
# entries (alpha = 0 through 0.1); the vector silently grows -- harmless
# here, but confirm the intended length.
a=rep(0,10)
for(i in 1:11){
outlier.percent<-seq(0,0.1,by=0.01)
data.nor=L2.Normalization(usps_data)
label=D.matrix(data.nor,outlier.percent[i])$label
predicted_clusters=sca.NJW(usps_data,outlier.percent[i],10)
a[i]<-accuracy(usps_labels[label],predicted_clusters)$accuracy
}
a.vector<-unlist(a,use.names = FALSE)
outlier.percent<-seq(0,0.1,by=0.01)
plot(outlier.percent,a.vector,type='o',ylab = 'Accuracy',
main = "Outlier Removal with USPS Data",yaxt='n')
axis(2,at=pretty(a.vector),lab=pretty(a.vector)*100,las=TRUE)
#KNN with outliers
# Idea: cluster the inliers with sca.NJW, then assign the removed outliers
# to clusters via k-nearest-neighbours on the normalised features.
#install.packages('class')
library(class)
#try 2% outliers first
data.nor=L2.Normalization(usps_data)
dim(data.nor)
fitted.data<-D.matrix(data.nor,0.02)$A #train
dim(fitted.data)
outlier.label<-outlier.function(usps_data,0.02)
length(outlier.label)
# Test set = the rows flagged as outliers.
test<-data.nor[which(outlier.label==FALSE),]
dim(test)
predicted_clusters.10per=sca.NJW(usps_data,0.02,10)
#method 1
knn.cluster<-knn(fitted.data, test, predicted_clusters.10per, k = 25, prob=F)
table(knn.cluster)
length(knn.cluster)
#Check accuracy again
# Combine inlier clusters with kNN-assigned outlier clusters, and align the
# true labels in the same (inliers first, outliers second) order.
y<-c(predicted_clusters.10per,knn.cluster)
usps_labels[outlier.label]
usps_labels[which(outlier.label==FALSE)]
y.true<-c(usps_labels[outlier.label],usps_labels[which(outlier.label==FALSE)])
#length(usps_labels[which(outlier.label==FALSE)])
accuracy(y.true,y)$accuracy
#run this with k=1-30
# Sweep the number of neighbours k = 1..35 and record overall accuracy.
a=rep(0,35)
for(i in 1:35){
knn.cluster<-knn(fitted.data, test, predicted_clusters.10per, k = i, prob=F)
y<-c(predicted_clusters.10per,knn.cluster)
y.true<-c(usps_labels[outlier.label],usps_labels[which(outlier.label==FALSE)])
a[i]=accuracy(y.true,y)$accuracy}
a
plot(seq(1:35),a,xlab='Number of neighbors',ylab='Accuracy',yaxt='n',
main='Different k with 2% Outlier')
axis(2,at=pretty(a),lab=pretty(a)*100,las=TRUE)
abline(v=33,col='red')
a
max(a)
#experiment with document datasets
#reuters dataset#####
# Reuters term-document data: A = feature matrix, y = topic labels.
reuters_mat <- readMat("reuters.mat")
reuters_data <- reuters_mat$A
#library(Matrix)
#reuters_data<-Matrix(reuters_data,sparse = TRUE)
dim(reuters_data)
reuters_labels <- reuters_mat$y
length(reuters_labels)
table(reuters_labels)
#number of class=30
# Keep only the 30 most frequent topic classes.
r.label<-ifelse(reuters_labels<=30,TRUE,FALSE)
table(r.label)
reuters_data.new<-reuters_data[which(r.label==TRUE),]
dim(reuters_data.new) #8067
reuters_labels.new<-reuters_labels[r.label]
length(reuters_labels.new) #8067
table(reuters_labels.new)
#without removing outliers
t1=proc.time()
cluster.reuters<-sca.NJW(reuters_data.new,0,30)
length(cluster.reuters)
proc.time()-t1
table(cluster.reuters)
accuracy(reuters_labels.new,cluster.reuters)
length(reuters_labels.new)
#loop with different number of clusters
# Accuracy as a function of the number of retained classes (1..30); each
# iteration re-subsets the data to the first i classes and clusters into i.
reuter.accuracy<-rep(0,30)
for(i in 1:30){
r.label<-ifelse(reuters_labels<=i,TRUE,FALSE)
reuters_data.new<-reuters_data[which(r.label==TRUE),]
reuters_labels.new<-reuters_labels[r.label]
cluster.reuters<-sca.NJW(reuters_data.new,0,i)
reuter.accuracy[i]<-accuracy(reuters_labels.new,cluster.reuters)$accuracy
}
# NOTE(review): the axis() call below labels with pretty(a) -- `a` comes
# from the earlier USPS section, not from reuter.accuracy; confirm intended.
plot(seq(1:30),y=reuter.accuracy,type = "o",main='Accuracy for Different Clusters',
ylab='Accuracy(Reuters)',xlab="Number of Clusters",yaxt='n')
axis(2,at=pretty(a),lab=pretty(a)*100,las=TRUE)
save(reuter.accuracy,file = 'reuter with 30 clusters.rdata')
#looping with different percent of outliers
#without KNN
reuters.outlier=rep(0,10)
for(i in 1:11){
outlier.percent<-seq(0,0.1,by=0.01)
data.nor=L2.Normalization(reuters_data.new)
label=D.matrix(data.nor,outlier.percent[i])$label
predicted_clusters=sca.NJW(reuters_data.new,outlier.percent[i],30)
reuters.outlier[i]<-accuracy(reuters_labels.new[label],predicted_clusters)$accuracy
}
reuters.outlier
save(reuters.outlier,file = 'reuters.outliers.without KNN.rdata')
#with KNN
# Same sweep, but outliers are reassigned to clusters via 10-NN before scoring.
reuters.KNN.accuracy=rep(0,10)
library(class)
for(i in 1:11){
outlier.percent<-seq(0,0.1,by=0.01)
data.nor=L2.Normalization(reuters_data.new)
fitted.data<-D.matrix(data.nor,outlier.percent[i])$A #train
outlier.label<-outlier.function(reuters_data.new,outlier.percent[i])
test<-data.nor[which(outlier.label==FALSE),]
predicted_clusters<-sca.NJW(reuters_data.new,outlier.percent[i],30)
knn.cluster<-knn(fitted.data, test, predicted_clusters, k = 10, prob=F)
y<-c(predicted_clusters,knn.cluster)
y.true<-c(reuters_labels.new[outlier.label],reuters_labels.new[which(outlier.label==FALSE)])
reuters.KNN.accuracy[i]<-accuracy(y.true,y)$accuracy
}
length(predicted_clusters)
dim(fitted.data)
#TDT2 dataset####
# TDT2 document data: cluster into 30 topics with the scalable NJW.
TDT2_mat <- readMat("TDT2.mat")
TDT2_data <- TDT2_mat$A
#library(Matrix)
TDT2_data<-Matrix(TDT2_data,sparse = TRUE)
dim(TDT2_data)
TDT2_labels <- TDT2_mat$y
length(TDT2_labels)
table(TDT2_labels)
cluster.TDT2<-sca.NJW(TDT2_data,0,30)
table(cluster.TDT2)
accuracy(TDT2_labels,cluster.TDT2)
#plot
library(ggplot2)
#20newsgroup
# 20-newsgroups data: 20 classes.
newgroup_mat<-readMat("news20.mat")
newsgroup_data <- newgroup_mat$A
dim(newsgroup_data)
newsgroup_labels <- newgroup_mat$y
length(newsgroup_labels)
table(newsgroup_labels)
#single run
set.seed(1000)
newsgroup.cluster<-sca.NJW(newsgroup_data,0,20)
accuracy(newsgroup_labels,newsgroup.cluster)$accuracy
accuracy(newsgroup_labels[outlier.function(newsgroup_data,0.01)],newsgroup.cluster)
#with loops for different alpha
# Accuracy sweep over outlier fraction 1%..10% (inliers only).
newsgroup.accuracy=NULL
for(i in 1:10){
alpha=seq(0.01,0.1,by=0.01)
newsgroup.cluster<-sca.NJW(newsgroup_data,alpha[i],20)
newsgroup.accuracy[i]=accuracy(newsgroup_labels[outlier.function(newsgroup_data,alpha[i])],newsgroup.cluster)$accuracy
}
alpha=seq(0.01,0.1,by=0.01)
plot(alpha,newsgroup.accuracy,type = 'b',xaxt='n',yaxt='n',ylab='Accuracy',xlab='Outlier Percentage')
axis(2,at=pretty(unlist(newsgroup.accuracy)),lab=pretty(unlist(newsgroup.accuracy))*100,las=TRUE)
axis(1,at=pretty(alpha),lab=pretty(alpha)*100)
#with KNN too slow so just give up on this method
# Abandoned attempt: full kNN on all newsgroup rows was too slow.
library(class)
news.data<-L2.Normalization(newsgroup_data)
train<-news.data[outlier.function(newsgroup_data,alpha[i]),]
test<-news.data[-outlier.function(newsgroup_data,alpha[i]),]
clusters<-sca.NJW(news.data,alpha[i],20)
knn.cluster<-knn(train=train,test=test,cl=clusters,k=3,prob=FALSE)
#KNN with cluster 2nd try give up with too slow KNN function
# Second attempt: 1-NN against the 20 cluster centroids instead of all rows.
# NOTE(review): ncol = 55570 is hard-coded to this data set's vocabulary size.
knn.accuracy=NULL
alpha=seq(0.01,0.1,by=0.01)
center=Matrix(0,nrow=20,ncol=55570)
for(j in 1:10){
newsgroup.cluster<-sca.NJW(newsgroup_data,alpha[j],20)
outlier.label<-outlier.function(newsgroup_data,alpha[j])
newsgroup.data.withoutlier<-newsgroup_data[outlier.label,]
for(i in 1:20){
center[i,]<-apply(newsgroup.data.withoutlier[which(newsgroup.cluster==i),],2,mean)}
test<-newsgroup_data[which(outlier.function(newsgroup_data,alpha[j])==FALSE),]
knn.cluster<-knn(train=center,test=test,cl=seq(1,20),k=1,prob=FALSE)
total.cluster<-c(newsgroup.cluster,knn.cluster)
true.labels<-c(newsgroup_labels[which(outlier.label==TRUE)],
newsgroup_labels[which(outlier.label==FALSE)])
knn.accuracy[j]<-accuracy(true.labels,total.cluster)$accuracy
}
knn.accuracy
save(knn.accuracy,file='knn.accuracy.rdata')
# Compare the sweeps with and without the centroid-kNN outlier reassignment.
plot(alpha,newsgroup.accuracy,type = 'b',xaxt='n',yaxt='n',col='red',pch=5,
ylab='Accuracy',xlab='Outlier Percentage',main='Newsgroup Accuracy with Outliers')
axis(2,at=pretty(unlist(newsgroup.accuracy)),lab=pretty(unlist(newsgroup.accuracy))*100,las=TRUE)
axis(1,at=pretty(alpha),lab=pretty(alpha)*100)
lines(alpha,knn.accuracy,type='b',col='blue',pch=8)
legend('topleft',c('Without KNN','With KNN'),cex=0.5,col=c('red','blue'),lty=c(1,1))
#Mnist
# Additional data sets loaded for exploration (MNIST, pendigits); the
# sparsity check below computes the fraction of zero entries in MNIST.
Mnist_mat <- readMat("mnist.mat")
Mnist.data<-Mnist_mat$fea
dim(Mnist_mat$fea)
#pendigits
library(rrr)
data("pendigits")
dim(pendigits)
pend_mat <- readMat("pend.mat")
pend.data<-pend_mat$fea
dim(pend.data)
library(mlbench)
a<-Mnist.data[which(Mnist.data==0)]
dim(Mnist.data)
length(a)/(70000*784)
mnist.data<-Matrix(Mnist.data,sparse = T)
class(mnist.data)
|
1c00cb0b90b6bca88d3952d908970d39e219d4ec
|
9f79de70350e18beefe71c197ef274ac5c6d193f
|
/R/indentify_chol.R
|
6ad5f83ce11ac214f4a6df029fe677291857f3d7
|
[
"MIT"
] |
permissive
|
pat-alt/deepvars
|
2e1f2d6c5cdbab4ac0b8e9e31d6170b38c30e7c4
|
cba45c14228767e24afcaad1d3ed735fb876c1dd
|
refs/heads/master
| 2023-05-22T17:03:59.242706
| 2022-06-10T12:13:38
| 2022-06-10T12:13:38
| 249,440,715
| 9
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 534
|
r
|
indentify_chol.R
|
#' identify_chol
#'
#' @param Sigma_res Residual covariance matrix (numeric, symmetric, positive
#'   definite), e.g. the estimated covariance of VAR residuals.
#'
#' @return The structural impact matrix \code{B_0}: the inverse of the lower
#'   triangular Cholesky factor of \code{Sigma_res}. It satisfies
#'   \code{B_0 \%*\% Sigma_res \%*\% t(B_0) == diag(nrow(Sigma_res))},
#'   i.e. the implied structural shocks are orthogonal with unit variance.
#' @export
#'
#' @description Computes the coefficient matrix capturing contemporaneous
#'   effects through a short-run (recursive) restriction using the Cholesky
#'   decomposition.
identify_chol <- function(Sigma_res) {
  # Cholesky decomposition: chol() returns the upper factor R with
  # Sigma = R'R, so its transpose is the lower triangular factor P.
  P <- t(chol.default(Sigma_res))
  # Inverting P orthogonalises the residuals; see @return for the identity
  # this guarantees.
  B_0 <- solve(P)
  B_0
}
|
1d86cbac97095125951dc9e4b7da1c9f0b6ac1ac
|
71563f005b965ee07fa0c142db0442b76da8c1cd
|
/plot4.R
|
c6505f66ab83f3c5c1c7706266b1691087b6a5de
|
[] |
no_license
|
albemlee/ExData_Plotting1
|
09a5dd46af316a592b426357b9f73168e0097f65
|
e2de52de7dfc16de9ec10bf7185e6d5d7e56e00e
|
refs/heads/master
| 2021-01-18T09:54:23.249864
| 2014-06-08T15:04:05
| 2014-06-08T15:04:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,538
|
r
|
plot4.R
|
## plot4.R: draw a 2x2 panel of household power-consumption plots for
## 2007-02-01 and 2007-02-02 and write it to "plot4.png".

## Size of the raw data file on disk.  (The original called object.size()
## on the file *name*, which only measured the string itself.)
MemoryRequired <- file.size("household_power_consumption.txt")

## Load the data; "?" marks missing values in this data set.
data <- read.table("household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?")

## Keep only the two dates of interest.  A date can match at most one of the
## two values, so %in% is equivalent to the original xor() but clearer and
## NA-safe (rows with a missing Date are dropped instead of becoming NA rows).
data <- data[data[, "Date"] %in% c("1/2/2007", "2/2/2007"), ]

## Combine Date and Time into a single timestamp column.
DateTime <- strptime(paste(data[, "Date"], data[, "Time"], sep = " "),
                     format = "%d/%m/%Y %H:%M:%S")
data <- cbind(DateTime, data)

## Four panels: Global Active Power, Voltage, Energy Sub Metering,
## Global Reactive Power; written to "plot4.png".
png(filename = "plot4.png")
par(mfrow = c(2, 2))
plot(data[, "DateTime"], data[, "Global_active_power"], type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
plot(data[, "DateTime"], data[, "Voltage"], type = "l",
     ylab = "Voltage", xlab = "datetime")
plot(data[, "DateTime"], data[, "Sub_metering_1"], type = "l", col = "black",
     xlab = "", ylab = "Energy sub metering")
lines(data[, "DateTime"], data[, "Sub_metering_2"], type = "l", col = "red")
lines(data[, "DateTime"], data[, "Sub_metering_3"], type = "l", col = "blue")
legend("topright",
       legend = c("sub_metering_1", "sub_metering_2", "sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1)
plot(data[, "DateTime"], data[, "Global_reactive_power"], type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
d4f7d3381be86ec9988ca2ab39785e12569efaba
|
d7b7827edc7ba5c22c1c0efd635a9b7e2fd6e14b
|
/R/test.NI.R
|
0a3eb693a0b7846cc0d9dcac1202c6c30498fe58
|
[] |
no_license
|
Matteo21Q/dani
|
76b9c2c77a3d209e64c459275eb6a83ff2122cea
|
f895d6e8c6d09dd2a547458b9c410e8ed8772545
|
refs/heads/master
| 2022-06-25T08:57:58.336131
| 2022-06-15T20:55:02
| 2022-06-15T20:55:02
| 271,560,982
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,668
|
r
|
test.NI.R
|
# test.NI: one-sided non-inferiority test comparing two binomial proportions.
#
#   n0, n1       sample sizes in the control and experimental arm.
#   e0, e1       event counts in the control and experimental arm.
#   NI.margin    non-inferiority margin, expressed on the chosen `scale`.
#   sig.level    one-sided significance level (CIs are two-sided 1-2*sig.level).
#   scale        "RD" risk difference, "RR" risk ratio, "OR" odds ratio,
#                "AS" arc-sine difference.
#   print.out    print a human-readable summary?
#   unfavourable TRUE when the outcome event is bad (e.g. death); determines
#                which side of the margin counts as non-inferior.
#   test.type    CI construction method; only used when scale == "RD".
#   M.boot       bootstrap replications for the bootstrap-based methods.
#   BB.adj       gamma adjustment for the Berger-Boos method.
#
# Returns a list with elements estimate, se, Z, p and CI.
#
# NOTE(review): many branches rely on functions from external packages
# (BinomDiffCI from DescTools, scasci, farrington.manning, marginaleffects,
# boot/boot.ci, uncondExact2x2) — presumably imported via the package
# NAMESPACE; confirm before running standalone.
test.NI <- function(n0, n1, e0, e1, NI.margin, sig.level=0.025, scale="RD",
print.out=TRUE, unfavourable=TRUE, test.type="Wald",
M.boot=2000, BB.adj=0.0001) {
# ---- Input validation: fail early on malformed arguments. ----
stopifnot(is.numeric(n0), n0>0)
stopifnot(is.numeric(n1), n1>0)
stopifnot(is.numeric(e0), is.numeric(e1))
stopifnot(is.numeric(NI.margin))
stopifnot(is.numeric(sig.level), sig.level < 1, sig.level > 0)
stopifnot(is.character(scale),(( scale == "RD" ) || ( scale == "RR" ) || ( scale == "OR" ) || ( scale == "AS" )))
stopifnot(is.logical(print.out), !is.na(print.out))
stopifnot(is.character(test.type), ((test.type == "Wald") || (test.type == "Wald.cc") ||
(test.type=="Hauck.Anderson") || (test.type=="Gart.Nam") ||
(test.type == "Newcombe10") || (test.type == "Newcombe11") ||
(test.type == "Haldane") || (test.type == "Jeffreys.Perks") ||
(test.type == "Agresti.Caffo") || (test.type == "Miettinen.Nurminen") ||
(test.type == "Farrington.Manning") || (test.type == "logistic") ||
(test.type == "bootstrap") || (test.type=="Agresti.Min") ||
(test.type == "Brown.Li.Jeffreys") || (test.type=="Chan.Zhang") ||
(test.type == "BLNM") ||(test.type == "Mee") ||
(test.type == "midp") || (test.type=="Berger.Boos") ||
(test.type == "MUE.Lin") || (test.type == "MUE.parametric.bootstrap")))
stopifnot(is.logical(unfavourable), !is.na(unfavourable))
# ---- Risk difference scale: test.type selects the CI method. ----
if (scale=="RD") {
# The margin's sign must be consistent with the outcome direction.
if ((unfavourable == T)&&(NI.margin<=0)) stop("When outcome is unfavourable, a NI margin on the risk difference scale needs to be positive.")
if ((unfavourable == F)&&(NI.margin>=0)) stop("When outcome is favourable, a NI margin on the risk difference scale needs to be negative.")
NIm<- NI.margin
# Plain Wald standard error and point estimate of the risk difference.
se <- se.n <- sqrt(e1/n1*(1-e1/n1)/n1+e0/n0*(1-e0/n0)/n0)
estimate <- estimate.n <- e1/n1-e0/n0
Z <- (estimate - NIm)/se
if (test.type=="Wald") {
CI <- c(estimate-qnorm(1-sig.level)*se,estimate+qnorm(1-sig.level)*se)
} else if (test.type=="Wald.cc") {
# Continuity-corrected Wald: shift Z and widen the CI by (1/n0+1/n1)/2.
Z<-Z+(1/n0+1/n1)/2*(1-2*(unfavourable==F))
CI <- c(estimate-(qnorm(1-sig.level)*se+(1/n0+1/n1)/2),
estimate+(qnorm(1-sig.level)*se+(1/n0+1/n1)/2))
} else if (test.type=="Newcombe10") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "score")[2:3]
} else if (test.type=="Newcombe11") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "scorecc")[2:3]
} else if (test.type=="Gart.Nam") {
CI <- scasci(e1,n1,e0,n0, level=(1-sig.level*2))$estimates[c(1,3)]
} else if (test.type=="Agresti.Caffo") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "ac")[2:3]
} else if (test.type=="Haldane") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "hal")[2:3]
} else if (test.type=="Hauck.Anderson") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "ha")[2:3]
} else if (test.type=="Jeffreys.Perks") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "jp")[2:3]
} else if (test.type=="Miettinen.Nurminen") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "mn")[2:3]
} else if (test.type=="Mee") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "mee")[2:3]
} else if (test.type=="Farrington.Manning") {
CI <- farrington.manning(c(rep(TRUE, e1), rep(FALSE, n1-e1)),c(rep(TRUE, e0), rep(FALSE, n0-e0)), delta=NI.margin, alpha = sig.level, alternative = "greater")$conf.int
} else if (test.type=="Brown.Li.Jeffreys") {
CI <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "blj")[2:3]
} else if (test.type=="BLNM") {
# Weighted combination: 1/3 Brown-Li-Jeffreys + 2/3 Miettinen-Nurminen.
CI1 <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "blj")[2:3]
CI2 <- BinomDiffCI(e1, n1, e0, n0, conf.level = (1-sig.level*2), method = "mn")[2:3]
CI <- CI1/3 + 2 * CI2/3
} else if (test.type=="logistic") {
# Marginal risk difference from a logistic regression fit.
y<-c(rep(1, e1),rep(0, n1-e1),rep(1, e0), rep(0, n0-e0))
treat<-factor(c(rep(1,n1), rep(0, n0)))
dd<-data.frame(y,treat)
fit<-glm(y~treat, data=dd, family = binomial)
fit.std <- summary(marginaleffects(fit))
CI <- as.numeric(fit.std[7:8])
} else if ( test.type == "bootstrap") {
# Non-parametric percentile bootstrap of the risk difference.
y<-c(rep(1, e1),rep(0, n1-e1),rep(1, e0), rep(0, n0-e0))
treat<-factor(c(rep(1,n1), rep(0, n0)))
dd<-data.frame(y,treat)
rdif <- function(dat, indices) {
d <- dat[indices,] # allows boot to select sample
rd <- mean(d[d[,2]==1,1]) - mean(d[d[,2]==0,1])
return(rd)
}
res.b<-boot(dd, rdif, R=M.boot)
CI<-boot.ci(res.b, type="perc")$percent[4:5]
} else if (test.type == "Agresti.Min") {
fit<-uncondExact2x2(e0,n0,e1,n1, method="score", tsmethod = "square", conf.int = T)
CI <- as.numeric(fit$conf.int)
} else if (test.type == "Chan.Zhang") {
fit<-uncondExact2x2(e0,n0,e1,n1, method="score", tsmethod = "central", conf.int = T)
CI <- as.numeric(fit$conf.int)
} else if (test.type == "midp") {
fit<-uncondExact2x2(e0,n0,e1,n1, method="score", tsmethod = "square", midp=T, conf.int = T)
CI <- as.numeric(fit$conf.int)
} else if (test.type == "Berger.Boos") {
fit<-uncondExact2x2(e0,n0,e1,n1, method="score", tsmethod = "square", gamma=BB.adj, conf.int = T)
CI <- as.numeric(fit$conf.int)
} else if (test.type == "MUE.Lin") {
# Median-unbiased estimates of each arm's proportion (mid-p style,
# with the degenerate 0 and n cases handled separately).
p0.tilde<-ifelse(((e0>0)&&(e0<n0)),(qbeta(0.5,e0,n0-e0+1)+qbeta(0.5,e0+1,n0-e0))/2,
ifelse(e0==0,(1-0.5^(1/n0))/2,(1+0.5^(1/n0))/2))
p1.tilde<-ifelse(((e1>0)&&(e1<n1)),(qbeta(0.5,e1,n1-e1+1)+qbeta(0.5,e1+1,n1-e1))/2,
ifelse(e1==0,(1-0.5^(1/n1))/2,(1+0.5^(1/n1))/2))
# Enumerate every possible (ii, jj) outcome pair; columns hold the
# outcome probability, per-arm MUEs, their difference, the cumulative
# probability (filled later) and the raw counts.
probs.mat<-matrix(NA,(n0+1)*(n1+1),7)
kk=1
for (ii in 0:n0) {
for (jj in 0:n1) {
probs.mat[kk,6]<-ii
probs.mat[kk,7]<-jj
probs.mat[kk,2]<-ifelse(((ii>0)&&(ii<n0)),(qbeta(0.5,ii,n0-ii+1)+qbeta(0.5,ii+1,n0-ii))/2,
ifelse(ii==0,(1-0.5^(1/n0))/2,(1+0.5^(1/n0))/2))
probs.mat[kk,3]<-ifelse(((jj>0)&&(jj<n1)),(qbeta(0.5,jj,n1-jj+1)+qbeta(0.5,jj+1,n1-jj))/2,
ifelse(jj==0,(1-0.5^(1/n1))/2,(1+0.5^(1/n1))/2))
probs.mat[kk,4]<-probs.mat[kk,3]-probs.mat[kk,2]
probs.mat[kk,1]<-choose(n0,ii)*choose(n1,jj)*p0.tilde^ii*p1.tilde^jj*(1-p0.tilde)^(n0-ii)*(1-p1.tilde)^(n1-jj)
kk=kk+1
}
}
# Sort by the estimated difference and invert the cumulative
# distribution at sig.level and 1-sig.level by linear interpolation.
probs.mat<-probs.mat[order(probs.mat[,4]),]
probs.mat[,5]<-cumsum(probs.mat[,1])
ci.l.l<-max(which(probs.mat[,5]<=sig.level))
ci.l.u<-min(which(probs.mat[,5]>=sig.level))
ci.l<-(probs.mat[ci.l.l,4]*(probs.mat[ci.l.u,5]-sig.level)+probs.mat[ci.l.u,4]*(-probs.mat[ci.l.l,5]+sig.level))/(probs.mat[ci.l.u,5]-probs.mat[ci.l.l,5])
ci.u.l<-max(which(probs.mat[,5]<=(1-sig.level)))
ci.u.u<-min(which(probs.mat[,5]>=(1-sig.level)))
ci.u<-(probs.mat[ci.u.l,4]*(probs.mat[ci.u.u,5]-(1-sig.level))+probs.mat[ci.u.u,4]*(-probs.mat[ci.u.l,5]+1-sig.level))/(probs.mat[ci.u.u,5]-probs.mat[ci.u.l,5])
CI<-c(ci.l,ci.u)
} else if (test.type == "MUE.parametric.bootstrap") {
# Same MUE point estimates as above, but the CI comes from a
# parametric bootstrap: resample counts from Binomial(n, p.tilde).
p0.tilde<-ifelse(((e0>0)&&(e0<n0)),(qbeta(0.5,e0,n0-e0+1)+qbeta(0.5,e0+1,n0-e0))/2,
ifelse(e0==0,(1-0.5^(1/n0))/2,(1+0.5^(1/n0))/2))
p1.tilde<-ifelse(((e1>0)&&(e1<n1)),(qbeta(0.5,e1,n1-e1+1)+qbeta(0.5,e1+1,n1-e1))/2,
ifelse(e1==0,(1-0.5^(1/n1))/2,(1+0.5^(1/n1))/2))
y<-c(rep(1, e1),rep(0, n1-e1),rep(1, e0), rep(0, n0-e0))
treat<-factor(c(rep(1,n1), rep(0, n0)))
dd<-data.frame(y,treat)
# Statistic: MUE risk difference recomputed from the resampled counts.
rdif <- function(dat) {
p0.tilde.i<-ifelse(((as.numeric(dat[1,1])>0)&&(as.numeric(dat[1,1])<n0)),(qbeta(0.5,as.numeric(dat[1,1]),n0-as.numeric(dat[1,1])+1)+qbeta(0.5,as.numeric(dat[1,1])+1,n0-as.numeric(dat[1,1])))/2,
ifelse(as.numeric(dat[1,1])==0,(1-0.5^(1/n0))/2,(1+0.5^(1/n0))/2))
p1.tilde.i<-ifelse(((as.numeric(dat[1,2])>0)&&(as.numeric(dat[1,2])<n1)),(qbeta(0.5,as.numeric(dat[1,2]),n1-as.numeric(dat[1,2])+1)+qbeta(0.5,as.numeric(dat[1,2])+1,n1-as.numeric(dat[1,2])))/2,
ifelse(as.numeric(dat[1,2])==0,(1-0.5^(1/n1))/2,(1+0.5^(1/n1))/2))
rd <- p1.tilde.i-p0.tilde.i
return(rd)
}
rg <- function(dat, mle) {
# Function to generate random exponential variates.
# mle will contain the mean of the original data
out <- data.frame(matrix(NA,1,2))
out[1,1] <- rbinom(1,mle[2], mle[1])
out[1,2] <- rbinom(1,mle[4], mle[3])
out
}
res.b<-boot(dd, rdif, R = M.boot, sim = "parametric",
ran.gen = rg, mle = c(p0.tilde,n0,p1.tilde,n1))
CI<-boot.ci(res.b, type="perc")$percent[4:5]
}
# Back out a normal-theory estimate/SE from whichever CI was produced,
# then compute the one-sided non-inferiority p-value against the margin.
estimate.n<-sum(CI)/2
se.n<-(CI[2]-CI[1])/(2*qnorm(1-sig.level))
Z <- ifelse(unfavourable==T,(estimate.n - NIm)/se.n,(-estimate.n + NIm)/se.n)
p <- pnorm(Z)
if (print.out==T) cat("Risk difference:\nMethod = ",test.type,",\n Estimate: ", estimate.n, "\nStandard error: ", se.n, "\nConfidence interval (Two-sided ", (1-sig.level*2)*100,"%): (", CI[1], ",", CI[2], ")\np-value:", p, ".\n" )
# ---- Risk ratio scale: Wald test on log(RR). ----
} else if (scale == "RR") {
if ((unfavourable == T)&&(NI.margin<=1)) stop("When outcome is unfavourable, a NI margin on the risk ratio scale needs to be >1.")
if ((unfavourable == F)&&(NI.margin>=1)) stop("When outcome is favourable, a NI margin on the risk ratio scale needs to be <1.")
NIm<- log(NI.margin)
se <- sqrt(1/e0-1/n0+1/e1-1/n1)
estimate <- log((e1/n1)/(e0/n0))
Z <- (estimate - NIm)/se
p <- pnorm(Z)
CI.norm <- CI <- exp(c(estimate-qnorm(1-sig.level)*se,estimate+qnorm(1-sig.level)*se))
if (print.out==T) cat("Risk ratio:\nEstimate: ", exp(estimate), "\nlog(RR):", estimate, "\nStandard error (log(RR)): ", se, "\nConfidence interval (one-sided ", sig.level*100,"%): (", CI[1], ",", CI[2], ")\np-value:", p, ".\n" )
# ---- Arc-sine difference scale (variance-stabilising transform). ----
} else if (scale == "AS") {
NIm<-NI.margin
if ((unfavourable == T)&&(NI.margin<=0)) stop("When outcome is unfavourable, a NI margin on the arc-sine difference scale needs to be >0.")
if ((unfavourable == F)&&(NI.margin>=0)) stop("When outcome is favourable, a NI margin on the arc-sine difference scale needs to be <0.")
se <- sqrt(1/(4*n0)+1/(4*n1))
estimate <- asin(sqrt(e1/n1))-asin(sqrt(e0/n0))
Z <- (estimate - NIm)/se
p <- pnorm(Z)
CI.norm <- CI <- c(estimate-qnorm(1-sig.level)*se,estimate+qnorm(1-sig.level)*se)
if (print.out==T) cat("Arc-sine difference:\nEstimate: ", estimate, "\nStandard error: ", se, "\nConfidence interval (two-sided ", sig.level*200,"%): (", CI[1], ",", CI[2], ")\np-value:", p, ".\n" )
# ---- Odds ratio scale: Wald test on log(OR). ----
} else if (scale == "OR") {
if ((unfavourable == T)&&(NI.margin<=1)) stop("When outcome is unfavourable, a NI margin on the odds ratio scale needs to be >1.")
if ((unfavourable == F)&&(NI.margin>=1)) stop("When outcome is favourable, a NI margin on the odds ratio scale needs to be <1.")
NIm<- log(NI.margin)
se <- sqrt(1/e0+1/(n0-e0)+1/e1+1/(n1-e1))
estimate <- log((e1/(n1-e1))/(e0/(n0-e0)))
Z <- (estimate - NIm)/se
p <- pnorm(Z)
CI.norm <- CI <- exp(c(estimate-qnorm(1-sig.level)*se,estimate+qnorm(1-sig.level)*se))
if (print.out==T) cat("Odds ratio:\nEstimate: ", exp(estimate), "\nlog(OR):", estimate, "\nStandard error (log(OR)): ", se, "\nConfidence interval (one-sided ", sig.level*100,"%): (", CI[1], ",", CI[2], ")\np-value:", p, ".\n" )
}
results <- list(estimate, se, Z, p, CI)
names(results)<-c("estimate", "se", "Z", "p", "CI")
return(results)
}
|
24989ec76a5a3b716ecbfd2ec4c1fc7692329188
|
be00ed3553040f7c7b5b752835ec61ad977ea143
|
/R_deduplication/deduplication/join_pairs_fathers_f.R
|
16567ed98b200635e66c900284308b4df0ca5cc7
|
[] |
no_license
|
nptodd/NamingForKinWW1
|
e3fc7520f05c2610426483e7906c9a2df65923ed
|
4980ab5c425786cf901afbba90548b90555c950d
|
refs/heads/master
| 2023-06-15T14:30:54.204290
| 2021-07-07T09:30:32
| 2021-07-07T09:30:32
| 270,934,769
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,550
|
r
|
join_pairs_fathers_f.R
|
# join_pairs_fathers_f: deduplicate fathers within one stratum.
#
# Loads the record-linkage result for a stratum, keeps candidate pairs whose
# linkage weight exceeds CUTOFF_a, groups linked records into connected
# components (each component = one unique individual), writes the
# unique_id -> merged_id lookup table to disk, and returns one consolidated
# row per merged individual.
#
#   strat_a      path to the stratum's saved RecordLinkage result (.RData).
#   CUTOFF_a     linkage-weight threshold above which a pair is a match.
#   diagnostic_a if TRUE, also write weight-histogram and cutoff-sensitivity
#                diagnostic PDFs.
#
# Side effects: reads the stratum CSV, writes the lookup CSV and (optionally)
# two diagnostic PDFs.  Relies on project helpers loadRLObject,
# names_for_identification_f and getmode_f defined elsewhere.
join_pairs_fathers_f= function(strat_a, CUTOFF_a, diagnostic_a=F){
# test
# strat_a = strata_pairs[1]
# CUTOFF_a <- 6
# load results of deduplication procedure
result0 <- loadRLObject(strat_a)
pairs_all <- getPairs(result0, max.weight = Inf, min.weight = -Inf, single.rows=T)
setDT(pairs_all)
# load father's data
strat_name = gsub(".*/", "", strat_a)
strat_name = gsub(".RData", "", strat_name, fixed = T)
loc_fathers = fread(paste0("../data/stratified_melted/peres/", strat_name, ".csv"))
# create the split first-name columns used for identification
names_for_identification_f(loc_fathers, "name", soundex_a = F)
# Optional diagnostic: distribution of the linkage weights.
if(diagnostic_a){
pdf(paste0("../figs/diagnostic_classification/weights/", strat_name, ".pdf"))
hist(pairs_all[,Weight], main=strat_name, xlab="Weight" )
hist(pairs_all[Weight>0 & Weight<20,Weight],
main=paste0("Weights >0, ", strat_name),
xlab="Weight" )
dev.off()
}
#############
# Visual inspection
# pairs_inspect <- getPairs(result0, min.weight=4, max.weight=5, single.rows=T)
# setDT(pairs_inspect)
#
# var_see <- c("family_name", "name",
#              "name_ancestor",
#              "dep2", "lat2", "lon2", # geographical info
#              "dob", "dod",
#              "dob_f_y", "dod_ancestor_y") # paternal yob and yod
# X <- expand.grid(c(".1", ".2"), var_see)
# var_see <- c("Weight", paste0(X[[2]], X[[1]]) )
#
# View(pairs_inspect[, var_see, with=F])
#############
############# DIAGNOSTIC PLOT
# Optional diagnostic: how the final number of individuals varies with the
# chosen cutoff (same component logic as below, applied over a weight grid).
if(diagnostic_a){
possible_weights <- seq(-10, 20, 0.5)
N_final_indiv <- rep(NA, length(possible_weights))
for(I in seq_along(possible_weights)){
table_liens = pairs_all[Weight > possible_weights[I], .(unique_id.1, unique_id.2)]
links <- graph_from_data_frame(d = table_liens, directed = F)
components_links <- components(links)
membership_dt <- data.table(unique_id = names(components_links$membership),
merged_id = paste0(strat_name, "_m" , components_links$membership) )
loc_loc_fathers <- merge(loc_fathers, membership_dt, by = c("unique_id"), all.x=T)
loc_loc_fathers[is.na(merged_id), merged_id:=unique_id]
N_final_indiv[I] <- length(unique(loc_loc_fathers[,merged_id]))
}
pdf(paste0("../figs/diagnostic_classification/cutoffs/", strat_name, ".pdf"))
plot(possible_weights, N_final_indiv/1e3, type="l", lwd=1.4,
xlab="Cut-off",
ylab="N final individuals (x1,000)",
axes=F)
mtext(strat_name, 3, 1, font = 2)
axis(1, at=5*c(-4: 12)); axis(2, las=2)
abline(v=CUTOFF_a, lty=2, lwd=1.4, col="red")
dev.off()
}
#############
# Keep only the pairs above the chosen cutoff: these are the accepted links.
table_liens = pairs_all[Weight > CUTOFF_a, .(unique_id.1, unique_id.2)]
# number of links
table_liens[,.N]
# number of people involved in links
# length(unique(c(table_liens[,"unique_id.1"], table_liens[,"unique_id.2"]) ))
# build the graph and extract its connected components;
# the first two columns of table_liens MUST be the start and end
# vertices of each edge
links <- graph_from_data_frame(d = table_liens, directed = F)
# graph description
# links
# plot(links)
# determine the number of connected components (unique individuals)
components_links <- components(links)
# number of components
# components_links$no
membership_dt <- data.table(unique_id = names(components_links$membership),
merged_id = paste0(strat_name, "_m" ,
components_links$membership) )
# add the merged_id column, the new unique identifier
loc_fathers <- merge(loc_fathers, membership_dt, by = c("unique_id"), all.x=T)
# individuals involved in no candidate link (hence in no final link)
# must still receive a new unique identifier
loc_fathers[is.na(merged_id), merged_id:=unique_id]
# save the unique_id - merged_id lookup table
fwrite(x = loc_fathers[,.(unique_id, merged_id)],
file = paste0("../data/table_deduplication/table_", strat_name, ".csv") )
# inspect the largest components
# id_max <- membership_dt[, .(N_compo = .N), by=.(merged_id)][N_compo==max(N_compo), merged_id]
# View(loc_fathers[merged_id %in% id_max[1]])
##### removal of duplicated fathers
# drop columns not needed in the deduplicated data set.
# NB: the first-name columns split by names_for_identification_f must be
# used, rather than the original columns
loc_fathers[,`:=`(idtree=NULL, unique_id=NULL,
name=NULL, name_ancestor=NULL,
dob=NULL, dod=NULL,
dob_f=NULL, dod_ancestor=NULL,
dod_father_d=NULL, dod_father_m=NULL, dod_father_y=NULL,
ancestor=NULL, sex_ancestor=NULL, type_ancestor=NULL)]
# keep the most frequent value of each column (excluding NAs and "")
new_loc_fathers <- loc_fathers[, lapply(.SD, getmode_f) , by=.(merged_id)]
# add the number of records contributing to each merged individual
n_contrib <- loc_fathers[, .(n_contrib=.N),by=.(merged_id)]
new_loc_fathers <- merge(new_loc_fathers, n_contrib,
by=c("merged_id"), all.x=T)
# reduction in the number of individuals due to deduplication
# new_loc_fathers[,.N]
# loc_fathers[,.N]
setcolorder(new_loc_fathers,
c("stratum", "merged_id", "n_contrib", "family_name",
"name_c1", "name_c2", "name_c3",
"sex", "sex2",
"dob_y", "dob_m", "dob_d", # date of birth
"dod_y", "dod_m", "dod_d", # date of death
"place", "dep", "region", "lat","lon",
"dep2", "lon2", "lat2",
"ll_in_France", "dep_ll", "cas_geoloc",
"dob_f_y", "dob_f_m", "dob_f_d" # father's date of birth
))
# test_id <- sample(loc_fathers[,.N, by=.(merged_id)][N>1, merged_id], 1)
# View(rbindlist(list(new_loc_fathers[merged_id == test_id],
#                     loc_fathers[merged_id == test_id]),
#                fill = T))
return(new_loc_fathers)
}
|
03c7810c315b585e15dfa391861768411ca39f58
|
f4c58fb1797080c5445fc062bfe23469a2ad4269
|
/ProjectOne.R
|
a9609d22d3e0d0230def28b73b7661910cf9ea1d
|
[] |
no_license
|
Shydhevi/DataMining
|
c832479c77a43b9224f8c9f490db210ae7f53f79
|
720600c5a56684ad0ca6eda157e0339cb42a123f
|
refs/heads/master
| 2022-11-24T05:45:12.748098
| 2020-07-20T04:42:28
| 2020-07-20T04:42:28
| 281,014,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,367
|
r
|
ProjectOne.R
|
## ProjectOne.R: exploratory cleaning and analysis of the AAER/Compustat
## homework data.  Part 1 cleans the raw sheet (missing values, redundant
## columns, summary stats, skewness, normal probability plots); Part 1.2
## works on a second sheet (standardisation, plots, correlation, PCA).
## Reads two local Excel files and draws to the active graphics device.
#1. Examine the numeric variables for missing values. For five of the variables with missing values take appropriate action - eliminate variable, assign zero, average value, etc. Justify your decision.
library(tidyverse)
library(readxl)
library(knitr)
library(ggplot2)
CompustatCompustatdata <- read_excel("C:/Users/shydhevi/Documents/R/Datamining/ProjectOne/AAERDATAHWPart1.xlsx")
head(CompustatCompustatdata)
# find the number of missing values of variable CRIM
names(CompustatCompustatdata) # print a list of variables to the screen.
t(t(names(CompustatCompustatdata)))# print the list in a useful column format
# change the column's name for missing columns
colnames(CompustatCompustatdata)[1:9] <- c("GVKEY","DNUM","CNUM","SMBL","NAICS","FYR","CONAME","yeara","Year and CNUM") # change the first column's name
# Delete the first row
Compustatdata<-CompustatCompustatdata[-1,]
head(Compustatdata)
#Check the columns that has missing values
sapply(Compustatdata, class)
#view(Compustatdata)
# Coerce the data columns (10:98) to numeric.
# NOTE(review): sapply + as.numeric silently turns non-numeric strings into
# NA — presumably intended here; confirm the column range is stable.
Compustatdata[, c(10:98)] <- sapply(Compustatdata[, c(10:98)], as.numeric)
sapply(Compustatdata, class)
#1. Examine the numeric variables for missing values.
#Deleting Emply-RST
#The available values in Emply-RST is same as the Employees.
#And also Emply-RST has 12241 NA's and so we are dropping the variable
summary(Compustatdata$Employees)
summary(Compustatdata$`Emply-RST`)
Compustatdata <- select (Compustatdata,-c(`Emply-RST`))
#Deleting column Year and CNUM
#This is concatenation of columns CNUM and Year. Hence removing this variable
head(Compustatdata$`Year and CNUM`)
head(Compustatdata$GVKEY)
head(Compustatdata$yeara)
Compustatdata <- select (Compustatdata,-c(`Year and CNUM`))
# Replacing missing values in Dividends
#There are 74 missing values and the median is 0 so replacing NA's by 0s
summary(Compustatdata$Dividends)
Compustatdata$Dividends[is.na(Compustatdata$Dividends)] <- 0
summary(Compustatdata$Dividends)
# Removing the variable because of huge amount of missing data
summary(Compustatdata$WCChangTTL)
Compustatdata <- select (Compustatdata,-c(WCChangTTL))
# Replacing missing Sales values
# NOTE(review): the comment said "by average" but the code assigns 0 —
# confirm which imputation was intended.
Compustatdata$Sales[is.na(Compustatdata$Sales)] <- 0
summary(Compustatdata$Sales)
#2. Look for redundancies and errors.
# SMBL and CONAME means the same hence remove one
summary(Compustatdata$SMBL)
head(Compustatdata$CONAME)
Compustatdata <- select (Compustatdata,-c(SMBL))
#EPSexcRst and EPSbasic are same and redundant hence remove one
head(Compustatdata$EPSexcRst)
head(Compustatdata$EPSbasic)
Compustatdata <- select (Compustatdata,-c(EPSexcRst))
#NetIncome and NetIncomRST are the same. But NetIncomRST has a lot of missing values.
#Removing NetIncomRST
head(Compustatdata$NetIncome)
head(Compustatdata$NetIncomRST)
Compustatdata <- select (Compustatdata,-c(NetIncomRST))
#3. Identify mean, count, sum, median, standard deviation for - Sales, Price_close, Employees variables
Compustatdata %>% summarise(n = n_distinct(Sales),
sd = sd(Sales),
mean = mean(Sales),
med = median(Sales, na.rm = TRUE))
summary(Compustatdata$PriceClose)
Compustatdata %>% summarise(n = n_distinct(PriceClose),
sd = sd(PriceClose),
mean = mean(PriceClose),
med = median(PriceClose, na.rm = FALSE))
str(Compustatdata$Employees)
Compustatdata %>% summarise(n = n_distinct(Employees),
sd = sd(Employees),
mean = mean(Employees),
med = median(Employees, na.rm = TRUE))
#4. Identify outliers for - SALES, Price_close, Employees
boxplot(Compustatdata$Sales,outcol = "red", outcex = 1.5,main =" Sales")
boxplot(Compustatdata$PriceClose,outcol = "red", outcex = 1.5,main ="PriceClose")
boxplot(Compustatdata$Employees,outcol = "red", outcex = 1.5,main ="Employees")
#5. Calculate
# a. Skewness for Sales. Is there evidence of SKEWNESS?
# b. Skewness for Z-Score standardized SALES. Is there evidence of SKEWNESS?
summary(Compustatdata$Sales)
hist(Compustatdata$Sales)
m <- mean(Compustatdata$Sales); s <- sd(Compustatdata$Sales)
z.weight <- (Compustatdata$Sales - m)/s
length(z.weight)
# Pearson's second skewness coefficient: 3*(mean - median)/sd.
(3*(mean(Compustatdata$Sales) - median(Compustatdata$Sales)))/sd(Compustatdata$Sales)
(3*(mean(z.weight) - median(z.weight)))/sd(z.weight)
#6. Normal probability plots
#a. Construct a normal probability plot for Employees
#b. Derive a new variable - CASH/Total Assets
#c. Construct a normal probability plot for new Variable.
#6. Normal probability plots
# Transformations for Normality
summary(Compustatdata$Employees)
# Replace NA and 0 with 1 so the inverse square root below is defined.
Compustatdata$Employees[is.na(Compustatdata$Employees)] <- 1
Compustatdata$Employees[Compustatdata$Employees == 0] <- 1
summary(Compustatdata$Employees)
sqrt.Employees <- sqrt(Compustatdata$Employees) # Square root
sqrt.Employees_skew <- (3*(mean(sqrt.Employees) - median(sqrt.Employees))) / sd(sqrt.Employees)
sqrt.Employees_skew
invsqrt.Employees <- 1 / sqrt(Compustatdata$Employees)
invsqrt.Employees_skew <- (3*(mean(invsqrt.Employees) - median(invsqrt.Employees))) /sd(invsqrt.Employees) # Histogram with Normal Distribution Overlay
par(mfrow=c(1,1))
invsqrt.Employees_skew
# Reference normal sample with the same mean/sd as the transformed data.
x <- rnorm(1000000, mean = mean (invsqrt.Employees), sd = sd(invsqrt.Employees))
hist(invsqrt.Employees,breaks=30,col = "lightblue",
probability = "true",border = "black",xlab = "in sql rt of wt",ylab="counts",main = "histogram")
box(which = "plot", lty = "solid", col="black")
# Overlay with Normal density
lines(density(x), col = "red")
#a. Construct a normal probability plot for Employees
qqnorm(invsqrt.Employees , datax = TRUE, col = "red",
main = "Normal Q-Q Plot of Employees")
qqline(invsqrt.Employees , col = "blue", datax = TRUE)
#b. Derive a new variable - CASH/Total Assets
Compustatdata$cashbyAsset <- Compustatdata$CashShortTermInvestments/Compustatdata$TotalAssets
summary(Compustatdata$cashbyAsset)
#c. Construct a normal probability plot for new Variable.
# Same transform-and-plot sequence as for Employees, applied to cashbyAsset.
sqrt.cashbyAsset <- sqrt(Compustatdata$cashbyAsset) # Square root
sqrt.cashbyAsset_skew <- (3*(mean(sqrt.cashbyAsset) - median(sqrt.cashbyAsset))) / sd(sqrt.cashbyAsset)
sqrt.cashbyAsset_skew
invsqrt.cashbyAsset <- 1 / sqrt(Compustatdata$cashbyAsset)
invsqrt.cashbyAsset_skew <- (3*(mean(invsqrt.cashbyAsset) - median(invsqrt.cashbyAsset))) /sd(invsqrt.cashbyAsset) # Histogram with Normal Distribution Overlay
par(mfrow=c(1,1))
invsqrt.cashbyAsset_skew
x <- rnorm(1000000, mean = mean (invsqrt.cashbyAsset), sd = sd(invsqrt.cashbyAsset))
hist(invsqrt.cashbyAsset,breaks=30,col = "lightblue",
probability = "true",border = "black",xlab = "in sql rt of wt",ylab="counts",main = "histogram")
box(which = "plot", lty = "solid", col="black")
# Overlay with Normal density
lines(density(x), col = "red")
#a. Construct a normal probability plot for cashbyAsset
qqnorm(invsqrt.cashbyAsset , datax = TRUE, col = "red",
main = "Normal Q-Q Plot of cashbyAsset")
qqline(invsqrt.cashbyAsset , col = "blue", datax = TRUE)
#Part 1.2
Compustatdatapart2 <- read_excel("C:/Users/shydhevi/Documents/R/Datamining/ProjectOne/AAERDATAHWPart2-1.xlsx")
#View(Compustatdatapart2)
#1. Ensure no missing values or errors. Address this using methods recommended. You will want to provide a short explanation for your choice.
sapply(Compustatdatapart2, class)
sum(is.na(Compustatdatapart2))
# Blanket zero-imputation across all columns.
Compustatdatapart2[is.na(Compustatdatapart2)] <- 0
#2. Determine which variables are categorical and which are numeric?
str(Compustatdatapart2)
#3. Standardize the data where relevant - ie you don't standardize GVKEY or SIC codes, year, or bktype
m <- mean(Compustatdatapart2$totval); s <- sd(Compustatdatapart2$totval)
z.weight <- (Compustatdatapart2$totval - m)/s
z.weight
length(Compustatdatapart2$totval)
# 4 Select two categorical variables and construct a bar chart - don't use GVKEY or CNUM as these are company identifiers
coname_freq <- table(Compustatdatapart2$CONAME)
barplot(coname_freq)
industry_freq <- table(Compustatdatapart2$industry)
barplot(industry_freq)
#5. Construct a histogram of three numeric variables with an overlay of bankruptcy variable (bktype)
library(ggplot2)
ggplot(data = Compustatdatapart2, aes(x=Compustatdatapart2$EBIt...66,fill=factor(Compustatdatapart2$bktype)))+
geom_histogram()+
scale_x_continuous("EBIt...66")+
scale_y_discrete("Count")+
guides(fill=guide_legend(title="bktype"))+
scale_fill_manual(values=c("blue","red"))
ggplot(data = Compustatdatapart2, aes(x=Compustatdatapart2$Avg_GR,fill=factor(Compustatdatapart2$bktype)))+
geom_histogram()+
scale_x_continuous("Avg_GR")+
scale_y_discrete("Count")+
guides(fill=guide_legend(title="bktype"))+
scale_fill_manual(values=c("blue","red"))
ggplot(data = Compustatdatapart2, aes(x=Compustatdatapart2$totval,fill=factor(Compustatdatapart2$bktype)))+
geom_histogram()+
scale_x_continuous("totval")+
scale_y_discrete("Count")+
guides(fill=guide_legend(title="bktype"))+
scale_fill_manual(values=c("blue","red"))
# 6 Conduct a correlation analysis of the "Data" variables - exclude the calculated variables
Compustatdatacor <- Compustatdatapart2 %>% select(DATA9...32,DATA12...33,
DATA24...34,
DATA25...35,
DATA34x...36,
DATA36x...37,
DATA58...38,
DATA60...39,
DATA85...40,
DATA172...41,
DATA178...42,
DATA179...43,
DATA181...44,
DATA216...45,
DATA1...47,
DATA4...48,
DATA5...49,
DATA6...50,
DATA8x...51,
DATA9...52,
DATA12...53,
DATA24...54,
DATA25...55,
DATA34x...56,
DATA36x...57,
DATA58...58,
DATA60...59,
DATA85...60,
DATA172...61,
DATA178...62,
DATA179...63,
DATA181...64,
DATA216...65)
Compustatdatacor.corr <- cor(Compustatdatacor)
Compustatdatacor.corr
summary(Compustatdatacor.corr)
library(corrplot)
corrplot(Compustatdatacor.corr, method = "circle")
#8.Examine the difference in PCA results if you use only the Data variables versus using the constructed (calculated variables).
pcsdata <- prcomp(Compustatdatacor)
summary(pcsdata)
Compustatcalc <- Compustatdatapart2 %>% select(`Total Debt`,
MVEquity,
`LOG(DEFLTA)=LOG(100*(TA/CPI))`,
`MVE+TL`,
`D60+D181`,
`prc*data25+data6-data60`,
`MF/BF`,
`P/E`,
`Tobin'sNew`,
`ME/BE`,
`MVE/TL`,
`WC/NETSALES`,
`CL/TA`,
`TD/TA`,
`cash/CL`,
`WC/TA`,
`CASH/TA`,
`CA/CL`,
`NETSALES/TA`,
`NETINC/TA`,
`NETINC/FA`,
`OPINC/TA`,
`RETERN/TA`,
`EBIT/TA`,
`CASH/NETSALES`,
`CA/NETSALE`,
`CA/TA`)
pcscalcdata <- prcomp(Compustatcalc)
summary(pcscalcdata)
pcscalcdata$rot[,1:6]
#9. Focus on the analysis using the calculated data. How many Principal components should you use? Explain
# I would use first 6 principal components since 90% of variabililty is contained in those variables.
#10 Plot the factor scores for the number of principal components you identified in #9
pairs(pcscalcdata$rot[,1:6],
labels = c("Component 6 Scores"))
|
55eab84523990f78d23b07a3e75e3d931e2f82f7
|
61180649c781ca23ee434754577acea001eb4bc0
|
/man/getunct.Rd
|
7e84b07fa7df31086c441a382615d410012ac56a
|
[] |
no_license
|
malexan/fclhs
|
87e0b4c9b86eb1c954644bbdb699699677d4163b
|
f93e69dd96bbd15bdbc68a5c52db5fe0dde3a0fa
|
refs/heads/master
| 2020-05-31T15:48:22.614152
| 2015-08-09T07:59:08
| 2015-08-09T07:59:08
| 27,025,796
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 325
|
rd
|
getunct.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/unsdapi.R
\name{getunct}
\alias{getunct}
\title{Get trading data via UNSD API.}
\usage{
getunct(area, year, partner = "all", flow = "all", code = "AG6",
compact = T, desc = F, debug = F)
}
\description{
Get trading data via UNSD API.
}
|
78a255a6a964a4d59b7e91871fed945b8c7daddb
|
ad292b23922e4d9fe84ca70b0146e12c01f4c1aa
|
/analysis.R
|
1cf22ace5adbdf7447b50df0947b03106d91dc64
|
[] |
no_license
|
zivepstein/datafest15
|
7f6dbe17fcf27be254bc72d76205a437ef76d441
|
f965b430acdc5030e10b085498622f63c706ebe6
|
refs/heads/master
| 2016-09-05T11:45:45.614388
| 2015-04-25T18:57:19
| 2015-04-25T18:57:19
| 33,838,704
| 1
| 0
| null | 2015-04-13T03:50:10
| 2015-04-12T23:49:07
|
R
|
UTF-8
|
R
| false
| false
| 2,820
|
r
|
analysis.R
|
# DataFest exploratory analysis: builds a co-lead similarity graph for the
# first 500 visitor/lead rows, then summarises transaction prices by
# geography and marketing exposure.
# Assumes `visitor`, `leads`, `transactions` are already loaded in the session.
library(dplyr)
library(igraph)
# TRUE when two leads are for the same make AND model.
# NOTE(review): `&&` is scalar - this assumes make/model are length-1 here.
Same.Lead <- function(lead_one, lead_two) {
return(lead_one$make == lead_two$make && lead_one$model == lead_two$model)
}
# NOTE(review): `[1:500]` on a plain data.frame selects the first 500 COLUMNS,
# and `leadss[i]` below selects column i; this only does row indexing if these
# objects are data.tables (leads[visitor_key == key] further down suggests
# data.table) - confirm the classes.
leadss <- inner_join(visitor, leads, by = "visitor_key")[1:500]
# 500x500 adjacency matrix: 1 when two leads share make/model.
adj_matrix <- matrix(0, ncol = 500,
nrow = 500)
# O(n^2) pairwise comparison over the 500 leads; diagonal left at 0.
for (i in 1:500) {
for (j in 1:500) {
if(i == j) {
next
} else if (Same.Lead(leadss[i], leadss[j])) {
adj_matrix[i, j] <- 1
}
}
}
g <- graph.adjacency(adj_matrix, mode = "undirected")
V(g)$label.cex <- 0.1
plot(g, vertex.size = 2)
# only 46 states represented, some obs. list state as "", filter those out
n_distinct(transactions$state_bought)
# Mean purchase price and count per state, highest price first.
transactions %>%
filter(state_bought != "") %>%
group_by(state_bought) %>%
summarise(mean_cost = mean(na.omit(price_bought)), count = n()) %>%
arrange(desc(mean_cost))
# Same summary by designated market area (DMA).
transactions %>%
group_by(dma_bought) %>%
summarise(mean_cost = mean(na.omit(price_bought)), count = n()) %>%
arrange(desc(mean_cost))
# using comparator makes no real difference in price paid
inner_join(visitor, transactions, by = "visitor_key") %>%
group_by(comparator > 0) %>%
summarise(mean_cost = mean(na.omit(price_bought)), count = n()) %>%
arrange(desc(mean_cost))
# do people who followed a paid search link pay more?
inner_join(visitor, transactions, by = "visitor_key") %>%
group_by(paid_agg_search_flag) %>%
summarise(mean_cost = mean(na.omit(price_bought)), count = n()) %>%
arrange(desc(mean_cost))
# Same comparison for organic (free) search traffic.
inner_join(visitor, transactions, by = "visitor_key") %>%
group_by(free_agg_search_flag) %>%
summarise(mean_cost = mean(na.omit(price_bought)), count = n()) %>%
arrange(desc(mean_cost))
# NOTE(review): duplicate of the paid_agg_search_flag summary above - confirm
# whether a different flag was intended here.
inner_join(visitor, transactions, by = "visitor_key") %>%
group_by(paid_agg_search_flag) %>%
summarise(mean_cost = mean(na.omit(price_bought)), count = n()) %>%
arrange(desc(mean_cost))
# Fraction of purchasing visitors with exactly one lead on record.
# NOTE(review): `leads[visitor_key == key]` is data.table syntax - confirm class.
q2 <- inner_join(transactions, leads, by = "visitor_key")
counter <- 0
for (key in q2$visitor_key) {
if (nrow(leads[visitor_key == key]) == 1) {
counter <- counter + 1
}
}
counter / nrow(q2)
# Ford-specific ad exposure: average impressions per model bought (n > 30).
makes <- c("Ford")
inner_join(visitor, transactions, by = "visitor_key") %>%
filter(make_bought == "Ford") %>%
group_by(model_bought) %>%
summarise(avg_num_ads_seen = mean(imp_ford), count = n()) %>%
filter(count > 30) %>%
arrange(desc(avg_num_ads_seen))
# Average ad clicks per Ford model bought (n > 30).
inner_join(visitor, transactions, by = "visitor_key") %>%
filter(make_bought == "Ford") %>%
group_by(model_bought) %>%
summarise(avg_num_ads_clicked = mean(clk_ford), count = n()) %>%
filter(count > 30) %>%
arrange(desc(avg_num_ads_clicked))
# Ford click-through rate by make bought, among visitors with Scion impressions.
# NOTE(review): filtering on imp_scion while computing clk_ford/imp_ford looks
# inconsistent - confirm which make was intended.
inner_join(visitor, transactions, by = "visitor_key") %>%
group_by(make_bought) %>%
filter(imp_scion > 0) %>%
summarise(avg_num_ads_seen = mean(clk_ford / imp_ford), count = n()) %>%
arrange(desc(avg_num_ads_seen))
|
237a265ed4629a5a6ba9c587ee42aa21662634c5
|
a0bc1a85ec9d49fc0afc2a9a78c247e31409364c
|
/man/hdsci.Rd
|
1189df870d845474f36fa0c8ff8094b6ac7055d5
|
[] |
no_license
|
linulysses/hdanova
|
2b0fd79ce0a45ab51607f438e49942fa9305729a
|
e0e13be70bd5ac7fd31d38927fdea2ed19f700aa
|
refs/heads/master
| 2023-03-22T14:20:09.092950
| 2021-02-11T07:14:56
| 2021-02-11T07:14:56
| 231,020,381
| 5
| 6
| null | 2021-03-14T15:58:25
| 2019-12-31T03:39:09
|
R
|
UTF-8
|
R
| false
| true
| 7,517
|
rd
|
hdsci.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sci.R
\name{hdsci}
\alias{hdsci}
\title{Construct Simultaneous Confidence Interval}
\usage{
hdsci(
X,
alpha = 0.05,
side = "both",
tau = 1/(1 + exp(-0.8 * seq(-6, 5, by = 1))),
B = ceiling(50/alpha),
pairs = NULL,
Sig = NULL,
verbose = F,
tau.method = "MGB",
R = 10 * ceiling(1/alpha),
ncore = 1,
cuda = T,
nblock = 32,
tpb = 64,
seed = sample.int(2^30, 1)
)
}
\arguments{
\item{X}{a matrix (one-sample) or a list of matrices (multiple-samples), with each row representing an observation.}
\item{alpha}{significance level; default value: 0.05.}
\item{side}{either of \code{'lower','upper'} or \code{'both'}; default value: \code{'both'}.}
\item{tau}{real number(s) in the interval \code{[0,1)} that specifies the decay parameter and is automatically selected if it is set to \code{NULL} or multiple values are provided; default value: \code{NULL}, which is equivalent to \code{tau=1/(1+exp(-0.8*seq(-6,5,by=1))).}}
\item{B}{the number of bootstrap replicates; default value: \code{ceiling(50/alpha)}.}
\item{pairs}{a matrix with two columns, only used when there are more than two populations, where each row specifies a pair of populations for which the SCI is constructed; default value: \code{NULL}, so that SCIs for all pairs are constructed.}
\item{Sig}{a matrix (one-sample) or a list of matrices (multiple-samples), each of which is the covariance matrix of a sample; default value: \code{NULL}, so that it is automatically estimated from data.}
\item{verbose}{TRUE/FALSE, indicator of whether to output diagnostic information or report progress; default value: FALSE.}
\item{tau.method}{the method to select tau; possible values are 'MGB' (default), 'MGBA', 'RMGB', 'RMGBA', 'WB' and 'WBA' (see details).}
\item{R}{the number of Monte Carlo replicates for estimating the empirical size; default: \code{ceiling(25/alpha)}}
\item{ncore}{the number of CPU cores to be used; default value: 1.}
\item{cuda}{T/F to indicate whether to use CUDA GPU implementation when the package \code{hdanova.cuda} is installed. This option takes effect only when \code{ncore=1}.}
\item{nblock}{the number of block in CUDA computation}
\item{tpb}{number of threads per block; the maximum number of total number of parallel GPU threads is then \code{nblock*tpb}}
\item{seed}{the seed for random number generator}
}
\value{
a list of the following objects:
\describe{
\item{\code{sci}}{the constructed SCI, which is a list of the following objects:
\describe{
\item{\code{sci.lower}}{a vector (when <= two samples) or a list of vectors (when >= 3 samples) specifying the lower bound of the SCI for the mean (one-sample) or the difference of means of each pair of samples.}
\item{\code{sci.upper}}{a vector (when <= two samples) or a list of vectors (when >= 3 samples) specifying the upper bound of the SCI.}
\item{\code{pairs}}{a matrix of two columns, each row containing the a pair of indices of samples of which the SCI of the difference in mean is constructed.}
\item{\code{tau}}{the decay parameter that is used to construct the SCI.}
\item{\code{Mn}}{the sorted (in increasing order) bootstrapped max statistic.}
\item{\code{Ln}}{the sorted (in increasing order) bootstrapped min statistic.}
\item{\code{side}}{the input \code{side}.}
\item{\code{alpha}}{the input \code{alpha}.}
}
}
\item{\code{tau}}{a vector of candidate values of the decay parameter.}
\item{\code{sci.tau}}{a list of \code{sci} objects corresponding to the candidate values in \code{tau}.}
\item{\code{selected.tau}}{the selected value of the decay parameter from \code{tau}.}
\item{\code{side}}{the input \code{side}.}
\item{\code{alpha}}{the input \code{alpha}.}
\item{\code{pairs}}{a matrix of two columns, each row containing the a pair of indices of samples of which the SCI of the difference in mean is constructed.}
\item{\code{sigma2}}{a vector (for one sample) or a list (for multiple samples) of vectors containing variance for each coordinate.}
}
}
\description{
Construct (1-\code{alpha}) simultaneous confidence interval (SCI) for the mean or difference of means of high-dimensional vectors.
}
\details{
Four methods to select the decay parameter \code{tau} are provided. Using the fact that a SCI is equivalent to a hypothesis test problem, all of them first identify a set of good candidates which give rise to test that respects the specified level \code{alpha}, and then select a candidate that minimizes the p-value. These methods differ in how to identify the good candidates.
\describe{
\item{\code{MGB}}{for this method, conditional on the data \code{X}, \code{R=10*ceiling(1/alpha)} i.i.d. zero-mean multivariate Gaussian samples (called MGB samples here) are drawn, where the covariance of each sample is equal to the sample covariance matrix \code{Sig} of the data \code{X}. For each candidate value in \code{tau}, 1) the empirical distribution of the corresponding max/min statistic is obtained by reusing the same bootstrapped sample, 2) the corresponding p-value is obtained, and 3) the size is estimated by applying the test to all MGB samples. The candidate values with the empirical size closest to \code{alpha} are considered as good candidates.}
\item{\code{MGBA}}{an slightly more aggressive version of \code{MGB}, where the candidate values with the estimated empirical size no larger than \code{alpha} are considered good candidates.}
\item{\code{RMGB}}{this method is similar to \code{MGB}, except that for each MGB sample, the covariance matrix is the sample covariance matrix of a resampled (with replacement) data \code{X}.}
\item{\code{RMGBA}}{an slightly more aggressive version of \code{RMGB}, where the candidate values with the estimated empirical size no larger than \code{alpha} are considered good candidates.}
\item{\code{WB}}{for this method, conditional on \code{X}, \code{R=10*ceiling(1/alpha)} i.i.d. samples (called WB samples here) are drawn by resampling \code{X} with replacement. For each candidate value in \code{tau}, 1) the corresponding p-value is obtained, and 2) the size is estimated by applying the test to all WB samples without reusing the bootstrapped sample. The candidate values with the empirical size closest to \code{alpha} are considered as good candidates.}
\item{\code{WBA}}{an slightly more aggressive version of \code{WB}, where the candidate values with the estimated empirical size no larger than \code{alpha} are considered good candidates.}
}
Among these methods, MGB and MGBA are recommended, since they are computationally more efficiently and often yield good performance. The MGBA might have slightly larger empirical size. The WB and WBA methods may be subject to outliers, in which case they become more conservative. The RMGB is computationally slightly slower than WB, but is less subject to outliers.
}
\examples{
# simulate a dataset of 4 samples
X <- lapply(1:4, function(g) MASS::mvrnorm(30,rep(0,10),diag((1:10)^(-0.5*g))))
# construct SCIs for the mean vectors with pairs={(1,3),(2,4)}
hdsci(X,alpha=0.05,pairs=matrix(1:4,2,2))$sci
}
\references{
\insertRef{Lopes2020}{hdanova}
\insertRef{Lin2020}{hdanova}
}
|
91566f014d6e4313e4c6c79713d0ad6daa4129e1
|
f3963b60cc2e5c0f46394073c88884d2f27621d1
|
/data_exploration.R
|
bfc3dc4cd56d5d11625b247ab28f73c3d6226b23
|
[] |
no_license
|
juschu321/CRAN-Meta-analysis
|
f2688df0bdde97f28cafa2dffa8e93b2e6d11e19
|
922cdb6ad3397b9a15591ac705df3436d321e102
|
refs/heads/master
| 2020-05-23T15:54:49.812840
| 2019-06-30T12:31:11
| 2019-06-30T12:31:11
| 186,837,173
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
data_exploration.R
|
# Load the package download-count data and reshape it to wide format
# (one column per package). Expects "mydata.csv" in the working directory
# with at least the columns: date, package, count.
library(readr)
library(tidyr)
# Parse the CSV, coercing the `date` column to Date on read.
my_data <- read_csv("mydata.csv", col_types = cols(date = col_date(format = "%Y-%m-%d")))
View(my_data)
#check the data structure
str(my_data)
# NOTE(review): `data_frame` is created but never used below - confirm it is needed.
data_frame <- as.data.frame(my_data)
# Wide format: one row per date, one column per package, cells hold counts.
my_data_spread <- tidyr::spread(data=my_data, key = package, value = count)
View(my_data_spread)
|
37282c5092109046807a59f4d01d5579bb659b0a
|
1407e006fba9a49daba2e519357e1ae459bb1e8d
|
/dropout_olympics.R
|
c871e5d5faa0ad038d2fb96caae9b5a728c71a45
|
[] |
no_license
|
skeydan/uncertainty
|
01242bc33394b1036971a3129fc23eff1273dd22
|
2ef278c4ab3eab397a45d31f484644c8470c7b52
|
refs/heads/master
| 2021-08-19T09:35:43.010216
| 2017-11-25T16:15:10
| 2017-11-25T16:15:10
| 102,039,845
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,303
|
r
|
dropout_olympics.R
|
# Monte-Carlo dropout uncertainty estimation on Olympic 400m times.
# Fits a small dense network with dropout, then keeps dropout ACTIVE at
# prediction time (learning_phase = 1) to sample the predictive distribution,
# following Gal & Ghahramani (link below at the tau computation).
# Expects get_data.R to define `male400_1996` with columns year and seconds.
library(keras)
library(dplyr)
library(ggplot2)
K <- keras::backend()
source("get_data.R")
# Network and training hyperparameters.
n_hidden1 <- 64
n_hidden2 <- 64
n_output <- 1
# NOTE(review): learning_rate is defined but optimizer_adam() below is called
# with defaults - confirm whether it should be passed in.
learning_rate <- 1e-6
num_epochs <- 500
batch_size <- 1
dropout <- 0.1
l2 <- 0.1
#X_train <- matrix(rnorm(n_samples * n_features, mean = 10, sd = 2), nrow = n_samples, ncol = n_features)
# Single feature: the Olympic year.
X_train <- matrix(male400_1996$year, ncol = 1)
dim(X_train)
y_train <- male400_1996$seconds
# Two hidden ReLU layers, each followed by dropout and L2 activity
# regularization; linear output for regression.
model <- keras_model_sequential()
model %>%
layer_dense(units = n_hidden1, activation = 'relu', input_shape = 1) %>%
layer_dropout(rate = dropout) %>%
layer_activity_regularization(l1=0, l2=l2) %>%
layer_dense(units = n_hidden2, activation = 'relu') %>%
layer_dropout(rate = dropout) %>%
layer_activity_regularization(l1=0, l2=l2) %>%
layer_dense(units = n_output, activation = 'linear')
model %>% summary()
model %>% compile(
loss = 'mean_squared_error',
optimizer = optimizer_adam())
history <- model %>% fit(
X_train, y_train,
epochs = num_epochs, batch_size = batch_size
)
plot(history)
model %>% predict(X_train)
model$layers
# Backend function from model input to the final layer's output, taking the
# Keras learning phase as a second input so dropout can be toggled manually.
get_output = K$`function`(list(model$layers[[1]]$input, K$learning_phase()), list(model$layers[[7]]$output))
# output in train mode = 1
layer_output = get_output(list(matrix(X_train[1:2, ], nrow=2), 1))
layer_output
# output in test mode = 0
layer_output = get_output(list(matrix(X_train[1:2, ], nrow=2), 0))
layer_output
layer_output = get_output(list(X_train, 0))
dim(layer_output[[1]])
# http://mlg.eng.cam.ac.uk/yarin/blog_3d801aa532c1ce.html
# MC-dropout sampling: n stochastic forward passes with dropout on.
n <- 20
inclusion_prob <- 1-dropout
num_samples <- nrow(X_train)
weight_decay <- l2
# NOTE(review): length_scale = 0.5 is a modelling choice that directly scales
# the precision tau below - confirm it is appropriate for this data.
length_scale <- 0.5
preds <- matrix(NA, nrow = nrow(X_train), ncol = n)
dim(preds)
for(i in seq_len(n)) {
# train mode
preds[ ,i] <- get_output(list(X_train, 1))[[1]]
}
preds
# Predictive mean/variance across the n stochastic passes.
(predictive_mean <- apply(preds, 1, mean))
(predictive_var <-apply(preds, 1, var))
# Model precision tau per the blog post; 1/tau is added as irreducible noise.
(tau <- length_scale^2 * inclusion_prob / (2 * num_samples * weight_decay))
(predictive_var <- predictive_var + tau^-1)
# Assemble mean +/- one predictive standard deviation for plotting.
df <- data.frame(
x = as.vector(X_train),
pred_mean = predictive_mean,
lwr = predictive_mean - sqrt(predictive_var),
upr = predictive_mean + sqrt(predictive_var)
)
ggplot(df, aes(x = x, y=predictive_mean)) + geom_point() +
geom_ribbon(aes(ymin = lwr, ymax = upr), alpha = 0.2)
|
cb483c760df6c187988aba6cf329708c2e7a34de
|
97357255a18408e4b946435c82a2b973fb862af2
|
/man/getDataOutagesProduction.Rd
|
17d2c1a2173c336a6ffff2227e63d27cc8809065
|
[] |
no_license
|
rte-antares-rpackage/unavailabilityProductionEntsoe
|
234143d2d4a0046eefb2d3115bd4eb6998bad03d
|
a732b19abc3a21e1f4a299f23dcfd74c6339ee22
|
refs/heads/master
| 2021-07-08T05:42:33.442331
| 2019-04-12T12:23:32
| 2019-04-12T12:23:32
| 153,428,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,477
|
rd
|
getDataOutagesProduction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadDataFromEnsoe.R
\name{getDataOutagesProduction}
\alias{getDataOutagesProduction}
\title{Load unavailability of production from Ensoe}
\usage{
getDataOutagesProduction(token, bz = "10YNL----------L", docType = "A80",
docStatus = "A05", start = "2017-01-01", end = "2017-12-31",
EIC = NULL, fileToMerge = "NLindispo.csv", by = "week",
entsoeHttp = "https://transparency.entsoe.eu/api?")
}
\arguments{
\item{token}{\code{character}, ENTSOE token}
\item{bz}{\code{character} build zone, transparency.entsoe.eu API guide}
\item{docType}{\code{character} docType, A80 for generation unit and A77 for produciton unit}
\item{docStatus}{\code{character} docStatus "A05" : Active, "A09" : Cancel}
\item{start}{\code{character} start date}
\item{end}{\code{character} end date}
\item{EIC}{\code{character} optional, powerSystemResourcesmRID default NULL -> "All"}
\item{fileToMerge}{\code{character} csv where data will be save, you can also add information in an existing file.}
\item{by}{\code{character} load data by day, week, month, year?
Maximum of documents load by request is 200 so if data are not correctly try an other timestep.}
\item{entsoeHttp}{\code{character}, ENTSOE adress, defalut https://transparency.entsoe.eu/api?}
}
\description{
getDataOutagesProduction request ensoe API and create RDS file who contains unavailability of production.
}
\examples{
\dontrun{
#PROD : "A77" Gene : "A80"
token <- "Mytoken"
#BiddingZone_Domain = "10YFR-RTE------C"
#BiddingZone_Domain = "10YBE----------2"
#BiddingZone_Domain = "10YNL----------L"
#BiddingZone_Domain = "10Y1001A1001A63L"
#BiddingZone_Domain = "10YDE-VE-------2" #GRT Allemand 50Hertz
#BiddingZone_Domain = "10YDE-RWENET---I" #GRT Allemand Amprion
#BiddingZone_Domain = "10YDE-EON------1" #GRT Allemand TennetDE
#BiddingZone_Domain = "10YDE-ENBW-----N" #GRT Allemand TransnetBW
#BiddingZone_Domain = "10YAT-APG------L" #Autriche
#NL
getDataOutagesProduction(token = token, bz = "10YNL----------L",
docType = "A80",
start = "2015-01-01", end = "2018-12-31", fileToMerge = "NLindispo.csv")
getDataOutagesProduction(token = token, "10YNL----------L", docType = "A77",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "NLindispo.csv", by = "year")
#BE
getDataOutagesProduction(token = token, bz = "10YBE----------2", docType = "A80",
start = "2015-01-01", end = "2018-12-31", fileToMerge = "BEindispo.csv")
getDataOutagesProduction(token = token, "10YBE----------2", docType = "A77",
start = "2015-01-01", end = "2018-12-31", fileToMerge = "BEindispo.csv")
#FR
getDataOutagesProduction(token = token, bz = "10YFR-RTE------C", docType = "A80",
start = "2015-01-01", end = "2018-12-31", fileToMerge = "FRindispo.csv")
getDataOutagesProduction(token = token, "10YFR-RTE------C", docType = "A77",
start = "2015-01-01", end = "2018-12-31", fileToMerge = "FRindispo.csv")
#DE
getDataOutagesProduction(bz = "10YDE-VE-------2", docType = "A80",
start = "2015-01-01", end = "2018-12-31", fileToMerge = "DEindispo.csv")
getDataOutagesProduction("10YDE-VE-------2", docType = "A77",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv", by = "year")
getDataOutagesProduction(token = token, bz = "10YDE-RWENET---I", docType = "A80",
start = "2015-01-01", end = "2018-12-31", fileToMerge = "DEindispo.csv")
getDataOutagesProduction(token = token, bz = "10YDE-RWENET---I", docType = "A77",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv", by = "year")
getDataOutagesProduction(token = token, bz = "10YDE-EON------1", docType = "A80",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv", by = "month")
##Week (for +200 row)
getDataOutagesProduction(token = token, bz = "10YDE-EON------1", docType = "A80",
start = "2015-08-01", end = "2015-09-10", fileToMerge = "DEindispo.csv")
getDataOutagesProduction(token = token, "10YDE-EON------1", docType = "A80",
start = "2016-06-01", end = "2016-09-10", fileToMerge = "DEindispo.csv")
getDataOutagesProduction(token = token, bz = "10YDE-EON------1", docType = "A80",
start = "2016-07-27", end = "2016-08-03", fileToMerge = "DEindispo.csv", by = "day")
getDataOutagesProduction(token = token, bz = "10YDE-EON------1", docType = "A80",
start = "2016-12-01", end = "2017-01-10", fileToMerge = "DEindispo.csv")
getDataOutagesProduction(token = token, bz = "10YDE-EON------1", docType = "A80",
start = "2017-06-01", end = "2017-09-10", fileToMerge = "DEindispo.csv")
getDataOutagesProduction(token = token, bz = "10YDE-EON------1", docType = "A77",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv")
getDataOutagesProduction(token = token, bz = "10YDE-ENBW-----N", docType = "A80",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv", by = "month")
getDataOutagesProduction(token = token, bz = "10YDE-ENBW-----N", docType = "A77",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv", by = "year")
getDataOutagesProduction(token = token, bz = "10YAT-APG------L", docType = "A80",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv", by = "month")
getDataOutagesProduction(token = token, bz = "10YAT-APG------L", docType = "A77",
start = "2015-01-01", end = "2019-01-01", fileToMerge = "DEindispo.csv", by = "year")
}
}
|
6f7501c3fb78f79e3362945ec2eeb337ecc8c340
|
f4c3ceb4387e76f07c1ccaaf1d63e2363c125008
|
/Example3/Hollding_T3.R
|
3c5fb0a00a33653713d948ddd7071e2c601d03c1
|
[] |
no_license
|
SenarathneSGJ/Laplace_based_sequential_design_algorithms
|
372a9630e241e75b77b38ce9dccb80470ebadcf6
|
5a6c2331990794327df0d8bce8c90136caaff6ad
|
refs/heads/master
| 2020-05-26T07:24:27.617673
| 2019-07-10T05:34:07
| 2019-07-10T05:34:07
| 188,149,611
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 145
|
r
|
Hollding_T3.R
|
# Holling type-III functional-response rate equation, in the
# func(time, state, parameters) signature expected by deSolve-style ODE
# solvers. Returns a one-element list wrapping the derivative vector,
# as those solvers require.
# `state` must contain Y; `parameters` must supply a (attack rate),
# T (handling time) and N (prey density).
Hollding_T3 <- function(t, state, parameters) {
  vars <- as.list(c(state, parameters))
  with(vars, {
    # dY/dt = -a*N^2 / (1 + a*T*N^2)
    dY <- -(a * N^2) / (1 + a * T * N^2)
    list(c(dY))
  })
}
|
76190ac4b76855b8d0df0ae1f3fc9c5030da6968
|
ca609a94fd8ab33cc6606b7b93f3b3ef201813fb
|
/2016-April/8.ML-Parallel Processing/parallel-processing-foreach.R
|
9b09bb7fcf0847fa3c14662fd4b26eade5d54ed2
|
[] |
no_license
|
rajesh2win/datascience
|
fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2
|
27aca9a6c6dcae3800fabdca4e3d76bd47d933e6
|
refs/heads/master
| 2021-01-20T21:06:12.488996
| 2017-08-01T04:39:07
| 2017-08-01T04:39:07
| 101,746,310
| 1
| 0
| null | 2017-08-29T09:53:49
| 2017-08-29T09:53:49
| null |
UTF-8
|
R
| false
| false
| 776
|
r
|
parallel-processing-foreach.R
|
# Train a random forest on the Kaggle Titanic training data, growing the
# trees in parallel: four foreach workers grow 100 trees each and the pieces
# are merged into one 400-tree forest.
library(randomForest)
library(foreach)
library(doParallel)
#register cluster for parallel processing
# NOTE(review): detectCores() claims every core on the machine; consider
# detectCores() - 1 to leave one core for the OS.
cl = makeCluster(detectCores())
registerDoParallel(cl)
# NOTE(review): hard-coded absolute Windows path - not portable across machines.
setwd("C:\\Users\\Thimma Reddy\\Documents\\GitHub\\datascience\\2014\\kaggle\\titanic\\data")
titanic_train = read.csv("train.csv")
dim(titanic_train)
str(titanic_train)
# Outcome and passenger class must be factors for classification.
titanic_train$Pclass = as.factor(titanic_train$Pclass)
titanic_train$Survived = as.factor(titanic_train$Survived)
#Build random forest model in parallel
# .combine=combine uses randomForest::combine (loaded via .packages) to merge
# the four 100-tree forests returned by the workers into a single forest.
model_rf = foreach(ntree=rep(100,4), .combine=combine, .multicombine=TRUE, .packages="randomForest") %dopar%
{
randomForest(titanic_train[,c("Sex","Pclass","Embarked","Parch","SibSp","Fare")], titanic_train[,"Survived"], ntree=ntree)
}
# Always release the worker processes.
stopCluster(cl)
|
eeb436742edd3214554098b1d7456381921e9ad6
|
5c7cd7da84653a5fbe584f088c7ba8fbcf7e67bb
|
/Temp.R
|
fefb96f82fbed4a02ad76093bacf303abfc971d2
|
[] |
no_license
|
peterbyrd/CaseStudy2_v2
|
0f9facc4cd308236555ef80a2d416adb7e8b38e3
|
0c2993e0340332d0c024257857f4e0f9bee0be78
|
refs/heads/master
| 2020-06-21T18:49:28.540051
| 2016-11-29T06:28:58
| 2016-11-29T06:28:58
| 74,775,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,971
|
r
|
Temp.R
|
## Author: Peter Byrd
## Date: November 25, 2016
## Case Study 2 - Question 4
## Computes the countries (Part I) and cities (Part III) with the largest
## spread between max and min monthly average temperature since 1900, and the
## largest year-over-year US average temperature change (Part II).
## Set the working directory and load packages
setwd("/Users/pbyrd/Git/CaseStudy2_v2")
library(tseries)
library(ggplot2)
library(plyr)
# Part I
## Read CSV input file
Temp <- read.csv("Data/Temp.csv", header=TRUE)
## Create single date format
## The Date column mixes two formats; parse with both and merge.
## NOTE(review): the merge assumes every row parses under exactly one of the
## two formats (each NA in `a` has a non-NA partner in `b`) - TODO confirm.
a <- as.Date(Temp$Date,format="%Y-%m-%d")
b <- as.Date(Temp$Date,format="%m/%d/%Y")
a[is.na(a)] <- b[!is.na(b)] # Combine both while keeping their ranks
Temp$Date <- a # Put it back in your dataframe
## Remove NA and change Country to a factor
Temp <- Temp[!(is.na(Temp$Monthly.AverageTemp)),]
Temp$Country <- as.factor(Temp$Country)
## Create a subset of the dates for 1900 and newer
Date1 <- as.Date("1900-01-01")
Temp1900 <- Temp[Temp$Date >= Date1,]
## Find the difference between max and min monthly avg temp for each country
maxtemp <- tapply(Temp1900$Monthly.AverageTemp, Temp1900$Country, max)
mintemp <- tapply(Temp1900$Monthly.AverageTemp, Temp1900$Country, min)
difftemp <- maxtemp - mintemp
## Sort the data from largest to smallest difference in monthly temperatures and show top 20
difftemp.sorted <- sort(difftemp, decreasing = TRUE)
top20countries <- difftemp.sorted[1:20]
top20countries
### Plot the data
x1names <- names(top20countries)
plot(top20countries,xaxt="n",xlab="Country",ylab="Celsius",main="Top 20 Country Temp Differences")
axis(1, at=1:length(x1names), labels=x1names)
# Part II
### Subset the data for US temperatures for dates later than 1900
UStemp <- subset(Temp1900, Country == "United States")
### Convert from C to F and add to new column
UStemp["Monthly.AverageTemp_F"] = (UStemp$Monthly.AverageTemp*1.8)+32
### Remove variables we don't want and rename variables
UStemp_new <- UStemp
UStemp_new <- UStemp_new[,-c(1,3,4)]
UStemp_new <- plyr::rename(x=UStemp_new,
replace = c("Monthly.AverageTemp"="AvgTemp_C","Monthly.AverageTemp_F"="AvgTemp_F"))
### Create a time series and aggregate the data by year
## Monthly series Jan 1900 - Sep 2013, collapsed to yearly means.
UStemp_monthly <- ts(UStemp_new, start=c(1900,1), end=c(2013,9), frequency = 12)
UStemp_yearly <- aggregate(UStemp_monthly, nfrequency=1, FUN=mean)
### Plot the average land temperature by year
plot(UStemp_yearly,type="l")
### Calculate the one year difference of average land temperature
UStemp_diff <- diff(UStemp_yearly)
maxdiff <- max(abs(UStemp_diff))
## NOTE(review): which.max ignores sign (unlike maxdiff above, which uses
## abs), and the 1900 + index arithmetic assumes diff element i spans
## (1899+i, 1900+i) - verify both against the printed result.
maxdiffyr <- 1900 + which.max(UStemp_diff[,2])
print(c('The max monthly temperature difference in Fahrenheit was ',maxdiff))
print(c('It occured between the years ',maxdiffyr-1,' and ',maxdiffyr,'.'))
### Since we measured the difference from the previous year, 1920 and 1921 have the
### largest average temperature difference of 2.54 degrees F
# Part III
### Read CityTemp data
CityTemp <- read.csv("Data/CityTemp.csv", header=TRUE)
### Create single date format
### Same two-format date merge as Part I (same caveat applies).
a <- as.Date(CityTemp$Date,format="%Y-%m-%d")
b <- as.Date(CityTemp$Date,format="%m/%d/%Y")
a[is.na(a)] <- b[!is.na(b)] # Combine both while keeping their ranks
CityTemp$Date <- a # Put it back in your dataframe
### Remove NA
CityTemp <- CityTemp[!(is.na(CityTemp$Monthly.AverageTemp)),]
### Create a subset of the dates for 1900 and newer
Date1 <- as.Date("1900-01-01")
CityTemp1900 <- CityTemp[CityTemp$Date >= Date1,]
### Find the difference between max and min monthly avg temp for each city
maxcitytemp <- tapply(CityTemp1900$Monthly.AverageTemp, CityTemp1900$City, max)
mincitytemp <- tapply(CityTemp1900$Monthly.AverageTemp, CityTemp1900$City, min)
diffcitytemp <- maxcitytemp - mincitytemp
### Sort the data from largest to smallest difference in monthly temperatures and show top 20
diffcitytemp.sorted <- sort(diffcitytemp, decreasing = TRUE)
top20cities <- diffcitytemp.sorted[1:20]
top20cities
### Plot the data
x2names <- names(top20cities)
plot(top20cities,xaxt="n",xlab="City",ylab="Celsius",main="Top 20 City Temp Differences")
axis(1, at=1:length(x2names), labels=x2names)
|
6f6c61fdea9eda86e5e054c717ad694676f85194
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dief/examples/dief.Rd.R
|
eff086c58659c5e5ee0a12914379a39240f26a5b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
dief.Rd.R
|
# Example usage of the dief package: diefficiency metrics over the bundled
# answer-trace data (mirrors the package's Rd examples).
library(dief)
### Name: dief
### Title: Tools for Computing Diefficiency Metrics
### Aliases: dief
### Keywords: package
### ** Examples
# This example uses the answer traces provided in the package.
# These traces record the answers produced by three approaches "Selective",
# "Not Adaptive", "Random" when executing the test "Q9.sparql"
data(traces)
# Plot answer traces for test "Q9.sparql"
plotAnswerTrace(traces, "Q9.sparql")
# Compute dief@t with t the time where the fastest approach produced the last answer.
dieft(traces, "Q9.sparql")
# Compute dief@t after 7.5 time units (seconds) of execution.
dieft(traces, "Q9.sparql", 7.5)
|
c106bdd5a5712bcf3ce0dea21ea3931193a60b0f
|
7ff36b8e727cbbb71df2a071fdb287207af9914c
|
/Plot4.R
|
1594589dc79fcbc40d4b842d54e5a8de2edcbeca
|
[] |
no_license
|
Chrysofin/ExData_Plotting1
|
12b14088fff5549f07a2b770500a383aca89b513
|
24fd46dfce7bdbde7a27afed2b809a29ef4cb3b4
|
refs/heads/master
| 2021-05-27T11:31:30.511679
| 2014-08-11T00:06:09
| 2014-08-11T00:06:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,738
|
r
|
Plot4.R
|
## ###########################################################################
## Exploratory Data Analysis - Plot 4
## Reads the UCI "household_power_consumption.txt" data set (semicolon-
## separated; download it into the working directory first) and writes a
## 2x2 panel of time-series plots for 2007-02-01 / 2007-02-02 to plot4.png.
## ###########################################################################

## Read the raw data. If `h` already exists in the session, this step can be
## skipped to save time.
h <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")

## Parse Date (dd/mm/yyyy) and build a combined date-time column.
## as.POSIXct keeps the column an atomic datetime, which behaves better inside
## a data.frame than the list-based POSIXlt that strptime() returns.
h$Date <- as.Date(h$Date, "%d/%m/%Y")
h$DateTime <- as.POSIXct(strptime(paste(h$Date, h$Time), "%Y-%m-%d %H:%M:%S"))

## Keep only the two days of interest.
x <- subset(h, Date == "2007-02-01" | Date == "2007-02-02")

## The measurement columns are read as text/factor (missing values are coded
## "?"); convert each via character so factor level codes are never returned.
## "?" entries become NA with a coercion warning, as in the original.
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
x[num_cols] <- lapply(x[num_cols], function(col) as.numeric(as.character(col)))

## Render the 2x2 panel to a 480x480-pixel PNG.
png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
par(mar = c(4, 2, 1, 4))

## Top-left: global active power over time.
plot(x$DateTime, x$Global_active_power, xlab = "", ylab = "Global Active Power", type = "l")

## Top-right: voltage over time.
plot(x$DateTime, x$Voltage, xlab = "datetime", ylab = "Voltage", type = "l")

## Bottom-left: the three sub-metering series overlaid on one axis.
plot(x$DateTime, x$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy Sub Metering")
lines(x$DateTime, x$Sub_metering_2, col = "red")
lines(x$DateTime, x$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       pch = "-", col = c("black", "red", "blue"), cex = 0.6)

## Bottom-right: global reactive power over time.
## (Axis label previously read "Global_reactive_ower" - typo fixed.)
plot(x$DateTime, x$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l")

dev.off()
|
ce8f00aa58b7878fb863b588a3a3eb7ad547c692
|
2c09783547da268fc37456f4dfafb7016f3f9744
|
/man/tss_evaluate-function.Rd
|
83c0913c10c8327538872250ca471ad9d424ff4a
|
[] |
no_license
|
rpolicastro/deepTSSscrubbeR
|
8170048b20de8cab71d759b51b5ef6ad62e85ccd
|
ccec83f6355f9efaddd925103ca0d62082664aee
|
refs/heads/master
| 2020-09-10T17:31:05.200261
| 2020-03-17T17:27:05
| 2020-03-17T17:27:05
| 221,779,402
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 309
|
rd
|
tss_evaluate-function.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deep_model.R
\name{tss_evaluate}
\alias{tss_evaluate}
\title{Evaluate TSS Model}
\usage{
tss_evaluate(deep_obj)
}
\arguments{
\item{deep_obj}{tss_obj with trained model}
}
\description{
Evaluate the trained deepTSSScrubbeR model
}
|
f16276e54a22850631a1b58f6c5a0d735f056dbb
|
b1c46d6edeab291df442e8262ccac2bcbd68e08e
|
/new.R
|
785fe0b171420da36b08c3457c4cbd364f7b1108
|
[] |
no_license
|
Dushyant08/test
|
910b86264557f011fec952444ff42589e6ae5da7
|
1c0352c49c574c683b8fd0df5d94caec3b7e0ba3
|
refs/heads/master
| 2021-08-23T16:16:24.454144
| 2017-12-05T16:30:02
| 2017-12-05T16:30:02
| 113,206,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 339
|
r
|
new.R
|
internet=read.csv('winternet.csv')
View(internet)
new=reshape::melt(internet)
View(new)
new1=new%>% group_by(variable)%>%summarise(nal=sum(value,na.rm = T))
View(new3)
str(new1)
new3=tail(new,4222)
graph=ggplot(new2,aes(variable,nal,group= 1))+geom_line(aes(col='yellow'))+geom_point(aes(col='red'))+theme_bw()
graph
|
fb9f585ce1b2c2be821298f0ba6d64abcc2e768e
|
45c37df23202a40dd78472d76af55cc27185e0c0
|
/auto_hook_sam.R
|
1175ad848d0e2e4e8c69794388f8b50ecbf6d44d
|
[] |
no_license
|
snoopycindy/stroke-recommender
|
ec5c94abd6354ab9c1c8c1ba7a08cb6d90b4221b
|
9b23711c29875e196aa3a65809876538c72f6451
|
refs/heads/master
| 2020-05-31T19:06:33.849717
| 2015-02-11T03:19:56
| 2015-02-11T03:19:56
| 28,383,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 380
|
r
|
auto_hook_sam.R
|
# Step 1: run autohotkey (control AnyBodyCon Script)
#convert to .exe
#setwd("C:/Program Files (x86)/AutoHotkey/Compiler/")
#system("Ahk2Exe.exe /in \"E:/Dropbox/touchparse_stroke/hook_sam.ahk\"")
system("c:/Users/MMnetLab/Dropbox/touchparse_stroke/hook_sam.exe")
#pause
# readline()
# setwd("C:/Users/yx/Dropbox/touchparse")
# source('C:/Users/yx/Dropbox/touchparse/parse_v5.R')
|
294a5327a3a9331d1402069288e14a94318158d1
|
37d622ca49c6cfc0eb1277ad078cdf6d83d18eef
|
/sources/initial_BUGS_gmm_G2.R
|
d0916ffb4a59e93e8abe786df8692bc72eb431e3
|
[] |
no_license
|
CynthiaXinTong/MedianGMM
|
be009ef090cd72710ba75eb9bd25331a360c801e
|
a7cc95e9ca863aaa8efc2029b4b195852e3a6f04
|
refs/heads/main
| 2023-05-01T12:18:40.625247
| 2021-05-10T22:04:43
| 2021-05-10T22:04:43
| 345,454,502
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 290
|
r
|
initial_BUGS_gmm_G2.R
|
x1 = c(0:(Time-1))
lm.cf.est = apply(y, 1, function(x) lm(x~x1)$coefficients)
lm.rs.est = apply(y, 1, function(x) summary(lm(x~x1))$sigma^2)
I.quant = quantile(lm.cf.est[1,], probs= c(0.75, 0.25))
S.quant = quantile(lm.cf.est[2,], probs= c(0.75, 0.25))
muLS.init = cbind(I.quant, S.quant)
|
a5b1770f778a42fd4f191724d827aa47f6359bdf
|
6c502888ac46d4a6a543df1b0c0b36a3382ba792
|
/linear_models/Example 50.1 RCB with Means Comparisons/E 50-1.R
|
c0720acaa230efbeadbe70c954603e4baf7cbfe7
|
[
"MIT"
] |
permissive
|
slophaven/CSRMLW
|
7f9c5a9669ba1d171455f99f1a160e6d619cb3d6
|
7bb4767c8bc4d1aba63ba865e6cb871bd08633c0
|
refs/heads/main
| 2023-08-16T01:30:37.309475
| 2021-09-13T17:57:25
| 2021-09-13T17:57:25
| 415,505,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,103
|
r
|
E 50-1.R
|
# Matching SAS and R ------------------------------------------------------
# Randomized Complete Blocks with Means Comparisons and Contrasts
# Source:
# https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.4/statug/statug_glm_examples01.htm
# Setup -------------------------------------------------------------------
pacman::p_load(tidyverse, broom, emmeans)
# Data --------------------------------------------------------------------
plants <- tribble(
~type, ~block, ~StemLength,
"Clarion", 1, 32.7,
"Clarion", 2, 32.3,
"Clarion", 3, 31.5,
"Clinton", 1, 32.1,
"Clinton", 2, 29.7,
"Clinton", 3, 29.1,
"Knox", 1, 35.7,
"Knox", 2, 35.9,
"Knox", 3, 33.1,
"O'Neill", 1, 36.0,
"O'Neill", 2, 34.2,
"O'Neill", 3, 31.2,
"Compost", 1, 31.8,
"Compost", 2, 28.0,
"Compost", 3, 29.2,
"Wabash", 1, 38.2,
"Wabash", 2, 37.8,
"Wabash", 3, 31.9,
"Webster", 1, 32.5,
"Webster", 2, 31.1,
"Webster", 3, 29.7) %>%
arrange(type, block) %>%
mutate(across(c(type, block), factor))
summary(plants)
plants %>%
summarize(
n_block = n_distinct(block),
n_type = n_distinct(type),
n_obs = n()
)
# Analysis ----------------------------------------------------------------
# Test for a significant difference in PULSE between values of SEX by
# running a one way ANOVA using the model PULSE = SEX
# Tidy the results using the tidy() function
# Matches Type 1/3 SS table in SAS
aov1 <- aov(StemLength ~ type + block, data = plants) %>% print()
aov1_tidy <- aov1 %>% tidy() %>% print()
aov1_sumr <- aov1 %>% summary() %>% print()
aov1_glnc <- aov1 %>% glance() %>% print() # Matches R-Sq table in SAS
lm1 <- lm(StemLength ~ type + block, data = plants) %>% print()
lm1_tidy <- lm1 %>% tidy() %>% print()
lm1_sumr <- lm1 %>% summary() %>% print()
lm1_augm <- lm1 %>% augment() %>% print()
lm1_glnc <- lm1 %>% glance() %>% print() # Matches F table in SAS
# glm1 <- glm(StemLength ~ type + block, data = plants) %>% print()
# glm1_tidy <- glm1 %>% tidy() %>% print()
# glm1_sumr <- glm1 %>% summary() %>% print()
# glm1_augm <- glm1 %>% augment() %>% print()
# glm1_glnc <- glm1 %>% glance() %>% print()
# Contrasts ---------------------------------------------------------------
lm1_tidy
# Model terms:
# Default comparisons against Clarion and Block 1
# Intercept : Stem Length for Block 1 and Type Clarion
# Type - Clinton : Difference in Stem Length between Clinton and Clarion w/ B1
# Type - Compost : Difference in Stem Length between Compost and Clarion w/ B1
# Type - Knox : Difference in Stem Length between Knox and Clarion w/ B1
# Type - O'Neill : Difference in Stem Length between O'Neill and Clarion w/ B1
# Type - Wabash : Difference in Stem Length between Wabash and Clarion w/ B1
# Type - Webster : Difference in Stem Length between Webster and Clarion w/ B1
# Block - 2 : Difference in Stem Length between Block 2 and Block 1 w/ Clarion
# Block - 3 : Difference in Stem Length between Block 3 and Block 1 w/ Clarion
emm1 <- emmeans::emmeans(lm1, ~ type)
# SAS Contrasts
# contrast 'Compost vs. others' Type -1 -1 6 -1 -1 -1 -1;
# contrast 'River soils vs. non' Type -1 -1 0 -1 -1 5 -1,
# Type -1 4 0 -1 -1 0 -1;
# contrast 'Glacial vs. drift' Type -1 0 0 1 1 0 -1;
# contrast 'Clarion vs. Webster' Type -1 0 0 0 0 0 1;
# contrast "Knox vs. O'Neill" Type 0 0 0 1 -1 0 0;
contrast_1 <- c(-1, -1, 6, -1, -1, -1, -1)
contrast_2 <- c(-1, -1, 0, -1, -1, 5, -1) # SAS quadratic contrast
contrast_3 <- c(-1, 4, 0, -1, -1, 0, -1) # SAS quadratic contrast
contrast_4 <- c(-1, 0, 0, 1, 1, 0, -1)
contrast_5 <- c(-1, 0, 0, 0, 0, 0, 1)
contrast_6 <- c( 0, 0, 0, 1, -1, 0, 0)
lm1 %>%
emmeans(~type) %>%
contrast(
method = list(
"Compost vs Others" = contrast_1, # Match
"River Soils vs Non" = contrast_2, # No Match
"Glacial vs Drift" = contrast_4, # Match
"Clarion vs Webster" = contrast_5, # Match
"Knox vs O'Neill" = contrast_6)) %>% # Match
tidy() %>%
mutate(f_value = statistic^2) %>%
DT::datatable() %>%
DT::formatRound(c("estimate", "std.error", "statistic", "p.value", "f_value"),
digits = 4)
# Brian's Work ------------------------------------------------------------
# plants2 <- plants
#
# type_contrasts = contrasts(plants2$type)
# type_contrasts = type_contrasts[,1:2]
# type_contrasts[,1] = c(-1, -1, -1, -1, 6, -1, -1)
# type_contrasts[,2] = c(0, 0, 1, -1, 0, 0, 0)
# contrasts(plants2$type) = type_contrasts
#
# # Using lm() function
# lm2 <- lm(StemLength ~ type + block, data = plants2) %>%
# print()
#
# tidy_lm2 <- tidy(lm2) %>% print()
# summary_lm2 <- summary(lm2) %>% print()
# augment_lm2 <- augment(lm2) %>% print()
# glance_lm2 <- glance(lm2) %>% print()
|
c664dcd9141ba1b14f48162e02e16cda58aa62b8
|
f1eab13fd4215e9fe995d562b8a22f2a6cb38a7a
|
/global.R
|
0b4581910814e1202f4e110fc99b68142e59e5e9
|
[] |
no_license
|
estherrvazquez/PFinal-Fundamentos-Matem-ticos
|
b69b572b72f043095eb8f0e2db3b6c0564e38d3e
|
5c7ac790e8bb0d2021d9e1ef870b4fe0b8ebdae2
|
refs/heads/main
| 2023-01-20T19:10:07.796210
| 2020-11-20T16:30:42
| 2020-11-20T16:30:42
| 314,609,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,419
|
r
|
global.R
|
rm(list = ls())
library(ggplot2)
library(dplyr)
library(class)
library(caret)
library(shiny)
library(Metrics)
library(tidyverse)
library(shinyAce)
showfiles <- list(
aceEditor("server",
value = paste(readLines("server.R"), collapse="\n"),
mode="r", height="380px",
readOnly=TRUE),
aceEditor("ui",
value = paste(readLines("ui.R"), collapse="\n"),
mode="r", height="270px",
readOnly=TRUE)
)
ds <- read.csv("datos/heart.csv", header = F)
df <- read.csv("datos/heart.csv", header = F)
names(ds) <- c( "age", "sex", "cp",
"trestbps", "chol",
"fbs", "restecg",
"thalach","exang",
"oldpeak","slope",
"ca","thal","num")
names(df) <- c( "age", "sex", "cp",
"trestbps", "chol",
"fbs", "restecg",
"thalach","exang",
"oldpeak","slope",
"ca","thal","num")
feature.list <- list("age" = "age", "sex" ="sex",
"cp"= "cp","trestbps" = "trestbps",
"chol"="chol","fbs"="fbs",
"restecg"="restecg","thalach"="thalach",
"exang"="exang","oldpeak"="oldpeak",
"slope"="slope","ca"="ca","thal"="thal")
# change the class of all columns to numeric
ds <- as.data.frame(apply(ds, 2, as.numeric))
df <- as.data.frame(apply(df, 2, as.numeric))
# remove na/missing values (original data shows as ?)
ds <- na.omit(ds)
df <- na.omit(df)
# df_1 <- df %>%
# filter(num == 1) %>%
# group_by(thal) %>%
# count()
#
# df_2 <- df %>%
# filter(num == 0) %>%
# group_by(thal) %>%
# count()
ds$num[ds$num > 0] <- 1
df$num[df$num > 0] <- 1
ds$thal[ds$thal == 0] <- 1
df$thal[df$thal == 0] <- 1
df$num <- as.factor(df$num)
theme_set(theme_bw()) # The current theme is automatically applied to every plot we draw
df_s <- df
df_s <- df %>%
mutate(num = fct_recode(num, absent = "0", present = "1"))
chart <-
df_s %>%
count(num) %>%
mutate(pct = round(n / sum(n) * 100), num = reorder(num, n)) %>%
ggplot(aes(x = num, y = n)) +
geom_segment(aes(xend = num, yend = 0), size = 1.5, lineend = "square") +
geom_point(size = 10, color = c("#17B3BF", "#FAFA77")) +
theme_bw() +
labs(x = "Heart disease", y = "Number of patients") +
geom_text(aes(label = str_c(pct, " %")), vjust = -0, size = 2.5, colour = "black") +
theme(axis.title.y = element_text(size = 12, face = "bold"), axis.title.x = element_text(size = 12, face = "bold"), axis.text.x = element_text(vjust = 0.3))
df$sex <- as.factor(df$sex)
df$cp <- as.factor(df$cp)
df$restecg <- as.factor(df$restecg)
df$exang <- as.factor(df$exang)
df$slope <- as.factor(df$slope)
df$thal <- as.factor(df$thal)
# standardize/normalize the data
standardized.X <- scale(ds[,-14])
set.seed(55)
training.index <- createDataPartition(ds$num, p = .5,list = F)
train.X <- standardized.X[training.index,]
test.X <- standardized.X[-training.index,]
train.Y <- ds$num[training.index]
test.Y <- ds$num[-training.index]
theme1 <- trellis.par.get()
theme1$plot.symbol$col = rgb(.2, .2, .2, .4)
theme1$plot.symbol$pch = 16
theme1$plot.line$col = rgb(1, 0, 0, .7)
theme1$plot.line$lwd <- 2
trellis.par.set(theme1)
table.settings <- list(searching = F, pageLength = 5, bLengthChange = F,
bPaginate = F, bInfo = F )
|
80c149eacf804fbdc81ed8d15d4d32143394c6d9
|
f5ade7e9b49dc440db18cdf971a65ec83253db3e
|
/analysis/R/students_leaving_ni.R
|
d0ec3da298c31b0e8ca2acf1cdc93869a2434605
|
[] |
no_license
|
O1sims/DigitalStrategyNI
|
3eeedd360e3ff19eb0f710c046394f0d5f182151
|
0eba24144f7e8e9dd30cebcd1f320d2f80540546
|
refs/heads/master
| 2020-03-31T13:26:23.427988
| 2019-10-14T16:02:46
| 2019-10-14T16:02:46
| 152,255,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
students_leaving_ni.R
|
library(ggplot2)
library(ggthemes)
library(magrittr)
year <- seq(
from = 2007,
to = 2018)
totalLeavingPercent <- c(
33.99, 32.91, 31.63, 34.62, 37.35, 32.08, 31.23, 31.82, 36.51, 36.01, 35.5, 38)
ictLeavingPercent <- c(
8.3, 12.5, 10.7, 9.5, 12.45, 15.54, 22.35, 20.45, 27.06, 28.45, 35.93, 42.03)
df <- data.frame(
year = year,
totalLeavingPercent = totalLeavingPercent,
ictLeavingPercent = ictLeavingPercent,
stringsAsFactors = FALSE)
ggplot(data = df, aes(x = year)) +
geom_point(aes(y = totalLeavingPercent, colour = "a")) +
geom_line(aes(y = totalLeavingPercent, colour = "a")) +
geom_point(aes(y = ictLeavingPercent, colour = "b")) +
geom_line(aes(y = ictLeavingPercent, colour = "b")) +
scale_y_continuous(limits = c(0, 50)) +
xlab("Year") +
ylab("Students leaving NI (%)") +
scale_colour_discrete(
name = " ",
labels = c("Average", "EEECS")) +
theme_minimal() +
theme(legend.position = "bottom")
ggsave(
filename = getwd() %>%
paste0("/analysis/images/student-percentages-leaving-ni.png"),
device = "png")
|
86fd39290a1a225ded99142113e64ac0c83ecc0c
|
fa458927b2c8baee881318dc64d4cb786905950e
|
/R/genderizeTrain.R
|
5472c4ff7fff548cab3deb5b6df218eec99e6a2c
|
[] |
no_license
|
nathanvan/genderizeR
|
eb4627cdd28d4b7b2e09781c4393ce14b27a37ae
|
7b66bb2d3ef54d6e88c15b387a23688d7e5f5131
|
refs/heads/master
| 2021-01-13T06:08:27.031339
| 2015-10-14T18:54:48
| 2015-10-14T18:54:48
| 44,268,976
| 0
| 0
| null | 2015-10-14T18:54:48
| 2015-10-14T18:47:14
|
R
|
UTF-8
|
R
| false
| false
| 4,813
|
r
|
genderizeTrain.R
|
#' Training genderize function
#'
#' \code{genderizeTrain} predicts gender and checks different combination
#' of 'probability' and 'count' paramters.
#'
#'
#' @param x A text vector that we want to genderize
#' @param y A text vector of true gender labels for x vector
#' @param givenNamesDB A dataset with gender data (could be an output
#' of \code{findGivenNames} function)
#' @param probs A numeric vector of different probability values.
#' Used to subseting a givenNamesDB dataset
#' @param counts A numeric vector of different count values.
#' Used to subseting a givenNamesDB dataset
#' @param parallel If TRUE it computes errors with the use
#' of \code{parallel} package and available cores. It is design to work
#' on windows machines. Default is FALSE.
#'
#' @return A data frame with all combination of parameters and computed
#' sets of prediction indicators for each combination:
#' \item{errorCoded}{classification error for predicted & unpredicted gender}
#' \item{errorCodedWithoutNA}{for predicted gender only}
#' \item{naCoded}{proportion of items with manually codded gender and with unpredicted gender }
#' \item{errorGenderBias}{net gender bias error}
#'
#' @seealso Implementation of parallel mclapply on Windows machines by Nathan VanHoudnos \url{http://edustatistics.org/nathanvan/setup/mclapply.hack.R}
#'
#' @examples
#' \dontrun{
#'
#' x = c('Alex', 'Darrell', 'Kale', 'Lee', 'Robin', 'Terry', 'John', 'Tom')
#' y = c(rep('male',length(x)))
#' givenNamesDB = findGivenNames(x)
#' probs = seq(from = 0.5, to = 0.9, by = 0.05)
#' counts = c(1, 10)
#' genderizeTrain(x = x, y = y, givenNamesDB = givenNamesDB,
#' probs = probs, counts = counts)
#'
#' }
#'
#' @export
genderizeTrain = function(x,
y,
givenNamesDB,
probs,
counts,
parallel = FALSE
){
probability <- count <- NULL
givenNamesDB = data.table::as.data.table(givenNamesDB)
grid = expand.grid(prob = probs, count = counts)
if (parallel == FALSE) {
grid$errorCoded = NA
grid$errorCodedWithoutNA = NA
grid$naCoded = NA
grid$errorGenderBias = NA
# print(grid)
for (g in 1:NROW(grid)) {
givenNamesTrimed = givenNamesDB[probability >= grid[g,]$prob &
count >= grid[g,]$count,]
xGenders = genderize(x = x, genderDB = givenNamesTrimed)
errors = classificatonErrors(labels = y, predictions = xGenders$gender)
grid[g,]$errorCoded = errors$errorCoded
grid[g,]$errorCodedWithoutNA = errors$errorCodedWithoutNA
grid[g,]$naCoded = errors$naCoded
grid[g,]$errorGenderBias = errors$errorGenderBias
print(grid[g,])
cat('Total combinations of paramaters: ',NROW(grid),'\n')
}
return(grid)
}
# parallel version
# writeLines(c("starting parallel computations..."), "training.log")
funcPar = function(g, x, y) {
givenNamesTrimed =
givenNamesDB[probability >= (grid[g,]$prob) &
count >= (grid[g,]$count),]
xGenders = genderize(x = x, genderDB = givenNamesTrimed)
errors = classificatonErrors(labels = y, predictions = xGenders$gender)
# sink("training.log", append = TRUE)
#
# cat(paste0('[',NROW(grid),']: ', g,'\n'))
#
# sink()
list(prob=grid[g,]$prob,
count=grid[g,]$count,
errorCoded=errors$errorCoded,
errorCodedWithoutNA = errors$errorCodedWithoutNA,
naCoded = errors$naCoded,
errorGenderBias = errors$errorGenderBias
)
}
# Inspired by:
# Nathan VanHoudnos
## nathanvan AT northwestern FULL STOP edu
## July 14, 2014
## Create a cluster
size.of.list <- length(list(1:NROW(grid))[[1]])
cl <- parallel::makeCluster( min(size.of.list, parallel::detectCores()) )
#parallel::clusterExport(cl, c('x', 'y', 'grid'))
loaded.package.names = c('genderizeR', 'data.table')
parallel::parLapply( cl, 1:length(cl), function(xx){
lapply(loaded.package.names, function(yy) {
require(yy , character.only=TRUE)})
})
## Run the lapply in parallel
outcome = parallel::parLapply( cl, 1:NROW(grid), function(i) funcPar(i, x,y))
parallel::stopCluster(cl)
data.table::rbindlist(outcome)
}
|
444a12e430cda046af25f35af30e4894553616ca
|
7035ebb7e67f0efb97f0cae286064dcd453be963
|
/man/makeWellTS.Rd
|
e36f6bb60b367645d6638d1df0936c79fe7b343d
|
[
"Apache-2.0"
] |
permissive
|
jayrbrown/bcgroundwater
|
6747a1f0e25516d618e3936b7653a7b4f2e85bf3
|
91edfe11eaa982f966fdcc030a47c613f0fd7252
|
refs/heads/master
| 2020-12-11T07:33:13.091570
| 2015-06-04T17:58:02
| 2015-06-04T17:58:02
| 36,933,497
| 0
| 0
| null | 2015-06-05T13:14:27
| 2015-06-05T13:14:27
| null |
UTF-8
|
R
| false
| false
| 657
|
rd
|
makeWellTS.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/makeWellTS.R
\name{makeWellTS}
\alias{makeWellTS}
\title{Get full time series with missing values interpolated}
\usage{
makeWellTS(df)
}
\arguments{
\item{df}{A monthly dataframe created by `monthlyValues`. Must minimally include
fields `EMS_ID`, `Well_Num` `Date`, 'med_GWL`, `nReadings`}
}
\value{
A full monthly time series with interpolated missing values,
retaining all of the columns in the original data frame.
}
\description{
Takes a dataframe with monthly values and creates a full time series, interpolating
missing values.
}
\examples{
\dontrun{
}
}
|
40105d31b7a75e5dcd37b2702e6559b52260b1b0
|
154f590295a74e1ca8cdde49ecbb9cbb0992147e
|
/R/ra2.R
|
d8a32b367aa6306ad364989bade2c2143fe2e7d0
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0"
] |
permissive
|
klingerf2/EflowStats
|
2e57df72e154581de2df3d5de3ebd94c3da0dedf
|
73891ea7da73a274227212a2ca829084149a2906
|
refs/heads/master
| 2017-12-07T10:47:25.943426
| 2016-12-28T20:52:42
| 2016-12-28T20:52:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 987
|
r
|
ra2.R
|
#' Function to return the RA2 hydrologic indicator statistic for a given data frame
#'
#' This function accepts a data frame that contains a column named "discharge" and calculates
#' RA2; Variability in rise rate. Compute the standard deviation for the positive flow changes. RA2 is 100 times
#' the standard deviation divided by the mean (percent-spatial).
#'
#' @param qfiletempf data frame containing a "discharge" column containing daily flow values
#' @return ra2 numeric containing RA2 for the given data frame
#' @export
#' @examples
#' qfiletempf<-sampleData
#' ra2(qfiletempf)
ra2 <- function(qfiletempf) {
qfiletempf <- qfiletempf[order(qfiletempf$date),]
meanra2 <- ra1(qfiletempf, pref = "mean")
diffbtdays <- diff(qfiletempf$discharge, lag = 1,
differences = 1)
findrisevalues <- subset(diffbtdays, diffbtdays >
0)
sddra2 <- sd(findrisevalues)
ra2 <- round((sddra2 * 100)/meanra2,digits=2)
return(ra2)
}
|
497667347f8a0856bba335ccb298767f63a50c64
|
9eb652d1c1026d21b74d732c6b8f4ce7392b26e1
|
/BPC_Test_Illumina.R
|
2c22653284023c32d69dde5763b273338f0a7ab8
|
[] |
no_license
|
qindan2008/Scripts_MolCharAAvsEA
|
6af5c2a64469e920fa073a451b810ac58377cef5
|
642b58b997dcd4cd1fc3142dd0b487450afd13be
|
refs/heads/master
| 2023-06-18T08:44:41.356085
| 2021-07-22T09:07:31
| 2021-07-22T09:07:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,026
|
r
|
BPC_Test_Illumina.R
|
#This script is comprised of functions to define a feature of Chromotripsis called:: Breakpoint Cluster in Illumina assay called:: GSA.
BPC_Test<- function(CNV_Test, Events_Possible, Events_Threshold){
CNV_Test=CNV_Test[!(CNV_Test$Chromosome=='X' | CNV_Test$Chromosome=='XY' | CNV_Test$Chromosome=='Y'),]
pp=lapply(rownames(table(as.character(CNV_Test$Chromosome))), function(x) split(CNV_Test, CNV_Test$Chromosome==x))
names(pp)=rownames(table(as.character(CNV_Test$Chromosome)))
##result_Details:: Critical for Visualization
Result_Details=lapply(pp, function(x) Iterative_Wilcox_Test( Event_Distance(OSC_Events( tryCatch(x[[2]], error=function(cond){data.frame()}), Events_Possible ) ), Event_Distance( x[[1]]) ) )
names(Result_Details)[sapply(Result_Details, function(x) (x[3]>Events_Threshold & x[1]<0.1) & x[2]>5 )]
}
#Returns Oscillatory CNV events
OSC_Events<- function(Events_DF, Events_Possible){
Events_DF[!is.na(match(Events_DF$CN.State, Events_Possible)),]
}
##Given a dataframe of Start and End, following function generates distance between events.
Event_Distance<- function(DF){
if(nrow(DF)>1){
sapply(1:(nrow(DF)-1), function(i) as.numeric(DF$Start[i+1]) - as.numeric(DF$End[i]) )
}
}
#Given a Chromosome CN Breakpoint vector, this functions find the cluster which having significantly closer breakpoints.
Iterative_Wilcox_Test<- function(a, b){
Test= tryCatch(wilcox.test(a, b, alternative='l')$p.value, error=function(cond) { 1 })
while(Test>0.1 & Test!=1) {
a = Remove_Distant_Event(a)
Test= tryCatch(wilcox.test(a, b, paired= F, correct=F, exact=F, alternative='l')$p.value, error=function(cond) { 1 })
}
c(P.Value=Test, Total_Events_Count= length(b), CHTP_Events_Count=length(a), Median=median(a))
}
#Given a distance vector, this one removes the first or the last events:: Whichever is great.
Remove_Distant_Event<- function(a){
First_Event= 1
Last_Event= length(a)
if(a[First_Event] > a[Last_Event]){
a=a[-First_Event]
}
else{
a=a[-Last_Event]
}
a
}
|
46592f887a5ff24946d916883f7d61366dd5afeb
|
17205acfbd00aeb7edf976966f251698c5ae2458
|
/GpOutput2D.Rcheck/00_pkg_src/GpOutput2D/R/gp_Fpca2d.R
|
b64761ff4d344a10b41ce419349790c19d373511
|
[] |
no_license
|
tranvivielodie/GpOutput2D
|
148be512b5ecd99fba7d105fa3171d8c86d531fd
|
9f779d33fb86f8bff8b3da3db4845c37b3045f75
|
refs/heads/main
| 2023-03-04T19:28:27.694988
| 2021-02-18T10:11:00
| 2021-02-18T10:11:00
| 316,458,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,666
|
r
|
gp_Fpca2d.R
|
#' @title Gaussian Process Model on principal components of \code{Fpca2d},
#' by using \code{kergp} package
#'
#' @description the function \code{gp} of the \code{\link{kergp}} package is use to
#' fit kriging models on each principal component modeled on \code{\link{Fpca2d}} (for more details, see \code{\link{gp}} ).
#'
#'
#' @param formula an object of class "formula" (or a list of "formula" which the length is equal to the number of modeled principal component)
#' specifying the linear trend of the kriging model (see \code{\link{lm}}) on each principal component.
#' This formula should concern only the input variables (\code{design}), and not the output (\code{response}).
#' The default is ~1, which defines a constant trend on each principal component.
#' @param design a data frame representing the design of experiments.
#' The ith row contains the values of the d input variables corresponding
#' to the ith evaluation.
#' @param response n object of class \code{Fpca2d} which contains eigen
#' decomposition of the model/function ouput.
#' @param cov a covariance kernel object or call
#' (or a list of covariance kernel objects or call )
#'
#' @param estim Logical. If TRUE, the model parameters are estimated
#' by Maximum Likelihood. The initial values can then be specified using
#' the parCovIni and varNoiseIni arguments of mle,covAll-method passed though dots. If FALSE, a simple Generalized Least Squares estimation will be used, see gls,covAll-method. Then the value of varNoise must be given and passed through dots in case noise is TRUE.
#' @param ... other inputs of \code{\link{gp}}.
#'
#' @seealso \code{\link{gp}} \code{\link{kergp}}
#'
#' @importFrom kergp gp
#' @importFrom stats as.formula
#'
#' @return a list of object of class \code{\link{gp}} for each modeled principal component.
#'
#' @examples
#'
#' ################################
#' ### two-dimensional data set ###
#' ################################
#' n<-200 # size of the learning sample
#' nz<-64; z <- seq(-90,90,length=nz) # spatial domain
#'
#' ### inputs of Campbell2D ###
#' library(lhs)
#' library(DiceDesign)
#'
#' x <- maximinLHS(n=n,k=8)
#' X <-maximinSA_LHS(x)$design
#' X<-X*6 -1
#'
#' # Campbell2D
#' Y <- Campbell2D(X,z,z)
#'
#' # change X on data.frame
#' colnames(X)<-paste("x",1:8,sep="")
#'
#' ############
#' ### FPCA ###
#' ############
#'
#' ### by using wavelet basis ###
#' fpca_w<- Fpca2d(Y,method="Wavelets",
#' wf="d4", J=1, # wavelet parameters
#' ncoeff=1200, rank.=2) # FPCA configuration
#'
#' #####################
#' ### ###
#' ### Kriging model ###
#' ### ###
#' #####################
#'
#' #------------------------------------#
#' #------------------------------------#
#' # Example by using wavelet basis #
#' #------------------------------------#
#' #------------------------------------#
#'
#' #--------------------------------------------#
#' # Same kernel for all principal components #
#' #--------------------------------------------#
#'
#' ## kernel
#' myCov <- covTS(inputs = colnames(X),
#' kernel = "k1Matern5_2",
#' dep = c(range = "input"),
#' value = c(range = 0.4))
#'
#' myGp<- gp_Fpca2d(design=X, response=fpca_w, cov=myCov,estim=FALSE)
#'
#' #-------------------------------------------------------------#
#' # Different kernel and formula for each principal component #
#' #-------------------------------------------------------------#
#'
#' \dontrun{
#' ## kernel of firt principal component
#' myCov1<-myCov
#'
#' ## kernel of second principal component
#' myCov2 <- covTS(inputs = colnames(X),
#' kernel = "k1Matern3_2",
#' dep = c(range = "input"),
#' value = c(range = 0.4))
#'
#' ## List of both kernels
#' myCovList <- list(myCov1,myCov2)
#'
#' ## Gp model
#' myGp2<- gp_Fpca2d(formula=list(~1,~x1+x2+x3+x4+x5+x6+x7+x8),
#' design=X, response=fpca_w, cov=myCovList,estim=FALSE)
#' }
#' ##################
#' ### Prediction ###
#' ##################
#'
#' NewX<-matrix(runif(5*8,min=-1,max=5),ncol=8) # newdata
#' RealY <- Campbell2D(NewX,z,z)# real maps
#'
#' # change NewX on data.frame
#' colnames(NewX)<-colnames(X)
#'
#' #------------------------------------#
#' #------------------------------------#
#' # Example by using wavelet basis #
#' #------------------------------------#
#' #------------------------------------#
#' pw.UK <- predict(myGp,NewX,"UK")
#'
#' ###############################
#' ### Prediction RMSE and Q2 ###
#' ###############################
#'
#' #------------------------------------#
#' #------------------------------------#
#' # Example by using wavelet basis #
#' #------------------------------------#
#' #------------------------------------#
#' err.pw.UK <-error.predict(RealY,pw.UK,fpca_w,rtx.scores=TRUE)
#'
#' ### scores ###
#' print(err.pw.UK$scores$rmse)
#' print(err.pw.UK$scores$Q2)
#'
#' ### images/maps ###
#' library(fields)
#' image.plot(err.pw.UK$y$rmse, main="RMSE")
#' image.plot(err.pw.UK$y$Q2, main="Q2")
#'
#' @export
gp_Fpca2d <-function(formula=~1, design, response, cov,estim=TRUE,...){
# scores of fpca
y<-response$x; nPC <-ncol(y)
#________________
# formula to list
#________________
if(class(formula)=="formula"){
res_i <-formula
formulaPC <-(foreach(i=1:nPC,.combine = list,.multicombine = TRUE)%do%
res_i) # end foreach
}else{
formulaPC = formula
}# end ifelse
#---------------------------------------------
# write formulas in the right form for each PC
#---------------------------------------------
formula <- lapply(1:nPC, function(i){
output <- "yi" # output name of gp
# as.character(formula)
formula_character <- as.character(formulaPC[[i]])
formule <- foreach(i=1:length(formula_character),.combine = paste,.multicombine = TRUE)%do%{
formula_character[i]
} # end formule
past_formula <- paste(output,formule,sep="")
return(as.formula(past_formula))
})# end formula
#________________
#________________
# function to list
#________________
if((!is.list(cov))){
res_i <-cov
cov <-(foreach(i=1:nPC,.combine = list,.multicombine = TRUE)%do%
res_i)# end foreach
}# end if
#________________
#%%%%%%%%%%%%%
### Models ###
#%%%%%%%%%%%%%
designNames<-colnames(design) # input names
m <-lapply(1:nPC,function(i){
yi<-as.numeric(y[,i])
data.input <- data.frame(design,yi=yi)
mi<-gp(formula=formula[[i]], data=data.input, inputs = designNames,cov=cov[[i]],
estim=estim,...)
return(mi)
}) # end m
class(m)<-"gp_Fpca2d"
attr(m,"fpca")<-response
return(m)
}# end gp_Fpca2d
|
03294f864e5388cee84aae834c2b13d9c52efb46
|
cda18b3a8116cf8e0d056e30216ce5b1c36108c3
|
/Code/montyhall.R
|
fe51ec6238868587909a8e61805b876a02b531c6
|
[] |
no_license
|
dspluta/StatsGradBootcamp2019
|
cd3b42890630257c927ae19d6999cfbe24632fd5
|
35692c5e8895612bff287a37483a24e243358313
|
refs/heads/master
| 2021-07-19T10:42:43.007787
| 2020-08-24T15:57:04
| 2020-08-24T15:57:04
| 206,882,968
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
montyhall.R
|
library(ggplot2)
nsims <- 100
## Need a modified version of the sample function
resample <- function(x, ...) x[sample.int(length(x), ...)]
dat <- data.frame(stay = rep(NA, nsims), switch = NA)
for (i in 1:nsims) {
doors <- 1:3
car <- resample(doors, 1)
goats <- doors[doors != car]
pick <- resample(doors, 1)
reveal <- resample(goats[goats != pick], 1)
new_pick <- doors[(doors != reveal) & (doors != pick)]
dat$stay[i] <- pick == car
dat$switch[i] <- new_pick == car
}
ggplot() + emojifont::geom_emoji("goat", color='steelblue') +
theme_void()
colMeans(dat)
|
4b4da998b639df32ccecd5076bc495a5814499c9
|
ee1247f9415bb05e6be6c466e947ddf649f49530
|
/_engagement.R
|
5d054c894bca31b27ec4c675bcb67aa4226a0488
|
[] |
no_license
|
ronald-1996/Ronald_proj
|
8e2f65accfc40e3f498ba6b21ae0815e7a2ed9d2
|
61f1748f4532f2207f08fafb5c1177f88ec519f7
|
refs/heads/master
| 2020-12-20T09:35:53.159853
| 2020-01-24T16:49:10
| 2020-01-24T16:49:10
| 236,030,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,337
|
r
|
_engagement.R
|
library (PivotalR) #deals with PostgreSQL or Pivotal databases
library (RPostgreSQL) #access PostgreSQL databases
library (DBI) #interface between R and relational DBMS
library (data.table)
library (randomForest) # tree based model - bagging
library (nnet) # neural network
library (Matrix)
library (foreach)
library (glmnet) # linear model
library (brnn)
library (lattice)
library(ggplot2)
library (caret)
library (RRF)
library (dummies)
library (gbm)
library (xgboost)
library (LiblineaR)
library (nnls)
library(dplyr)
library(caret)
library(caretEnsemble)
library(mice)
library(doParallel)
library(car)
library(mlbench)
library(ggpubr)
library(Hmisc)
library(corrr)
library(xlsx)
library(UBL)
library(e1071)
training_data <- read.csv("Training.csv");
for(i in names(training_data))
{
if(typeof(training_data[[i]]) == "integer")
{
x<-which(training_data[[i]] == 9999);
x1 <- count(as.data.frame(x));
if(x1>= 25000)
{
index = grep(i, colnames(training_data));
training_data<-training_data[,-index];
}
}
}
for(i in names(training_data))
{
total <- sum(is.na(training_data[[i]]))
if(total>= 5000)
{
index = grep(i, colnames(training_data));
training_data<-training_data[,-index];
}
}
for(i in names(training_data))
{
if(typeof(training_data[[i]]) == "integer" && !startsWith(i,"target"))
{
x<-which(training_data[[i]] == 0);
x1 <- count(as.data.frame(x));
if(x1>= 30000)
{
index = grep(i, colnames(training_data));
training_data<-training_data[,-index];
}
}
}
training_data$db_industry <- NULL
training_data$idc_verticals <- NULL
training_data$db_audience <- NULL
balancedata<-NULL
#Smote encoding
balancedata <- SmoteClassif(target~., training_data, C.perc="balance", k=5, dist="HEOM", p=2)
target <- training_data$target
training_data$target <- as.factor(training_data$target)
#Random forest on training data
model_rf<-randomForest(target~.,data = training_data,importance=TRUE,ntree=100,mtry=20)
for(i in names(balancedata))
{
if(nlevels(balancedata[[i]]) > 53)
{
index = grep(i, colnames(balancedata));
balancedata<-balancedata[,-index];
}
}
for(i in names(training_data))
{
if(is.factor(training_data[[i]]) && nlevels(training_data[[i]])>53)
{
index = grep(i, colnames(training_data));
training_data<-training_data[,-index];
}
}
training_data$Training_final_265.target <- NULL
training_data$db_country <- NULL
training_data$db_state <- NULL
training_data$db_audience <- NULL
validation_data<- read.csv("Validation.csv", header=TRUE)
validation_data$target <- as.factor(validation_data$target)
validation_Rf <-validation_data
#Calculating confusion matrix
prediction_train_rf<-predict(model_rf,validation_Rf,type = "class")
str(validation_Rf$target)
confusionMatrix_RF <- confusionMatrix(prediction_train_rf,validation_Rf$target)
confusionMatrix_RF
#Linear regression
library(LiblineaR)
BalanceData_LogReg <-balancedata
#BalanceData_LL$target <-BalanceData_LL$Training_final_265.target
BalanceData_LogReg$Training_final_265.target <- NULL
BalanceData_LogReg$db_country <- NULL
BalanceData_LogReg$db_state <- NULL
BalanceData_LogReg$db_audience <- NULL
#remove all the factors from balancedData_Logreg
for(i in names(BalanceData_LogReg))
{
if(is.factor(BalanceData_LogReg[[i]]))
{
index = grep(i, colnames(BalanceData_LogReg));
BalanceData_LogReg<-BalanceData_LogReg[,-index];
}
}
s<-scale(BalanceData_LogReg,center = T,scale = T)
yTrain<-balancedata$Training_final_265.target #target
test_logReg<- read.csv("Validation.csv", header=TRUE)
test_logReg$target <- as.factor(test_logReg$target)
test_logReg_data <- test_logReg[,colnames[c(1:91)]]
test_logReg_data$target <- test_logReg$target
test_logReg_data$db_country <- NULL
test_logReg_data$db_state <- NULL
test_logReg_data$db_audience <- NULL
str(test_logReg_data)
test_logReg_data1=test_logReg_data
test_logReg_data1$target <- NULL
#TestData <-Test
xtest<-test_logReg_data1
ytest<-test_logReg_data$target
Logistic_Regression<-LiblineaR(data=s,target=yTrain,type=0,cost=0.8,cross=5)
# Find the best model with the best cost parameter via 10-fold cross-validations
tryTypes <- c(0:7)
tryCosts <- c(1000,1,0.001)
bestCost <- NA
bestAcc <- 0
bestType <- NA
for(ty in tryTypes){
for(co in tryCosts){
acc <- LiblineaR(data=s, target=yTrain, type=ty, cost=co, cross=5, verbose=FALSE)
cat("Results for C=",co," : ",acc," accuracy.\n",sep="")
if(acc>bestAcc){
bestCost <- co
bestAcc <- acc
bestType <- ty
}
}
}
bestCost
cat("Best model type is:",bestType,"\n")
cat("Best cost is:",bestCost,"\n")
cat("Best accuracy is:",bestAcc,"\n")
str(yTrain)
m <- LiblineaR(data=s,target=yTrain,type=bestType,cost=bestCost)
s2 <- scale(xtest,attr(s,"scaled:center"),attr(s,"scaled:scale"))
p <- predict(m,s2)
res <- table(p$predictions,ytest)
confusionMatrix_LR <- confusionMatrix(p,ytest)
confusionMatrix_LR
print(res)
acc<-sum(diag(res))/sum(res)
acc
#--------------L1 Norm----------------------------------------------------------------------------
set.seed(555)
m1<-LiblineaR(data=s,target=yTrain,type=6,cost=bestCost)
p1<-predict(m1,s2)
res1 <- table(p1$predictions,ytest)
acc1<-sum(diag(res1))/sum(res1)
acc1
write.csv(training_data,file = "training_datax.csv")
View(training_data)
|
1d8d45161ed4fc595030d14a459dc4449eb9c1a0
|
039644ba9b0adb4f65ffa2162a5741dc5ee03570
|
/2_importData.R
|
993618ffc2193fea06fb67efad62dcae2c480b2f
|
[] |
no_license
|
theresakat/FDIA
|
b3b6133f8ae6f7bc422057f74b9dea0bdeb0cae7
|
2477ce5975c59f88ea22f68c81810f90f44da1dc
|
refs/heads/master
| 2021-06-25T05:49:22.910601
| 2018-11-30T18:11:48
| 2018-11-30T18:11:48
| 131,357,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,589
|
r
|
2_importData.R
|
# 2_importData.R
#
# Purpose; Use this script to load your CSV data for clean up using the 2_clean.R script.
# Simple loading code that reads a directory #
# library(plyr)
#
# files <- dir("raw", full = T)
# names(files) <- gsub("\\.csv", "", dir("raw"))
#
# Load all csv files into a single data frame and give informative column names
# bnames <- ldply(files, read.csv, header = F, skip = 1, nrows = 1000, stringsAsFactors = FALSE)
# names(bnames) <- c("file", "rank", "boy_name", "boy_num", "girl_name", "girl_num")
# Set working directory and path to input CSV on Windows laptop
mywd<-"C:\\temp\\FDIA"
setwd(mywd)
myfile<-paste(mywd,"\\CSV\\Framework Data Inventory Assessment v. 1.0.csv",
sep = "")
# myfile<-paste(mywd,"\\CSV\\data_num.csv", sep = "")
scores<-paste(mywd,"\\CSV\\Scoring.csv", sep="")
# # Set working directory and path to input CSV on Mac
# mywd<-"/Users/tkb/Work/GEO/fdia-mac"
# setwd(mywd)
# # myfile<-paste(mywd,"/CSV/Framework Data Inventory Assessment v. 1.0.csv", sep = "")
# scores<-paste(mywd,"/CSV/Scoring.csv", sep="")
# Read the CSV file (Survey Monkey provides a CSV in its downloads)
surveyData<-read.csv(c(myfile),header=F, sep=",", skip = 3)
scoring<-read.csv(c(scores), header=T, sep=",", nrows = 268)
# Read the exported Framework MASTER from Framework database
# tblfwname<-paste("","CSV", "1_tblFrameworkData_MASTER.csv", sep = "/") # Mac
tblfwname<-paste("\\","CSV", "1_tblFrameworkData_MASTER.csv", sep = "\\") # Windows
myfile<-paste(mywd,tblfwname, sep = "")
dat<-read.csv(c(myfile),header=T, sep = ",")
|
6cbdd8b3fe17d649e1810f87275f386cde04b41f
|
f44f88f39935e2879ebb3ff7f2abb11258e5d46f
|
/R-scripts/Plot_MAPK-compare2.R
|
73f710fd6e4890795bf06496c9dd2ea506904ef4
|
[] |
no_license
|
oncoapop/data_reporting
|
d5d98b9bf11781be5506d70855e18cf28dbc2f29
|
7bb63516a4bc4caf3c92e31ccd6bcd99a755322b
|
refs/heads/master
| 2022-08-23T20:21:18.094496
| 2020-05-22T00:51:51
| 2020-05-22T00:51:51
| 261,604,041
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,623
|
r
|
Plot_MAPK-compare2.R
|
# Script to plot data from NMD expression correlation
# Load R libraries
library(ggplot2)
library(reshape2)
library(ggrepel)
library(scales)
##########################################################
### CHANGE THESE PARAMETERS FOR EACH RUN OF THE SCRIPT ###
##########################################################
# Inputs
#file1="HCT116_T_202_normalized.fpkm"
#file1="HCT116_T_595_normalized.fpkm"
#file1="Hela_T_202_normalized.fpkm"
file1="Hela_T_595_normalized.fpkm"
dir1="/home/amazloomian/Projects/SplicingProject/EIF4A3_STAR/expressionClustering_NMD_plots/data_normalized"
infile1=paste(dir1,file1,sep="/")
# To make sure do not mix and match, use $base
base=""
#file2=paste(base, "HCT116_T_228_normalized.fpkm", sep="")
#file2=paste(base,"HCT116_T_598_normalized.fpkm", sep="")
#file2=paste(base,"Hela_T_228_normalized.fpkm", sep="")
file2=paste(base,"Hela_T_598_normalized.fpkm", sep="")
dir2="/home/amazloomian/Projects/SplicingProject/EIF4A3_STAR/expressionClustering_NMD_plots/data_normalized"
infile2=paste(dir2,file2,sep="/")
##########################################################
# Outputs
cell_line="HeLa"
#cell_line="HCT-116"
drug1="T-595"
#drug1="T-202"
drug2="T-598"
#drug2="T-298"
series="0, 0.5, 2.0, 5.0, 10, 20"
Expt<-paste(paste(paste(design,cell_line,sep="_"),drug1,sep="_"),drug2,sep="-")
#Design
design=paste("Normalized_",drug1,"vs",drug2,sep="")
outdir="/home/dyap/Projects/eIF4A3_NMD/Plots"
fname=paste(outdir,Expt,sep="/")
##########################################################
data1<-read.table(file=infile1, header=TRUE)
data2<-read.table(file=infile2, header=TRUE)
#Expr1 <- data1[grep("MAPK", data1$gene_short_name), ]
Expr1<-data1[data1[,2] %in% c("MAPK4","MAPK2","MAPK1", "MAPK3","MAPK5","MAPK6","MAPK7","MAPK8","MAPK9","MAPK10","MAPK11","MAPK12","MAPK13","MAPK14","MAPK15"),]
#Expr2 <- data2[grep("MAPK", data2$gene_short_name), ]
Expr2<-data2[data2[,2] %in% c("MAPK4","MAPK2","MAPK1", "MAPK3","MAPK5","MAPK6","MAPK7","MAPK8","MAPK9","MAPK10","MAPK11","MAPK12","MAPK13","MAPK14","MAPK15"),]
plotExpr1 <- melt(Expr1, id.vars="gene_short_name")
plotExpr2 <- melt(Expr2, id.vars="gene_short_name")
###############
# QC QC QC
file1
file2
cell_line
drug1
drug2
design
####################################################
########## FOR checking slope only #################
design.mat <- cbind(1,1:6)
response.mat <- t(Expr1[,3:8])
reg <- lm.fit(design.mat, response.mat)$coefficients
Expr1 <- cbind(Expr1, t(reg))
names(Expr1)[-(1:8)] <- c("Expr1_Intercept","Expr1_Slope")
Expr1_ord<-Expr1[order(-Expr1$Expr1_Slope),]
response.mat <- t(Expr2[,3:8])
reg <- lm.fit(design.mat, response.mat)$coefficients
Expr2 <- cbind(Expr2, t(reg))
names(Expr2)[-(1:8)] <- c("Expr2_Intercept","Expr2_Slope")
Expr2_ord<-Expr2[order(-Expr2$Expr2_Slope),]
###################################################
# testing ########## for all genes
conc=c("0.0","0.5","2.0","5.0","10.0","20.0")
names(Expr1)[3:8]<-conc
names(Expr2)[3:8]<-conc
pdf3=paste(outdir, "/", "Chart_", "MAPKs_", drug1, "_", cell_line, ".pdf", sep="" )
pdf(pdf3,height=15, width=10)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
row=4
coln=2
par(mfrow=c(row,coln))
for (i in 1:length(Expr1$gene_short_name)) {
gene<-(as.character(Expr1$gene_short_name[i]))
y1<-as.numeric(Expr1[i,][3:8])
y2<-as.numeric(Expr2[i,][3:8])
x<-as.numeric(names(Expr1)[3:8])
low=min(y1,y2)
high=max(y1,y2)
col1="red"
col2="dark green"
des=paste(drug1," (",col1, ") vs ", drug2," (",col2,")")
if ( low == high )
{
print("skip!")
} else {
plot(x,y1,type="l",col=col1, ylim=range(low,high), xlab="", ylab="")
par(new=TRUE)
plot(x,y2,type="l",col=col2, ylim=range(low,high), xlab="", ylab="")
title(main = gene, sub = des, xlab = "Drug conc", ylab = "Relative Norm. Gene Expr", cex=0.5)
fit1 <- lm(y1 ~ x)
fit2 <- lm(y2 ~ x)
# abline(fit1, lty=1)
# abline(fit2, lty=5)
summary(fit1)
summary(fit2)
c1<-fit1$coefficient[1]
m1<-fit1$coefficient[2]
r1<-summary(fit1)$r.squared
ypos1<-3/4 * ceiling(max(y1) * 2) / 2
eq1<-paste("y =", round(m1,3), "x + ", round(c1,3), " r^2 = ", round(r1,3))
c2<-fit2$coefficient[1]
m2<-fit2$coefficient[2]
r2<-summary(fit2)$r.squared
ypos2<-3/4 * ceiling(max(y2) * 2) / 2
eq2<-paste("y =", round(m2,3), "x + ", round(c2,3), " r^2 = ", round(r2,3))
# Add Legend
legend("topleft",legend=c(drug1,drug2),
text.col=c(col1,col2), lty=1:1, col=c(col1,col2), cex=0.8)
# legend("topright",legend=c(eq1,eq2),
# text.col=c(col1,col2),lty=1:5, col=c(col1,col2), cex=0.8)
}
}
#%%%%%%%%%%%%%%%%%%%%%%%%
dev.off()
|
94fa1a8a76a55fe36ff78fe9a5c296e8cd1d7010
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH19/EX19.19.19/Ex19_19_19.R
|
6b8c00489ff92001230a468c9311e5ca940cfe85
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 361
|
r
|
Ex19_19_19.R
|
#PAGE=449
y=c(1980,1981,1985)
y1=c(29.11,306.5,343.6)
y2=c(30.69,325.6,367.8)
y3=c(28.38,310.4,356.4)
y4=c(58410,520,1082)
y5=c(60360,558,1211)
y6=c(65320,567,1297)
a1=sum(y3*y6)
a2=sum(y1*y6)
a=a1/a2
a=a*100
a=round(a,digits = 1)
cat(a,'%')
c3=(y1+y2)/2
c4=sum(y6*y3)
c5=sum(c3*y6)
c6=c4/c5
c6=c6*100
c6=round(c6,digits = 1)
cat(c6,'%')
|
88616827a58050b670c6ada1c15801691be91cd0
|
2ac95a12f199cbd56d86381c0b0f88c5a51b19f3
|
/R/package.R
|
a89b0921f317007a1f28c8788e408892c370bf6c
|
[] |
no_license
|
ugcd/solarius
|
45664e45eef8aeb8fdbbbae0ba52fa3619a80515
|
301361a6f83ab1947c2c8b2452328951a83b4849
|
refs/heads/master
| 2020-04-03T20:03:42.676349
| 2018-09-01T18:18:18
| 2018-09-01T18:18:18
| 24,883,541
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 367
|
r
|
package.R
|
#' solarius: an R package wrapper to SOLAR.
#'
#' @name solariusPackage
#' @docType package
#' @import methods
#' @import plyr
#' @import ggplot2
#' @import data.table
#' @importFrom graphics abline plot
#' @importFrom stats median na.omit ppoints qchisq qnorm quantile residuals sd as.formula update
#' @importFrom utils head read.fwf read.table tail write.table
{}
|
df4c6a3834db86ce58ae0763a054b5707765e0c9
|
07415ac38572229f10a1868523e0e1ee9df302f3
|
/LiveDemo/20150911LiveDemo.R
|
a6627412e17152a190973f92e2c8ae8a9bc7c175
|
[] |
no_license
|
datasci-info/tabf-trading-201509
|
e481bdebb22268f02c75ace6f61283420e9959d1
|
7269142db14f4aa62778d316a02bbfff654c69b8
|
refs/heads/master
| 2018-12-28T23:46:54.936212
| 2015-09-17T11:29:35
| 2015-09-17T11:29:35
| 42,191,173
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,157
|
r
|
20150911LiveDemo.R
|
1 + 1
install.packages("PerformanceAnalytics")
install.packages("foreach")
install.packages("quantstrat", repos="http://R-Forge.R-project.org")
library(quantstrat)
# ?getSymbols
# example(getSymbols)
# getSymbols
getSymbols("^TWII")
View(TWII)
chartSeries(TWII)
getSymbols("0050.TW")
0050.TW
TW0050 <- get("0050.TW")
assign("b",123)
b=123
b<-123
`<-`("c",123)
TW0050 = getSymbols("0050.TW",auto.assign = F)
F == FALSE
T == TRUE
?getSymbols.FI
?getSymbols.FRED
?getSymbols.oanda
getSymbols("USD/TWD",src="oanda",from="2000-01-01")
chartSeries(USDTWD)
debug(getSymbols)
getSymbols("^TWII")
undebug(getSymbols)
debug(download.file)
getSymbols("^TWII")
undebug(download.file)
as.list(body(getSymbols))
as.list(body(download.file))
# print(getsymbols.returned) at the bigenning of BLOCK 14
trace(download.file, quote(print(url)),at=c(1))
getSymbols("^TWII")
untrace(download.file)
f = function(x,y) c(x,y)
f(1,2)
do.call(f,list(3,4))
do.call(f,list(y=3,x=4))
g = function(...) list(...)
g(a=1,b=10,c=31,12,321)
do.call(g, list(a=1,b=10,c=31,12,321))
library(quantmod)
|
3d81a1d480c676a53c57674af59ea716112b5105
|
7b88b32fd29a95935a4b7818a507a53357eeaef6
|
/scripts/G_infiltration_visualization.R
|
3ee5173689d4d4e085d95648d7aabbf986dcd627
|
[] |
no_license
|
lysmarfreitas/tourismdashboard
|
5377b286cb49086695bb61cbdafdf62cc09fb618
|
8636e601c2c22111fdd39dfcfdd4b36b6cef2196
|
refs/heads/master
| 2022-01-20T08:39:43.853247
| 2019-07-03T11:24:50
| 2019-07-03T11:24:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,992
|
r
|
G_infiltration_visualization.R
|
#########################################################################
## Script that makes a leaflet visualization of airbnb infiltration #
## Inputs is neighbourhood geojson #
## Output is a leaflet visualization #
#########################################################################
infiltration <- function(nbr){
pal <- colorNumeric("YlOrRd", c(nbr$dist2hot2019,nbr$dist2hot2018,nbr$dist2hot2017,nbr$dist2hot2016,nbr$dist2hot2015),
na.color = "transparent")
m <- leaflet() %>% setView(lng = 4.898940, lat = 52.382676, zoom = 11)
m %>% addProviderTiles(providers$OpenStreetMap.BlackAndWhite) %>%
addPolygons(data = nbr,color = "#444444", weight = 0.4, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.3,
fillColor = ~pal(dist2hot2015),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE), popup = ~airbnbpopup, group="2015") %>%
addPolygons(data = nbr,color = "#444444", weight = 0.4, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.3,
fillColor = ~pal(dist2hot2016),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE), popup = ~airbnbpopup,group="2016") %>%
addPolygons(data = nbr,color = "#444444", weight = 0.4, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.3,
fillColor = ~pal(dist2hot2017),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE), popup = ~airbnbpopup,group="2017") %>%
addPolygons(data = nbr,color = "#444444", weight = 0.4, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.3,
fillColor = ~pal(dist2hot2018),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE), popup = ~airbnbpopup,group="2018") %>%
addPolygons(data = nbr,color = "#444444", weight = 0.4, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.3,
fillColor = ~pal(dist2hot2019),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE), popup = ~airbnbpopup,group="2019") %>%
addLegend("bottomright", pal = pal, values = nbr$dist2hot2019,
title = HTML("avg. distance <br/>airbnb-hotel"),
opacity = 0.5, na.label = "No beds") %>%
addLayersControl(
baseGroups = c("2015", "2016", "2017", "2018", "2019"),
options = layersControlOptions(collapsed = FALSE))
}
|
e3173f0303ccb0171a03e6862148b42a620dde7e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/prim/examples/prim.box.Rd.R
|
f5b9c5aa17c84ae4a656c40f19a9d7b1a03ef4f6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 877
|
r
|
prim.box.Rd.R
|
library(prim)
### Name: prim.box
### Title: PRIM for multivariate data
### Aliases: prim.box prim.hdr prim.combine
### Keywords: multivariate
### ** Examples
data(quasiflow)
qf <- quasiflow[1:1000,1:2]
qf.label <- quasiflow[1:1000,4]
## using only one command
thr <- c(0.25, -0.3)
qf.prim1 <- prim.box(x=qf, y=qf.label, threshold=thr, threshold.type=0)
## alternative - requires more commands but allows more control
## in intermediate stages
qf.primp <- prim.box(x=qf, y=qf.label, threshold.type=1)
## default threshold too low, try higher one
qf.primp.hdr <- prim.hdr(prim=qf.primp, threshold=0.25, threshold.type=1)
qf.primn <- prim.box(x=qf, y=qf.label, threshold=-0.3, threshold.type=-1)
qf.prim2 <- prim.combine(qf.primp.hdr, qf.primn)
plot(qf.prim1) ## orange=x1>x2, blue x2<x1
points(qf[qf.label==1,], cex=0.5)
points(qf[qf.label==-1,], cex=0.5, col=2)
|
872d0551a9b64a4290b12724bd51fdc7b02259b1
|
57ad5699cd427042bbf93ae9cd7d54af39788f73
|
/R/prox.grouplasso.R
|
567fc626dca620b6f9dbe8846fb0682bb1f3d697
|
[] |
no_license
|
AlePasquini/apg
|
94f9eef7f233d721c663a4956581963d04d537df
|
8900984a8e7b1a469d2b775d56bf07bd192e252b
|
refs/heads/master
| 2020-04-12T04:44:58.238828
| 2016-06-04T01:44:00
| 2016-06-04T01:44:00
| 162,304,614
| 3
| 0
| null | 2018-12-18T14:59:41
| 2018-12-18T14:59:41
| null |
UTF-8
|
R
| false
| false
| 1,662
|
r
|
prox.grouplasso.R
|
#' Proximal operator of the group lasso penalty
#'
#' Computes the proximal operator of the group lasso penalty: \deqn{h(x) =
#' \sum_{g group} w_g ||x[g]||_2 .} Note that the groups should not
#' overlap.
#'
#' @param x The input vector
#' @param t The step size
#' @param opts List of parameters, which can include: \itemize{ \item
#' \code{groups} : a list of groups, each group is just a sequence of indices
#' of the components that form the group (default: all singletons). \item \code{groupweigths} : a vector of weights for the groups. If a single number, all groups have the same weight (default \code{1})
#' }
#' @return The proximal operator of the group lasso, which is a soft-thresholing
#' operator applied to the restriction of the \code{x} to each group.
#'
#' @export
#' @examples
#' x <- rnorm(5)
#' # When groups are all the singletons we recover the L1 (lasso) penalty
#' prox.grouplasso(f,1,list(groups=as.list(seq(length(f)))))
#' prox.elasticnet(f,1,list(lambda=1,alpha=1))
prox.grouplasso <- function(x, t, opts=list(groups=as.list(seq(length(xc))))) {
if (!exists("groups",where=opts))
stop("No list of groups provided for the group lasso.")
ngroups <- length(opts$groups)
if (!exists("groupweights",where=opts)) {
w <- rep(t, ngroups)
} else {
if (length(opts[["groupweights"]]) == ngroups) {
w <- t*opts[["groupweights"]]
} else {
w <- t*rep(opts[["groupweights"]][1], ngroups)
}
}
u <- x
for (i in seq(ngroups)) {
g <- opts[["groups"]][[i]]
u[g] <- max(0, 1 - w[i] / norm_vec(x[g]) ) * x[g]
}
return(u)
}
|
c7bc8c0d3d5980171e5047bdf7b338b73c3ca2bc
|
0780299cc9ab6837b046e78acea68b5214e058df
|
/Chapter06.R
|
50cf303462d7b5e4aece7d0b90941e7fcce432be
|
[] |
no_license
|
IshidaMotohiro/WebScraping
|
7d208338b41a3884a3f93fbfc6fbb0c9cc5a0e23
|
852c1cc68d71bdaf63a66ada7b00f209d5d6f56b
|
refs/heads/master
| 2021-01-14T08:10:40.008651
| 2020-02-20T00:40:02
| 2020-02-20T00:40:02
| 82,005,513
| 6
| 1
| null | 2020-02-20T00:14:16
| 2017-02-15T01:09:53
|
R
|
UTF-8
|
R
| false
| false
| 4,398
|
r
|
Chapter06.R
|
# 『Rによるスクレイピング入門』 第6章
## ----chapter06 section-029 身近なオープンデータ
### 横浜市のオープンデータ
library(rvest)
#### データ操作のためにmagrittr, dplyrを読み込む
library(magrittr)
library(dplyr)
# df.table <- read_html("http://www.city.yokohama.lg.jp/seisaku/seisaku/opendata/catalog.html") %>%
# html_table(header = TRUE) %>%
# extract(1)
# head(df.table)
# df.table %>% filter(grepl("区別将来人口推計", データ名))
#### 区別将来人口推計(xls)
#### http://archive.city.yokohama.lg.jp/seisaku/seisaku/chousa/kihou/175/data.html
library(rio)
#### ファイルを読み込み、必要な列だけを選択する
df.pop.forecast <-
rio::import("http://archive.city.yokohama.lg.jp/seisaku/seisaku/chousa/kihou/175/opendata/kihou175-p15-z6.xls",
skip = 5,
range = "G6:M24") %>%
rename(Ward = `...1`)
# df.pop.forecast %<>% select(Ward = `NA`, everything())
head(df.pop.forecast)
library(tidyr)
df.pop.forecast %<>%
tidyr::pivot_longer(cols = -1,
names_to = "year",
values_to = "value")
head(df.pop.forecast)
library(ggplot2)
#### 日本語フォントを表示させるための設定
quartzFonts(YuGo = quartzFont(rep("IPAexGothic", 4)))
theme_set(theme_classic(base_size = 12, base_family = "IPAexGothic"))
df.pop.forecast %>%
ggplot(aes(year, value, group = Ward, color = Ward)) +
geom_line() +
xlab("年") + ylab("将来人口") +
ggtitle("横浜市 区別将来人口推計")
### 郵便局
download.file(url = "http://www.post.japanpost.jp/zipcode/dl/roman/ken_all_rome.zip",
destfile = "ken_all_rome.zip")
unzip(zipfile = "ken_all_rome.zip", overwrite = TRUE)
#### 解凍したファイルの存在を確認します
path2file <- list.files(getwd(), pattern = ".csv$", full.names = TRUE, ignore.case = TRUE)
path2file
library(readr)
df.address <- read_csv("KEN_ALL_ROME.CSV",
locale = locale(encoding = "cp932"),
col_names = c("郵便番号", "都道府県名", "市区町村名", "町域名", "都道府県名roman", "市区町村名roman", "町域名roman"))
df.address %>%
select(都道府県名, 市区町村名) %>%
unique() %>%
count(都道府県名, sort = TRUE)
## ----chapter06 section-030 LinkDataの活用事例
### 福井県福井市地区別人口の推移
source("http://linkdata.org/api/1/rdf1s4022i/R")
class(chikubetsu201601)
library(dplyr)
glimpse(chikubetsu201601)
library(tidyr)
chikubetsu201601 %>% pivot_longer(
cols = chikubetsu201601,
names_to = "obs_month",
values_to = "row_num"
) %>% head()
library(purrr)
df.chikubetu <- list(chikubetsu201601,chikubetsu201602, chikubetsu201603, chikubetsu201604,
chikubetsu201605, chikubetsu201606, chikubetsu201607, chikubetsu201608,
chikubetsu201609, chikubetsu201610)
#### chikubetsu で始まる変数名をpivot_longer()のキーとして扱う
df.chikubetu.bind <- df.chikubetu %>% map_df(pivot_longer, names_to = "obs_month", values_to = "row_num", cols = starts_with("chikubetsu"))
dim(df.chikubetu.bind)
unique(df.chikubetu.bind$地区名称)
d <- df.chikubetu.bind %>%
filter(地区名称 %in% c("社北", "麻生津", "円山")) %>%
mutate(obs_month = paste0(substr(obs_month, start = 15, stop = 16), "月")) %>%
select(地区名称, 合計, obs_month)
library(ggplot2)
quartzFonts(YuGo = quartzFont(rep("IPAexGothic", 4)))
theme_set(theme_classic(base_size = 12, base_family = "IPAexGothic"))
d %>% ggplot(aes(obs_month, 合計, group = 地区名称)) +
geom_point() +
geom_line(aes(linetype = 地区名称)) +
xlab("月") + ylab("世帯数合計") +
ggtitle("福井市内3地区の人口数の推移")
### 富山県砺波市へのふるさと納税者コメント
source("http://linkdata.org/api/1/rdf1s4456i/R")
class(hometown_donation_comment)
library(RMeCab)
hometown_donation_comment$コメント[1] %>% RMeCabC() %>% unlist()
comment.morph <- hometown_donation_comment %>% docDF("コメント", type = 1, pos = "名詞")
head(comment.morph[1:8])
comment.morph$count <- comment.morph %>%
select(starts_with("Row")) %>%
t() %>%
as.data.frame() %>% map_int(sum)
comment.morph %>%
arrange(desc(count)) %>%
select(TERM, count) %>%
filter(count >= 10)
|
1adefb0129cc0825c692120aec7171831758b7f5
|
6b76309319c7effce72f1baeec25072a9754bcdc
|
/code/1Save_Samples.R
|
2c940a1089bd86c34abd0086e269fbe927008dd4
|
[] |
no_license
|
sardination/policy-responsiveness
|
ac4ee5841d7ba688a13e5dc556b679a52987279d
|
10be9b944dd32b4139b888d390c4f7cc940c3c54
|
refs/heads/master
| 2022-06-08T09:37:45.036182
| 2020-05-06T21:11:26
| 2020-05-06T21:11:26
| 239,045,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,409
|
r
|
1Save_Samples.R
|
# setwd("../") ## replication directory
(rep.dir <- getwd())
# Selection of social and economic opinion data to use
mass.social.name <-
"170628_1936-2014_social_varyingitems_blackurban_nocovariates"
mass.econ.name <-
"170915_1936-2014_economic_varyingitems_blackurban_nocovariates_1000iter_DC"
### Read
# Read in input data, including social opinion, economic opinion, social policy liberalism measures,
# economic policy liberalism measures, and public party identification information for states by year
setwd(paste0(rep.dir, "/input-data"))
# Read in the opinion data specified above
social.opinion.samples <-
mget(load(paste0("dgirt_output_", mass.social.name, ".RData")))
economic.opinion.samples <-
mget(load(paste0("dgirt_output_", mass.econ.name, ".RData")))
# Read in policy measures over time. Each variable in the dataframe of these measures
# is named with two "indices": the year (1-indexed) and the state (1-indexed by abbreviation).
# Each variable contains several rows, each representing another iteration of the policy
# liberalism measure algorithm (since the measure calculation involves executing a
# random walk).
social.policy.samples <-
mget(load("dynamic.irt.continuous.evolving.diff.stan-social-10003.Rdata"))
economic.policy.samples <-
mget(load("dynamic.irt.continuous.evolving.diff.stan-economic-1000.Rdata"))
# Read in state public party identification information from a Stata data file into an R dataframe.
# This dataframe contains party identification information for each year for each state.
pid.samples <- foreign::read.dta("StYrPID-poststratified-posterior-170918.dta")
### Transform
# Convert the sample sets into a dataframe of relevant variables, instead of being
# dataframes with a single column containing all the relevant information.
social.opinion.samples.df <- as.data.frame(social.opinion.samples$dgirt_out)
economic.opinion.samples.df <- as.data.frame(economic.opinion.samples$dgirt_out)
social.policy.samples.df <- as.data.frame(social.policy.samples$stan.cmb)
economic.policy.samples.df <- as.data.frame(economic.policy.samples$stan.cmb)
library(plyr)
library(dplyr)
library(reshape2)
# Verify that the list of state abbreviations is in ascending alphabetical order
stpos <- c("AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DE", "FL", "GA", "HI",
"IA", "ID", "IL", "IN", "KS", "KY", "LA", "MA", "MD", "ME", "MI",
"MN", "MO", "MS", "MT", "NC", "ND", "NE", "NH", "NJ", "NM", "NV",
"NY", "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT",
"VA", "VT", "WA", "WI", "WV", "WY")
length(stpos)
identical(stpos, sort(stpos))
# From the social policy liberalism metrics, select all theta measures, then, using
# melt, assign each numerical theta measure to its own row with its relevant value.
# Group the rows by theta measure name and then add another column `iteration` that
# records the iteration number for each value of each theta measure.
# Finally, add in two more columns: Year - take the year "index" provided in each theta
# variable name in each row and convert it to the actual year; StPO - take the state
# "index" provided in each theta variable name in each row and convert it to the actual
# state abbreviation.
social.policy.samples.df <- social.policy.samples.df %>%
select(contains("theta")) %>%
melt %>%
group_by(variable) %>%
mutate(iteration = row_number(value)) %>%
arrange(iteration) %>%
ungroup %>%
mutate(Year = as.integer(gsub("theta\\[(.*),.*\\]", "\\1", variable)),
Year = factor(Year, labels=as.character(1935:2014)),
StPO = as.integer(gsub("theta\\[.*,(.*)\\]", "\\1", variable)),
StPO = factor(StPO, labels=stpos))
# Repeat the process executed above for the social policy liberalism metrics applied
# to the economic policy liberalism metrics.
economic.policy.samples.df <-
economic.policy.samples.df %>%
select(contains("theta")) %>%
melt %>%
group_by(variable) %>%
mutate(iteration = row_number(value)) %>%
arrange(iteration) %>%
ungroup %>%
mutate(Year = as.integer(gsub("theta\\[(.*),.*\\]", "\\1", variable)),
Year = factor(Year, labels=as.character(1935:2014)),
StPO = as.integer(gsub("theta\\[.*,(.*)\\]", "\\1", variable)),
StPO = factor(StPO, labels=stpos))
# Update the social policy liberalism measure dataframe to only contain the columns
# that indicate the actual year (Year), actual state abbreviation (StPO),
# liberalism measure (Liberalism), and iteration (Iteration). All extraneous
# column names are omitted. This dataframe is then sorted first by iteration,
# then year, and then state abbreviation.
social.policy.samples.df <- social.policy.samples.df %>%
select(c(StPO=StPO, Year=Year, Liberalism=value, Iteration=iteration))
social.policy.samples.df <- social.policy.samples.df %>%
arrange(Iteration, Year, StPO)
# Repeat the process executed above for the social policy liberalism dataframe applied
# to the economic policy liberalism dataframe.
economic.policy.samples.df <- economic.policy.samples.df %>%
select(c(StPO=StPO, Year=Year, Liberalism=value, Iteration=iteration))
economic.policy.samples.df <- economic.policy.samples.df %>%
arrange(Iteration, Year, StPO)
# From the party identification data, remove all entries that have missing state
# abbreviations and then rename `iterations` column to `Iteration`,
# add a column `StPO` that contains the state abbreviation, and convert the
# `Year` strings to integers. Then convert the dataframe to rows of
# Year, StPo, and Iteration with the respective proportions of the sampled
# population that identify as democrat, independent, and republican under
# the column headers `DemPID`, `IndPID`, and `RepPID`, respectively.
pid.samples.df <- pid.samples %>%
filter(!is.na(StPOAbrv)) %>%
mutate(Iteration = iterations,
StPO = factor(StPOAbrv, levels=stpos),
Year = as.integer(Year)) %>%
group_by(Year, StPO, Iteration) %>%
summarise(DemPID = pid_total[PID=="D"],
IndPID = pid_total[PID=="I"],
RepPID = pid_total[PID=="R"])
# Get the public "liberalism" measure by dividing the proportion of
# the population that is democrat-identifying by the total proportion of
# the population that identifiies as either democrat or republicam, and then
# store this value in the `Dem2PID` column and sort the dataframe by
# Iteration, Year, and finally StPO (state abbreviation).
pid.samples.df <- pid.samples.df %>%
mutate(Dem2PID = DemPID / (DemPID + RepPID)) %>%
arrange(Iteration, Year, StPO)
## Write
# Write the modified social opinion, economic opinion, social policy, and economic
# policy liberalism value dataframes as well as the modified public party
# identification dataframe into intermediate data store files.
setwd(paste0(rep.dir, "/intermediate-data"))
foreign::write.dta(
social.opinion.samples.df,
paste0("samples", mass.social.name, ".dta"))
foreign::write.dta(
economic.opinion.samples.df,
paste0("samples", mass.econ.name, ".dta"))
foreign::write.dta(
social.policy.samples.df,
"samples_dynamic_irt_continuous_evolving_diff_stan-social-10003.dta")
foreign::write.dta(
economic.policy.samples.df,
"samples_dynamic_irt_continuous_evolving_diff_stan-economic-1000.dta")
foreign::write.dta(
pid.samples.df,
"samples_PID.dta")
# q()
|
9da4902cbd084fdf0a11cea3546d02e3f8109716
|
dd6ae9d277bccd00e752702708a6571694f7c83c
|
/Script Exercises 1.R
|
55f5df3eef6f2423bc026d17f868039dffbc89c9
|
[] |
no_license
|
evilla-19/marta-eva-R
|
87a1f01603ed9b90fbecd3f8c0d36e976d37b826
|
462bd1fbcdb90de7581bcc49eec5c27050e9bce4
|
refs/heads/master
| 2022-11-13T07:33:31.020174
| 2020-06-23T15:40:44
| 2020-06-23T15:40:44
| 263,638,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,336
|
r
|
Script Exercises 1.R
|
# Session setup: clear environment, plots, and console, then load packages.
# NOTE(review): rm(list = ls()) in scripts is discouraged (wipes the user's
# session); kept here because these are interactive exercises.
rm(list = ls())                 # Clear environment
if (dev.cur() > 1) dev.off()    # Clear plots -- guarded: a bare dev.off()
                                # errors when no graphics device is open
cat("\014")                     # Clear console (RStudio form-feed)
library("tidyverse")
library("pacman")
pacman::p_load(pacman, rio)

# Exercise 1 -- read a small tab-separated file two ways and inspect it.
small.file <- read_tsv("small_file.txt")
rio_txt <- import("~/Desktop/small_file.txt")
head(rio_txt)
# Keep the rio version for the rest of the exercises.
small.file <- rio_txt
class(small.file$Category)
class(small.file$Length)
# Category is categorical -- store it as a factor.
small.file$Category <- as.factor(small.file$Category)
class(small.file$Category)

# Load the child variants data and plot coverage per chromosome.
rio_csv <- import("~/Desktop/Child_Variants.csv")
child <- rio_csv
head(child)
ggplot(child, aes(x = CHR, y = COVERAGE)) + geom_point()
# Exercise 2 -- explore the genomes dataset.
genomes <- import("~/Desktop/genomes.csv")

# Organisms with more than 40 chromosomes.
genomes %>%
  filter(Chromosomes > 40) %>%
  select(Organism, Chromosomes)

# Organisms with at least one plasmid and more than one chromosome.
genomes %>%
  filter(Plasmids > 0, Chromosomes > 1) %>%
  select(Organism, Chromosomes, Plasmids)

# Ten largest genomes.
genomes %>%
  arrange(desc(Size)) %>%
  slice_head(n = 10) %>%
  select(Organism, Size)

# Drop the Groups column.
genomes %>% select(-Groups)

# Columns whose name starts with "O".
genomes %>% select(starts_with("O"))

# Number of distinct taxonomic groups.
genomes %>% distinct(Groups) %>% nrow()

# Keep, per chromosome count, the smallest genome (arrange first so
# distinct() retains the first = smallest occurrence).
genomes <- genomes %>%
  arrange(Size) %>%
  filter(Chromosomes > 0) %>%
  distinct(Chromosomes, .keep_all = TRUE)

ggplot(genomes, aes(x = Chromosomes, y = Size)) + geom_point()
ggplot(genomes, aes(x = Chromosomes, y = log(Size))) + geom_point()
# Exercise 3 -- explore the cancer statistics dataset.
rio_csv <- import("~/Desktop/cancer_stats.csv")
cancer <- rio_csv

# Digestive-system sites with more female than male cases.
cancer %>%
  filter(Class == "Digestive System", `Female Cases` > `Male Cases`)

# Rows with missing male / female case counts.
# FIX: the originals tested is.na('Male Cases') -- a string literal, which is
# never NA, so the filters always returned zero rows. Column names with
# spaces must be backticked, not quoted.
cancer %>%
  filter(is.na(`Male Cases`))
cancer %>%
  filter(is.na(`Female Cases`))

# Male death rate per site, highest first.
# NOTE(review): this is deaths/cases, i.e. a fatality rate, despite the
# name "SurvivalRate" -- kept for compatibility with the exercise.
cancer %>%
  mutate(SurvivalRate = `Male Deaths` / `Male Cases`) %>%
  arrange(desc(SurvivalRate)) %>%
  select(Class, Site, SurvivalRate)

# Sites whose name contains "acute".
cancer %>%
  filter(str_detect(Site, "acute"))

# Soft-tissue sites.
cancer %>%
  filter(Class == "soft tissue") %>%
  select(Class, Site)

# Short site names (4 characters or fewer).
cancer %>%
  filter(str_length(Site) <= 4)

# Site names ending in "y".
cancer %>%
  filter(endsWith(Site, "y"))
#Exercise 4 -- reshape three untidy datasets into long form.
rio_csv<- import ("~/Desktop/tidy_data1.csv")
tidy_data1 <- rio_csv
rio_csv<- import ("~/Desktop/tidy_data2.csv")
tidy_data2 <- rio_csv
rio_csv<- import ("~/Desktop/tidy_data3.csv")
tidy_data3 <- rio_csv
#tidy1: stack every column into key/value pairs, then drop missing values.
# NOTE(review): gather() is superseded by pivot_longer(); left unchanged
# because the two differ in output row order.
tidy_data1n<- tidy_data1 %>%
  gather(key, value)
tidy_data1n %>%
  filter(!is.na(value))
#tidy2: columns A..E become a "sample" key with a "value" measurement.
tidy_data2 %>%
  pivot_longer(cols=A:E, names_to = "sample", values_to = "value")-> tidy_data2
tidy_data2 %>%
  filter(!is.na(value))
#tidy3: WT/KO replicate columns become sample/value pairs.
tidy_data3n <-tidy_data3 %>%
  pivot_longer(cols=WT_1:KO_3, names_to = "sample", values_to = "value")
# One annotation row per probe (pivoting duplicated the metadata columns).
tidy_data3n %>%
  select(Probe_ID:Symbol)%>%
  distinct(Probe_ID, .keep_all = TRUE)
#back to genomes data: split the semicolon-separated taxonomy and drop
# quoted organism names and "Other" kingdoms/classes.
genomes %>%
  separate(Groups, c("Domain", "Kingdom", "Class"), ";") %>%
  filter(! str_detect(Organism, "'")) %>%
  filter(! (Kingdom=="Other")) %>%
  filter(! (Class=="Other"))
# Exercise 5 -- derived columns and grouped summaries.

# Total deaths and cases across sexes.
cancer %>%
  mutate(Deaths = `Male Deaths` + `Female Deaths`, Cases = `Male Cases` + `Female Cases`)

# Classify each variant as SNP (single base on both sides) or INDEL.
child.1 <- child %>%
  mutate(Type = if_else(str_length(REF) == 1 & str_length(ALT) == 1, "SNP", "INDEL"))

# Mean and SD of Length per category.
small.file %>%
  group_by(Category) %>%
  summarise(meanLength = mean(Length), sdLength = sd(Length))

# Novel SNPs (not in dbSNP): mean coverage per gene, genes with >2 hits.
child.2 <- child.1 %>%
  filter(dbSNP == ".") %>%
  filter(Type == "SNP") %>%
  group_by(GENE) %>%
  summarise(COVERAGE = mean(COVERAGE), count = n()) %>%
  filter(count > 2) %>%
  arrange(desc(COVERAGE))

# Mean value per sample and chromosome.
# FIX: the original select(Chr, sample) dropped the `value` column before
# summarising mean(value), which errors; `value` must be kept.
tidy_data2 %>%
  select(Chr, sample, value) %>%
  group_by(sample, Chr) %>%
  summarise(valuemean = mean(value))

# Absolute male/female case difference, smallest first.
cancer %>%
  mutate(difMF = abs(`Male Cases` - `Female Cases`)) %>%
  select(Class, difMF) %>%
  arrange(difMF)

# Largest male/female death-rate differences.
cancer %>%
  filter(!(is.na(`Male Cases`) | is.na(`Female Cases`))) %>%
  mutate(MaleRate = `Male Deaths` / `Male Cases`, FemaleRate = `Female Deaths` / `Female Cases`, difFM = abs(MaleRate - FemaleRate)) %>%
  select(Class, Site, difFM) %>%
  arrange(desc(difFM))

# Survival proportion per site.
# FIX: `Cases - Deaths/Cases` divided only Deaths by Cases (operator
# precedence); the survival fraction is (Cases - Deaths) / Cases.
cancer %>%
  mutate(Cases = `Male Cases` + `Female Cases`, Deaths = `Male Deaths` + `Female Deaths`, survival = (Cases - Deaths) / Cases) %>%
  group_by(Class) %>%
  arrange(desc(survival)) %>%
  select(Class, Site, survival)

# Variant counts per chromosome and gene, most variants first.
child %>%
  group_by(CHR, GENE) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
#Exercise 9 -- DNA methylation analysis and joins.
rio_csv <-import ("~/Desktop/dna_methylation.csv")
dna_methylation <- rio_csv
# Spread Meth/Unmeth counts into columns and compute percent methylation.
dna_methylation2<- dna_methylation %>%
  pivot_wider(names_from = State, values_from = Count) %>%
  mutate(metpercent=(100*Meth/(Meth+Unmeth)))
# Mean methylation per gene and group.
dna_methylation2 %>%
  group_by(Gene, Group)%>%
  summarise(meanMet= mean(metpercent))
rio_txt<-import ("~/Desktop/methylation_annotation.txt")
rio_txt -> methylation_annotation
# Rename to match the annotation's key column, then join the annotation on.
dna_methylation %>%
  rename(Gene_name=Gene) %>%
  left_join(methylation_annotation)
# Count each ALT>REF substitution pattern in the child variants.
child %>%
  select(ALT,REF) %>%
  mutate(allvariants=str_c(ALT,">",REF)) %>%
  group_by(allvariants) %>%
  summarise(count=n()) %>%
  arrange(desc(count))
# Normalise Length within each category by subtracting the category minimum
# (right_join re-attaches the per-category minimum to every row).
small.file1 <- small.file%>%
  group_by(Category)%>%
  summarise(minLength = min(Length))%>%
  right_join(small.file)%>%
  mutate(normalizedLength= Length - minLength)
ggplot(small.file1, aes (x=Category, y=normalizedLength)) + geom_point()
#Exercise 7
# Return the single lowest-quality call (QUAL, GENE) for a given gene.
#
# Args:
#   genename: gene symbol to look up.
#   data:     variants table to search; defaults to the global `child`
#             for backward compatibility with the original, which read
#             the global directly.
lowestqual <- function(genename, data = child) {
  data %>%
    filter(GENE == genename) %>%
    arrange(QUAL) %>%
    select(QUAL, GENE) %>%
    slice(1)
}
lowestqual("AGRN")
# Return the lowest-QUAL row of a table; with a grouped table, the lowest
# row per group (slice(1) is applied within each group).
lowestqual2 <- function(tbl) {
  tbl %>%
    arrange(QUAL) %>%
    slice(1)
}
# Overall lowest-quality variant.
child %>% lowestqual2()
# Lowest-quality variant per gene, ordered by quality.
child %>%
  group_by(GENE) %>%
  lowestqual2() %>%
  arrange(QUAL)
|
05fc47704cc148670ee54a64c22583f011aac2c0
|
dacf6605e992a22e75601c35c1367b95ad9ef732
|
/7.RandomWalk/RandomWalk.R
|
a8e29e7ecb37f57e243de68020ed6ad53b80fd8f
|
[] |
no_license
|
BabyTong/biocode_matlab_R
|
e279112eca80bc2bc5b997b2304363e4f0dea6ac
|
f14dd20cc05a3943d378bf7a056ce9410c195d50
|
refs/heads/master
| 2022-01-12T05:36:44.956235
| 2019-06-17T01:41:11
| 2019-06-17T01:41:11
| 83,211,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,004
|
r
|
RandomWalk.R
|
##############################################################
# Random walk on a graph (random walk with restart)
##############################################################
##RandomWalk function
library(igraph)
# Subroutine: run a random walk with restart over an igraph object.
#
# Args:
#   igraphM:      igraph graph; if EdgeWeight=TRUE its edges must carry a
#                 "weight" attribute.
#   VertexWeight: restart (seed) weight per vertex, passed to rw() as p0.
#   EdgeWeight:   use the weighted adjacency matrix (TRUE) or the
#                 unweighted one (FALSE).
#   gamma:        restart probability.
# Returns: vector of steady-state scores (drop() removes the matrix dim).
RandomWalk2igraph<-function(igraphM,VertexWeight,EdgeWeight=TRUE,gamma=0.7){
  if(EdgeWeight==TRUE){
    adjM<-get.adjacency(igraphM,attr="weight") # convert igraph object to a weight matrix
  }
  if(EdgeWeight==FALSE){
    adjM<-get.adjacency(igraphM) # convert igraph object to a conventional matrix
  }
  res<-rw(adjM,VertexWeight,gamma)
  # print(Sys.time());
  return(drop(res))
}
# Random walk with restart. Iterates
#   p <- (1 - gamma) * t(W_norm) %*% p  +  gamma * p0
# until the L1 change between successive iterates drops to 1e-6 or below.
#
# Args:
#   W:     (possibly sparse) adjacency/weight matrix; rows are normalised
#          to sum to 1, rows with zero total weight are left as zeros.
#   p0:    restart vector (seed weights); normalised to sum to 1.
#   gamma: restart probability.
# Returns: an N x 1 matrix of steady-state visiting probabilities.
rw <- function(W, p0, gamma) {
  restart <- t(p0) / sum(p0)       # normalised restart distribution (1 x N)
  A <- as.matrix(W)
  row.totals <- rowSums(A)
  dangling <- row.totals == 0
  A[dangling, ] <- 0               # nodes with no outgoing weight stay zero
  if (any(!dangling)) {
    A[!dangling, ] <- sweep(A[!dangling, , drop = FALSE], 1,
                            row.totals[!dangling], "/")
  }
  A <- t(A)                        # transpose: columns index "from" nodes
  prob <- restart
  repeat {
    nxt <- t((1 - gamma) * A %*% t(prob)) + gamma * restart
    converged <- sum(abs(nxt - prob)) <= 1e-6
    prob <- nxt
    if (converged) break
  }
  scores <- t(prob)                # return as an N x 1 column
  rownames(scores) <- NULL
  scores
}
# Main program.
# The file `net` has three columns: the first two define your network
# (edge endpoints), the third is the edge weight.
# `seed_id` holds the seed nodes you selected.
# `score` is the final score of every node once the random walk finishes.
library(igraph)
edge1<-as.character(net[,1])
edge2<-as.character(net[,2])
weight<-as.numeric(net[,3])
relationship<-data.frame(edge1,edge2,weight)
# One vertex row per unique node, with an initial seed weight of 0.
point_0<-data.frame(name=unique(union(edge1,edge2)),size=0)
rownames(point_0)<-point_0[,1]
g1<-graph.data.frame(relationship,directed = FALSE,vertices = point_0)
adjweight<-get.adjacency(g1,sparse = T,attr = 'weight')
adjnotweight<-get.adjacency(g1,sparse = T)
# adjweight:    weighted adjacency matrix
# adjnotweight: unweighted adjacency matrix
point<-point_0
# NOTE(review): `net` and `seed_id` must be supplied by the caller before
# sourcing this script; the self-assignment below only asserts that.
seed_id<-seed_id
point[seed_id,]$size<-1
score<-rw(W =adjweight,p0 = point[,2],gamma = 0.85)
# `score` is the final per-node score after the random walk converges.
|
23f82ac0ee6e5355be24e43fe67322843d024a15
|
ae889cb569a96b4e71e7828966248bbce04963ff
|
/R/hydrosanity_timeperiod.R
|
854fcd6c5b4a10cbcc199d51cc1ad2d77c3defe0
|
[] |
no_license
|
cran/hydrosanity
|
b1aac0de2ab07d1e7937de6c37867e0e418a2355
|
1428e15f158b6489314184ef0cc2aa2d90f43c33
|
refs/heads/master
| 2020-04-27T02:52:29.043766
| 2007-12-08T00:00:00
| 2007-12-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,180
|
r
|
hydrosanity_timeperiod.R
|
## Hydrosanity: an interface for exploring hydrological time series in R
##
## Copyright (c) 2007 Felix Andrews <felix@nfrac.org>, GPL
# Refresh the "time period / scope" page widgets from the current project
# state (`hsp`): the chosen year start, analysis time period, analysis
# region, and -- when data is loaded -- the overall temporal and spatial
# extent of the dataset. Clears the page's pending-update flag.
updateTimePeriodPage <- function() {
  if (!is.null(hsp$yearStart)) {
    theWidget("scope_yearstart_combobox")$setActive(hsp$yearStart - 1)
  }
  if (is.null(hsp$timePeriod)) {
    theWidget("timeperiod_chosenperiod_entry")$setText("")
  } else {
    timePeriodString <- paste(format(hsp$timePeriod), collapse=" to ")
    theWidget("timeperiod_chosenperiod_entry")$setText(timePeriodString)
    theWidget("timeperiod_updateperiod_button")$setSensitive(FALSE)
  }
  if (is.null(hsp$region)) {
    theWidget("scope_region_x_entry")$setText("")
    theWidget("scope_region_y_entry")$setText("")
  } else {
    regionXString <- paste(format(hsp$region$xlim), collapse=" to ")
    regionYString <- paste(format(hsp$region$ylim), collapse=" to ")
    theWidget("scope_region_x_entry")$setText(regionXString)
    theWidget("scope_region_y_entry")$setText(regionYString)
    theWidget("scope_set_region_button")$setSensitive(FALSE)
  }
  # Page is now in sync with state (was `F`; use the full literal).
  StateEnv$update$timeperiod <- FALSE
  StateEnv$win$present()
  if (length(hsp$data) == 0) {
    return()
  }
  # overall time period of the loaded timeblobs
  wholePeriod <- timelim.timeblobs(hsp$data)
  wholePeriodString <- paste(format(wholePeriod), collapse=" to ")
  theWidget("timeperiod_overallperiod_entry")$setText(wholePeriodString)
  # overall spatial extent, from items that carry a 2-element location.xy
  loc <- lapply(hsp$data, attr, "location.xy")
  ok <- (sapply(loc, length) == 2)
  if (any(ok)) {
    tmp.locs <- sapply(hsp$data[ok], attr, "location.xy")
    tmp.locs <- data.frame(x=tmp.locs[1,], y=tmp.locs[2,])
    wholeX <- round(range(tmp.locs$x), digits=3)
    wholeY <- round(range(tmp.locs$y), digits=3)
    regionXString <- paste(format(wholeX), collapse=" to ")
    regionYString <- paste(format(wholeY), collapse=" to ")
    theWidget("scope_overall_x_entry")$setText(regionXString)
    theWidget("scope_overall_y_entry")$setText(regionYString)
  }
}
# GUI handler: build a site map plot call from the selected site-list
# database and open it in a playwith window with a region-selection tool.
.hs_on_scope_viewdbsitemap_button_clicked <- function(button) {
  freezeGUI(echo.to.log=F)
  on.exit(thawGUI())
  # Read the user's choices from the widgets.
  siteListFile <- theWidget("scope_sitelist_filechooserbutton")$getFilename()
  siteListFormatIndex <- theWidget("scope_sitelist_format_combobox")$getActive()+1
  dataYears <- theWidget("scope_datayears_spinbutton")$getValue()
  doInterpElev <- theWidget("scope_sitemap_elevation_checkbutton")$getActive()
  if (is.null(siteListFile)) {
    errorDialog("Choose the site list file.")
    return()
  }
  addLogComment("Generate site map from database and select region")
  # Build `tmp.sites <- <format-reader>(...)` as an unevaluated call so it
  # can be both logged and executed via guiDo().
  select.call <- call(SITELIST.FORMATS[[siteListFormatIndex]])
  select.call$siteListFile <- siteListFile
  select.call$timelim <- quote(hsp$timePeriod)
  select.call$min.years <- dataYears
  select.assign.call <- quote(tmp.sites <- foo)
  select.assign.call[[3]] <- select.call
  guiDo(call=select.assign.call)
  # Base plot: site locations, plotting symbol by their `ok` flag.
  plot.call <- quote(
    xyplot(y ~ x, tmp.sites, aspect="iso", pch=ifelse(tmp.sites$ok, 21, 4))
  )
  plot.call$panel <- function(x, y, z, labels, ...) {
    panel.levelplot.interp(x, y, z, col.regions=grey.colors(100, start=0.9, end=0.6))
    panel.worldmap()
    panel.rivers()
    panel.cities()
    panel.points(x, y, ...)
    if (FALSE) panel.text(x, y, labels=labels)
    if (!is.null(hsp$catchment))
      sp.polygons(hsp$catchment)
  }
  # turn layer off by wrapping it in quote()
  # (disables the elevation-interpolation layer by wrapping the panel's
  # first statement in `if (FALSE) ...`)
  if (!doInterpElev) body(plot.call$panel)[[2]] <-
    call('if', FALSE, body(plot.call$panel)[[2]])
  plot.call$z <- quote(tmp.sites$elev)
  plot.call$labels <- quote(row.names(tmp.sites))
  if (!is.null(hsp$region)) {
    plot.call$xlim <- quote(hsp$region$xlim)
    plot.call$ylim <- quote(hsp$region$ylim)
  } else {
    plot.call$prepanel <- quote(prepanel.extend.10)
  }
  addToLog(paste(deparse(plot.call), collapse="\n"))
  playwith(plot.call=plot.call, title="site map",
    bottom=list(setRegionTool),
    eval.args="^hsp$", invert.match=T, on.close=restoreHS)
  guiDo(rm(tmp.sites))
  setStatusBar("Generated site map from database")
}
# GUI handler: build a coarse (annual) data-coverage timeline for the sites
# in the database and open it with a time-period selection tool.
.hs_on_scope_viewdbtimeline_button_clicked <- function(button) {
  freezeGUI(echo.to.log=F)
  on.exit(thawGUI())
  siteListFile <- theWidget("scope_sitelist_filechooserbutton")$getFilename()
  siteListFormatIndex <- theWidget("scope_sitelist_format_combobox")$getActive()+1
  dataYears <- theWidget("scope_datayears_spinbutton")$getValue()
  if (is.null(siteListFile)) {
    errorDialog("Choose the site list file.")
    return()
  }
  addLogComment("Generate site map from database and select region")
  # Build the site-list reader call restricted to the chosen region.
  select.call <- call(SITELIST.FORMATS[[siteListFormatIndex]])
  select.call$siteListFile <- siteListFile
  select.call$xlim <- hsp$region$xlim
  select.call$ylim <- hsp$region$ylim
  select.call$min.years <- dataYears
  select.assign.call <- quote(tmp.sites <- foo)
  select.assign.call[[3]] <- select.call
  guiDo(call=select.assign.call)
  # make rough annual series from start and end dates
  # (one timeblob per site spanning first.year..last.year)
  guiDo({
    tmp.coverage <- list()
    for (i in which(tmp.sites$ok)) {
      years <- with(tmp.sites, paste(
        first.year[i]:last.year[i], "-01-01", sep=''))
      tmp.coverage[[tmp.sites$name[i]]] <- timeblob(years, 1)
    }
  })
  plot.call <- quote(grid.timeline.plot(tmp.coverage, xlim=hsp$timePeriod))
  addToLog(paste(deparse(plot.call), collapse="\n"))
  playwith(plot.call=plot.call, title="timeline",
    viewport="time.vp",
    time.mode=TRUE,
    bottom=list(setPeriodTool),
    eval.args="^hsp$", invert.match=T, on.close=restoreHS)
  guiDo(rm(tmp.sites, tmp.coverage))
  setStatusBar("Generated timeline plot from database")
}
# GUI handler: import all matching site time series from the database
# archive into the project (`hsp$data`), confirming first when many sites
# would be read.
.hs_on_scope_import_button_clicked <- function(button) {
  freezeGUI()
  on.exit(thawGUI())
  siteListFile <- theWidget("scope_sitelist_filechooserbutton")$getFilename()
  siteListFormatIndex <- theWidget("scope_sitelist_format_combobox")$getActive()+1
  siteDataArchive <- theWidget("scope_sitearchive_filechooserbutton")$getFilename()
  isArchive <- (theWidget("scope_sitearchive_type_combobox")$getActive() == 0)
  if (!isArchive) {
    # "folder" mode: use the chooser's current folder, not a file.
    siteDataArchive <- theWidget("scope_sitearchive_filechooserbutton")$getCurrentFolder()
  }
  dataYears <- theWidget("scope_datayears_spinbutton")$getValue()
  if (is.null(siteListFile)) {
    errorDialog("Choose the site list file.")
    return()
  }
  if (is.null(siteDataArchive)) {
    errorDialog("Choose the site data archive.")
    return()
  }
  addLogComment("Import sites from database")
  # First pass: evaluate the reader without data to count matching sites.
  select.call <- call(SITELIST.FORMATS[[siteListFormatIndex]])
  select.call$siteListFile <- siteListFile
  select.call$archivePath <- siteDataArchive
  select.call$xlim <- hsp$region$xlim
  select.call$ylim <- hsp$region$ylim
  select.call$timelim <- quote(hsp$timePeriod)
  select.call$min.years <- dataYears
  nSites <- sum(eval(select.call)$ok)
  if (nSites >= 10) {
    if (is.null(questionDialog("Import ", nSites,
      " time series from file? This might take a long time, ",
      "and R will appear to freeze. ",
      "Watch the R console for progress."))) {
      return()
    }
  }
  # Second pass: actually read the data (was `T`; use the full literal).
  select.call$return.data <- TRUE
  select.assign.call <- quote(tmp.sites <- foo)
  select.assign.call[[3]] <- select.call
  guiDo(call=select.assign.call)
  guiDo(hsp$data[names(tmp.sites)] <- tmp.sites)
  setIsImportMode(FALSE)
  setStatusBar("Imported sites from database")
  datasetModificationUpdate()
}
# GUI handler: parse a "START to END" string from the entry widget and
# store it as the project's analysis time period (hsp$timePeriod).
.hs_on_timeperiod_updateperiod_button_clicked <- function(button) {
  freezeGUI()
  on.exit(thawGUI())
  myText <- theWidget("timeperiod_chosenperiod_entry")$getText()
  myTimeStrings <- strsplit(myText, " to ")[[1]]
  if (length(myTimeStrings) != 2) {
    errorDialog("Give time period in form \"START to END\".")
    return()
  }
  addLogComment("Set time period for analysis")
  # bquote() substitutes the literal strings into the logged/executed call.
  guiDo(call=bquote(
    hsp$timePeriod <- as.POSIXct(.(myTimeStrings), tz="GMT")
  ))
  setStatusBar("Set time period for analysis: ", myText)
  timeperiodModificationUpdate()
}
# GUI handler: clear the analysis time period and refresh dependents.
.hs_on_timeperiod_reset_button_clicked <- function(button) {
  freezeGUI()
  on.exit(thawGUI())
  addToLog("\n")
  guiDo(hsp$timePeriod <- NULL)
  timeperiodModificationUpdate()
}
# GUI handler: parse "LOWER to UPPER" X and Y bounds from the entry
# widgets and store them as the analysis region (hsp$region).
.hs_on_scope_set_region_button_clicked <- function(button) {
  freezeGUI()
  on.exit(thawGUI())
  myXText <- theWidget("scope_region_x_entry")$getText()
  myYText <- theWidget("scope_region_y_entry")$getText()
  myXStrings <- strsplit(myXText, " to ")[[1]]
  myYStrings <- strsplit(myYText, " to ")[[1]]
  if ((length(myXStrings) != 2) || (length(myYStrings) != 2)) {
    errorDialog("Give bounds in form \"LOWER to UPPER\".")
    return()
  }
  addLogComment("Set region for analysis")
  guiDo(call=bquote(
    hsp$region <- list(xlim=as.numeric(.(myXStrings)),
      ylim=as.numeric(.(myYStrings)))
  ))
  setStatusBar("Set region for analysis: X = ", myXText,
    ", Y = ", myYText)
  regionModificationUpdate()
}
# GUI handler: clear the analysis region and refresh dependents.
.hs_on_scope_reset_region_button_clicked <- function(button) {
  freezeGUI()
  on.exit(thawGUI())
  addToLog("\n")
  guiDo(hsp$region <- NULL)
  regionModificationUpdate()
}
# GUI handler: plot a data-coverage timeline for the currently selected
# items, optionally colouring by quality codes, with a period-selection
# tool attached.
.hs_on_timeperiod_viewtimeline_button_clicked <- function(button) {
  # (was `echo.to.log=F`; use the full literal)
  freezeGUI(echo.to.log=FALSE)
  on.exit(thawGUI())
  selNames <- iconViewGetSelectedNames(theWidget("selection_iconview"))
  if (length(selNames) == 0) {
    errorDialog("No items selected.")
    return()
  }
  plotQualCodes <- theWidget("timeperiod_plotqualitycodes_checkbutton")$getActive()
  addLogComment("Generate timeline plot")
  plot.call <- call('grid.timeline.plot')
  plot.call[[2]] <- bquote(hsp$data[.(selNames)])
  plot.call$xlim <- quote(hsp$timePeriod)
  # When quality codes ARE plotted this `if` yields NULL, and assigning
  # NULL removes colMap from the call entirely (deliberate trick).
  plot.call$colMap <- if (!plotQualCodes) { NA }
  addToLog(paste(deparse(plot.call), collapse="\n"))
  playwith(plot.call=plot.call, title="timeline",
    viewport="time.vp",
    time.mode=TRUE,
    bottom=list(setPeriodTool),
    eval.args="^hsp$", invert.match=TRUE, on.close=restoreHS)
  setStatusBar("Generated timeline plot")
}
# GUI handler: plot a map of the loaded sites (optionally rain gauges
# only, optionally with an interpolated elevation surface) and open it
# with a region-selection tool.
.hs_on_scope_viewsitemap_button_clicked <- function(button) {
  freezeGUI(echo.to.log=F)
  on.exit(thawGUI())
  doInterpElev <- theWidget("scope_sitemap_elevation_checkbutton")$getActive()
  doRainOnly <- theWidget("scope_sitemap_rainonly_checkbutton")$getActive()
  selNames <- names(hsp$data)
  if (doRainOnly) {
    role <- sapply(hsp$data, attr, "role")
    selNames <- names(hsp$data)[role=="RAIN"]
  }
  # Keep only items with a valid 2-element location.xy attribute.
  loc <- lapply(hsp$data[selNames], attr, "location.xy")
  ok <- (sapply(loc, length) == 2)
  if (any(!ok)) {
    infoDialog(paste("Some items do not have a valid 'location.xy' attribute:",
      paste(selNames[!ok], collapse=", "),
      ". You can fix them with 'edit metadata' in the 'Dataset' tab."))
  }
  selNames <- selNames[ok]
  if (length(selNames) < 4) {
    # need at least 4 locations to interpolate
    doInterpElev <- FALSE
  }
  addLogComment("Generate site map")
  # Materialise the site coordinates as temporary objects (logged via guiDo
  # and removed again at the end of this handler).
  tmpObjs <- c('tmp.names', 'tmp.locs')
  guiDo(call=bquote({
    tmp.names <- .(selNames)
    tmp.locs <- sapply(hsp$data[tmp.names], attr, "location.xy")
    tmp.locs <- data.frame(x=tmp.locs[1,], y=tmp.locs[2,])
  }))
  plot.call <- quote(
    xyplot(y ~ x, tmp.locs, aspect="iso")
  )
  plot.call$panel <- function(x, y, z, labels, ...) {
    panel.levelplot.interp(x, y, z, col.regions=grey.colors(100, start=0.9, end=0.6))
    panel.worldmap()
    panel.rivers()
    panel.cities()
    if (!is.null(hsp$catchment))
      sp.polygons(hsp$catchment)
    panel.points(x, y)
    panel.text(x, y, labels=labels, ...)
  }
  # turn layer off by wrapping it in `if (FALSE)`
  if (!doInterpElev) body(plot.call$panel)[[2]] <-
    call('if', FALSE, body(plot.call$panel)[[2]])
  plot.call$labels <- quote(row.names(tmp.locs))
  if (doInterpElev) {
    tmpObjs <- c(tmpObjs, 'tmp.elev')
    # Collect per-site elevations; NULL attributes become NA.
    guiDo({
      tmp.elev <- lapply(hsp$data[tmp.names], attr, "elevation")
      tmp.elev <- unlist(ifelse(sapply(tmp.elev,is.null),NA,tmp.elev))
    })
    stopifnot(exists("tmp.elev"))
    plot.call$z <- quote(tmp.elev)
  }
  if (!doInterpElev) plot.call$z <- NULL
  if (!is.null(hsp$region)) {
    plot.call$xlim <- quote(hsp$region$xlim)
    plot.call$ylim <- quote(hsp$region$ylim)
  } else {
    plot.call$prepanel <- quote(prepanel.extend.10)
  }
  addToLog(paste(deparse(plot.call), collapse="\n"))
  playwith(plot.call=plot.call, title="site map",
    bottom=list(setRegionTool),
    eval.args="^hsp$", invert.match=T, on.close=restoreHS)
  if (length(tmpObjs) > 0) {
    guiDo(call=bquote(rm(list=.(tmpObjs))))
  }
  setStatusBar("Generated site map")
}
# GUI handler: import catchment boundary polygons from a shapefile into
# hsp$catchment, preferring rgdal (any OGR format) over maptools (ESRI
# shapefiles only).
.hs_on_scope_import_catchment_button_clicked <- function(button) {
  freezeGUI()
  on.exit(thawGUI())
  shapeFile <- theWidget("scope_catchment_filechooserbutton")$getFilename()
  if (is.null(shapeFile)) {
    errorDialog("First, choose the file.")
    return()
  }
  addLogComment("Import catchment boundaries from file")
  # readOGR wants the directory and the layer (file stem) separately.
  shapeDir <- dirname(shapeFile)
  shapeLayer <- get.stem(shapeFile)
  # (was `quietly=T` below; use the full literal)
  if (require(rgdal, quietly=TRUE)) {
    guiDo(library(rgdal))
    guiDo(call=bquote(
      hsp$catchment <- readOGR(.(shapeDir), .(shapeLayer))
    ))
  } else if (require(maptools, quietly=TRUE)) {
    guiDo(library(maptools))
    guiDo(call=bquote(
      hsp$catchment <- readShapePoly(.(shapeFile))
    ))
  } else {
    errorDialog('You need the "rgdal" package or "maptools" package ',
      "to import catchment boundaries ",
      "(note: maptools only supports ESRI shapefiles).")
    return()
  }
  setStatusBar("Imported catchment boundaries from file")
}
## NON-ACTIONS, just interface bits and pieces

# Record the chosen year-start (1-based month index) in project state.
.hs_on_scope_yearstart_combobox_changed <- function(widget) {
  # (was `T`; use the full literal)
  StateEnv$echo.to.log <- TRUE
  addToLog("\n")
  guiDo(call=bquote(hsp$yearStart <- .(widget$getActive()+1)))
}

# Enable the "update period" button once the entry text changes.
.hs_on_timeperiod_chosenperiod_entry_changed <- function(widget) {
  theWidget("timeperiod_updateperiod_button")$setSensitive(TRUE)
}

# Enable the "set region" button once either region entry changes.
.hs_on_scope_region_entry_changed <- function(widget) {
  theWidget("scope_set_region_button")$setSensitive(TRUE)
}

# Hint how to pick a folder when the archive type switches to "folder".
.hs_on_scope_sitearchive_type_combobox_changed <- function(widget) {
  isArchive <- (theWidget("scope_sitearchive_type_combobox")$getActive() == 0)
  if (!isArchive) {
    infoDialog("To choose the folder, select one file inside the folder.")
  }
}
|
1a34726099fe0abc02736f81e507342468c45634
|
9c48383adcdc2eeb25d48f543e9c176524149251
|
/estate_EDA_140731.R
|
8635369b4f2de81946c0cca19adcac3220707679
|
[] |
no_license
|
andrew-kim2/test111
|
f5934f3aab6f0234db949f831274b7e87465e17a
|
41f00548bd01a416ff6418accd1740271b874393
|
refs/heads/master
| 2020-12-24T15:23:32.267390
| 2014-08-08T09:03:03
| 2014-08-08T09:03:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,063
|
r
|
estate_EDA_140731.R
|
# --------------------------------------------------------------------------
# calendarheatmap Plotting function
# --------------------------------------------------------------------------
# Plot a calendar heatmap of `value_column` aggregated per day of
# `date_column` (one row of month-grids per year).
#
# Args:
#   data:          data frame holding the date and value columns.
#   date_column:   date column (bare name, variable holding a name, or string).
#   value_column:  value column (same conventions as date_column).
#   aggr_func:     per-day aggregation function (e.g. sum, mean).
#   plot_filename: optional PNG path; plot is written there when given.
#   ncolors:       number of colour gradations.
#   color:         palette name: "r2g", "r2b", or "w2b" (defined below).
#   varname:       unused; kept for backward compatibility.
#   date.form:     strptime format for character/factor dates.
#   color.reverse: reverse the palette direction?
#   ...:           extra options; currently only `main.title` is recognised.
eda_calendarheatmap <- function(
  data,
  date_column,
  value_column,
  aggr_func,
  plot_filename=NULL,
  ncolors = 99,
  color = "r2g",
  varname = "Values",
  date.form = "%Y-%m-%d",
  color.reverse = FALSE,
  ...)
{
  require(lattice)
  require(grid)
  require(chron)
  args <- list(...)
  # Fetch an optional argument from `...`, or NULL when absent.
  # FIX: the original returned the undefined symbol `null`, which raised
  # "object 'null' not found" whenever an optional argument was missing.
  get_arg <- function(arg_name, args) {
    if (arg_name %in% names(args)) {
      arg <- args[[arg_name]]
      return(arg)
    } else {
      return(NULL)
    }
  }
  # Resolve the column arguments: accept a bare column name, a variable
  # holding a name, or a plain character string.
  date_column_name <- as.list(substitute(date_column))[[1]]
  value_column_name <- as.list(substitute(value_column))[[1]]
  if (is.name(date_column_name)) {
    if (exists(as.character(date_column_name))) {
      date_column_name <- eval(date_column_name)
    } else {
      date_column_name <- as.character(date_column_name)
    }
  } else {
    date_column_name <- date_column
  }
  if (is.name(value_column_name)) {
    if (exists(as.character(value_column_name))) {
      value_column_name <- eval(value_column_name)
    } else {
      value_column_name <- as.character(value_column_name)
    }
  } else {
    value_column_name <- value_column
  }
  # Aggregate to one value per day.
  formula <- as.formula(paste(value_column_name, " ~ ", date_column_name, sep="", collapse=""))
  temp.df <- aggregate(as.formula(formula), data=data, aggr_func)
  colnames(temp.df) <- c("dates", "values")
  dates <- temp.df$dates
  values <- temp.df$values
  if (class(dates) == "integer") {
    dates <- as.character(dates)
  }
  if (class(dates) == "character" | class(dates) == "factor") {
    dates <- strptime(dates, date.form)
  }
  caldat <- data.frame(value = values, dates = dates)
  # Build a full-calendar-year daily grid covering the observed range;
  # days without data stay NA.
  min.date <- as.Date(paste(format(min(dates), "%Y"), "-1-1", sep = ""))
  max.date <- as.Date(paste(format(max(dates), "%Y"), "-12-31", sep = ""))
  caldat <- data.frame(date.seq = seq(min.date, max.date, by = "days"), value = NA)
  dates <- as.Date(dates)
  caldat$value[match(dates, caldat$date.seq)] <- values
  caldat$dotw <- as.numeric(format(caldat$date.seq, "%w"))     # day of week (0 = Sunday)
  caldat$woty <- as.numeric(format(caldat$date.seq, "%U")) + 1 # week of year
  caldat$yr <- as.factor(format(caldat$date.seq, "%Y"))
  caldat$month <- as.numeric(format(caldat$date.seq, "%m"))
  # Sequential day index within each year (used to locate month ends).
  yrs <- as.character(unique(caldat$yr))
  d.loc <- as.numeric()
  for (m in min(yrs):max(yrs)) {
    d.subset <- which(caldat$yr == m)
    sub.seq <- seq(1, length(d.subset))
    d.loc <- c(d.loc, sub.seq)
  }
  caldat <- cbind(caldat, seq=d.loc)
  # Colour palettes.
  r2b <- c("#0571B0", "#92C5DE", "#F7F7F7", "#F4A582", "#CA0020")
  r2g <- c("#D61818", "#FFAE63", "#FFFFBD", "#B5E384")
  w2b <- c("#045A8D", "#2B8CBE", "#74A9CF", "#BDC9E1", "#F1EEF6")
  colorset <- get(color)
  if (isTRUE(color.reverse)) {
    colorset <- rev(colorset)
  }
  assign("col.sty", colorset)
  calendar.pal <- colorRampPalette((col.sty), space = "Lab")
  # Temporarily install a minimal lattice theme; restored at the end.
  def.theme <- lattice.getOption("default.theme")
  cal.theme <- function() {
    theme <- list(strip.background = list(col = "transparent"),
      strip.border = list(col = "transparent"), axis.line = list(col = "transparent"),
      par.strip.text = list(cex = 0.8))
  }
  lattice.options(default.theme = cal.theme)
  yrs <- (unique(caldat$yr))
  nyr <- length(yrs)
  main.title <- get_arg("main.title", args)
  if (is.null(main.title)) main.title <- ""  # default title when not supplied
  # NOTE(review): `layout = c(1, nyr %% 8)` is 0 rows when nyr is a
  # multiple of 8 -- possibly intended as min(nyr, 8); left unchanged.
  cal.plot <- levelplot(value ~ woty * dotw | yr,
    data = caldat,
    as.table = TRUE,
    aspect = 0.12,
    layout = c(1, nyr%%8),
    between = list(x = 0, y = c(1, 1)),
    strip = TRUE,
    main = main.title,
    scales = list(x = list(at = c(seq(2.9, 52, by = 4.42)),
        labels = paste(1:12, "월", sep=""),
        alternating = c(1, rep(0, (nyr - 1))),
        tck = 0,
        cex = 0.7),
      y = list(at = c(0, 1, 2, 3, 4, 5, 6),
        #labels = c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"),
        labels = c("일", "월", "화", "수", "목", "금", "토"),
        alternating = 1,
        cex = 0.6,
        tck = 0)),
    xlim = c(0.4, 54.6),
    ylim = c(6.6, -0.6),
    cuts = ncolors - 1,
    col.regions = (calendar.pal(ncolors)),
    xlab = "",
    ylab = "",
    colorkey = list(col = calendar.pal(ncolors), width = 0.6, height = 0.5),
    subscripts = TRUE)
  if (!is.null(plot_filename)) {
    png(plot_filename, width = 1400, height = 400)
  }
  print(cal.plot)
  # Post-draw pass: overlay week/month grid lines onto every year panel.
  panel.locs <- trellis.currentLayout()
  for (row in 1:nrow(panel.locs)) {
    for (column in 1:ncol(panel.locs)) {
      if (panel.locs[row, column] > 0) {
        trellis.focus("panel", row = row, column = column, highlight = FALSE)
        xyetc <- trellis.panelArgs()
        subs <- caldat[xyetc$subscripts, ]
        dates.fsubs <- caldat[caldat$yr == unique(subs$yr), ]
        y.start <- dates.fsubs$dotw[1]
        y.end <- dates.fsubs$dotw[nrow(dates.fsubs)]
        dates.len <- nrow(dates.fsubs)
        adj.start <- dates.fsubs$woty[1]
        # Horizontal grey lines between weekdays.
        for (k in 0:6) {
          if (k < y.start) {
            x.start <- adj.start + 0.5
          }
          else {
            x.start <- adj.start - 0.5
          }
          if (k > y.end) {
            x.finis <- dates.fsubs$woty[nrow(dates.fsubs)] - 0.5
          }
          else {
            x.finis <- dates.fsubs$woty[nrow(dates.fsubs)] + 0.5
          }
          grid.lines(x = c(x.start, x.finis),
            y = c(k - 0.5, k - 0.5),
            default.units = "native",
            gp = gpar(col = "grey", lwd = 1))
        }
        if (adj.start < 2) {
          grid.lines(x = c(0.5, 0.5),
            y = c(6.5, y.start - 0.5),
            default.units = "native",
            gp = gpar(col = "grey", lwd = 1))
          grid.lines(x = c(1.5, 1.5),
            y = c(6.5, -0.5),
            default.units = "native",
            gp = gpar(col = "grey", lwd = 1))
          grid.lines(x = c(x.finis, x.finis),
            y = c(dates.fsubs$dotw[dates.len] - 0.5, -0.5),
            default.units = "native",
            gp = gpar(col = "grey", lwd = 1))
          if (dates.fsubs$dotw[dates.len] != 6) {
            grid.lines(x = c(x.finis + 1, x.finis + 1),
              y = c(dates.fsubs$dotw[dates.len] - 0.5, -0.5),
              default.units = "native",
              gp = gpar(col = "grey",
                lwd = 1))
          }
          grid.lines(x = c(x.finis, x.finis),
            y = c(dates.fsubs$dotw[dates.len] - 0.5, -0.5),
            default.units = "native",
            gp = gpar(col = "grey",
              lwd = 1))
        }
        # Vertical grey lines between weeks.
        for (n in 1:51) {
          grid.lines(x = c(n + 1.5, n + 1.5),
            y = c(-0.5, 6.5),
            default.units = "native",
            gp = gpar(col = "grey", lwd = 1))
        }
        # Black border around the year's populated region.
        x.start <- adj.start - 0.5
        if (y.start > 0) {
          grid.lines(x = c(x.start, x.start + 1),
            y = c(y.start - 0.5, y.start - 0.5),
            default.units = "native",
            gp = gpar(col = "black", lwd = 1.75))
          grid.lines(x = c(x.start + 1, x.start + 1),
            y = c(y.start - 0.5, -0.5),
            default.units = "native",
            gp = gpar(col = "black", lwd = 1.75))
          grid.lines(x = c(x.start, x.start),
            y = c(y.start - 0.5, 6.5),
            default.units = "native",
            gp = gpar(col = "black", lwd = 1.75))
          if (y.end < 6) {
            grid.lines(x = c(x.start + 1, x.finis + 1),
              y = c(-0.5, -0.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
            grid.lines(x = c(x.start, x.finis),
              y = c(6.5, 6.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
          }
          else {
            grid.lines(x = c(x.start + 1, x.finis),
              y = c(-0.5, -0.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
            grid.lines(x = c(x.start, x.finis),
              y = c(6.5, 6.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
          }
        }
        else {
          grid.lines(x = c(x.start, x.start),
            y = c(-0.5, 6.5),
            default.units = "native",
            gp = gpar(col = "black", lwd = 1.75))
        }
        if (y.start == 0) {
          if (y.end < 6) {
            grid.lines(x = c(x.start, x.finis + 1),
              y = c(-0.5, -0.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
            grid.lines(x = c(x.start, x.finis),
              y = c(6.5, 6.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
          }
          else {
            grid.lines(x = c(x.start + 1, x.finis),
              y = c(-0.5, -0.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
            grid.lines(x = c(x.start, x.finis),
              y = c(6.5, 6.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
          }
        }
        # Black separators after the last day of each month.
        for (j in 1:12) {
          last.month <- max(dates.fsubs$seq[dates.fsubs$month == j])
          x.last.m <- dates.fsubs$woty[last.month] + 0.5
          y.last.m <- dates.fsubs$dotw[last.month] + 0.5
          grid.lines(x = c(x.last.m, x.last.m),
            y = c(-0.5, y.last.m),
            default.units = "native",
            gp = gpar(col = "black", lwd = 1.75))
          if ((y.last.m) < 6) {
            grid.lines(x = c(x.last.m, x.last.m - 1),
              y = c(y.last.m, y.last.m), default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
            grid.lines(x = c(x.last.m - 1, x.last.m - 1),
              y = c(y.last.m, 6.5),
              default.units = "native",
              gp = gpar(col = "black", lwd = 1.75))
          }
          else {
            grid.lines(x = c(x.last.m, x.last.m), y = c(-0.5,
              6.5), default.units = "native", gp = gpar(col = "black",
              lwd = 1.75))
          }
        }
      }
    }
    trellis.unfocus()
  }
  # Restore the caller's lattice theme and close the PNG device if used.
  lattice.options(default.theme = def.theme)
  if (!is.null(plot_filename)) {
    dev.off()
  }
  rm(temp.df)
}
# --------------------------------------------------------------------------
# data load
# --------------------------------------------------------------------------
# Read the raw sensor log export.
# NOTE(review): hard-coded absolute Windows path -- portable only on the
# original author's machine.
sample_data <- read.csv("C:/Users/Andrew/Desktop/Logs_2014_07_23_15_29.csv", header = T)
head(sample_data)
# Keep the ID column plus the measurement columns, then derive date-only
# (first 10 characters, "YYYY-MM-DD") versions of the four timestamps.
sample_data1 <- sample_data[, c(2, 4:12)]
sample_data1$rcd <- substring(sample_data$reportCreatedAt, 1, 10)
sample_data1$rud <- substring(sample_data$reportUpdatedAt, 1, 10)
sample_data1$rlcd <- substring(sample_data$reportLogCretatedAt, 1, 10)
sample_data1$rlud <- substring(sample_data$reportLogUpdatedAt, 1, 10)
head(sample_data1)
# --------------------------------------------------------------------------
# "선릉KT" (Seolleung KT) site
# --------------------------------------------------------------------------
seonreung <- subset(sample_data1, placeName == "선릉KT")
seonreung <- seonreung[order(seonreung$facilitiesName, seonreung$facilitiesSubName, seonreung$categoryName, seonreung$measurementName),]
head(seonreung, 10)
names(seonreung)
dim(seonreung) # 3899
###########################################################################
# Daily trend over time for each item (per measurementName), as daily means.
# Item list: 305 items in total.
###########################################################################
# Device key = facility / sub-facility / category / measurement.
seonreung$device <- paste0(seonreung$facilitiesName, "_", seonreung$facilitiesSubName, "_", seonreung$categoryName, "_", seonreung$measurementName)
seonreung_m_name <- unique(seonreung$device)
seonreung_m_name_table <- as.data.frame(table(seonreung$device))
colnames(seonreung_m_name_table) <- c("device_name", "freq")
# Per device: total rows, blank-value count, and percent missing.
# NOTE(review): rows of `seonreung_m_name_table` follow table()'s sorted
# order while `seonreung_m_name` follows appearance order -- confirm they
# align (they do only because `seonreung` was sorted above).
for(i in 1:length(seonreung_m_name)){
  seonreung_test_set <- subset(seonreung, device == seonreung_m_name[i])
  seonreung_m_name_table$total[i] <- nrow(seonreung_test_set)
  seonreung_m_name_table$null[i] <- sum(seonreung_test_set$value == "") + sum(seonreung_test_set$value == " ") + sum(seonreung_test_set$value == " ")
  seonreung_m_name_table$null_p[i] <- round(seonreung_m_name_table$null[i]/seonreung_m_name_table$total[i]*100, 2)
}
head(seonreung_m_name_table)
tail(seonreung_m_name_table)
###########################################################################
# Exclude devices with 70%+ missing values or 10 or fewer records.
# Analysable devices: 58 in total (out of 305).
###########################################################################
seonreung_use_device_temp1 <- subset(seonreung_m_name_table, null_p <= 30)
seonreung_use_device_temp2 <- subset(seonreung_use_device_temp1, total > 10)
seonreung_use_device_name <- seonreung_use_device_temp2$device_name # 58
seonreung_use_device_data <- seonreung_use_device_temp2
###########################################################################
# Daily means and trend time-series charts.
# Daily means are collected in: seonreung_device_day_mean_list
###########################################################################
# NOTE(review): ggplot2 must be attached (e.g. via library(ggplot2)) before
# this loop; no library() call is visible in this file.
seonreung_device_day_mean_list <- c()
for(i in 1:length(seonreung_use_device_name)){
  seonreung_device_temp <- as.data.frame(subset(seonreung, device == seonreung_use_device_name[i]))
  seonreung_device_temp$value <- as.numeric(as.character(seonreung_device_temp$value))
  # Skip devices whose values are entirely non-numeric (all NA).
  if(sum(is.na(seonreung_device_temp$value)) != length(seonreung_device_temp$value)){
    # Mean value per report-creation date (rcd).
    seonreung_device_day_mean <- aggregate(value ~ rcd, data = seonreung_device_temp, FUN = mean)
    colnames(seonreung_device_day_mean) <- c("day", "mean")
    seonreung_device_day_mean_list[[i]] <- list(device_name = as.character(seonreung_use_device_name[i]), day_mean = seonreung_device_day_mean)
    seonreung_plot_title <- paste("Time Plot (by Day):", seonreung_use_device_name[i])
    seonreung_time_chart <- ggplot(data = seonreung_device_day_mean, aes(x = as.Date(day), y = mean))
    seonreung_time_chart <- seonreung_time_chart + geom_line()
    seonreung_time_chart <- seonreung_time_chart + xlab("Date") + ylab("Mean")
    seonreung_time_chart <- seonreung_time_chart + labs(title = seonreung_plot_title)
    seonreung_time_chart <- seonreung_time_chart + theme(axis.text.x = element_text(angle=70, vjust=0.5), plot.title = element_text(lineheight=.8, face="bold"))
    print(seonreung_time_chart)
  }
}
###########################################################################
# Calendar heatmap per device, drawn from the daily means computed above.
###########################################################################
for (i in seq_along(seonreung_use_device_name)) {
  seonreung_device_temp <- as.data.frame(subset(seonreung, device == seonreung_use_device_name[i]))
  seonreung_device_temp$value <- as.numeric(as.character(seonreung_device_temp$value))
  # Same all-NA guard as the trend section, so day_mean_list[[i]] is non-NULL
  # whenever it is accessed here.
  if (!all(is.na(seonreung_device_temp$value))) {
    seonreung_plot_title <- paste("Calendar heatmap:", seonreung_use_device_name[i])
    eda_calendarheatmap(seonreung_device_day_mean_list[[i]]$day_mean, "day", "mean", sum,
                        date.form = "%Y-%m-%d", main.title = seonreung_plot_title,
                        color.reverse = TRUE)
  }
}
# --------------------------------------------------------------------------
# 우면센터 site: subset, sort, and per-device missing-value summary
# --------------------------------------------------------------------------
umyeon <- subset(sample_data1, placeName == "우면센터")
umyeon <- umyeon[order(umyeon$facilitiesName, umyeon$facilitiesSubName,
                       umyeon$categoryName, umyeon$measurementName), ]
###########################################################################
# Daily trend per measurement item (by measurementName).
# Item list: 101 items in total.
# A "device" key is one facility/sub-facility/category/measurement combo.
###########################################################################
umyeon$device <- paste0(umyeon$facilitiesName, "_", umyeon$facilitiesSubName, "_",
                        umyeon$categoryName, "_", umyeon$measurementName)
umyeon_m_name <- unique(umyeon$device)
umyeon_m_name_table <- as.data.frame(table(umyeon$device))
colnames(umyeon_m_name_table) <- c("device_name", "freq")
# Preallocate the summary columns (no element-wise data-frame growth) and fill
# row i from the table's own device_name so the stats cannot be misaligned
# with the table's (sorted) row order.
n_dev <- nrow(umyeon_m_name_table)
umyeon_m_name_table$total <- numeric(n_dev)
umyeon_m_name_table$null <- numeric(n_dev)
umyeon_m_name_table$null_p <- numeric(n_dev)
for (i in seq_len(n_dev)) {
  dev_name <- as.character(umyeon_m_name_table$device_name[i])
  dev_values <- umyeon$value[umyeon$device == dev_name]
  umyeon_m_name_table$total[i] <- length(dev_values)
  # Empty strings and one/two-space blanks count as missing (same rule as before).
  umyeon_m_name_table$null[i] <- sum(dev_values == "") + sum(dev_values == " ") +
    sum(dev_values == "  ")
  umyeon_m_name_table$null_p[i] <- round(umyeon_m_name_table$null[i] /
                                           umyeon_m_name_table$total[i] * 100, 2)
}
###########################################################################
# Keep devices with at most 30% missing values and more than 10 records.
# (NOTE(review): the original comment said ">= 70% missing excluded", but
# the code actually drops anything above 30% missing.)
# Analysable devices: 22 of 101.
###########################################################################
umyeon_use_device_temp1 <- subset(umyeon_m_name_table, null_p <= 30)
umyeon_use_device_temp2 <- subset(umyeon_m_name_table, null_p <= 30 & total > 10)
umyeon_use_device_data <- umyeon_use_device_temp2
umyeon_use_device_name <- umyeon_use_device_data$device_name  # 22
###########################################################################
# Daily means and trend time-series charts per device.
# Daily means are kept in umyeon_device_day_mean_list for reuse below.
###########################################################################
# Preallocate the result list; all-NA devices keep a NULL slot, which the
# heatmap consumer re-checks with the same guard.
umyeon_device_day_mean_list <- vector("list", length(umyeon_use_device_name))
for (i in seq_along(umyeon_use_device_name)) {
  umyeon_device_temp <- as.data.frame(subset(umyeon, device == umyeon_use_device_name[i]))
  # value arrives as text; non-numeric entries become NA (with a coercion warning).
  umyeon_device_temp$value <- as.numeric(as.character(umyeon_device_temp$value))
  if (!all(is.na(umyeon_device_temp$value))) {
    # Mean per day (rcd is the day key); aggregate() drops NA rows.
    umyeon_device_day_mean <- aggregate(value ~ rcd, data = umyeon_device_temp, FUN = mean)
    colnames(umyeon_device_day_mean) <- c("day", "mean")
    umyeon_device_day_mean_list[[i]] <- list(device_name = as.character(umyeon_use_device_name[i]),
                                             day_mean = umyeon_device_day_mean)
    umyeon_plot_title <- paste("Time Plot (by Day):", umyeon_use_device_name[i])
    # Build the chart in one chain instead of reassigning layer by layer.
    umyeon_time_chart <- ggplot(data = umyeon_device_day_mean, aes(x = as.Date(day), y = mean)) +
      geom_line() +
      xlab("Date") + ylab("Mean") +
      labs(title = umyeon_plot_title) +
      theme(axis.text.x = element_text(angle = 70, vjust = 0.5),
            plot.title = element_text(lineheight = .8, face = "bold"))
    print(umyeon_time_chart)
  }
}
###########################################################################
# Calendar heatmap per device, drawn from the daily means computed above.
###########################################################################
for (i in seq_along(umyeon_use_device_name)) {
  umyeon_device_temp <- as.data.frame(subset(umyeon, device == umyeon_use_device_name[i]))
  umyeon_device_temp$value <- as.numeric(as.character(umyeon_device_temp$value))
  # Same all-NA guard as the trend section, so day_mean_list[[i]] is non-NULL
  # whenever it is accessed here.
  if (!all(is.na(umyeon_device_temp$value))) {
    umyeon_plot_title <- paste("Calendar heatmap:", umyeon_use_device_name[i])
    eda_calendarheatmap(umyeon_device_day_mean_list[[i]]$day_mean, "day", "mean", sum,
                        date.form = "%Y-%m-%d", main.title = umyeon_plot_title,
                        color.reverse = TRUE)
  }
}
|
51f523e92ffcf3f9817a486be577dc92f1361f87
|
708f744bc98651fd3f78f2d59307509118c16879
|
/RKEEL/man/GFS-LogitBoost-C.Rd
|
7f276bdedd7a1ade72dc3718f6ff55b0592d42d0
|
[] |
no_license
|
i02momuj/RKEEL
|
726efa0409193a1ebc6ff82ef195e2708f3fa397
|
445cd8cceade2316bc12d40406c7c1248e2daeaa
|
refs/heads/master
| 2021-01-10T10:13:36.242589
| 2019-07-19T07:43:32
| 2019-07-19T07:43:32
| 49,633,299
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 998
|
rd
|
GFS-LogitBoost-C.Rd
|
\docType{class}
\name{GFS_LogitBoost_C}
\alias{GFS_LogitBoost_C}
\alias{R6_GFS_LogitBoost_C}
\title{GFS_LogitBoost_C KEEL Classification Algorithm}
\description{
GFS_LogitBoost_C Classification Algorithm from KEEL.
}
\usage{
GFS_LogitBoost_C(train, test, numLabels, numRules, seed)
}
\arguments{
\item{train}{Train dataset as a data.frame object}
\item{test}{Test dataset as a data.frame object}
\item{numLabels}{numLabels. Default value = 3}
\item{numRules}{numRules. Default value = 25}
\item{seed}{Seed for random numbers. If it is not assigned a value, the seed will be a random number}
}
\value{
A data.frame with the actual and predicted classes for both \code{train} and \code{test} datasets.
}
\examples{
#data_train <- RKEEL::loadKeelDataset("iris_train")
#data_test <- RKEEL::loadKeelDataset("iris_test")
#Create algorithm
#algorithm <- RKEEL::GFS_LogitBoost_C(data_train, data_test)
#Run algorithm
#algorithm$run()
#See results
#algorithm$testPredictions
}
\keyword{classification}
|
fe10002cea4f5b6df86eaaadeea2a8e3d3738ba1
|
cf4263e82b2c118bc3ecea5dc62d561e7487cbd3
|
/man/ants_L1.Rd
|
8e058ac55f3b5ef04d63f33bd5a27097ad96de93
|
[
"MIT"
] |
permissive
|
EDIorg/ecocomDP
|
151a2d519ff740d466fafab74df5171a6ef196bf
|
0554d64ce81f35ed59985d9d991203d88fe1621f
|
refs/heads/main
| 2023-08-14T02:07:19.274860
| 2023-06-19T22:27:30
| 2023-06-19T22:27:30
| 94,339,321
| 26
| 10
|
NOASSERTION
| 2023-07-26T22:21:00
| 2017-06-14T14:22:43
|
R
|
UTF-8
|
R
| false
| true
| 905
|
rd
|
ants_L1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ants_L1}
\alias{ants_L1}
\title{The ecocomDP (L1) version of EDI data package knb-lter-hfr.118.33}
\format{
A list of:
\describe{
\item{id}{The dataset identifier}
\item{metadata}{See source url for metadata}
\item{tables}{A list of data frames, each an ecocomDP table}
\item{validation_issues}{Is NULL because there are no validation issues for this dataset}
}
}
\source{
\url{https://portal.edirepository.org/nis/mapbrowse?scope=edi&identifier=193&revision=5}
}
\usage{
ants_L1
}
\description{
The ecocomDP (L1) formatted version of EDI data package knb-lter-hfr.118.33 (Ant Assemblages in Hemlock Removal Experiment at Harvard Forest since 2003) read from the EDI API with \code{read_data(id = "edi.193.5")}. Use this dataset as an input to data "use" functions.
}
\keyword{datasets}
|
71b9af2626ce0fa399a39d697f92dab3b27fb033
|
d8c6c1a9ecb2e4be9f6cfa2f335c7d16e6a09c73
|
/man/inputSLanalyzeR.Rd
|
3e37ce6bb5e855dad7b7f6aaece739f64f2a0373
|
[] |
no_license
|
kkolmus/MolecularAnalyzeR
|
fedf71c03b60dcd79fccc951767048f79ca2f91e
|
02bc2710aa9af7a75dab1e4b7d600cd52496d6df
|
refs/heads/main
| 2023-06-11T09:13:23.882645
| 2021-07-04T12:54:39
| 2021-07-04T12:54:39
| 382,806,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 535
|
rd
|
inputSLanalyzeR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inputSLanalyzeR.R
\docType{data}
\name{inputSLanalyzeR}
\alias{inputSLanalyzeR}
\title{Expression data}
\format{
An object of class S3 data frame
}
\usage{
data(inputSLanalyzeR)
}
\description{
Data from The Cancer Genome Atlas project for the READ cohort of patients.
161 patients, copy number data for TP53, expression data for TSPAN6
}
\examples{
data(inputSLanalyzeR)
}
\references{
data frame with patient IDs, CNV and expression data
}
\keyword{datasets}
|
5f6b6525bd1c0cc87eeb478b36ea7a4f8d63a8a9
|
d34be3726b14e7fa85555f12d15d540f2cca4414
|
/CALISTA-R/calista/R/ind2sub.R
|
e086a7fce97f8be988402e36337422cab9bad3cc
|
[
"BSD-3-Clause"
] |
permissive
|
CABSEL/CALISTA
|
a595b42458449f16e06859539ce3ba794d26829e
|
af734bc422bbc00f7019e8f50a8460ec6540a4d9
|
refs/heads/master
| 2021-05-09T13:38:14.262647
| 2019-05-16T14:20:17
| 2019-05-16T14:20:17
| 119,039,497
| 10
| 3
|
BSD-3-Clause
| 2019-05-07T16:58:28
| 2018-01-26T10:35:58
|
HTML
|
UTF-8
|
R
| false
| false
| 362
|
r
|
ind2sub.R
|
#' Convert linear indices to array subscripts (MATLAB-style ind2sub)
#'
#' Maps 1-based column-major linear indices into per-dimension subscripts,
#' mirroring MATLAB's \code{ind2sub} (see also base R's \code{arrayInd}).
#'
#' @param sz Integer vector of array dimensions, e.g. \code{c(3, 4)}.
#' @param ind Vector of 1-based linear indices (column-major order).
#' @return A matrix with one row per index and one column per dimension.
#' @keywords calista
#' @export
#' @examples
#' ind2sub(c(3, 4), 5)  # row 2, column 2
ind2sub = function(sz,ind)
{
  ind <- as.matrix(ind)  # one linear index per row
  dims <- c(1, sz)
  # Preallocate the result instead of growing it with cbind() in the loop.
  sub <- matrix(0, nrow = nrow(ind), ncol = length(sz))
  den <- 1
  for (d in seq_along(sz)) {
    den <- den * dims[d]      # product of sizes of the faster-varying dims
    num <- den * dims[d + 1]  # period of dimension d in linear order
    sub[, d] <- floor(((ind - 1) %% num) / den) + 1
  }
  sub
}
|
7ecaf51722d72c68a3f69c386df1abece7fd9c09
|
ff604c79c7c4761efbe6e22ff72615c3faf372cc
|
/Data science Projects with R/random forest.R
|
b3e4561d5d1c575418a113f83c2dd72ef0f94ab8
|
[] |
no_license
|
Khachit-Basetti/Projects-with-R
|
d53ad9e5aac3bcb75afbfcc5c89b3bbffe0cef01
|
8f2fc6c029923948a1570244043263d82d73c40b
|
refs/heads/main
| 2023-05-25T07:51:17.451409
| 2021-06-06T16:24:27
| 2021-06-06T16:24:27
| 372,054,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,607
|
r
|
random forest.R
|
# Cardiotocography (CTG) data: load, inspect, and split into train/test sets.
data <- read.csv("F:/DSP13/CTGAll.csv")
View(data)
# Quick data-quality checks: NA counts and empty-string counts per column.
# NOTE(review): colSums(data == "") yields NA for columns containing NA —
# confirm the blank check targets the intended character columns.
colSums(is.na(data))
colSums(data == "")
dim(data)
str(data)
# NSP is the target class: 1 = Normal, 2 = Suspect, 3 = Pathologic.
data$NSP <- as.factor(data$NSP)
table(data$NSP)
dim(data)
# 70/30 train/test split. The index variable is named train_idx so it does
# not shadow base::sample().
set.seed(123)
train_idx <- sample(nrow(data), nrow(data) * 0.70)
train <- data[train_idx, ]
test <- data[-train_idx, ]
# install.packages("randomForest")
library(randomForest)
# attach(data) removed: it is an anti-pattern and appears unused below —
# columns are referenced through data frames, and partialPlot() deparses the
# bare ASTV symbol rather than evaluating it. TODO confirm before relying on it.
# Fit a baseline random forest on the training split.
set.seed(222)
m1 <- randomForest(NSP ~ ., data = train)  # mtry = no. of variables tried per split
print(m1)
print(attributes(m1))  # ntree and mtry are the knobs tuned later
# Training-set confusion matrix and accuracy (in-sample, so optimistic).
pred <- predict(m1, train, type = "class")
tab <- table(pred, train$NSP)
print(tab)
acc <- sum(diag(tab)) / sum(tab)
print(acc)
# Held-out test-set accuracy.
pred <- predict(m1, newdata = test, type = "class")
tab <- table(pred, test$NSP)
acc <- sum(diag(tab)) / sum(tab)
print(acc)
# Error-vs-number-of-trees curve.
plot(m1)
# Tune mtry around the default using OOB error (ntree fixed at 300).
# Predictors/response are selected by name instead of hard-coding column 22,
# which assumed NSP was the 22nd column.
set.seed(111)
tuneRF(train[, setdiff(names(train), "NSP")], train$NSP,
       stepFactor = 0.5, plot = TRUE, ntreeTry = 300, trace = TRUE, improve = 0.05)
# Refit with the tuned settings. The original passed type = "class", but
# randomForest() has no such argument (it belongs to predict()) and it was
# silently swallowed by `...`, so it is dropped here.
set.seed(111)
m2 <- randomForest(NSP ~ ., data = train, ntree = 300, mtry = 8)
m2
# Test-set performance of the tuned model (the train split is unchanged).
pred1 <- predict(m2, newdata = test)
tab <- table(pred1, test$NSP)
tab
acc <- sum(diag(tab)) / sum(tab)
acc
# Model interpretation -----------------------------------------------------
# Distribution of tree sizes (terminal-node counts) across the forest.
hist(treesize(m2), col = "red")
# Variable importance: which predictors matter most.
varImpPlot(m2)
importance(m2)
varUsed(m2)
# Partial dependence on ASTV, one plot per class.
# Class labels: 1 = Normal, 2 = Suspect, 3 = Pathologic.
partialPlot(m2, train, ASTV, 1)
partialPlot(m2, train, ASTV, 2)
partialPlot(m2, train, ASTV, 3)
# Inspect the structure of the first of the 300 trees, with variable names.
getTree(m2, 1, labelVar = TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.