blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56fd93750cf0b3c1671ccf18ab6400d3ec9df323
|
771d9236b852638924858f6f38bbadb496e10426
|
/R/ExtractFixations.R
|
4045f760398b4474b337e4c202060b2ffd19fade
|
[] |
no_license
|
sascha2schroeder/popEye
|
e8c8de1de72d0c5f937c2710b8b03e9834ea7435
|
ef6cfca89e71f324cbb7970061cdf23ece7b63b4
|
refs/heads/master
| 2023-05-24T14:11:35.312441
| 2022-09-08T10:17:15
| 2022-09-08T10:17:15
| 173,056,807
| 17
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 189
|
r
|
ExtractFixations.R
|
# Run fixation extraction for every trial in `dat`.
#
# For each trial index, retrieve the raw fixations and then compute fixation
# durations, threading the accumulating `dat` object through both helpers.
#
# dat: the experiment data object; `dat$item` has one entry per trial.
# Returns the updated `dat`.
# NOTE(review): RetrieveFixations() and ComputeDur() are defined elsewhere
# in this package — their contracts are assumed, not visible here.
ExtractFixations <- function(dat) {
  # seq_along() is safe for zero trials; 1:length(x) would yield c(1, 0)
  for (trial in seq_along(dat$item)) {
    dat <- RetrieveFixations(dat, trial)
    dat <- ComputeDur(dat, trial)
  }
  return(dat)
}
|
fb46299dac79c6ea5e8ce1b46b4ef24493c2673a
|
9f645cdeecffe321b9e66ee6f7d3e5fafcb9d1a0
|
/scripts/centroid_lavielle_adults_2017.R
|
0854c9e72441ddd1cccdf680c22bd860f71e5d27
|
[] |
no_license
|
dwwolfson/ch2_crane_movement
|
b64ecabf13b28f4bd869295048e641df2a76bc7b
|
373492c6dced0fd9821e53b06700ad8b11fcc3d6
|
refs/heads/master
| 2021-09-28T22:56:51.829061
| 2018-11-21T06:16:16
| 2018-11-21T06:16:16
| 111,363,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,666
|
r
|
centroid_lavielle_adults_2017.R
|
# Centroid/Lavielle analysis of adult sandhill crane movement, Apr-Aug 2017.
# Setup: load packages, import Movebank locations, label birds, project to
# an equal-area CRS, and build the tlocoh trajectory object.
library(dplyr)
library(sp)
library(lubridate)
library(tlocoh)
library(adehabitatLT)
# NOTE(review): rm(list=ls()) and an absolute setwd() are fragile in shared
# scripts; left as-is to preserve the original interactive workflow.
rm(list=ls())
source("Scripts/distance_to_centroid_function.R")
setwd("C:/Users/David/Documents/R/CraneMovement/scripts/t-locoh")
#Import csv of all locations from Movebank for April 1-August 31, 2017
df<-read.csv("Data/all_cranes_2017_april1_thru_aug31.csv")
#Drop columns
# NOTE(review): columns selected by position (4, 5, 17, 23) — presumably
# long, lat, animal id and timestamp in this Movebank export; verify against
# the csv header if the export format ever changes.
df<-df[,c(4,5, 17, 23)]
head(df)
#Renames variables to make simpler
colnames(df)[3]<-"id"
colnames(df)[4]<-"loctime"
# Parse timestamps as Central time
df$loctime<-as.character(df$loctime)
df$loctime<-as.POSIXct(df$loctime, format="%Y-%m-%d %H:%M:%S",
tz="America/Chicago")
# Count NAs per column (printed for inspection), then drop incomplete rows
apply(is.na(df),2,sum)
df<-na.omit(df)
summary(df$location.lat)
# Keep only locations west of -93 longitude (drops points outside study area)
df<-df[df$location.long<(-93),]
#Assign population, gender, age
df$age<-NULL
adult.list<-c("0J (Waubun adult #2)", "0K Deglman #1","1J (Santiago #3)" ,
"2C (Melby adult)" , "2M (Bagley)" , "3C (Larson adult #2)" ,
"3E (Wambach adult)" , "3K (Rasmussen adult)", "3M (Callaway adult)" ,
"4E Larson adult #1" , "4J (Santiago #1)", "4K (Waubun #1)" , "4M (Ogema adult)" ,
"5J (Deglman #3)" ,"5K (Helliksen July adult 2015)", "5M (Beaulieu adult)" , "6E (Linden-Kologi)", "8K (Deglman #2)" ,
"8M (Stockyard adult)", "9A (Santer)" , "9C (Helliksen adult April 2015)",
"9K (Santiago #2)" ,"9M (Inman adult)" )
df<-mutate(df, age=ifelse(id%in%adult.list, "adult", "colt"))
#Assign population
MCP.list<-c("2A (Helliksen July 2015 colt)" ,"2E Beaulieu colt" ,"3E (Wambach adult)",
"4K (Waubun #1)" ,"4M (Ogema adult)" ,"5K (Helliksen July adult 2015)",
"5M (Beaulieu adult)","6E (Linden-Kologi)" ,
"7C (Helliksen colt 2014)" ,"9C (Helliksen adult April 2015)","9J (Waubun colt)")
df$pop<-NULL
df<-mutate(df, pop=ifelse(id%in%MCP.list, "MCP", "EP"))
#Assign gender
male.list<-c("3A (Barb Wire)","2E Beaulieu colt" ,"3M (Callaway adult)",
"7M (E of Mud Lake colt)", "7A (Hackensack)", "7C (Helliksen colt 2014)",
"2A (Helliksen July 2015 colt)" ,"9C (Helliksen adult April 2015)",
"5K (Helliksen July adult 2015)", "9M (Inman adult)","1A (Inman colt #2)" ,
"3C (Larson adult #2)" , "7K (Larson colt #1)" , "6E (Linden-Kologi)",
"2C (Melby adult)", "1K (Melby colt #2)","1M (Min/Maint Rd)",
"7E (Nora Township colt)" ,"6A (Pelican Rapids colt)","3K (Rasmussen adult)",
"6C (Rice Lake colt 2014)" , "3E (Wambach adult)" ,"9J (Waubun colt)", "0J (Waubun adult #2)")
df$gender<-NULL
df<-mutate(df, gender=ifelse(id%in%male.list, "male", "female"))
#Drop colts
df<-df[df$age=="adult",]
df<-droplevels(df)
table(df$id)
#Spatial Stuff
# Project long/lat (WGS84) to an Albers equal-area CRS in meters
df.sp<-SpatialPoints(df[c("location.long","location.lat")],
proj4string =CRS("+proj=longlat +ellps=WGS84"))
df.sp<-spTransform(df.sp, CRS("+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5
+lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs "))
df.utm<-coordinates(df.sp)
colnames(df.utm)<-c("x","y")
# Re-parse loctime (round-trip through character) before building the lxy object
df$loctime<-as.character(df$loctime)
df$loctime<-as.POSIXct(x=df$loctime,format="%Y-%m-%d %H:%M:%S", tz="America/Chicago")
df.loctimes<-df$loctime
# Build the tlocoh LoCoH-xy object: one trajectory per crane id
df.lxy<-xyt.lxy(xy=df.utm, dt=df.loctimes,id=df$id,
proj4string = CRS("+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5
+lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs " ))
table(df$id)
# Exploratory pass: inspect individual birds to decide who to exclude.
# d2ctr() (sourced above from distance_to_centroid_function.R) presumably
# returns a per-fix data frame with `date` and `dist2centroid` — confirm
# against that script.
# lxy.plot.pt2ctr(df.lxy)
##########################################
deg3<-lxy.subset(df.lxy, id="5J (Deglman #3)")
plot(deg3)
deg3.d2<-d2ctr(deg3)
#I think 5J Deglman 3 should be excluded because there is only a couple weeks of data.
##############################################
lind<-lxy.subset(df.lxy, id="6E (Linden-Kologi)")
plot(lind)
lind.d2<-d2ctr(lind)
# Linden-Kologi also is missing most of the locations from the time period, although
# there are small chunks from time to time
####################################################################
wam<-lxy.subset(df.lxy, id="3E (Wambach adult)")
plot(wam)
wam.d2<-d2ctr(wam)
#This should be cut out as well. There is only info from the first few weeks of April.
######################################################
ras<-lxy.subset(df.lxy, id="3K (Rasmussen adult)")
plot(ras)
ras.d2<-d2ctr(ras)
#This one should be cut off as well? It basically doesn't have any
# coverage from most of the summer
#####################################################################
stock<-lxy.subset(df.lxy, id="8M (Stockyard adult)")
plot(stock)
stock.d2<-d2ctr(stock)
summary(stock)
lxy.plot.pt2ctr(stock)
lxy.plot.freq(stock, deltat.by.date = T)
lxy.plot.freq(stock, cp= T)
lxy.plot.freq(stock, samp.by.date = T)
# It's probably worth including 8M for now, but it's missing a
# chunk of data from late May till late June.
##################################################################
# So I'll be excluding Wambach, Rasmussen, and Linden-Kologi. (as well as Deglman #3)
# For a number of cranes there is a really high dist2centroid for the beginning of April,
# but I'll look at the trajectory and if it's basically a straight line towards the
# territory, I'll consider it migration and not roaming, and try to make a note of it.
###########################################################################
#dataframe to store comprehensive results
# `res` accumulates per-fix rows; `dat` holds one summary row per bird/mode.
# NOTE(review): each dat[i,]<-c(...) below mixes strings and numbers through
# c(), so every column of `dat` ends up character in the written csv.
res<-data.frame()
dat<-data.frame(id=NA, set_roam=NA, n_days=NA, n_obs=NA, max=NA, sd=NA, mean=NA, median=NA, IQR=NA)
###############################################################################
#Deglman adult #1
deg1<-lxy.subset(df.lxy, id="0K Deglman #1")
deg1.d2<-d2ctr(deg1)
plot(deg1)
deg1.d2$day<-yday(deg1.d2$date)
# deg1.lav<-lavielle(deg1.d2$dist2centroid, Lmin=5, Kmax=10)
deg1.d2$date[c(1,5188)]
#There on April 1. Stayed on territory until Aug 30
# settled
set<-deg1.d2[c(1:5188),]
s1<-max(set$dist2centroid) #3278
s2<-sd(set$dist2centroid) #408
s3<-mean(set$dist2centroid) #528
s4<-median(set$dist2centroid) #500
s5<-IQR(set$dist2centroid) #526
dat[1,]<-c("0K Deglman #1", "settled", length(unique(set$day)), nrow(set), s1, s2, s3, s4, s5)
d.df<-deg1.d2
d.df$mode<-"settled"
d.df$id<-"0K Deglman #1"
res<-rbind(res, d.df)
#############################################################################
#Santiago #3
san3<-lxy.subset(df.lxy, id="1J (Santiago #3)")
san3.d2<-d2ctr(san3)
plot(san3)
san3.d2$day<-yday(san3.d2$date)
san3.lav<-lavielle(san3.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(san3.lav)
findpath(san3.lav, 5)
san3.d2$date[c(1,1000,6723)]
# Like Deglman #2, this crane seems to be very active and not as settled as other adults,
# but it didn't have big long-distance movements, and limited itself to one overall area.
# I'm going to count the first three weeks as roaming, and if you look at a map, it's kinda
# all over the place. I'm going to hack off the end, because I think it started migrating
# to a staging area.
# settled (rows 1000:5970; rows after 5970 are dropped as start of migration)
set<-san3.d2[c(1000:5970),]
s1<-max(set$dist2centroid) #16,251
s2<-sd(set$dist2centroid) #2,879
s3<-mean(set$dist2centroid)#2,883
s4<-median(set$dist2centroid) # 1315
s5<-IQR(set$dist2centroid) #3247
dat[2,]<-c("1J (Santiago #3)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3, s4,s5)
#roam (rows 1:999, the first ~3 weeks)
roam<-san3.d2[c(1:999),]
r1<-max(roam$dist2centroid) #73,304
r2<-sd(roam$dist2centroid) #15,635
r3<-mean(roam$dist2centroid) #9,475
r4<-median(roam$dist2centroid) # 3152
r5<-IQR(roam$dist2centroid) #7706
dat[3,]<-c("1J (Santiago #3)", "roam", length(unique(roam$day)),nrow(roam), r1, r2, r3, r4, r5)
s.df<-san3.d2[1:5970,]
s.df$mode<-NA
s.df[1:999,"mode"]<-"roam"
s.df[1000:5970,"mode"]<-"settled"
s.df$id<-"1J (Santiago #3)"
res<-rbind(res, s.df)
###################################################################################
#2M (Bagley)
# Settled all season; last <1 day (post row 3660) chopped off as migration.
bag<-lxy.subset(df.lxy, id="2M (Bagley)")
bag.d2<-d2ctr(bag)
bag.d2$day<-yday(bag.d2$date)
plot(bag)
lxy.plot.freq(bag, deltat.by.date = TRUE)
lxy.plot.freq(bag, cp= TRUE)
# BUG FIX: this previously plotted `stock` (8M Stockyard, defined earlier) by
# copy-paste mistake; the note about the sampling gap refers to Bagley.
lxy.plot.freq(bag, samp.by.date = TRUE) # a month missing mid-May to mid-July
bag.lav<-lavielle(bag.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(bag.lav)
findpath(bag.lav, 5)
bag.d2$date[c(1,3679,3678)]
#There on April 1. Stayed on territory until August 31, when it seemed to start migration.
# Instead of counting the last part as roaming, I"m simply going to chop it off, since it's
# less than a day.
# settled
set<-bag.d2[c(1:3660),]
s1<-max(set$dist2centroid) #2,485
s2<-sd(set$dist2centroid) #515
s3<-mean(set$dist2centroid) #693
s4<-median(set$dist2centroid) # 407
s5<-IQR(set$dist2centroid) #526
dat[4,]<-c("2M (Bagley)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
b.df<-bag.d2[c(1:3660),]
b.df$mode<-"settled"
b.df$id<-"2M (Bagley)"
# (removed a redundant recomputation of b.df$day: the `day` column was added
# to bag.d2 above and is carried through the subset)
res<-rbind(res, b.df)
###############################################################################
# 4J (Santiago #1)
san1<-lxy.subset(df.lxy, id="4J (Santiago #1)")
san1.d2<-d2ctr(san1)
plot(san1)
san1.d2$day<-yday(san1.d2$date)
san1.lav<-lavielle(san1.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(san1.lav)
findpath(san1.lav, 3)
san1.d2$date[c(1,1000,6723)]
#There on April 1. Roaming during April, then stayed setteld till middle of July,
# Roaming from mid July-end of August. The roaming period wasn't the "wandering"
# of typical colts, it just relocated a few counties over to spend most of May
# at Wrightstown WMA (large WMA SE of 4 corners) where there was likely a huge
# flock of non-breeding cranes. (Probably a "young adult" without a territory)
# settled (two settled periods flanking the roam rows 1741:2549)
set<-san1.d2[c(1:1740, 2550:7534),]
s1<-max(set$dist2centroid) #12,510
s2<-sd(set$dist2centroid) #1,981
s3<-mean(set$dist2centroid)#6,474
s4<-median(set$dist2centroid) # 6494
s5<-IQR(set$dist2centroid) #3477
dat[5,]<-c("4J (Santiago #1)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
#roam
roam<-san1.d2[c(1741:2549),]
r1<-max(roam$dist2centroid) #48,773
r2<-sd(roam$dist2centroid) #7,704
r3<-mean(roam$dist2centroid) #46,459
r4<-median(roam$dist2centroid) # 47,972
r5<-IQR(roam$dist2centroid) #1045 #this shows that the period spent away was an outlier
dat[6,]<-c("4J (Santiago #1)", "roam", length(unique(roam$day)),nrow(roam), r1, r2, r3, r4, r5)
s.df<-san1.d2
s.df$mode<-NA
s.df[c(1741:2549),"mode"]<-"roam"
s.df[c(1:1740, 2550:7534),"mode"]<-"settled"
s.df$id<-"4J (Santiago #1)"
res<-rbind(res, s.df)
#######################################################################################
# 4K (Waubun #1)
waub1<-lxy.subset(df.lxy, id="4K (Waubun #1)")
waub1.d2<-d2ctr(waub1)
plot(waub1)
waub1.d2$day<-yday(waub1.d2$date)
waub1.lav<-lavielle(waub1.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(waub1.lav)
findpath(waub1.lav, 2)
waub1.d2$date[c(1,35,5588)]
#Arrived on April 1st (not before). Settled all summer. There was a huge movement in the
# first day as the crane was still migrating there. I'm going to chop that off.
# settled (first ~49 fixes dropped as arrival migration)
set<-waub1.d2[c(50:5588),]
s1<-max(set$dist2centroid) #20,266
s2<-sd(set$dist2centroid) #843
s3<-mean(set$dist2centroid) #2,113
s4<-median(set$dist2centroid) # 1984
s5<-IQR(set$dist2centroid) #444
dat[7,]<-c("4K (Waubun #1)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
w.df<-waub1.d2[c(50:5588),]
w.df$mode<-"settled"
w.df$id<-"4K (Waubun #1)"
res<-rbind(res, w.df)
###################################################################################
#4M (Ogema adult)
ogema<-lxy.subset(df.lxy, id="4M (Ogema adult)")
ogema.d2<-d2ctr(ogema)
ogema.d2$day<-yday(ogema.d2$date)
plot(ogema)
lxy.plot.freq(ogema, samp.by.date = T)
# ogema.lav<-lavielle(ogema.d2$dist2centroid, Lmin=5, Kmax=10)
# chooseseg(ogema.lav)
# findpath(ogema.lav, 5)
ogema.d2$date[c(1,7838)]
#This crane pretty obviously was settled all season.
# settled
set<-ogema.d2[c(1:7838),]
s1<-max(set$dist2centroid) #4,275
s2<-sd(set$dist2centroid) #605
s3<-mean(set$dist2centroid) #1,021
s4<-median(set$dist2centroid) # 816
s5<-IQR(set$dist2centroid) #854
dat[8,]<-c("4M (Ogema adult)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
o.df<-ogema.d2
o.df$mode<-"settled"
o.df$id<-"4M (Ogema adult)"
res<-rbind(res, o.df)
###################################################################################
#5K (Helliksen July adult 2015)
hel<-lxy.subset(df.lxy, id="5K (Helliksen July adult 2015)")
hel.d2<-d2ctr(hel)
hel.d2$day<-yday(hel.d2$date)
plot(hel)
lxy.plot.freq(hel, samp.by.date = T)
# hel.lav<-lavielle(hel.d2$dist2centroid, Lmin=5, Kmax=10)
# chooseseg(hel.lav)
# findpath(hel.lav, 5)
# NOTE(review): index 7838 looks copied from the Ogema section above; the
# settled subset below only uses rows 1:6043 — verify the intended endpoint.
hel.d2$date[c(1,7838)]
#Poster child of being settled all year.
# settled
set<-hel.d2[c(1:6043),]
s1<-max(set$dist2centroid) #2,241
s2<-sd(set$dist2centroid) #303
s3<-mean(set$dist2centroid) #421
s4<-median(set$dist2centroid) # 369
s5<-IQR(set$dist2centroid) #354
dat[9,]<-c("5K (Helliksen July adult 2015)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
h.df<-hel.d2
h.df$mode<-"settled"
h.df$id<-"5K (Helliksen July adult 2015)"
res<-rbind(res, h.df)
###################################################################################
#8K (Deglman #2)
deg2<-lxy.subset(df.lxy, id="8K (Deglman #2)")
deg2.d2<-d2ctr(deg2)
deg2.d2$day<-yday(deg2.d2$date)
plot(deg2)
lxy.plot.freq(deg2, samp.by.date = T)
deg2.lav<-lavielle(deg2.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(deg2.lav)
findpath(deg2.lav, 5)
deg2.d2$date[c(1,1000,4300,5736)]
# This crane was more on the "active" end for an adult, and didn't seem to be super settled to a
# territory, but it didn't have big long-term roaming movmements, so settled all season.
# settled
set<-deg2.d2
s1<-max(set$dist2centroid) #16,763
s2<-sd(set$dist2centroid) #3004
s3<-mean(set$dist2centroid) #6322
s4<-median(set$dist2centroid) # 5297
s5<-IQR(set$dist2centroid) #3414
dat[10,]<-c("8K (Deglman #2)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
d.df<-deg2.d2
d.df$mode<-"settled"
d.df$id<-"8K (Deglman #2)"
res<-rbind(res, d.df)
###################################################################################
# 8M (Stockyard adult)
stock<-lxy.subset(df.lxy, id="8M (Stockyard adult)")
stock.d2<-d2ctr(stock)
stock.d2$day<-yday(stock.d2$date)
plot(stock)
lxy.plot.freq(stock, samp.by.date = T)
stock.lav<-lavielle(stock.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(stock.lav)
findpath(stock.lav, 3)
stock.d2$date[c(1,80,3102)]
# This crane was still migrating and didn't get to its territory until
# April 6th. It is also missing data for after July 23. I'm going to call
# it all settled and cut off the first week.
# settled
set<-stock.d2[c(80:3102),]
s1<-max(set$dist2centroid) #9,193
s2<-sd(set$dist2centroid) #835
s3<-mean(set$dist2centroid) #7668
s4<-median(set$dist2centroid) # 7893
s5<-IQR(set$dist2centroid) #1053
dat[11,]<-c("8M (Stockyard adult)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
s.df<-stock.d2[c(80:3102),]
s.df$mode<-"settled"
s.df$id<-"8M (Stockyard adult)"
res<-rbind(res, s.df)
#########################################################################
# 9K (Santiago #2)
san2<-lxy.subset(df.lxy, id="9K (Santiago #2)")
san2.d2<-d2ctr(san2)
san2.d2$day<-yday(san2.d2$date)
plot(san2)
lxy.plot.freq(san2, samp.by.date = T)
san2.lav<-lavielle(san2.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(san2.lav)
findpath(san2.lav, 3)
san2.d2$date[c(1,80,8161)]
# So the crane was still migrating the first three days. I'll cut those out
# as I did with the others. Settled the whole time.
# settled
set<-san2.d2[c(80:8106),]
# NOTE(review): s1-s3 pass na.rm=T but s4/s5 don't — if dist2centroid has NAs
# in this range, median/IQR will come back NA; confirm which was intended.
s1<-max(set$dist2centroid, na.rm=T) #2,921
s2<-sd(set$dist2centroid, na.rm=T) #268
s3<-mean(set$dist2centroid, na.rm=T) #337
s4<-median(set$dist2centroid) # 252
s5<-IQR(set$dist2centroid) #249
dat[12,]<-c("9K (Santiago #2)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
s.df<-san2.d2[c(80:8106),]
s.df$mode<-"settled"
s.df$id<-"9K (Santiago #2)"
res<-rbind(res, s.df)
################################################################################
# 9M (Inman adult)
inman<-lxy.subset(df.lxy, id="9M (Inman adult)")
inman.d2<-d2ctr(inman)
inman.d2$day<-yday(inman.d2$date)
plot(inman)
lxy.plot.freq(inman, samp.by.date = T)
inman.lav<-lavielle(inman.d2$dist2centroid, Lmin=5, Kmax=10)
chooseseg(inman.lav)
findpath(inman.lav, 6)
# Things were very stable except one day with a huge movement. After
# looking at it more closely, that is just one location that is an outlier mistake.
# I'll remove it.
inman.d2<-inman.d2[-7656,]
# Now the rest are obviously all settled.
# settled
set<-inman.d2
s1<-max(set$dist2centroid) #2,898
s2<-sd(set$dist2centroid) #228
s3<-mean(set$dist2centroid) #317
s4<-median(set$dist2centroid) # 261
s5<-IQR(set$dist2centroid) #167
dat[13,]<-c("9M (Inman adult)", "settled", length(unique(set$day)), nrow(set), s1, s2, s3,s4,s5)
i.df<-inman.d2
i.df$mode<-"settled"
i.df$id<-"9M (Inman adult)"
res<-rbind(res, i.df)
##############################################################################################
# Write per-fix results and per-bird summaries (relative to the setwd above)
write.csv(res, "output/comp.lavielle.adult.2017.df.csv")
write.csv(dat, "output/comp.adult.centroid.summaries.2017.df.csv")
|
971bdf69e7b23ffaf10254dea9f9ea9bafdb00e8
|
d4ded4d8fd0d430bf09e98b6f41c53ddff85d6d8
|
/StandardAnalysis/summaryfunctions.R
|
3ee8bea32906c3a669debe2636429ee1772464ee
|
[] |
no_license
|
nklange/PrimingSourcePaper
|
287a198d266236ad880324a606c1a859f017cced
|
528ec2d956b30212124aaed1e1c1953159d07a48
|
refs/heads/main
| 2023-01-06T20:14:34.908035
| 2020-11-03T16:04:08
| 2020-11-03T16:04:08
| 307,328,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,111
|
r
|
summaryfunctions.R
|
# Ensure the packages in `need` are installed and attached.
#
# need: character vector of package names.
# Installs any package not already available, then attaches each one.
# Returns (invisibly) a logical vector indicating whether each package
# attached successfully, in the same order as `need`.
init <- function(need) {
  ip <- .packages(all.available = TRUE)
  missing <- need[!(need %in% ip)]
  if (length(missing) > 0) {
    install.packages(missing)
  }
  # vapply over indices (not sapply/1:length) for type safety and so an
  # empty `need` yields an empty result instead of iterating c(1, 0)
  ok <- vapply(seq_along(need), function(p) require(need[[p]],
    character.only = TRUE), logical(1))
  invisible(ok)
}
# Standard error of the mean, ignoring NAs: sd / sqrt(# non-missing values).
serror <- function(x) {
  n_obs <- sum(!is.na(x))
  sd(x, na.rm = TRUE) / sqrt(n_obs)
}
# Standard error of the difference between two independent samples.
serror_ind <- function(x, y) {
  pooled_var <- sd(x)^2 / length(x) + sd(y)^2 / length(y)
  sqrt(pooled_var)
}
# Cohen's dz for a one-sample/paired t-test: t statistic divided by sqrt(n).
# ttest: an htest-like object with a $statistic component; n: sample size.
cohensdz <- function(ttest, n) {
  ttest$statistic / sqrt(n)
}
# Cohen's d for an independent-samples t-test, computed from the t statistic,
# its degrees of freedom ($parameter) and the two group sizes.
cohensd <- function(ttest, n1, n2) {
  numer <- ttest$statistic * (n1 + n2)
  denom <- sqrt(ttest$parameter) * sqrt(n1 * n2)
  numer / denom
}
# One-sample t-test of `data` against zero, plus a Bayes factor (ttestBF
# from the BayesFactor package). Returns a one-row data.frame with the
# estimate, SE, df, t, p, Cohen's dz, BF and its order of magnitude.
ttestAgainstZero <- function(data){
  tt <- t.test(data, mu = 0)
  bf <- extractBF(ttestBF(data, mu = 0))
  n <- tt$parameter[[1]] + 1  # df + 1 recovers the sample size
  data.frame(Mean = tt$estimate,
             SE = serror(data),
             df = tt$parameter,
             tval = tt$statistic,
             pval = tt$p.value,
             cohensdz = cohensdz(tt, n)[[1]],
             BF = bf$bf,
             order = (nchar(trunc(bf$bf)) - 1))
}
# Two-condition t-test (paired or independent) plus a Bayes factor (ttestBF
# from the BayesFactor package).
#
# data1, data2: numeric vectors of observations for the two conditions.
# paired: TRUE (default) for a within-subjects comparison.
# Returns a one-row data.frame: estimate(s), SE, df, t, p, effect size
# (Cohen's dz when paired, Cohen's d otherwise), BF and its order of
# magnitude.
ttestTwoCond <- function(data1, data2, paired = TRUE){
  result <- t.test(data1, data2, paired = paired)
  BFt <- ttestBF(data1, data2, paired = paired)
  BFt <- extractBF(BFt)
  if (isTRUE(paired)) {
    SEs <- serror(data1 - data2)
    CD <- cohensdz(result, result$parameter[[1]] + 1)[[1]]
  } else {
    SEs <- serror_ind(data1, data2)
    # BUG FIX: the original passed nrow(data1)/nrow(data2); nrow() is NULL
    # for plain vectors, which broke the unpaired branch. length() gives
    # the sample sizes (consistent with serror_ind above).
    CD <- cohensd(result, length(data1), length(data2))
  }
  data.frame(Mean = result$estimate,
             SE = SEs,
             df = result$parameter,
             tval = result$statistic,
             pval = result$p.value,
             cohensd = CD,
             BF = BFt$bf,
             order = (nchar(trunc(BFt$bf)) - 1))
}
# Summary Functions ------------------------------------------------------------------
# Summary for graphs with within-subjects confidence intervals from R cookbook
## Summarizes data.
## Gives count, SUM, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
# Per-group SUM summary (modified from the R-cookbook summarySE, which
# reports means): for each combination of groupvars, gives N, the group SUM
# of measurevar (stored under the measurevar name), sd, se, and ci.
# Requires plyr (ddply, rename).
# NOTE(review): se/ci are computed from the per-group sd exactly as for a
# mean, even though the point estimate here is a sum — confirm intended.
summarySE2 <- function(data=NULL, measurevar, groupvars=NULL, na.rm=TRUE,
conf.interval=.95, .drop=TRUE) {
require(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary; measurevar and na.rm are forwarded by ddply to
# .fun as its col / na.rm arguments.
datac <- ddply(data, groupvars, .drop=.drop,
.fun= function(xx, col, na.rm) {
c( N = length2(xx[,col], na.rm=na.rm),
# mean = mean (xx[,col], na.rm=na.rm),
sum = sum (xx[,col], na.rm=na.rm), #change mean to sum in this case
sd = sd (xx[,col], na.rm=na.rm)
)
},
measurevar,
na.rm
)
# Rename the "sum" column to the measure variable's name
datac <- rename(datac, c("sum"=measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
## Summarizes data.
## Gives count, MEAN, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
# Per-group MEAN summary (the standard R-cookbook summarySE): for each
# combination of groupvars, gives N, the group mean of measurevar (stored
# under the measurevar name), sd, se, and ci. Requires plyr (ddply, rename).
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=TRUE,
conf.interval=.95, .drop=TRUE) {
require(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary; measurevar and na.rm are forwarded by ddply to
# .fun as its col / na.rm arguments.
datac <- ddply(data, groupvars, .drop=.drop,
.fun= function(xx, col, na.rm) {
c( N = length2(xx[,col], na.rm=na.rm),
mean = mean (xx[,col], na.rm=na.rm),
sd = sd (xx[,col], na.rm=na.rm)
)
},
measurevar,
na.rm
)
# Rename the "mean" column
datac <- rename(datac, c("mean"=measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
# Norm each subject's data for within-subject CI computation (R cookbook /
# Morey 2008): subtract the per-subject mean from measurevar and add back
# the grand mean, storing the result in a new "<measurevar>_norm" column.
# Requires plyr (ddply).
normDataWithin <- function(data=NULL, idvar, measurevar,
betweenvars=NULL, na.rm=FALSE, .drop=TRUE) {
library(plyr)
# Measure var on left, idvar + between vars on right of formula.
data.subjMean <- ddply(data, c(idvar, betweenvars), .drop=.drop,
.fun = function(xx, col, na.rm) {
c(subjMean = mean(xx[,col],
na.rm=na.rm))
},
measurevar,
na.rm
)
# Put the subject means with original data
# NOTE(review): merge() may reorder rows relative to the input data frame.
data <- merge(data, data.subjMean)
# Get the normalized data in a new column
measureNormedVar <- paste(measurevar, "_norm", sep="")
data[,measureNormedVar] <- data[,measurevar] - data[,"subjMean"] +
mean(data[,measurevar], na.rm=na.rm)
# Remove this subject mean column
data$subjMean <- NULL
return(data)
}
## Summarizes data, handling within-subjects variables by removing
## inter-subject variability.
## It will still work if there are no within-S variables.
## Gives count, un-normed mean, normed mean (with same
## between-group mean),
## standard deviation, standard error of the mean, and confidence
## interval.
## If there are within-subject variables, calculate adjusted values
## using method from Morey (2008).
## data: a data frame.
## measurevar: the name of a column that contains the variable to
## be summariezed
## betweenvars: a vector containing names of columns that are
## between-subjects variables
## withinvars: a vector containing names of columns that are
## within-subjects variables
## idvar: the name of a column that identifies each subject
## (or matched subjects)
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval
## (default is 95%)
# Within-subjects summary (R cookbook): summarizes measurevar by the between
# and within variables, with sd/se/ci computed on subject-normed data and
# corrected per Morey (2008).
# NOTE(review): this calls summarySE2 (the SUM variant defined above), not
# summarySE — presumably intentional for this paper's count data; confirm.
summarySEwithin <- function(data=NULL, measurevar, betweenvars=NULL,
withinvars=NULL,idvar=NULL, na.rm=FALSE, conf.interval=.95,
.drop=TRUE) {
# Ensure that the betweenvars and withinvars are factors
factorvars <- vapply(data[, c(betweenvars, withinvars),
drop=FALSE], FUN=is.factor, FUN.VALUE=logical(1))
if (!all(factorvars)) {
nonfactorvars <- names(factorvars)[!factorvars]
message("Automatically converting the following
non-factors to factors: ",
paste(nonfactorvars, collapse = ", "))
data[nonfactorvars] <- lapply(data[nonfactorvars], factor)
}
# Get the means from the un-normed data
datac <- summarySE2(data, measurevar, groupvars=c(betweenvars,
withinvars),na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
# Drop all the unused columns (these will be calculated
# with normed data)
datac$sd <- NULL
datac$se <- NULL
datac$ci <- NULL
# Norm each subject's data
ndata <- normDataWithin(data, idvar, measurevar,
betweenvars, na.rm, .drop=.drop)
# This is the name of the new column
measurevar_n <- paste(measurevar, "_norm", sep="")
# Collapse the normed data - now we can treat between and
# within vars the same
ndatac <- summarySE2(ndata, measurevar_n,
groupvars=c(betweenvars, withinvars),
na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
# Apply correction from Morey (2008) to the standard error
# and confidence interval
# Get the product of the number of conditions of
# within-S variables
nWithinGroups <- prod(vapply(ndatac[,withinvars, drop=FALSE],
FUN=nlevels, FUN.VALUE=numeric(1)))
correctionFactor <- sqrt( nWithinGroups / (nWithinGroups-1) )
# Apply the correction factor
ndatac$sd <- ndatac$sd * correctionFactor
ndatac$se <- ndatac$se * correctionFactor
ndatac$ci <- ndatac$ci * correctionFactor
# Combine the un-normed means with the normed results
merge(datac, ndatac)
}
# ROC curves ---------------------------------------------------------------------------
# source ROC
# Build source-memory ROC points, split by recognition-confidence level and
# also collapsed "across" all recognition ratings.
# For each source-confidence criterion (highest to lowest), computes
# cumulative hit/FA rates for "Top" vs "Bottom" source items from the
# `items` counts, plus adjusted rates from `adjnumber` (a +0.5-corrected
# count, as constructed below for the across case) and their z-transforms.
# Expects columns: RecConf, SourceConf, SourceInput, items, adjnumber.
# Requires plyr (ddply and the .() quoting helper).
makesourceROC<-function(alldata){
fullrocrating<-NULL
for (recrating in sort(unique(alldata$RecConf),decreasing=T)){
forroc<-alldata[alldata$RecConf==recrating,]
rocrating<-NULL
for (rating in sort(unique(alldata$SourceConf),decreasing=T)){
# Cumulative proportions at this criterion: responses >= rating
hit<-sum(forroc[forroc$SourceInput=="Top" &
forroc$SourceConf>=rating,]$items)/
sum(forroc[forroc$SourceInput=="Top",]$items)
fa<-sum(forroc[forroc$SourceInput=="Bottom" &
forroc$SourceConf>=rating,]$items)/
sum(forroc[forroc$SourceInput=="Bottom",]$items)
adjhit<-sum(forroc[forroc$SourceInput=="Top" &
forroc$SourceConf>=rating,]$adjnumber)/
sum(forroc[forroc$SourceInput=="Top",]$adjnumber)
adjfa<-sum(forroc[forroc$SourceInput=="Bottom" &
forroc$SourceConf>=rating,]$adjnumber)/
sum(forroc[forroc$SourceInput=="Bottom",]$adjnumber)
pair<-cbind(hit,fa,adjhit,adjfa)
rocrating<-rbind(rocrating,pair)
}
pretty<-cbind(recrating,rocrating)
fullrocrating<-rbind(fullrocrating,pretty)
fullrocrating<-as.data.frame(fullrocrating)
}
# NOTE(review): acrossrocrating is assigned but never used below.
acrossrocrating<-NULL
# Collapse over recognition ratings, then +0.5-correct the counts
acrosssource<-ddply(alldata,.(SourceInput,SourceConf),summarise,
sumitems=sum(items))
acrosssource$adjnumber<-acrosssource$sumitems+0.5
rocrating<-NULL
for (rating in sort(unique(acrosssource$SourceConf),decreasing=T)){
hit<-sum(acrosssource[acrosssource$SourceInput=="Top" &
acrosssource$SourceConf>=rating,]$sumitems)/
sum(acrosssource[acrosssource$SourceInput=="Top",]$sumitems)
fa<-sum(acrosssource[acrosssource$SourceInput=="Bottom" &
acrosssource$SourceConf>=rating,]$sumitems)/
sum(acrosssource[acrosssource$SourceInput=="Bottom",]$sumitems)
adjhit<-sum(acrosssource[acrosssource$SourceInput=="Top" &
acrosssource$SourceConf>=rating,]$adjnumber)/
sum(acrosssource[acrosssource$SourceInput=="Top",]$adjnumber)
adjfa<-sum(acrosssource[acrosssource$SourceInput=="Bottom" &
acrosssource$SourceConf>=rating,]$adjnumber)/
sum(acrosssource[acrosssource$SourceInput=="Bottom",]$adjnumber)
pair<-cbind(hit,fa,adjhit,adjfa)
rocrating<-rbind(rocrating,pair)
}
rocrating<-as.data.frame(rocrating)
# z-transform the adjusted rates for zROC plots
rocrating$zhit<-qnorm(rocrating$adjhit)
rocrating$zfa<-qnorm(rocrating$adjfa)
rocrating$recrating<-"across"
fullrocrating$zhit<-qnorm(fullrocrating$adjhit)
fullrocrating$zfa<-qnorm(fullrocrating$adjfa)
fullrocrating<-rbind(fullrocrating,rocrating)
return(fullrocrating)
}
# Plot the source-memory ROC (panel A) and zROC (panel B), excluding the
# collapsed "across" rows, side by side via cowplot::plot_grid.
# Expects the data frame produced by makesourceROC (columns: recrating,
# hit, fa, zhit, zfa). Theme layers are order-sensitive; left as-is.
ggplot_source_split<-function(alldata){
source_roc<-ggplot(alldata[alldata$recrating!="across",],aes(x=fa,y=hit)) +
geom_point(size=5,aes(shape=recrating),fill="black") +
theme_bw(base_size = 18)+
theme(legend.position=c(0.75,0.26),
legend.direction="vertical") +
scale_shape_manual(name="Recognition",labels=c("Certain New","Probably New","Guess New","Guess Old",
"Probably Old","Certain Old"),values=c(0,1,2,5,6,22)) +
scale_x_continuous(name="FA",limits=c(0,1))+
scale_y_continuous(name="Hit",limits=c(0,1))+
geom_abline(intercept=0,slope=1)+
theme(plot.title=element_text(size=18, face="bold"),
legend.key = element_blank(),
axis.text.x = element_text(size=12,colour="black"),
axis.title.x = element_text(size=12),
axis.text.y = element_text(size=12,colour="black"),
axis.title.y = element_text(size=12,vjust=1.2),
legend.text = element_text(size = 12),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"),
legend.title=element_text(size=14,colour="black"),
panel.border = element_rect(colour="black"),
strip.text.x = element_text(colour = "white", size = 14, face="bold"),
strip.background = element_rect(fill="black",colour="black"),
plot.background = element_rect(fill = "transparent",colour = NA),
panel.background = element_rect(fill = "transparent",colour = NA))
# zROC panel: same data on probit-transformed axes
source_zroc<-ggplot(alldata[alldata$recrating!="across",],aes(x=zfa,y=zhit)) +
geom_point(size=5,aes(shape=recrating),fill="black") +
theme_bw(base_size = 18)+
theme(legend.position=c(0.8,0.26),
legend.direction="vertical") +
scale_x_continuous(name="zFA",limits=c(-3,3))+
scale_y_continuous(name="zHit",limits=c(-3,3))+
scale_shape_manual(name="Recognition",labels=c("Certain New","Probably New","Guess New","Guess Old",
"Probably Old","Certain Old"),values=c(0,1,2,5,6,22)) +
theme_bw(base_size = 18)+
theme(plot.title=element_text(size=18, face="bold"),
axis.text.x = element_text(size=12,colour="black"),
axis.title.x = element_text(size=12),
axis.text.y = element_text(size=12,colour="black"),
axis.title.y = element_text(size=12,vjust=1.2),
legend.text = element_text(size = 12),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"),
legend.title=element_text(size=14,colour="black"),
legend.key=element_blank(),
legend.background = element_rect(fill = "transparent"),
panel.border = element_rect(colour="black"),
strip.text.x = element_text(colour = "white", size = 14, face="bold"),
strip.background = element_rect(fill="black",colour="black"),
plot.background = element_rect(fill = "transparent",colour = NA),
panel.background = element_rect(fill = "transparent",colour = NA))
plot_grid(source_roc, source_zroc, labels=c("A", "B"), ncol = 2, nrow = 1)
}
# Recog ROC
# Build recognition-memory ROC points, split by source-confidence rating and
# also collapsed "across" all source ratings.
# For each recognition-confidence criterion (highest to lowest), computes
# cumulative hit rates separately for "Top" and "Bottom" source items, each
# against the common "New" false-alarm rate, from the `items` counts.
# Expects columns: RecConf, SourceConf, SourceInput, items.
# Requires plyr (ddply and the .() quoting helper).
# Returns a data frame with columns sourcerating, V2 ("Top"/"Bottom"),
# Thit, Tfa and their probit transforms Tzhit/Tzfa.
makerecogROC<-function(alldata){
fullrocrating_rec<-NULL
for (sourcerating in sort(unique(alldata$SourceConf),decreasing=TRUE)){
forroc<-alldata[alldata$SourceConf==sourcerating,]
Trocrating<-NULL
Brocrating<-NULL
for (rating in sort(unique(alldata$RecConf),decreasing=TRUE)){
# Cumulative proportions at this criterion: responses >= rating
Thit<-sum(forroc[forroc$SourceInput=="Top" &
forroc$RecConf>=rating,]$items)/
sum(forroc[forroc$SourceInput=="Top",]$items)
Tfa<-sum(forroc[forroc$SourceInput=="New" &
forroc$RecConf>=rating,]$items)/
sum(forroc[forroc$SourceInput=="New",]$items)
Bhit<-sum(forroc[forroc$SourceInput=="Bottom" &
forroc$RecConf>=rating,]$items)/
sum(forroc[forroc$SourceInput=="Bottom",]$items)
Bfa<-sum(forroc[forroc$SourceInput=="New" &
forroc$RecConf>=rating,]$items)/
sum(forroc[forroc$SourceInput=="New",]$items)
Tpair<-cbind("Top",Thit,Tfa)
Bpair<-cbind("Bottom",Bhit,Bfa)
Trocrating<-rbind(Trocrating,Tpair)
Brocrating<-rbind(Brocrating,Bpair)
}
rocrating<-rbind(Trocrating,Brocrating)
pretty<-cbind(sourcerating,rocrating)
fullrocrating_rec<-rbind(fullrocrating_rec,pretty)
fullrocrating_rec<-as.data.frame(fullrocrating_rec)
}
# Collapse over source-confidence ratings for the "across" rows
acrosssource<-ddply(alldata,.(SourceInput,RecConf),summarise,
sumitems=sum(items))
# (removed an unused `emptydf <- with(..., expand.grid(...))` local here)
Trocrating<-NULL
Brocrating<-NULL
for (rating in sort(unique(acrosssource$RecConf),decreasing=TRUE)){
Thit<-sum(acrosssource[acrosssource$SourceInput=="Top" &
acrosssource$RecConf>=rating,]$sumitems)/
sum(acrosssource[acrosssource$SourceInput=="Top",]$sumitems)
Bhit<-sum(acrosssource[acrosssource$SourceInput=="Bottom" &
acrosssource$RecConf>=rating,]$sumitems)/
sum(acrosssource[acrosssource$SourceInput=="Bottom",]$sumitems)
Tfa<-sum(acrosssource[acrosssource$SourceInput=="New" &
acrosssource$RecConf>=rating,]$sumitems)/
sum(acrosssource[acrosssource$SourceInput=="New",]$sumitems)
Bfa<-sum(acrosssource[acrosssource$SourceInput=="New" &
acrosssource$RecConf>=rating,]$sumitems)/
sum(acrosssource[acrosssource$SourceInput=="New",]$sumitems)
Tpair<-cbind("Top",Thit,Tfa)
Bpair<-cbind("Bottom",Bhit,Bfa)
Trocrating<-rbind(Trocrating,Tpair)
Brocrating<-rbind(Brocrating,Bpair)
}
rocrating<-rbind(Trocrating,Brocrating)
rocrating<-as.data.frame(rocrating)
rocrating$sourcerating<-"across"
# Align the auto-generated label column name with the per-rating frames
names(rocrating)[1] <- "V2"
fullrocrating_rec<-rbind(fullrocrating_rec,rocrating)
# cbind() of strings made everything character; convert back and probit
fullrocrating_rec$Thit<-as.numeric(as.character(fullrocrating_rec$Thit))
fullrocrating_rec$Tzhit<-qnorm(fullrocrating_rec$Thit)
fullrocrating_rec$Tfa<-as.numeric(as.character(fullrocrating_rec$Tfa))
fullrocrating_rec$Tzfa<-qnorm(fullrocrating_rec$Tfa)
return(fullrocrating_rec)
}
# Plot recognition ROC and zROC curves split by source-confidence rating.
#
# Expects a data frame with cumulative hit/false-alarm rates per criterion
# (columns Thit/Tfa and their z-transforms Tzhit/Tzfa), a sourcerating
# column, and a condition column V2 -- e.g. the output of the ROC-building
# helpers in this file (presumably makerecogcollapseROC; confirm upstream).
# Rows with sourcerating == "across" (the collapsed curve) are excluded so
# only the per-source-rating points are drawn.  Returns panel A
# (probability-space ROC) stacked above panel B (z-space ROC) via
# plot_grid(), both faceted by condition V2.
ggplot_recog_split<-function(alldata){
  # Panel A: ROC in probability space, one point shape per source rating
  recogbysource.roc<-ggplot(alldata[alldata$sourcerating!="across",],
                            aes(x=Tfa,y=Thit,shape=sourcerating)) +
    geom_point(size=5)+
    # shape codes are fixed so the six source-confidence levels are
    # visually distinct; labels run from certain-Bottom to certain-Top
    scale_shape_manual(labels=c("Certain Bottom","Probably Bottom","Guess Bottom","Guess Top",
                                "Probably Top","Certain Top"),name="Source",
                       values=c(16,17,15,3,7,8)) +
    scale_x_continuous(name="FA",limits=c(0,1))+
    scale_y_continuous(name="Hit",limits=c(0,1))+
    # chance-performance diagonal
    geom_abline(intercept = 0,slope=1)+
    #geom_line()+
    facet_grid(.~V2)+
    theme_bw(base_size = 18)+
    theme(legend.position=c(0.85,0.26),
          legend.direction="vertical") +
    # publication styling: black text, no grid, transparent background
    theme(plot.title=element_text(size=18, face="bold"),
          axis.text.x = element_text(size=12, colour="black"),
          axis.title.x = element_text(size=12, colour="black"),
          axis.text.y = element_text(size=12, colour="black"),
          axis.title.y = element_text(size=12,vjust=1.2, colour="black"),
          legend.text = element_text(size=12, colour="black"),
          legend.key = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.line = element_line(colour = "black"),
          legend.title=element_text(colour="black",size=14),
          legend.background=element_rect(fill="transparent"),
          panel.border = element_rect(colour="black"),
          strip.text.x = element_text(colour = "white", size = 14, face="bold"),
          strip.background = element_rect(fill="black",colour="black"),
          plot.background = element_rect(fill = "transparent",colour = NA),
          panel.background = element_rect(fill = "transparent",colour = NA))
  # Panel B: same points in z-space (note: legend labels here say "Maybe"
  # where panel A says "Probably" -- intentional or not, kept as-is)
  recogbysource.zroc<-ggplot(alldata[alldata$sourcerating!="across",],
                             aes(x=Tzfa,y=Tzhit,shape=sourcerating)) +
    geom_point(size=5) +
    #geom_line()+
    scale_shape_discrete(name="Source",labels=c("Certain Bottom","Maybe Bottom","Guess Bottom","Guess Top",
                                                "Maybe Top","Certain Top")) +
    scale_x_continuous(name="zFA")+
    scale_y_continuous(name="zHit")+
    facet_grid(.~V2)+
    theme_bw(base_size = 18)+
    theme(legend.position=c(0.9,0.3),
          legend.direction="vertical") +
    theme(plot.title=element_text(size=18, face="bold"),
          axis.text.x = element_text(size=12),
          axis.title.x = element_text(size=12),
          axis.text.y = element_text(size=12),
          axis.title.y = element_text(size=12,vjust=1.2),
          legend.text = element_text(size = 12),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.line = element_line(colour = "black"),
          legend.title=element_text(colour="black"),
          panel.border = element_rect(colour="black"),
          strip.text.x = element_text(colour = "white", size = 14, face="bold"),
          strip.background = element_rect(fill="black",colour="black"),
          plot.background = element_rect(fill = "transparent",colour = NA),
          panel.background = element_rect(fill = "transparent",colour = NA))
  # stack the two panels vertically, labelled A and B
  plot_grid(recogbysource.roc, recogbysource.zroc, labels=c("A", "B"), ncol = 1, nrow = 2)
}
# Build recognition (old/new) ROC points: one curve per source-confidence
# level plus one curve collapsed across source confidence ("across").
#
# alldata must have columns StimType ("1" = old item, "0" = new item),
# RecConf (recognition confidence), SourceConfrecoded (source confidence)
# and items (counts).  For each source rating, and for each recognition
# criterion (highest confidence first), the cumulative hit rate Thit
# (old items at or above the criterion) and false-alarm rate Tfa (new
# items at or above the criterion) are computed.
#
# Returns a data frame with columns sourcerating, V2 (curve label
# "OldNew"), Thit, Tfa and z-transformed Tzhit/Tzfa.  NOTE: the hit/FA
# values travel through character matrices (cbind with a string) and are
# converted back to numeric at the end -- the as.character/as.numeric
# round-trip at the bottom is load-bearing.
makerecogcollapseROC<-function(alldata){
  # collapse to item counts per StimType x RecConf x SourceConfrecoded cell
  alldata<-ddply(alldata,.(StimType,RecConf,SourceConfrecoded),summarise,
                 item=sum(items))
  fullrocrating_rec<-NULL
  # one ROC per source-confidence rating, sweeping the recognition
  # criterion from most to least confident
  for (sourcerating in sort(unique(alldata$SourceConfrecoded),decreasing=T)){
    forroc<-alldata[alldata$SourceConfrecoded==sourcerating,]
    Trocrating<-NULL
    for (rating in sort(unique(alldata$RecConf),decreasing=T)){
      # cumulative hit rate: old items ("1") at or above this criterion
      Thit<-sum(forroc[forroc$StimType=="1" &
                         forroc$RecConf>=rating,]$item)/
        sum(forroc[forroc$StimType=="1",]$item)
      # cumulative false-alarm rate: new items ("0") at or above it
      Tfa<-sum(forroc[forroc$StimType=="0" &
                        forroc$RecConf>=rating,]$item)/
        sum(forroc[forroc$StimType=="0",]$item)
      # cbind with the "OldNew" label coerces the row to character;
      # the unnamed first column later becomes V2 via as.data.frame
      Tpair<-cbind("OldNew",Thit,Tfa)
      Trocrating<-rbind(Trocrating,Tpair)
    }
    pretty<-cbind(sourcerating,Trocrating)
    fullrocrating_rec<-rbind(fullrocrating_rec,pretty)
    fullrocrating_rec<-as.data.frame(fullrocrating_rec)
  }
  # same criterion sweep, but collapsed across all source ratings
  acrosssource<-ddply(alldata,.(StimType,RecConf),summarise,
                      sumitems=sum(item))
  Trocrating<-NULL
  for (rating in sort(unique(acrosssource$RecConf),decreasing=T)){
    Thit<-sum(acrosssource[acrosssource$StimType=="1" &
                             acrosssource$RecConf>=rating,]$sumitems)/
      sum(acrosssource[acrosssource$StimType=="1",]$sumitems)
    Tfa<-sum(acrosssource[acrosssource$StimType=="0" &
                            acrosssource$RecConf>=rating,]$sumitems)/
      sum(acrosssource[acrosssource$StimType=="0",]$sumitems)
    Tpair<-cbind("OldNew",Thit,Tfa)
    Trocrating<-rbind(Trocrating,Tpair)
  }
  rocrating<-as.data.frame(Trocrating)
  rocrating$sourcerating<-"across"
  # rename the unnamed label column so rbind can match it against the
  # per-source rows' V2 column (rbind.data.frame matches by name)
  names(rocrating)[1] <- "V2"
  fullrocrating_rec<-rbind(fullrocrating_rec,rocrating)
  # restore numeric types lost to the character matrices, then add the
  # z-transformed coordinates used for zROC plots
  fullrocrating_rec$Thit<-as.numeric(as.character(fullrocrating_rec$Thit))
  fullrocrating_rec$Tzhit<-qnorm(fullrocrating_rec$Thit)
  fullrocrating_rec$Tfa<-as.numeric(as.character(fullrocrating_rec$Tfa))
  fullrocrating_rec$Tzfa<-qnorm(fullrocrating_rec$Tfa)
  return(fullrocrating_rec)
}
# Overlay two ggplots in one panel with a secondary y-axis on the right:
# Numberplot's panel is drawn on top of RTplot's panel, and Numberplot's
# left axis is mirrored to the right side.  Draws the result on a new
# grid page and returns invisibly (grid.draw's value).
#
# NOTE(review): the hard-coded gtable column indices (4 and 9) assume the
# default ggplot2 gtable layout of the version this was written against --
# verify if ggplot2 is upgraded or plot themes change the layout.
gridplotidnumber<-function(RTplot,Numberplot){
  grid.newpage()
  # two plots
  # extract gtable
  g1 <- ggplot_gtable(ggplot_build(RTplot))
  g2 <- ggplot_gtable(ggplot_build(Numberplot))
  # overlap the panel of 2nd plot on that of 1st plot
  # ("se" is presumably an abbreviation of subset()'s `select` argument,
  # resolved by partial matching -- pp holds the panel's t/l/b/r cells)
  pp <- c(subset(g1$layout, name == "panel", se = t:r))
  g <- gtable_add_grob(g1, g2$grobs[[which(g2$layout$name == "panel")]], pp$t,
                       4, pp$b, 4)
  # axis tweaks: take plot 2's left axis, flip tick/label order so it
  # reads correctly on the right-hand side, and nudge the labels inward
  ia <- which(g2$layout$name == "axis-l")
  ga <- g2$grobs[[ia]]
  ax <- ga$children[[2]]
  ax$widths <- rev(ax$widths)
  ax$grobs <- rev(ax$grobs)
  ax$grobs[[1]]$x <- ax$grobs[[1]]$x - unit(1, "npc") + unit(0.15, "cm")
  # append a column on the far right and place the mirrored axis there
  g <- gtable_add_cols(g, g2$widths[g2$layout[ia, ]$l], length(g$widths)-1)
  g <- gtable_add_grob(g, ax, pp$t, length(g$widths)-1, pp$b)
  # add another column (index 9) carrying plot 2's y-axis title
  g <- gtable_add_cols(g, g2$widths[g2$layout[ia, ]$l], 9)
  g <- gtable_add_grob(g, g2$grobs[[which(g2$layout$name == "ylab-l")]], pp$t, 9, pp$b)
  # allow grobs to draw outside their cells
  g$layout$clip[grep("layout",g$layout$name)] <- "off"
  return(grid.draw(g))}
# Force every panel of a ggplot to a fixed physical size.
#
# p      - a ggplot object (only used to build `g` when `g` is not given)
# g      - a gtable; defaults to the grob built from `p`
# file   - optional path; when supplied the resized gtable is written
#          with ggsave(), sized to the sum of its rows/columns plus margin
# margin - extra margin added when computing the saved figure size
# width  - physical width assigned to every panel column
# height - physical height assigned to every panel row
#
# Returns the modified gtable (draw it with grid.draw()).
set_panel_size <- function(p=NULL, g=ggplotGrob(p), file=NULL,
                           margin = unit(1,"mm"),
                           width=unit(7.2, "cm"),
                           height=unit(7, "cm")){
  panel_cells <- grep("panel", g$layout$name)
  panel_cols <- unique(g$layout$l[panel_cells])
  panel_rows <- unique(g$layout$t[panel_cells])
  n_cols <- length(panel_cols)
  n_rows <- length(panel_rows)
  if (getRversion() < "3.3.0") {
    # Old grid lacks a `[<-`.unit method; promoting to unit.list allows
    # standard list indexing for the assignment below.
    g$widths <- grid:::unit.list(g$widths)
    g$heights <- grid:::unit.list(g$heights)
    g$widths[panel_cols] <- rep(list(width), n_cols)
    g$heights[panel_rows] <- rep(list(height), n_rows)
  } else {
    g$widths[panel_cols] <- rep(width, n_cols)
    g$heights[panel_rows] <- rep(height, n_rows)
  }
  if (!is.null(file)) {
    ggsave(file, g,
           width = convertWidth(sum(g$widths) + margin,
                                unitTo = "in", valueOnly = TRUE),
           height = convertHeight(sum(g$heights) + margin,
                                  unitTo = "in", valueOnly = TRUE))
  }
  g
}
# Identical to set_panel_size() except for the default panel width
# (2.2 cm instead of 7.2 cm).  Previously a full copy-paste of the
# resizing logic; now a thin wrapper so the logic lives in one place.
#
# Arguments and return value are exactly those of set_panel_size().
set_panel_size2 <- function(p=NULL, g=ggplotGrob(p), file=NULL,
                            margin = unit(1,"mm"),
                            width=unit(2.2, "cm"),
                            height=unit(7, "cm")){
  # `g`'s default is a promise over `p`, so it is only evaluated (by
  # set_panel_size) when the caller did not supply a gtable directly.
  set_panel_size(p = p, g = g, file = file,
                 margin = margin, width = width, height = height)
}
# Identical to set_panel_size() except for the default panel size
# (8 cm x 8 cm).  Previously a full copy-paste of the resizing logic;
# now a thin wrapper so the logic lives in one place.
#
# Arguments and return value are exactly those of set_panel_size().
set_panel_size3 <- function(p=NULL, g=ggplotGrob(p), file=NULL,
                            margin = unit(1,"mm"),
                            width=unit(8, "cm"),
                            height=unit(8, "cm")){
  # `g`'s default is a promise over `p`, so it is only evaluated (by
  # set_panel_size) when the caller did not supply a gtable directly.
  set_panel_size(p = p, g = g, file = file,
                 margin = margin, width = width, height = height)
}
|
0537f61771f16ffe1ad4930c25aef9ff67303e05
|
849a6932f5e298d6f6b68a27d74608c36b663c39
|
/scripts/writePsims.r
|
f05ad69fa62661c0e0c9ca057fcdac9dbadd3d3c
|
[] |
no_license
|
RDCEP/nldas
|
4e0aaedfbd01346a9a1421822d16900a7f24d6c9
|
44769f2057812670c1afa041d4834de00190f26b
|
refs/heads/master
| 2021-01-01T19:06:22.962860
| 2013-12-03T21:57:42
| 2013-12-03T21:57:42
| 12,471,543
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,684
|
r
|
writePsims.r
|
#!/home/nbest/local/bin/r
## --interactive
## Converts NLDAS daily forcing netCDF files into per-grid-point pSIMS
## netCDF files, one 0.5-degree longitude stripe per invocation.
## Which stripe to process comes from the command line (littler's argv).
stripe <- as.integer( argv[ 1])
## stripe <- 14
degreesPerStripe <- 0.5
##startYear <- as.Date( argv[ 1])
##endYear <- as.Date( argv[ 2])
startYear <- 1979
endYear <- 2013
years <- startYear:endYear
library( ncdf4)
library( raster)
library( abind)
## library( ascii)
## options( asciiType= "org")
library( stringr)
library( doMC)
## registerDoMC( multicore:::detectCores())
registerDoMC( 4)
## options( error= recover)
## annualPaths <- sprintf(
##   "/scratch/midway/nbest/data/annual/%d", years)
## NLDAS variables to convert; one input file per variable is expected.
nldasVars <- c(
  "tmin", "tmax", "precip", "solar",
  "pres", "spfh", "u", "v")
## Raster mask of the NLDAS study region (cells outside are NA); its
## resolution also defines the output grid resolution.
nldasMask <- setMinMax(
  raster( "data/output/nldasRegion.tif"))
nldasRes <- res( nldasMask)[ 1]
## One (lon, lat) anchor per stripe: cell-center longitudes stepping
## across the mask by degreesPerStripe, paired with the mask's top edge.
nldasAnchorPoints <-
  cbind(
    lon= seq(
      from= xmin( nldasMask) + nldasRes / 2,
      to= xmax( nldasMask) - nldasRes / 2,
      by= degreesPerStripe),
    lat= ymax( nldasMask))
## Read one longitude stripe of daily NLDAS values from a netCDF file.
##
## ncFn   - path to a netCDF file holding a single variable with
##          dimensions (lon, lat, time)
## lon    - western edge of the stripe; the first grid column strictly
##          east of `lon` starts the slab that is read
## n      - number of longitude columns to read (defaults to one stripe)
## origin - calendar date of the first time step; defaults to the
##          1979-01-01 start of the 1979-2013 input files (previously
##          hard-coded), so existing callers are unaffected
##
## Returns an n x nlat x ntime numeric array with dimnames
## longitude / latitude / time (time as "YYYY-MM-DD" strings).
readNldasValues <-
  function(
    ncFn, lon,
    n= as.integer( degreesPerStripe / nldasRes),
    origin= as.Date( "1979-01-01"))
{
  nc <- nc_open( ncFn)
  ## guarantee the handle is released even if a read below fails
  on.exit( nc_close( nc), add= TRUE)
  varid <- names( nc$var)[ 1]
  ## index of the last grid column at or west of `lon`
  column <-
    which( nc$dim$lon$vals > lon)[ 1] -1
  if( is.na( column) || column < 1)
    stop( "longitude ", lon, " is outside the grid of ", ncFn)
  m <-
    ncvar_get(
      nc,
      varid= varid,
      start= c( column, 1, 1),
      count= c( n, -1, -1),
      ## keep all three dimensions even when n == 1
      collapse_degen= FALSE)
  ## daily time axis starting at `origin`, matching the file's length
  nldasDays <- seq(
    from= origin,
    length.out= length( nc$dim$time$vals),
    by= "day")
  dimnames( m) <- list(
    longitude= nc$dim$lon$vals[ column:(column +n -1)],
    latitude= nc$dim$lat$vals,
    time= as.character( nldasDays))
  m
}
## cat( sprintf( "Time to load data for stripe %d:", stripe))
## system.time( {
## This does not work because the annual files are in the original
## 0.125\deg grid
##
## nldasValues <-
## foreach(
## var= nldasVars) %:%
## ## var= nldasVars[1:2]) %:%
## foreach(
## year= years,
## .combine= abind,
## .multicombine= TRUE ) %dopar% {
## readNldasValues(
## sprintf( "/scratch/midway/nbest/annual/%1$s_nldas_%2$d.nc4", var, year),
## nldasAnchorPoints[ stripe, 1])
## }
## nldasValues <- list(
## precip= readNldasValues(
## "/scratch/midway/nbest/full/precip_nldas_1979-2013.nc4",
## nldasAnchorPoints[ stripe, 1],
## n=6))
## Load this stripe's slab for every NLDAS variable in parallel;
## the result is a list of (lon x lat x time) arrays, one per variable.
nldasValues <-
  foreach(
    var= nldasVars) %dopar% {
      readNldasValues(
        sprintf( "/scratch/midway/nbest/full/%1$s_nldas_1979-2013.nc4", var),
        nldasAnchorPoints[ stripe, 1])
    }
names( nldasValues) <- nldasVars
## make sure every array carries named dimnames, as downstream code
## indexes dimnames(...)$longitude / $latitude / $time
for( var in nldasVars)
  names( dimnames( nldasValues[[ var]])) <-
  c( "longitude", "latitude", "time")
## Build the (longitude, latitude, time) netCDF dimension definitions
## shared by every pSIMS output variable.
##
## xy          - named vector/list with elements "lon" and "lat"
## ncDays      - integer day offsets used as the time coordinate values
## ncTimeName  - name given to the time dimension
## ncTimeUnits - udunits string describing the time origin
##
## Returns a list(lon, lat, time) of ncdim4 objects; time is unlimited.
ncDimsFunc <- function(
  xy, ncDays,
  ncTimeName= "time",
  ncTimeUnits= "days since 1978-12-31 00:00:00") {
  lonDim <- ncdim_def(
    name= "longitude",
    units= "degrees_east",
    vals= xy[[ "lon"]])
  latDim <- ncdim_def(
    name= "latitude",
    units= "degrees_north",
    vals= xy[[ "lat"]])
  timeDim <- ncdim_def(
    name= ncTimeName,
    units= ncTimeUnits,
    vals= ncDays,
    unlim= TRUE)
  list( lonDim, latDim, timeDim)
}
## Define the eight pSIMS output variables, all sharing the same
## (longitude, latitude, time) dimensions.
##
## xy          - named vector/list with "lon" and "lat" coordinates
## ncDays      - integer day offsets for the time dimension
## ncGroupName - unused; retained so existing callers keep working
## ncTimeUnits - udunits time-origin string forwarded to ncDimsFunc()
## compression - deflate level passed to ncvar_def()
## missval     - missing-value code passed to ncvar_def()
##
## Returns a list of ncvar4 objects in the order tmin, tmax, precip,
## solar, pres, spfh, u, v.  Previously eight copy-pasted ncvar_def()
## calls (each rebuilding identical dimensions); now table-driven.
ncVarsFunc <- function(
  xy, ncDays,
  ncGroupName= "narr",
  ncTimeUnits= "days since 1978-12-31 00:00:00",
  compression= NA,
  missval= ncdf4:::default_missval_ncdf4()
  ) {
  ## name / units / long_name for each variable, in output order
  varTable <- list(
    c( "tmin", "C", "daily minimum temperature"),
    c( "tmax", "C", "daily maximum temperature"),
    c( "precip", "mm", "daily total precipitation"),
    c( "solar", "MJ/m^2/day", "daily average downward short-wave radiation flux"),
    c( "pres", "Pa", "pressure"),
    c( "spfh", "kg/kg", "specific humidity"),
    c( "u", "m/s", "u wind"),
    c( "v", "m/s", "v wind"))
  ## all variables share identical dimensions, so build them once
  ## (ncvar_def does not modify its `dim` argument)
  ncDims <- ncDimsFunc( xy, ncDays,
                        ncTimeName= "time",
                        ncTimeUnits= ncTimeUnits)
  lapply(
    varTable,
    function( v)
      ncvar_def(
        name= v[ 1],
        units= v[ 2],
        longname= v[ 3],
        dim= ncDims,
        compression= compression,
        missval= missval))
}
## Create (or overwrite) the pSIMS netCDF file for one grid point.
##
## The file path data/psims/RRR/CCC/RRR_CCC.psims.nc is derived from the
## point's row/column on a global grid of resolution `resWorld`.
## Returns the open ncdf4 handle for the freshly created file.
psimsNcFromXY <- function(
  xy, ncDays,
  resWorld= 0.5,
  ncTimeUnits= "days since 1860-01-01 00:00:00") {
  ## normalise longitudes from [0, 360) to [-180, 180)
  if( xy[[ "lon"]] > 180) {
    xy[[ "lon"]] <- xy[[ "lon"]] - 360
  }
  ## locate the point on a global grid to derive the tile path
  world <- raster()
  res( world) <- resWorld
  cell <- cellFromXY( world, xy)
  rowCol <- as.list( rowColFromCell( world, cell)[ 1, ])
  ncFile <- sprintf( "data/psims/%1$03d/%2$03d/%1$03d_%2$03d.psims.nc",
                     rowCol$row, rowCol$col)
  ncDir <- dirname( ncFile)
  if( !file.exists( ncDir)) {
    dir.create( path= ncDir, recursive= TRUE)
  }
  ## always start from a clean file
  if( file.exists( ncFile)) file.remove( ncFile)
  ncVars <- ncVarsFunc( xy, ncDays,
                        ncGroupName= "nldas",
                        ncTimeUnits= ncTimeUnits)
  nc_create(
    filename= ncFile,
    vars= ncVars,
    force_v4= FALSE,
    verbose= FALSE)
}
## Write one grid point's daily series for every NLDAS variable into its
## pSIMS netCDF file.
##
## nldasValues - named list of (lon x lat x time) arrays with dimnames,
##               as produced by readNldasValues()
## col, row    - indices into the longitude/latitude dimnames
##
## Reads the globals nldasMask and nldasRes.  Returns the path of the
## file written, or NA when the point falls outside the NLDAS mask.
writePsimsNc <- function( nldasValues, col, row) {
  ## coordinates of this grid point, taken from the array dimnames
  xy <- c(
    lon= as.numeric( dimnames( nldasValues[[ "tmin"]])$longitude[ col]),
    lat= as.numeric( dimnames( nldasValues[[ "tmin"]])$latitude[ row]))
  ## skip points outside the study region
  if( is.na( extract( nldasMask, rbind( xy)))) return( NA)
  ## time coordinates: integer day offsets from 1978-12-31
  psimsNc <- psimsNcFromXY(
    xy,
    ncDays= as.integer(
      as.Date( dimnames( nldasValues[[ "tmin"]])$time) -
      as.Date( "1978-12-31")),
    resWorld= nldasRes,
    ncTimeUnits= "days since 1978-12-31 23:00:00")
  for( var in names( nldasValues)) {
    vals <- nldasValues[[ var]][ col, row,]
    ## unit conversions; inputs presumably W/m^2 (solar) and Kelvin
    ## (tmin/tmax) -- confirm against the upstream NLDAS files.
    ## The trailing bare `vals` is switch()'s pass-through default.
    vals <- switch(
      var,
      solar= vals *86400 /1000000, # Change units to MJ /m^2 /day
      tmin= vals -273.15, # change K to C
      tmax= vals -273.15,
      ## precip= vals *3600 *24, # Change mm/s to mm/day
      vals )
    ## browser()
    ## write the whole time series at the file's single (lon, lat) cell
    ncvar_put(
      nc= psimsNc,
      varid= var, ## sprintf( "nldas/%s", var),
      vals= vals,
      count= c( 1, 1, -1))
  }
  nc_close( psimsNc)
  ## the R handle outlives the close, so the path is still readable
  psimsNc$filename
}
## Re-register doMC with its default worker count for the write loop.
registerDoMC()
## time <-
##   system.time(
## Write one pSIMS file per (col, row) grid point of the loaded stripe;
## each iteration yields the written file path, or NA outside the mask.
psimsNcFile <-
  foreach( col= 1:dim( nldasValues$tmax)[1], .combine= c) %:%
  foreach( row= 1:dim( nldasValues$tmax)[2], .combine= c) %dopar% {
    writePsimsNc( nldasValues, col, row)
  }
##   )
## report the files written, one path per line
cat(
  psimsNcFile,
  ## sprintf( "\n\nTime to write %d files:", length( psimsNcFile)),
  sep= "\n")
## print( time)
b1f0b5678e9e9d6567b2cc24b7293047ee5d263e
|
1308fdc1702986e9f02b1d2c8dc9a545008eee00
|
/R/stop_quietly.R
|
83c129be67398e10198c95cb78d13176abe4905a
|
[] |
no_license
|
raphidoc/lighthouse
|
62c2a15dd2a52069e2fbad90045db9c028b341be
|
bacba1366dbc13eb384c84e79d46976d4433cc6d
|
refs/heads/main
| 2022-06-16T08:53:02.969512
| 2022-06-08T13:29:18
| 2022-06-08T13:29:18
| 213,687,674
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
stop_quietly.R
|
# Abort execution without echoing the error message to the console.
# The previous value of `show.error.messages` is restored via on.exit,
# so later errors print normally again.
stop_quietly <- function() {
  old_opts <- options(show.error.messages = FALSE)
  on.exit(options(old_opts))
  stop("QC files produced, Terminated")
}
|
b85c734a39f40f0dfc9ac4748028ed196a0aef5a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/splitstackshape/examples/Reshape.Rd.R
|
cfe9c86c1eead3f07e246475dece4d21bf760f11
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 723
|
r
|
Reshape.Rd.R
|
library(splitstackshape)
### Name: Reshape
### Title: Reshape Wide Data Into a Semi-long Form
### Aliases: Reshape
### ** Examples
# Example data: stub "varA" has three measurements (.1-.3) but "varB"
# has only two and "varC" only one, so the wide layout is unbalanced.
set.seed(1)
mydf <- data.frame(id_1 = 1:6, id_2 = c("A", "B"), varA.1 = sample(letters, 6),
          varA.2 = sample(letters, 6), varA.3 = sample(letters, 6),
          varB.2 = sample(10, 6), varB.3 = sample(10, 6),
          varC.3 = rnorm(6))
mydf
## Note that these data are unbalanced
## reshape() will not work
## Not run:
##D reshape(mydf, direction = "long", idvar=1:2, varying=3:ncol(mydf))
## End(Not run)
## The Reshape() function can handle such scenarios
# id.vars identify the rows; var.stubs are the prefixes to stack.
Reshape(mydf, id.vars = c("id_1", "id_2"),
        var.stubs = c("varA", "varB", "varC"))
|
74bed2d55d8604eaa9ea71a815bc70b17935bce4
|
7cd8b7c0cd60c18c366d8d6c027df8077c81789e
|
/Schedules/Original/Virginia/_TASL/extract.R
|
71985a7c6b5e7a670c2c840586a5096a32634e3e
|
[] |
no_license
|
tyler-richardett/virginia_competitive_youth_soccer
|
5c6c99378c888b4f03a2794b9c49a59924d32a9f
|
022ad240bbe29ff7677e09efdb5438677d291911
|
refs/heads/master
| 2021-04-12T11:59:01.294354
| 2018-07-11T01:45:28
| 2018-07-11T01:45:28
| 126,563,080
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,182
|
r
|
extract.R
|
library(dplyr)
library(rvest)

## Collect the schedule HTML files in the working directory, excluding
## this script and previously exported CSVs.  NOTE: the previous
## `filenames[-grep(pattern, filenames)]` dropped EVERY file when the
## pattern had no match (negative indexing with integer(0) selects
## nothing), so filtering now uses !grepl().
filenames <- list.files()
filenames <- filenames[!grepl("extract.R", filenames, fixed = TRUE)]
filenames <- filenames[!grepl("csv$", filenames)]

## Parse one schedule page into a Home/Away/Field/Age.Group/Gender frame.
extract_schedule <- function(filename) {
  page <- read_html(filename)  # parse once instead of once per query
  tmp.teams.1 <- page %>% html_nodes("tr.trstyle1 td.trstyleGame") %>% html_text()
  tmp.fields.1 <- page %>% html_nodes("tr.trstyle1 td a") %>% html_attr("href")
  tmp.teams.2 <- page %>% html_nodes("tr.trstyle2 td.trstyleGame") %>% html_text()
  tmp.fields.2 <- page %>% html_nodes("tr.trstyle2 td a") %>% html_attr("href")
  tmp <- data.frame(Home = c(tmp.teams.1, tmp.teams.2), Away = c(tmp.teams.1, tmp.teams.2), Field = c(tmp.fields.1, tmp.fields.2))
  ## each game cell holds both teams; keep the text before the
  ## "vs."/"played"/score separator for Home, after it for Away
  tmp$Home <- gsub("( vs.+$| played.+$|\\s+[0-9]+,.+$)", "", tmp$Home)
  tmp$Away <- gsub("(^.+vs\\. |^.+played |^.+[0-9]+,\\s+)", "", tmp$Away)
  tmp$Away <- gsub("CANCELLED", "", tmp$Away)
  tmp$Away <- gsub("\\s+[0-9]+.+$", "", tmp$Away)
  ## field links are javascript popups; strip the wrapper to keep the URL
  tmp$Field <- gsub("^javascript:directWindow\\('", "", tmp$Field)
  tmp$Field <- gsub("','.+$", "", tmp$Field)
  ## age group (chars 3-5) and gender (char 1) are encoded in the name
  tmp$Age.Group <- rep(substring(filename, 3, 5), nrow(tmp))
  tmp$Gender <- rep(ifelse(substring(filename, 1, 1) == "B", "Boys", "Girls"), nrow(tmp))
  tmp
}

## bind all pages at once rather than growing the frame inside a loop
schedule <- bind_rows(lapply(filenames, extract_schedule))

schedule <- schedule %>% mutate(Home = paste(Home, Age.Group, Gender, "TASL"), Away = paste(Away, Age.Group, Gender, "TASL"))

## Save full schedule.
## write.csv(schedule, "TASL_Full.csv", row.names = FALSE, na = "")

teams <- schedule %>% select(Home, Age.Group, Gender) %>% arrange(Home) %>% distinct()
fields <- schedule %>% filter(Field != "Unassigned" & Field != "") %>% select(Field) %>% arrange(Field) %>% distinct()
schedule <- schedule %>% filter(Field != "Unassigned" & Field != "") %>% select(Home, Away, Field)

## Save teams, fields, and schedule.
## write.csv(teams, "TASL_Teams.csv", row.names = FALSE, na = "")
## write.csv(fields, "TASL_Fields.csv", row.names = FALSE, na = "")
## write.csv(schedule, "TASL_Schedule.csv", row.names = FALSE, na = "")
|
5d54c9c4d544b1aa5d249283d9e712794d78de44
|
f3ca0a4a2391f3e226b14b54f367a9797fe2d275
|
/man/ask_question.Rd
|
ca4ce964beda47a078542b6ecc2dde635323e925
|
[
"MIT"
] |
permissive
|
Rukshani/r2vr
|
a8b9903f5876f9d679824b27e3376a514695a26c
|
8d5e9630eb7538121f01951045e174ec235b043c
|
refs/heads/master
| 2023-07-10T12:43:30.438378
| 2021-03-28T07:22:43
| 2021-03-28T07:22:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,215
|
rd
|
ask_question.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testing_ask_question.R
\name{ask_question}
\alias{ask_question}
\title{Ask the user a question or multiple questions}
\usage{
ask_question(
index = NA,
visible = TRUE,
question_and_responses = QUESTIONS_AND_RESPONSES
)
}
\arguments{
\item{index}{Integer representing the question number as defined by the user in 'question_and_reponses'}
\item{visible}{Boolean to toggle visibility of the question and response entities}
\item{question_and_responses}{list of evaluation question lists composed of a 'question', 'answerOne', 'answerTwo', 'answerThree', and an 'answerFour'}
}
\description{
Ask the user a question or multiple questions
}
\examples{
\donttest{
evaluation_questions <- list(
  list(question = "Did you enjoy this experiment?", answerOne = "Very much", answerTwo = "Yes", answerThree = "A little", answerFour = "No"),
list(question = "On a scale of 1-4, how would you rate your experience?", answerOne = "1",
answerTwo = "2", answerThree = "3", answerFour = "4")
)
ask_question(1)
ask_question(2)
ask_question(1, FALSE) # hide question/responses
ask_question(1, TRUE, evaluation_questions)
}
}
|
af730c0378c8348dd4b62af527f4b25db245b0a8
|
768a5e8713ed0751fdea1fc0512dc5e87c1c06b0
|
/man/AtmosphericEmissivity.Rd
|
93d82b2ed6a34fbbbf392db935dc1b3e83bafe87
|
[] |
no_license
|
cran/EcoHydRology
|
c854757a7f70f91b3d33d6f7c5313752bf2819e3
|
d152909c12e6bb0c1f16b987aa7f49737cdcf3d3
|
refs/heads/master
| 2020-05-16T21:01:18.881492
| 2018-09-24T11:52:33
| 2018-09-24T11:52:33
| 17,691,749
| 6
| 6
| null | 2018-08-29T19:54:05
| 2014-03-13T02:26:21
|
R
|
UTF-8
|
R
| false
| false
| 1,206
|
rd
|
AtmosphericEmissivity.Rd
|
\name{AtmosphericEmissivity}
\alias{AtmosphericEmissivity}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Atmospheric Emissivity
}
\description{
The emissivity of the atmosphere [-]
}
\usage{
AtmosphericEmissivity(airtemp, cloudiness, vp=NULL, opt="linear")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{airtemp}{
Air temperature: air temperature [C]
}
\item{cloudiness}{
Cloudiness: fraction of the sky covered in clouds [-]
}
\item{vp}{
Vapor Pressure : [kPa]
}
\item{opt}{
option: either "linear" for a linear relationship between clear sky emissivity and temperature, or
"Brutsaert" to use Brutsaert's(1975) clear sky emissivity formulation - this requires input for vapor pressure
}
}
\value{
The emissivity of the atmosphere [-]
}
\references{
Campbell, G. S., Norman, J.M., 1998. An Introduction to
Environmental Biophysics, second ed., Springer, New York,
p. 286.
Brutsaert, W. (1975) On a Derivable Formula for Long-Wave Radiation From Clear Skies. Water Resources Research, 11(5), 742-744
}
\author{
Fuka, D.R., Walter, M.T., Archibald, J.A.
}
\examples{
temp=15
clouds=.5
AtmEm=AtmosphericEmissivity(temp,clouds)
print(AtmEm)
}
|
f10f777f11ff706e36314c742daa114ac4f99c50
|
114a56f459152b345fd4d34c427da8d81c2e8f1f
|
/R/prepare_data.R
|
d339f6e2afc4c8bc6911000bc432fb3b2c065578
|
[] |
no_license
|
ModelOriented/FairPAN
|
2e512744e43b1053cab03753581dc1f15b8ef90d
|
139f0bd82a055dad92e1e41a64ec60ff57676ce1
|
refs/heads/master
| 2023-08-24T23:16:53.590628
| 2021-10-04T16:41:30
| 2021-10-04T16:41:30
| 394,198,426
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,240
|
r
|
prepare_data.R
|
#' Creates datasets and dataloaders
#'
#' Creates two torch datasets, from given train_x, test_x matrices and train_y,
#' test_y vectors and converts them into torch dataloaders with provided batch
#' size. It is used for both classifier and adversarial. During the
#' initialization rows containing NAs are dropped together with the matching
#' entries of the target vector, so predictors and targets stay aligned.
#'
#' @param train_x numeric, scaled matrix of predictors used for training
#' @param test_x integer, matrix of predictors used for testing
#' @param train_y numeric, scaled vector of target used for training
#' @param test_y integer, vector of predictors used for testing
#' @param batch_size integer indicating a batch size used in dataloader.
#' Default: 50
#' @param dev device used for calculations (cpu or gpu)
#'
#' @return list of two data sets and two dataloaders for train and test
#' respectively
#' @export
#'
#' @examples
#' train_x <- matrix(c(1,2,3,4,5,6),nrow=3)
#' train_y <- c(1,2,3)
#' test_x <- matrix(c(1,2,3,4),nrow=2)
#' test_y <- c(1,2)
#' dev <- "cpu"
#' dataset_loader(train_x,train_y,test_x,test_y,batch_size=1,dev)
dataset_loader <- function(train_x,
                           train_y,
                           test_x,
                           test_y,
                           batch_size = 50,
                           dev) {
  # ---- input validation --------------------------------------------------
  if (!is.numeric(train_x))
    stop("train_x must be numeric")
  if (!is.numeric(test_x))
    stop("test_x must be numeric")
  if (!is.numeric(train_y) || !is.vector(train_y))
    stop("train_y must be numeric vector of target")
  if (!is.numeric(test_y) || !is.vector(test_y))
    stop("test_y must be numeric vector of target")
  if (batch_size != as.integer(batch_size / 1))
    stop("batch size must be an integer")
  if (!dev %in% c("gpu", "cpu"))
    stop("dev must be gpu or cpu")

  # Declaring `self` here avoids a spurious "no visible binding for global
  # variable" note inside the torch::dataset() definition below.
  self <- NA

  new_dataset <- torch::dataset(
    name = "new_dataset",

    initialize = function(df, y2) {
      # Drop rows with missing predictors AND the matching target entries.
      # Previously na.omit() was applied to `df` alone, which left the x
      # tensor shorter than the y tensor whenever NAs were present.
      keep <- stats::complete.cases(df)
      x_cont <- df[keep, , drop = FALSE]
      y2 <- y2[keep]

      # create tensors for x and y and move them to the requested device
      self$x_cont <- torch::torch_tensor(x_cont)$to(device = dev)
      self$y <-
        torch::torch_tensor(y2, dtype = torch::torch_long())$to(device = dev)
    },

    # i-th sample: one predictor row plus its target
    .getitem = function(i) {
      list(x_cont = self$x_cont[i, ], y = self$y[i])
    },

    # number of samples (x and y are kept the same length above)
    .length = function() {
      self$y$size()[[1]]
    }
  )

  # create datasets and data loaders
  train_ds <- new_dataset(train_x, train_y)
  test_ds <- new_dataset(test_x, test_y)

  train_dl <- torch::dataloader(train_ds, batch_size = batch_size,
                                shuffle = FALSE)
  test_dl <- torch::dataloader(test_ds, batch_size = batch_size,
                               shuffle = FALSE)

  return(list("train_ds" = train_ds, "test_ds" = test_ds,
              "train_dl" = train_dl, "test_dl" = test_dl))
}
#' Prepares data for adversarial model
#'
#' Prepares classifiers output for adversarial by splitting original predictions
#' into train and test vectors.
#'
#' @param preds numeric vector of predictions of target value made by
#' classifier (preferably the probabilistic ones).
#' @param sensitive integer vector of sensitive attribute which adversarial has
#' to predict. Must have the same length as `preds`.
#' @param partition float from [0,1] range setting the size of train vector
#' (test size equals 1-partition). Default = 0.7.
#' @param seed integer making the train/test split reproducible.
#' Default = 123, the value that used to be hard-coded inside the function,
#' so existing callers get identical splits.
#'
#' @return list of four numeric lists with x and y data for train and test
#' respectively.
#' @export
#'
#' @examples
#'
#' preds <-c(0.312,0.343,0.932,0.754,0.436,0.185,0.527,0.492,0.743,0.011)
#' sensitive <- c(1,1,2,2,1,1,2,2,2,1)
#'
#' prepare_to_adv(preds,sensitive,partition=0.6)
#'
prepare_to_adv <- function(preds, sensitive, partition = 0.7, seed = 123) {
  if (!is.numeric(preds) || !is.vector(preds))
    stop("preds must be numeric vector of probabilities")
  if (!is.numeric(sensitive) || !is.vector(sensitive))
    stop("sensitive must be numeric vector of mapped sensitive classes")
  # the split below indexes both vectors with the same indices
  if (length(preds) != length(sensitive))
    stop("preds and sensitive must have the same length")
  if (!is.numeric(partition) || partition > 1 || partition < 0)
    stop("partition must be numeric in [0,1]")

  n <- length(preds)
  set.seed(seed)
  train_indices <- sample(seq_len(n), n * partition)
  test_indices <- setdiff(seq_len(n), train_indices)

  # single-column matrices of predictions, split by the sampled indices
  train_x <- matrix(as.numeric(preds[train_indices]), ncol = 1)
  train_y <- sensitive[train_indices]
  test_x <- matrix(as.numeric(preds[test_indices]), ncol = 1)
  test_y <- sensitive[test_indices]

  return(list("train_x" = train_x, "train_y" = train_y,
              "test_x" = test_x, "test_y" = test_y))
}
#' Preprocesses data for training
#'
#' Prepares provided dataset to be ready for the training process.
#' It makes data suitable for training functions, splits it into train, test
#' and validation, provides other data objects that are necessary for our
#' training.
#'
#' WARNING! So far the code in other functions is not fully prepared for
#' validation dataset and is designed for using test as test and validation.
#' Well understanding users however can use validation set in place of test if
#' they are sure it makes sense there.
#'
#' @param data list representing whole table of data (categorical variables
#' must be factors).
#' @param target_name character, column name of the target variable. Selected
#' column must be interpretable as categorical.
#' @param sensitive_name character, column name of the sensitive variable.
#' Selected column must be interpretable as categorical.
#' @param privileged character meaning the name of privileged group
#' @param discriminated character meaning the name of discriminated group
#' @param drop_also character vector, column names of other columns to drop
#' (like other sensitive variables).
#' @param sample double from [0,1] setting size of our sample from original
#' data set. Default: 1
#' @param train_size double from [0,1] setting size of our train. Note that
#' train_size+test_size+validation_size=1. Default=0.7
#' @param test_size double from [0,1] setting size of our test Note that
#' train_size+test_size+validation_size=1. Default=0.3
#' @param validation_size double from [0,1] setting size of our validation.
#' Note that train_size+test_size+validation_size=1. Default=0
#' @param seed sets seed for the sampling for code reproduction. Default=NULL
#'
#' @return list of prepared data
#' (
#' train_x, - numeric scaled matrix for classifier training
#' train_y, - numeric scaled vector for classifier training
#' sensitive_train, - numeric scaled vector for adversaries training
#' test_x, - numeric scaled matrix for classifier testing
#' test_y, - numeric scaled vector for classifier testing
#' sensitive_test, - numeric scaled vector for adversaries testing
#' valid_x, - numeric scaled matrix for classifier validation
#' valid_y, - numeric scaled vector for classifier validation
#' sensitive_valid, - numeric scaled vector for adversaries validation
#' data_scaled_test, - numeric scaled data set for testing
#' data_scaled_valid, - numeric scaled data set for validation
#' data_test, - whole dataset for testing, unchanged
#' protected_test, - character vector of protected values for explainers test
#' data_valid, - whole dataset for validation, unchanged
#' protected_valid - character vector of protected values for explainers valid
#' )
#' @export
#'
#' @examples
#' adult <- fairmodels::adult
#'
#' processed <-
#' preprocess(
#' adult,
#' "salary",
#' "sex",
#' "Male",
#' "Female",
#' c("race"),
#' sample = 0.05,
#' train_size = 0.65,
#' test_size = 0.35,
#' validation_size = 0,
#' seed = 7
#' )
#'
preprocess <- function(data,
                       target_name,
                       sensitive_name,
                       privileged,
                       discriminated,
                       drop_also = NULL,
                       sample = 1,
                       train_size = 0.7,
                       test_size = 0.3,
                       validation_size = 0,
                       seed = NULL) {
  # ---- input validation ----------------------------------------------------
  if (!is.list(data) && !is.matrix(data) && !is.data.frame(data))
    stop("data must be some sort of data holder (list,matrix,data.frame)")
  if (!is.character(privileged))
    stop("privileged must be a character string")
  if (!is.character(discriminated))
    stop("discriminated must be a character string")
  if (train_size < 0 || test_size < 0 || validation_size < 0)
    stop("sizes must be positive")
  if (train_size + test_size + validation_size != 1)
    stop("train_size+test_size+validation_size must equal 1")
  if (!is.character(target_name) || !is.character(sensitive_name))
    stop("target_name and sensitive_name must be characters")
  if (!is.null(drop_also) && !is.character(drop_also))
    stop("drop_also must be a character vector")
  if (sample > 1 || sample < 0)
    stop("sample must be between 0 and 1")
  # BUG FIX: `seed` defaults to NULL, and the old check
  # `seed != as.integer(seed / 1)` raised "argument is of length zero"
  # whenever the default was used.  Only validate a seed the caller actually
  # supplied; set.seed(NULL) below then simply re-initialises the RNG.
  if (!is.null(seed) && seed != as.integer(seed))
    stop("seed must be an integer")
  if (!all(c(target_name, sensitive_name) %in% names(data)))
    stop("target_name and sensitive_name must be columns of data")
  # `[[` instead of eval(parse(...)): works for any column name and avoids
  # `$` partial matching.
  col <- data[[sensitive_name]]
  # Balance the dataset to have the same number of sensitive values, so the
  # adversarial model doesn't overfit (like all predictions are 1 or 2).
  M <- min(table(col))
  df_new <- data[col == privileged, ][1:M, ]
  df_new <- rbind(df_new, data[col == discriminated, ][1:M, ])
  data <- df_new
  data <- stats::na.omit(data)
  # Optional down-sampling of the (balanced) data to a fraction `sample`.
  set.seed(seed)
  sample_indices <- sample(seq_len(nrow(data)), nrow(data) * sample)
  data <- data[sample_indices, ]
  data <- stats::na.omit(data)
  # Integer-coded target / sensitive columns (factor levels -> 1, 2, ...).
  sensitive <- as.integer(data[[sensitive_name]])
  target <- as.integer(data[[target_name]])
  # Drop the columns we don't want to be in the learning set (the two
  # branches of the original if/else collapse into one, because
  # c(a, b, NULL) == c(a, b)).
  drop_cols <- c(target_name, sensitive_name, drop_also)
  data_coded <- data.frame(data[, -which(names(data) %in% drop_cols)])
  # Integer-encode every remaining non-numeric column.
  for (i in seq_len(ncol(data_coded))) {
    if (!is.numeric(data_coded[, i])) {
      data_coded[, i] <- as.integer(data_coded[, i])
    }
  }
  # Centred / scaled feature matrix used for model training.
  data_matrix <- matrix(unlist(data_coded), ncol = ncol(data_coded))
  data_scaled <- scale(data_matrix, center = TRUE, scale = TRUE)
  # Prepare indices for the train / test / validation split.
  set.seed(seed)
  train_indices <- sample(seq_len(nrow(data_coded)),
                          train_size * nrow(data_coded))
  rest_indices <- setdiff(seq_len(nrow(data_coded)), train_indices)
  set.seed(seed)
  test_indices <- sample(rest_indices,
                         test_size / (1 - train_size) * length(rest_indices))
  validation_indices <- setdiff(rest_indices, test_indices)
  data_scaled_test <- data_scaled[test_indices, ]
  data_scaled_valid <- data_scaled[validation_indices, ]
  train_x <- data_scaled[train_indices, ]
  train_y <- target[train_indices]
  sensitive_train <- sensitive[train_indices]
  test_x <- data_scaled[test_indices, ]
  test_y <- target[test_indices]
  sensitive_test <- sensitive[test_indices]
  valid_x <- data_scaled[validation_indices, ]
  valid_y <- target[validation_indices]
  sensitive_valid <- sensitive[validation_indices]
  # Unscaled rows plus the raw protected attribute, kept for explainers.
  data_test <- data[test_indices, ]
  protected_test <- data_test[[sensitive_name]]
  data_valid <- data[validation_indices, ]
  protected_valid <- data_valid[[sensitive_name]]
  prepared_data <- list(
    "train_x" = train_x,
    "train_y" = train_y,
    "sensitive_train" = sensitive_train,
    "test_x" = test_x,
    "test_y" = test_y,
    "sensitive_test" = sensitive_test,
    "valid_x" = valid_x,
    "valid_y" = valid_y,
    "sensitive_valid" = sensitive_valid,
    "data_scaled_test" = data_scaled_test,
    "data_scaled_valid" = data_scaled_valid,
    "data_test" = data_test,
    "protected_test" = protected_test,
    "data_valid" = data_valid,
    "protected_valid" = protected_valid
  )
  return(prepared_data)
}
|
534d300f53dbfb9d89d40ae7006ee30edd75e4a7
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/multinomRob/R/genoudRob.R
|
e000f2939b680abcfa02dc566f5e930a9e16f519
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,179
|
r
|
genoudRob.R
|
#
# multinomRob
#
# Walter R. Mebane, Jr.
# University of Michigan
# http://www-personal.umich.edu/~wmebane
# <wmebane@umich.edu>
#
# Jasjeet Singh Sekhon
# UC Berkeley
# http://sekhon.polisci.berkeley.edu
# sekhon@berkeley.edu
#
#
###################################
#New Front End for Genoud, with tuned defaults
###################################
#sets genoud.parms defaults
#sets genoud.parms defaults: every entry the user left NULL is filled with
#the tuned default; user-supplied values are kept as-is.
genoudParms <- function(genoud.parms)
{
  # 'cluster' cannot be used from multinomRob: warn if the caller tried to
  # enable it (vector, list, or any non-FALSE scalar), then force it off.
  if (!is.null(genoud.parms$cluster)) {
    if (length(genoud.parms$cluster) > 1) {
      warning("cluster option cannot be used with 'multinomRob'")
    } else if (is.list(genoud.parms$cluster)) {
      warning("cluster option cannot be used with 'multinomRob'")
    } else if (genoud.parms$cluster!=FALSE) {
      warning("cluster option cannot be used with 'multinomRob'")
    }
  }
  genoud.parms$cluster <- FALSE;
  # Table-driven defaults replace the long chain of identical
  # `if (is.null(...)) ...` blocks.  'Domains' is intentionally absent:
  # its default is NULL (genoudRob constructs it from the starting values).
  defaults <- list(
    balance               = FALSE,
    pop.size              = 1000,
    max.generations       = 100,
    wait.generations      = 10,
    hard.generation.limit = FALSE,
    MemoryMatrix          = TRUE,
    Debug                 = FALSE,
    scale.domains         = 10,
    boundary.enforcement  = 0,
    solution.tolerance    = 0.0000001,
    BFGS                  = TRUE,
    unif.seed             = 812821,
    int.seed              = 53058,
    print.level           = 0,
    share.type            = 0,
    instance.number       = 0,
    output.path           = "stdout",
    output.append         = FALSE,
    project.path          = "/dev/null",
    P1 = 50, P2 = 50, P3 = 50, P4 = 50,
    P5 = 50, P6 = 50, P7 = 50, P8 = 50,
    P9 = 0
  )
  for (nm in names(defaults)) {
    if (is.null(genoud.parms[[nm]])) {
      genoud.parms[[nm]] <- defaults[[nm]]
    }
  }
  return(genoud.parms);
} #end genoudParms
#wrapper around genoud() with the settings multinomRob needs: builds a
#default Domains box around the starting values, fixes the options that must
#not vary, and forwards the tuned parameters from genoud.parms.
genoudRob <- function(fn, nvars, starting.values, genoud.parms)
{
  # Default Domains: a symmetric box of half-width
  # |starting.value| * scale.domains around each starting value, used only
  # when the caller did not supply a Domains matrix.
  Domains <- genoud.parms$Domains
  if (!(is.matrix(Domains)))
  {
    half.width <- abs(starting.values) * genoud.parms$scale.domains
    Domains <- matrix(c(starting.values - half.width,
                        starting.values + half.width),
                      nrow = nvars, ncol = 2)
  }
  # Assemble the full genoud() call.  The first group of arguments is fixed
  # for use from multinomRob (genoud > 2.0 additionally needs 'lexical',
  # 'cluster' and 'balance'); the rest comes straight from genoud.parms.
  genoud.args <- list(
    fn, nvars = nvars,
    max = FALSE, gradient.check = FALSE, data.type.int = FALSE,
    hessian = FALSE, lexical = FALSE, gr = NULL,
    starting.values = starting.values,
    Domains = Domains,
    pop.size = genoud.parms$pop.size,
    max.generations = genoud.parms$max.generations,
    wait.generations = genoud.parms$wait.generations,
    hard.generation.limit = genoud.parms$hard.generation.limit,
    MemoryMatrix = genoud.parms$MemoryMatrix,
    solution.tolerance = genoud.parms$solution.tolerance,
    boundary.enforcement = genoud.parms$boundary.enforcement,
    BFGS = genoud.parms$BFGS,
    unif.seed = genoud.parms$unif.seed,
    int.seed = genoud.parms$int.seed,
    print.level = genoud.parms$print.level,
    share.type = genoud.parms$share.type,
    instance.number = genoud.parms$instance.number,
    output.path = genoud.parms$output.path,
    output.append = genoud.parms$output.append,
    project.path = genoud.parms$project.path,
    P1 = genoud.parms$P1, P2 = genoud.parms$P2, P3 = genoud.parms$P3,
    P4 = genoud.parms$P4, P5 = genoud.parms$P5, P6 = genoud.parms$P6,
    P7 = genoud.parms$P7, P8 = genoud.parms$P8, P9 = genoud.parms$P9,
    cluster = genoud.parms$cluster,
    balance = genoud.parms$balance,
    debug = genoud.parms$Debug
  )
  return(do.call(genoud, genoud.args))
} #end of genoudRob()
|
af1e314bac1a7e258e8a7be091e384df38978086
|
791ad46939643528dcb2b5de5f3bd809366d7a5b
|
/R/ild2.R
|
0f0faf2470d582a610a579bca206cdee571e8cbe
|
[] |
no_license
|
trinhdangmau/lmorph
|
0ae4c175bc38ae6e4f43ec2b251e4c1e7d347461
|
4c2069a9c687e2f90c416eccf5a1d565bca11c4c
|
refs/heads/master
| 2022-07-26T10:09:52.212779
| 2020-05-22T03:44:31
| 2020-05-22T03:44:31
| 266,010,129
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55
|
r
|
ild2.R
|
# Row-wise Euclidean distance between two conformable matrices: for each
# row i, sqrt(sum((M1[i, ] - M2[i, ])^2)).
# rowSums() is the vectorized (and much faster) equivalent of the original
# apply(X, 1, sum) and returns the same vector.
ild2 <- function(M1, M2) {
  sqrt(rowSums((M1 - M2)^2))
}
|
3fee6279a301f709827a2711844acc7ed1700c93
|
404b5b7112dee522f61a7e357ff815d8f52a7623
|
/UST_SubIDInvestigation_SETO_v2.R
|
e6997828690e4bce1973c941f940191d2cf7a67a
|
[] |
no_license
|
robmill/UST
|
630af8985e2ef2c35772dc513fd74a041d1dc9e6
|
67ac320bef55618c7963c5dcc9923fdf43032d43
|
refs/heads/master
| 2021-01-20T19:53:02.879625
| 2016-08-08T18:17:19
| 2016-08-08T18:17:19
| 63,387,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,024
|
r
|
UST_SubIDInvestigation_SETO_v2.R
|
# ---- Setup: libraries and raw data pull ------------------------------------
# Time-series / data-wrangling libraries used throughout this script.
# NOTE(review): CosmosToR is an internal (non-CRAN) package, and the pull
# below needs network access to the COSMOS cluster.
library(forecast);
library(fpp);
library(xts)
library(tseries)
library(CosmosToR)
library(zoo)
library(plyr)
library(dplyr)
library(reshape2)
library(lubridate)
library(sqldf)
# Connect to CSDFENG COSMOS VC
#
vc <- vc_connection('https://cosmos11.osdinfra.net/cosmos/CSDFENG/')
# Pull the raw SETO compute-usage structured stream into a data frame.
SETO_Compute_RAW<-submit_iscope(vc, 'SELECT * FROM (SSTREAM "/my/SubIDInvestigation/SETO/SETO_ComputeUsage.ss"); OUTPUT TO CONSOLE;')
# translate to ts class data
# adf.test()
# use window to generate train date set
# use window to generate test set
# forecast to generate forecast class data
# use accuracy on forecast class data
# minimize AIC
# ---- Clean, subset to FY16, and aggregate to a monthly grain ---------------
# Modify DateTime variable to correct date format
SETO_Compute<-data.frame(SETO_Compute_RAW,stringsAsFactors = FALSE);
SETO_Compute<-SETO_Compute[SETO_Compute$ClusterType=="COMPUTE",]
SETO_Compute$DateAndTime<-as.Date(SETO_Compute$DateAndTime,format="%m/%d/%Y");
# Subset for FY16
# NOTE(review): the window starts 2015-06-01 -- confirm the fiscal-year
# boundary (presumably it should align with the FY start used elsewhere).
FY16_Start<-as.Date("2015-06-01")
FY16_End<-as.Date("2016-06-30")
SETO_Compute_FY16_daily<-SETO_Compute[SETO_Compute$DateAndTime >= FY16_Start & SETO_Compute$DateAndTime <= FY16_End,]
SETO_Compute_FY16_daily_sorted<-SETO_Compute_FY16_daily[order(SETO_Compute_FY16_daily$DateAndTime),]
# NOTE(review): hard-coded Windows path; fails on non-Windows machines or
# when c:/temp does not exist.
write.csv(SETO_Compute_FY16_daily,"c:/temp/SETO_Compute_Daily.csv")
# Aggregate to monthly time grain
# Hadley Wickham
# http://stackoverflow.com/questions/6052631/aggregate-daily-data-to-month-year-in-r
SETO_Compute_FY16_daily_sorted_temp<-SETO_Compute_FY16_daily_sorted
# `my` = first day of the month, used as the monthly grouping key below.
SETO_Compute_FY16_daily_sorted_temp$my<-floor_date(SETO_Compute_FY16_daily_sorted_temp$DateAndTime,"month")
# Compute average and peak monthly usage
SETO_Compute_avg_monthly<-sqldf('select my as DateAndTime,
                                SubscriptionGUID,
                                Customer,
                                GeographyName,
                                RegionName,
                                ModelSegment,
                                ClusterType,
                                VMType,
                                VMSize,
                                VMSeries,
                                avg(TotalQuantity) as AvgMonthlyQuantity
                                FROM SETO_Compute_FY16_daily_sorted_temp
                                GROUP BY my,SubscriptionGUID,Customer,GeographyName,RegionName,ModelSegment,ClusterType,VMType,VMSize')
SETO_Compute_peak_monthly<-sqldf('select my as DateAndTime,
                                 SubscriptionGUID,
                                 Customer,
                                 GeographyName,
                                 RegionName,
                                 ModelSegment,
                                 ClusterType,
                                 VMType,
                                 VMSize,
                                 VMSeries,
                                 max(TotalQuantity) as PeakMonthlyQuantity
                                 FROM SETO_Compute_FY16_daily_sorted_temp
                                 GROUP BY my,SubscriptionGUID,Customer,GeographyName,RegionName,ModelSegment,ClusterType,VMType,VMSize')
# NOTE(review): Avg30Day divides the monthly sum by a flat 30 regardless of
# the month's actual day count -- confirm this is the intended convention.
SETO_Compute_monthly_Detailed<-sqldf('select my as DateAndTime,
                                     SubscriptionGUID,
                                     Customer,
                                     GeographyName,
                                     RegionName,
                                     ModelSegment,
                                     ClusterType,
                                     VMType,
                                     VMSize,
                                     VMSeries,
                                     avg(TotalQuantity) as AvgMonthlyQuantity,
                                     max(TotalQuantity) as PeakMonthlyQuantity,
                                     sum(TotalQuantity)/30 as Avg30Day
                                     FROM SETO_Compute_FY16_daily_sorted_temp
                                     GROUP BY my,SubscriptionGUID,Customer,GeographyName,RegionName,ModelSegment,ClusterType,VMType,VMSize')
SETO_Compute_monthly_TopLevel<-sqldf('select my as DateAndTime,
                                     SubscriptionGUID,
                                     avg(TotalQuantity) as AvgMonthlyQuantity,
                                     max(TotalQuantity) as PeakMonthlyQuantity,
                                     sum(TotalQuantity) as MonthlyAgg,
                                     sum(TotalQuantity)/30 as Avg30Day
                                     FROM SETO_Compute_FY16_daily_sorted_temp
                                     GROUP BY my,SubscriptionGUID')
# Validate that average value snaps to monthly compute hours found in CSDF Forecast Comparison
April16_Validation_TopLevel<-sum(SETO_Compute_monthly_TopLevel$Avg30Day[SETO_Compute_monthly_TopLevel$DateAndTime=="2016-04-01"]);
April16_Validation_Detailed<-sum(SETO_Compute_monthly_Detailed$Avg30Day[SETO_Compute_monthly_Detailed$DateAndTime=="2016-04-01"]);
SETO_Compute_monthly_TotalValidation<-sqldf('select my as DateAndTime,
                                            avg(TotalQuantity) as AvgMonthlyQuantity,
                                            max(TotalQuantity) as PeakMonthlyQuantity,
                                            sum(TotalQuantity) as MonthlyAgg,
                                            sum(TotalQuantity)/30 as Avg30Day
                                            FROM SETO_Compute_FY16_daily_sorted_temp
                                            GROUP BY my')
# NOTE(review): despite the name, count_SubIDs holds the unique subscription
# IDs themselves, not their count (wrap in length() for a count).
count_SubIDs<-unique(SETO_Compute_monthly_Detailed$SubscriptionGUID);
# Write SETO Monthly Compute Detailed and TopLevel to CSV
write.csv(SETO_Compute_monthly_TopLevel,"c:/temp/SETO_SubID_InvestigationCOMPUTE_TopLevel.csv")
write.csv(SETO_Compute_monthly_Detailed,"c:/temp/SETO_SubID_InvestigationCOMPUTE_Detailed.csv")
SETO_SubID_List<-data.frame(unique(SETO_Compute_monthly_TopLevel$SubscriptionGUID))
# Quick look at one subscription's monthly average usage.
plot(SETO_Compute_monthly_TopLevel[SETO_Compute_monthly_TopLevel$SubscriptionGUID==SETO_SubID_List[3,],]$AvgMonthlyQuantity, type="l")
# Box plot of Avg30Day by month across all subscriptions.
SETO_Compute_monthly_TopLevel_BoxPlot<-SETO_Compute_monthly_TopLevel;
SETO_Compute_monthly_TopLevel_BoxPlot$DateAndTime<-as.yearmon(SETO_Compute_monthly_TopLevel_BoxPlot$DateAndTime);
boxplot(Avg30Day ~ DateAndTime, data=SETO_Compute_monthly_TopLevel_BoxPlot, xlab="Month", ylab="Azure Compute Units");
# validate against Excel results
# Pull one subscription's monthly rows for a manual cross-check.
# NOTE(review): the WHERE clause uses `==` -- SQLite accepts it, but `=`
# is the portable SQL form.
test<-sqldf('SELECT
            DateAndTime,
            Customer,
            GeographyName,
            RegionName,
            ModelSegment,
            ClusterType,
            VMType,
            VMSize,
            VMSeries,
            AvgMonthlyQuantity,
            SubscriptionGUID
            FROM SETO_Compute_avg_monthly
            WHERE SubscriptionGUID=="002b06d5-140b-4518-ab8c-67bb8e174d68"')
#
#
#headSETOust_compute<-read.csv("C:/UST_Usage/UstOnlyComputeUsage.ss.csv")
#msn_compute<-read.csv("C:/UST_Usage/MSN_Template_ComputeUsage.ss.csv")
#ust_compute$DateAndTime<-as.Date(ust_compute$DateAndTime,format="%m/%d/%Y")
#msn_compute$DateAndTime<-as.Date(msn_compute$DateAndTime,format="%m/%d/%Y");
#yr<-strftime(ust_compute$DateAndTime,"%Y")
#mo<-strftime(ust_compute$DateAndTime,"%m")
#
#msn_275_compute<-read.csv("C:/UST_Usage/MSNComputeUsage.ss.csv");
#nrow(msn_275_compute)
#msn_275_compute$DateAndTime<-as.Date(msn_275_compute$DateAndTime,format="%m/%d/%Y");
#msn_275_compute<-msn_275_compute[msn_275_compute$ClusterType=="COMPUTE",]
#msn_275_compute<-msn_275_compute[msn_275_compute$Customer=="MSN",]
#nrow(msn_275_compute)
#msn_275_compute<-msn_275_compute[grep("D",msn_275_compute$VMSize),]$TotalQuantity
#msn_275_compute[grep("D",msn_275_compute$VMSize),]$TotalQuantity<-msn_275_compute[grep("D",msn_275_compute$VMSize),]$TotalQuantity/1.625
#msn_275_daily<-aggregate(msn_275_compute$TotalQuantity,by=list(msn_275_compute$DateAndTime),FUN=sum)
#names(msn_275_daily)<-c("Date","ACU")
#plot(msn_275_daily,type="l", main="MSN Azure Compute Daily Usage - Filtered",ylab="Cores")
#lines
#msn_compute<-msn_compute[msn_compute$VMSize!="UNKNOWN",]
#msn_compute<-msn_compute[msn_compute$Customer=="MSN",]
#msn_compute<-msn_compute[msn_compute$ClusterType=="COMPUTE",]
#msn_compute[grep("D",msn_compute$VMSize),]$TotalQuantity<-msn_compute[grep("D",msn_compute$VMSize),]$TotalQuantity/1.625
#msn_daily<-aggregate(msn_compute$TotalQuantity,by=list(msn_compute$DateAndTime),FUN=sum)
#names(msn_daily)<-c("Date","ACU")
#plot(msn_daily, type="l")
#max(msn_daily$ACU)
#msn_275_region<-unique(msn_275_compute$RegionName)
#msn_275_region<-as.data.frame(msn_275_region)
#names(msn_275_region)<-c("RegionName")
#msn_275_region<-sort(msn_275_region$RegionName)
#msn_275_region<-as.data.frame(msn_275_region)
#names(msn_275_region)<-c("RegionName")
#msn_275_daily_region<-aggregate(msn_275_compute$TotalQuantity,by=list(msn_275_compute$DateAndTime,msn_275_compute$RegionName),FUN=sum)
#names(msn_275_daily_region)<-c("Date","REGION","ACU")
#plot(msn_275_daily_region["REGION"=="APAC EAST",]$ACU,type="l", main="MSN Azure Compute Daily Usage - 275 SubIDs")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="APAC EAST",]$ACU,type="l",ylab="ACU",xlab="Date",main="APAC EAST")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="APAC SOUTHEAST",]$ACU,type="l",ylab="ACU",xlab="Date",main="APAC SOUTHEAST")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="CHINA EAST",]$ACU,type="l",ylab="ACU",xlab="Date",main="CHINA EAST")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="CHINA NORTH",]$ACU,type="l",ylab="ACU",xlab="Date",main="CHINA NORTH")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="EUROPE NORTH",]$ACU,type="l",ylab="ACU",xlab="Date",main="EUROPE NORTH")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="EUROPE WEST",]$ACU,type="l",ylab="ACU",xlab="Date",main="EUROPE WEST")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="JAPAN EAST",]$ACU,type="l",ylab="ACU",xlab="Date",main="JAPAN EAST")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="US CENTRAL",]$ACU,type="l",ylab="ACU",xlab="Date",main="US CENTRAL")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="US EAST",]$ACU,type="l",ylab="ACU",xlab="Date",main="US EAST")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="US EAST 2",]$ACU,type="l",ylab="ACU",xlab="Date",main="US EAST 2")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="US NORTH CENTRAL",]$ACU,type="l",ylab="ACU",xlab="Date",main="US NORTH CENTRAL")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="US SOUTH CENTRAL",]$ACU,type="l",ylab="ACU",xlab="Date",main="US SOUTH CENTRAL")
#plot(msn_275_daily_region[msn_275_daily_region$REGION=="US WEST",]$ACU,type="l",ylab="ACU",xlab="Date",main="US WEST")
#US NORTH CENTRAL
#yr_
#write.csv(msn_275_daily_region,"c:/UST_Usage/msn_regionView.csv")
#yr<-unique(yr)
#mo<-unique(mo)
#ust_compute_ts<-ust_compute[,c("DateAndTime","TotalQuantity")]
#ust_daily<-aggregate(ust_compute$TotalQuantity,by=list(ust_compute$DateAndTime),FUN=sum)
#names(ust_daily)<-c("Date","ACU")
#plot(ust_daily,type="l")
#msn_daily<-aggregate(msn_compute$TotalQuantity,by=list(msn_compute$DateAndTime),FUN=sum)
#names(msn_daily)<-c("Date","ACU")
#ust_yearmon<-as.yearmon(ust_compute_ts$DateAndTime)
#ust_monthly<-aggregate(ust_compute$TotalQuantity,by=list(ust_yearmon),FUN=sum)
#test$Group.1<-as.Date(test$Group.1)
#plot(test[1:nrow(test)-1,])
#daily_arima<-auto.arima(daily$x)
#plot(forecast(daily_arima,h=100),type="l")
#daily_ets<-ets(daily$x)
#plot(forecast(daily_ets,h=730))
#plot(forecast(daily_arima,h=730),type="l")
#daily_arima_forecast<-forecast(daily_arima,h=730)
#daily_window<-window(daily,)
#ust_monthly_arima<-auto.arima(ust_monthly[5:40,]$x)
#forecast(ust_monthly_arima)
#forecast(ust_monthly_arima)
#ust_monthly_arima<-auto.arima(ust_monthly[10:30,]$x)
#forecast(ust_monthly_arima)
#plot(forecast(ust_monthly_arima,h=24))
#plots
# ---- UST / MSN plotting tail -----------------------------------------------
# NOTE(review): ust_daily, ust_compute, msn_daily, msn_compute and
# msn_275_compute are defined only in the commented-out block above, so these
# lines error unless that block is first re-enabled and run.
plot(ust_daily,type="l",main="Universal Store Azure Compute Daily Usage")
plot(ust_daily,type="l",main="Universal Store Azure Compute Daily Usage",col="dark blue")
#Auto.Arima
# Fit an ARIMA model to a mid-series window of the daily ACU values.
ust_daily_arima<-auto.arima(ust_daily[200:600,]$ACU)
ust_daily$Month<-cut(ust_daily$Date,breaks = "month")
plot(msn_daily, type="l", main="MSN Azure Compute Daily Usage")
# Build a sorted one-column data frame of distinct MSN region names.
msn_region<-unique(msn_compute$RegionName)
msn_region<-as.data.frame(msn_region)
names(msn_region)<-c("RegionName")
msn_region<-sort(msn_region$RegionName)
msn_region<-as.data.frame(msn_region)
names(msn_region)<-c("RegionName")
# Same again for the 275-subscription MSN extract.
msn_275_region<-unique(msn_275_compute$RegionName)
msn_275_region<-as.data.frame(msn_275_region)
names(msn_275_region)<-c("RegionName")
msn_275_region<-sort(msn_275_region$RegionName)
msn_275_region<-as.data.frame(msn_275_region)
names(msn_275_region)<-c("RegionName")
# Same for the UST extract.
ust_region<-unique(ust_compute$RegionName)
ust_region<-as.data.frame(ust_region)
names(ust_region)<-c("RegionName")
ust_region<-sort(ust_region$RegionName)
ust_region<-as.data.frame(ust_region)
names(ust_region)<-c("RegionName")
# NOTE(review): this assigns to msn_daily_ (trailing underscore) but the next
# line renames msn_daily -- likely a typo; confirm which object is intended.
msn_daily_<-aggregate(msn_compute$TotalQuantity,by=list(msn_compute$DateAndTime),FUN=sum)
names(msn_daily)<-c("Date","ACU")
|
516b74f2e817ebcc367fdb53334f7c70ac960508
|
777015d3dd7f5a2524ca376ed1c59be3c093428b
|
/SPID-function.R
|
d956361d01305b85e0dd63b48e0889c09480a451
|
[] |
no_license
|
sshonosuke/SPID
|
843caf14a5352d7645fb04767b08c70f6f0fb0b1
|
fb9feb045ee3662f0d2df7c9756e4c0288ec9c62
|
refs/heads/master
| 2021-06-24T05:43:30.589511
| 2021-03-29T07:44:26
| 2021-03-29T07:44:26
| 211,786,136
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,532
|
r
|
SPID-function.R
|
library(MASS)
library(MCMCpack)
library(SparseM)
library(statmod)
####### PWD: Pair-wise Difference Prior #######
# W is (non-scaled) adjacent matrix
# PWD: MCMC sampler under the pair-wise difference (Gaussian CAR-type) prior.
# Arguments (as used below):
#   Data   : m x N matrix; row i holds the N bin counts/weights for area i,
#            multiplying the log bin probabilities in the likelihood.
#   Z      : numeric vector of N+1 bin boundaries.
#   W      : m x m (non-scaled) adjacency matrix.
#   mcmc   : total MCMC iterations; burn: initial draws discarded.
#   family : "LN" (log-normal, p = 2 parameters) or "SM" / "DG"
#            (three-parameter CDFs with log-scale parameters -- presumably
#            Singh-Maddala and Dagum; confirm against the paper).
#   print  : if TRUE, report progress every 100 iterations.
# Returns posterior draws of U (area parameters), Mu (grand means), Tau
# (random-effect precisions), Lam (spatial precisions), plus the per-area
# ML estimates (ML) and their Hessians (Hessian).
PWD=function(Data,Z,W,mcmc=12000,burn=2000,family="LN",print=F){
  m=dim(Data)[1]
  N=length(Z)-1
  # Distributions: Dist(x, U) evaluates the chosen family's CDF at x given
  # the (transformed-scale) parameter vector U.
  if(family=="LN"){
    p=2
    Dist=function(x,U){
      mm=U[1]; ss=sqrt(exp(U[2]))
      plnorm(x,mm,ss)
    }
  }
  if(family=="SM"){
    p=3
    Dist=function(x,U){
      a=exp(U[1]); b=exp(U[2]); c=exp(U[3])
      y=(x/b)^a; 1-(1+y)^(-c)
    }
  }
  if(family=="DG"){
    p=3
    Dist=function(x,U){
      a=exp(U[1]); b=exp(U[2]); c=exp(U[3])
      y=(x/b)^a; (1+1/y)^(-c)
    }
  }
  # log-likelihood: sum over bins of count * log(bin probability); zero
  # bin probabilities are floored at 1e-10 to keep the log finite.
  LogLike=function(data,U){
    val=0
    for(k in 1:N){
      dd=Dist(Z[k+1],U)-Dist(Z[k],U)
      dd[dd==0]=10^(-10)
      val=val+(data[k])*log(dd)
    }
    return(val)
  }
  # Gaussian approximation of likelihood: per-area ML estimate Ut[i, ] and
  # Hessian P[i, , ]; non-positive-definite Hessians fall back to their
  # diagonal with negative entries replaced by 0.01.
  Ut=matrix(NA,m,p); P=array(NA,c(m,p,p))
  for(i in 1:m){
    opt=function(u){ -LogLike(Data[i,],u) }
    ml=optim(par=rep(0,p),fn=opt,hessian=T)
    Ut[i,]=ml$par
    P[i,,]=ml$hessian
    if(min(eigen(P[i,,])$values)<0){
      P[i,,]=diag(diag(P[i,,]))
      diag(P[i,,])[diag(P[i,,])<0]=0.01
    }
  }
  # prior
  # NOTE(review): b0 and c0 are defined but never used in this function
  # (the precision update below carries no gamma-prior terms) -- confirm
  # whether the prior was meant to enter the Langevin step.
  b0=1; c0=1 # gamma priors for random effect precision and spatial precision
  a0=0.001 # precision parameter in priors for grand means
  # initial values
  U.pos=array(NA,c(mcmc,m,p))
  Mu.pos=matrix(NA,mcmc,p)
  Tau.pos=matrix(NA,mcmc,p)
  Lam.pos=matrix(NA,mcmc,p)
  U=Ut
  Mu=rep(0,p)
  Tau=rep(10,p)
  Lam=rep(10,p)
  # MCMC iterations
  W=as(W,"sparseMatrix")
  rW=apply(W,1,sum)
  # WW is the graph Laplacian of W; Q(tau, lam) is the precision matrix of
  # the pairwise-difference prior.
  WW=as(diag(rW)-W,"sparseMatrix")
  Q=function(tau,lam){ tau*diag(m)+lam*WW }
  tr=function(x){ sum(diag(x)) }
  for(r in 1:mcmc){
    # update mu: conjugate normal draw given U and Tau
    mm=Tau*apply(U,2,sum)/(m*Tau+a0)
    ss=1/(m*Tau+a0)
    Mu=rnorm(p,mm,sqrt(ss))
    Mu.pos[r,]=Mu
    resid=t(t(U)-Mu)
    # WD[l]: weighted sum of squared pairwise differences for parameter l
    WD=c()
    for(l in 1:p){
      U.mat=matrix(rep(U[,l],m),m,m)
      Dif=(U.mat-t(U.mat))^2
      WD[l]=sum(W*Dif)/2
    }
    # update precision parameters (Langevin): MALA proposal with step h,
    # gradients dU1/dU2, and the usual asymmetric-proposal correction
    # q1/q2 in the acceptance probability.
    for(l in 1:p){
      h=0.1
      cu=c(Tau[l],Lam[l])
      Q1=Q(cu[1],cu[2])
      invQ1=solve(Q1)
      aa=sum(resid[,l]^2); bb=WD[l]
      dU1=c()
      dU1[1]=-0.5*tr(invQ1)+0.5*aa
      dU1[2]=-0.5*tr(invQ1%*%WW)+0.5*bb
      prop=cu-h*dU1+sqrt(2*h)*rnorm(2)
      prop[prop<0.01]=0.01
      Q2=Q(prop[1],prop[2])
      invQ2=solve(Q2)
      # log target: 0.5*log|Q| via the sparse Cholesky minus the quadratic
      # forms (log-determinant from SparseM's chol).
      val1=chol(as(Q1,"matrix.csr"))@log.det-0.5*cu[1]*aa-0.5*cu[2]*bb
      val2=chol(as(Q2,"matrix.csr"))@log.det-0.5*prop[1]*aa-0.5*prop[2]*bb
      dU2=c()
      dU2[1]=-0.5*tr(invQ2)+0.5*aa
      dU2[2]=-0.5*tr(invQ2%*%WW)+0.5*bb
      q2=-sum((prop-cu+h*dU1)^2)/(4*h)
      q1=-sum((cu-prop+h*dU2)^2)/(4*h)
      prob=min(1,exp(val2-q2-(val1-q1)))
      ch=rbinom(1,1,prob)
      Tau[l]=cu[1]+ch*(prop[1]-cu[1])
      Lam[l]=cu[2]+ch*(prop[2]-cu[2])
    }
    Tau.pos[r,]=Tau
    Lam.pos[r,]=Lam
    # update U (independent MH): proposal from the Gaussian approximation
    # combining the likelihood Hessian, the spatial term and the prior mean;
    # accepted against the exact likelihood minus the approximation.
    for(i in 1:m){
      pp1=U[i,]
      ai=Lam*apply(W[i,]*U,2,sum)
      bi=P[i,,]%*%Ut[i,]
      ci=Tau*Mu
      mm=ai+bi+ci # approximated mean
      vv=solve(P[i,,]+diag(Tau+Lam*rW[i])+diag(rep(0.0001,p))) # approximated covariance
      pp2=mvrnorm(1,vv%*%mm,vv) # proposal
      resid1=pp1-Ut[i,]
      L1=LogLike(Data[i,],pp1)+0.5*as.vector(t(resid1)%*%P[i,,]%*%resid1)
      resid2=pp2-Ut[i,]
      L2=LogLike(Data[i,],pp2)+0.5*as.vector(t(resid2)%*%P[i,,]%*%resid2)
      prob=min(1,exp(L2-L1))
      U[i,]=pp1+rbinom(1,1,prob)*(pp2-pp1)
    }
    U.pos[r,,]=U
    if(print==T & round(r/100)==r/100){ print(r) }
  }
  # discard burn-in draws
  om=1:burn
  Res=list(U.pos[-om,,],Mu.pos[-om,],Tau.pos[-om,],Lam.pos[-om,],Ut,P)
  names(Res)=c("U","Mu","Tau","Lam","ML","Hessian")
  return(Res)
}
###### PWL: Pair-wise Difference Laplace Prior ######
# W is (non-scaled) adjacent matrix
# PWL: MCMC sampler under the pair-wise difference Laplace prior.  Same data
# model and Gaussian likelihood approximation as PWD, but the spatial prior
# penalises absolute (not squared) pairwise differences; the Laplace prior
# is handled through latent per-edge scales S (inverse-Gaussian updates) and
# the intractable normalising constant is estimated by Rn Monte Carlo draws.
# Arguments: as in PWD, plus Rn = number of Monte Carlo samples per
# normalising-constant estimate.  Returns the same list as PWD except Lam is
# a vector (a single spatial parameter shared across the p components).
PWL=function(Data,Z,W,mcmc=12000,burn=2000,family="LN",Rn=10,print=F){
  m=dim(Data)[1]
  N=length(Z)-1
  # Dist(x, U): CDF of the chosen family at x given transformed parameters U.
  if(family=="LN"){
    p=2
    Dist=function(x,U){
      mm=U[1]; ss=sqrt(exp(U[2]))
      plnorm(x,mm,ss)
    }
  }
  if(family=="SM"){
    p=3
    Dist=function(x,U){
      a=exp(U[1]); b=exp(U[2]); c=exp(U[3])
      y=(x/b)^a; 1-(1+y)^(-c)
    }
  }
  if(family=="DG"){
    p=3
    Dist=function(x,U){
      a=exp(U[1]); b=exp(U[2]); c=exp(U[3])
      y=(x/b)^a; (1+1/y)^(-c)
    }
  }
  # log-likelihood: binned counts times log bin probabilities (floored at
  # 1e-10 to keep the log finite).
  LogLike=function(data,U){
    val=0
    for(k in 1:N){
      dd=Dist(Z[k+1],U)-Dist(Z[k],U)
      dd[dd==0]=10^(-10)
      val=val+(data[k])*log(dd)
    }
    return(val)
  }
  # Gaussian approximation of likelihood: per-area ML estimates and
  # Hessians, with a diagonal fallback for non-positive-definite Hessians.
  Ut=matrix(NA,m,p); P=array(NA,c(m,p,p))
  for(i in 1:m){
    opt=function(u){ -LogLike(Data[i,],u) }
    ml=optim(par=rep(0,p),fn=opt,hessian=T)
    Ut[i,]=ml$par
    P[i,,]=ml$hessian
    if(min(eigen(P[i,,])$values)<0){
      P[i,,]=diag(diag(P[i,,]))
      diag(P[i,,])[diag(P[i,,])<0]=0.01
    }
  }
  # prior
  # NOTE(review): b0 and c0 are defined but unused here (the precision
  # update below carries no gamma-prior terms) -- confirm intent.
  b0=1; c0=1 # gamma prior for random effect precision and spatial precision
  a0=0.001 # precision parameter for grand mean
  # initial values (Tau/Lam scaled by the mean Hessian; uL is the upper
  # bound enforced on the random-walk proposals below)
  U.pos=array(NA,c(mcmc,m,p))
  Mu.pos=matrix(NA,mcmc,p)
  Tau.pos=matrix(NA,mcmc,p)
  Lam.pos=c()
  U=Ut
  Mu=rep(0,p)
  Tau=rep(0.5*mean(P),p)
  Lam=0.5*mean(P)
  uL=2*mean(P)
  # MCMC iterations
  W=as(W,"sparseMatrix")
  S=as(matrix(1/mean(P),m,m)*W,"sparseMatrix")
  # Q(tau, SS): precision matrix with edge weights 1/SS (Laplacian of the
  # scaled adjacency) plus tau on the diagonal.
  Q=function(tau,SS){
    sW=W/SS; sW[is.na(sW)]=0
    rW=apply(sW,1,sum)
    tau*diag(m)+as(diag(rW)-sW,"sparseMatrix")
  }
  tr=function(x){ sum(diag(x)) }
  # delta: number of edges in W (each edge counted once)
  delta=sum(W)/2
  # MCMC
  for(r in 1:mcmc){
    # update grand mean: conjugate normal draw
    mm=Tau*apply(U,2,sum)/(m*Tau+a0)
    ss=1/(m*Tau+a0)
    Mu=rnorm(p,mm,sqrt(ss))
    Mu.pos[r,]=Mu
    resid=t(t(U)-Mu)
    aa=apply(resid^2,2,sum)
    # Dif[i, j]: squared pairwise difference summed over the p components
    # (only on edges of W); bb is the corresponding L1 penalty term.
    Dif=0
    for(l in 1:p){
      U.mat=matrix(rep(U[,l],m),m,m)
      Dif=Dif+W*(U.mat-t(U.mat))^2
    }
    bb=sum(sqrt(Dif))/2
    # update precision parameters: random-walk MH with bandwidth `band`,
    # clamped to [0.01, uL]; the log normalising constant of the prior is
    # estimated by averaging Rn Monte Carlo replicates (log-sum-exp below).
    band=0.5
    prop.lam=Lam+band*rnorm(1)
    prop.tau=Tau+band*rnorm(p)
    prop.lam[prop.lam<0.01]=0.01
    prop.tau[prop.tau<0.01]=0.01
    prop.lam[prop.lam>uL]=uL
    prop.tau[prop.tau>uL]=uL
    LD1=c(); LD2=c()
    for(k in 1:Rn){
      # draw latent edge scales under the proposed (rS1) and current (rS2)
      # lambda; only the lower triangle is filled, then symmetrised.
      rS1=matrix(0,m,m); rS2=matrix(0,m,m)
      for(i in 1:m){
        rS1[i,1:i]=rgamma(i,1,0.5*prop.lam^2)
        rS2[i,1:i]=rgamma(i,1,0.5*Lam^2)
      }
      rS1=rS1*W; rS2=rS2*W
      SS1=rS1+t(rS1); SS2=rS2+t(rS2)
      val1=c(); val2=c()
      for(l in 1:p){
        Q1=Q(prop.tau[l],SS1)
        val1[l]=-chol(as(Q1,"matrix.csr"))@log.det
        Q2=Q(Tau[l],SS2)
        val2[l]=-chol(as(Q2,"matrix.csr"))@log.det
      }
      LD1[k]=sum(val1)-sum(log(attr(SS1,"x")))/4-delta*log(prop.lam)
      LD2[k]=sum(val2)-sum(log(attr(SS2,"x")))/4-delta*log(Lam)
    }
    # log-sum-exp stabilisation of the Monte Carlo averages
    mv1=max(LD1); mv2=max(LD2)
    dd1=mv1+log(sum(exp((LD1-mv1))))
    dd2=mv2+log(sum(exp((LD2-mv2))))
    val1=-dd1-0.5*sum(aa*prop.tau)-prop.lam*bb
    val2=-dd2-0.5*sum(aa*Tau)-Lam*bb
    prob=min(1,exp(val1-val2))
    ch=rbinom(1,1,prob)
    Tau=Tau+ch*(prop.tau-Tau)
    Lam=Lam+ch*(prop.lam-Lam)
    Tau.pos[r,]=Tau
    Lam.pos[r]=Lam
    # update S: latent edge scales via inverse-Gaussian draws (statmod's
    # rinvgauss), lower triangle filled then symmetrised on the edges of W.
    newS=matrix(0,m,m)
    for(i in 1:m){
      newS[i,(1:i)]=1/rinvgauss(i,sqrt(Lam^2/Dif[i,1:i]),Lam^2)
    }
    newS=newS*W
    S=(newS+t(newS))
    # update U: independent MH with a Gaussian-approximation proposal, as
    # in PWD, but spatial weights come from the latent scales 1/S.
    for(i in 1:m){
      pp1=U[i,]
      Si=diag(Tau+sum(1/S[i,W[i,]>0]))
      Ai=U/S[i,]; Ai[abs(Ai)==Inf]=0
      ai=apply(W[i,]*Ai,2,sum)
      bi=P[i,,]%*%Ut[i,]
      ci=Tau*Mu
      mm=ai+bi+ci # approximated mean
      vv=solve(P[i,,]+Si+diag(rep(0.0001,p))) # approximated covariance
      pp2=mvrnorm(1,vv%*%mm,vv) # proposal
      resid1=pp1-Ut[i,]
      L1=LogLike(Data[i,],pp1)+0.5*as.vector(t(resid1)%*%P[i,,]%*%resid1)
      resid2=pp2-Ut[i,]
      L2=LogLike(Data[i,],pp2)+0.5*as.vector(t(resid2)%*%P[i,,]%*%resid2)
      prob=min(1,exp(L2-L1))
      U[i,]=pp1+rbinom(1,1,prob)*(pp2-pp1)
    }
    U.pos[r,,]=U
    if(print & round(r/100)==r/100){ print(r) }
  }
  # discard burn-in draws
  om=1:burn
  Res=list(U.pos[-om,,],Mu.pos[-om,],Tau.pos[-om,],Lam.pos[-om],Ut,P)
  names(Res)=c("U","Mu","Tau","Lam","ML","Hessian")
  return(Res)
}
####### Independent Random Effect Model #######
# IRE: MCMC sampler for the model with independent (non-spatial) random
# effects.  Same data model and Gaussian likelihood approximation as PWD,
# but the area parameters are exchangeable around Mu with precision Tau,
# which has a conjugate gamma update.  Returns posterior draws of U, Mu and
# Tau plus the per-area ML estimates and Hessians.
IRE=function(Data,Z,mcmc=12000,burn=2000,family="LN",print=F){
  m=dim(Data)[1]
  N=length(Z)-1
  # Dist(x, U): CDF of the chosen family at x given transformed parameters U.
  if(family=="LN"){
    p=2
    Dist=function(x,U){
      mm=U[1]; ss=sqrt(exp(U[2]))
      plnorm(x,mm,ss)
    }
  }
  if(family=="SM"){
    p=3
    Dist=function(x,U){
      a=exp(U[1]); b=exp(U[2]); c=exp(U[3])
      y=(x/b)^a; 1-(1+y)^(-c)
    }
  }
  if(family=="DG"){
    p=3
    Dist=function(x,U){
      a=exp(U[1]); b=exp(U[2]); c=exp(U[3])
      y=(x/b)^a; (1+1/y)^(-c)
    }
  }
  # log-likelihood: binned counts times log bin probabilities (floored at
  # 1e-10 to keep the log finite).
  LogLike=function(data,U){
    val=0
    for(k in 1:N){
      dd=Dist(Z[k+1],U)-Dist(Z[k],U)
      dd[dd==0]=10^(-10)
      val=val+(data[k])*log(dd)
    }
    return(val)
  }
  # Gaussian approximation of likelihood: per-area ML estimates and
  # Hessians, with a diagonal fallback for non-positive-definite Hessians.
  Ut=matrix(NA,m,p); P=array(NA,c(m,p,p))
  for(i in 1:m){
    opt=function(u){ -LogLike(Data[i,],u) }
    ml=optim(par=rep(0,p),fn=opt,hessian=T)
    Ut[i,]=ml$par
    P[i,,]=ml$hessian
    if(min(eigen(P[i,,])$values)<0){
      P[i,,]=diag(diag(P[i,,]))
      diag(P[i,,])[diag(P[i,,])<0]=0.01
    }
  }
  # prior
  b0=1; c0=1 # gamma prior for random effect precision
  a0=0.001 # precision parameter for grand mean
  # initial values
  U.pos=array(NA,c(mcmc,m,p))
  Mu.pos=matrix(NA,mcmc,p)
  Tau.pos=matrix(NA,mcmc,p)
  U=Ut
  # NOTE(review): `M` is never used below (PWD/PWL initialise `Mu` at this
  # point); Mu is drawn fresh each iteration, so behaviour is unaffected.
  M=rep(0,p)
  Tau=rep(10,p)
  # MCMC iterations
  for(r in 1:mcmc){
    # update mu: conjugate normal draw given U and Tau
    mm=Tau*apply(U,2,sum)/(m*Tau+a0)
    ss=1/(m*Tau+a0)
    Mu=rnorm(p,mm,sqrt(ss))
    Mu.pos[r,]=Mu
    # update tau: conjugate gamma draw from the residual sums of squares
    resid=t(t(U)-Mu)
    sq=apply(resid^2,2,sum)
    Tau=rgamma(p,m/2+b0,sq/2+c0)
    Tau.pos[r,]=Tau
    # update U: independent MH with a Gaussian-approximation proposal,
    # accepted against the exact likelihood minus the approximation.
    for(i in 1:m){
      pp1=U[i,]
      bi=as.vector(P[i,,]%*%Ut[i,])
      ci=Tau*Mu
      mm=bi+ci # approximated mean
      vv=solve(P[i,,]+diag(Tau+rep(0.0001,p))) # approximated covariance
      pp2=mvrnorm(1,vv%*%mm,vv) # proposal
      resid1=pp1-Ut[i,]
      L1=LogLike(Data[i,],pp1)+0.5*as.vector(t(resid1)%*%P[i,,]%*%resid1)
      resid2=pp2-Ut[i,]
      L2=LogLike(Data[i,],pp2)+0.5*as.vector(t(resid2)%*%P[i,,]%*%resid2)
      prob=min(1,exp(L2-L1))
      U[i,]=pp1+rbinom(1,1,prob)*(pp2-pp1)
    }
    U.pos[r,,]=U
    if(print==T & round(r/100)==r/100){ print(r) }
  }
  # discard burn-in draws
  om=1:burn
  Res=list(U.pos[-om,,],Mu.pos[-om,],Tau.pos[-om,],Ut,P)
  names(Res)=c("U","Mu","Tau","ML","Hessian")
  return(Res)
}
|
e5c1c0b12ee501b8f8288c9c809134fc93d4a446
|
6277f517285925aaa02843655a7788c6fd495963
|
/week1/summerR_w1.R
|
23e0f364d45b8fe35aaef8b54e064f2cdd72ef62
|
[] |
no_license
|
ayatsko/summerR
|
2033c8e9fbcb7f6bec1d78d04f79353fd06e00f0
|
37ecbf8a12a53a5923b4a30f79ba2b5d6a4b99d7
|
refs/heads/main
| 2023-06-20T03:31:04.038810
| 2021-07-12T20:37:53
| 2021-07-12T20:37:53
| 376,075,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,673
|
r
|
summerR_w1.R
|
## INTRODUCTION ----
# welcome to your first R script! this is where you can write code to run as well as notes
# to record your thoughts / directions for whatever you're coding. some basics: using the
# '#' indicates non-executable code, i.e. notes to yourself. using the 'hashtag' on a new
# line will be your friend as you are annotating an R script
# if you want to write a piece of code, leave out the '#'. you'll notice that the text
# color changes. since R is basically a fancy calculator, let's try code for simple math:
1+2
3*3
1277590030/900009
# above are three executable lines of code. to 'run' this code, navigate your cursor to the
# line you wish to run and click the 'run' button in the top right corner of this panel.
# alternatively, if using a mac, you can use the shortcut 'command + return' to run a line
# give it a go on the above lines 10-12!
# you can also create variables using '=' or '<-'
# (note: by convention, idiomatic R uses '<-' for assignment; '=' works but is discouraged)
X<-2
X=2
X
# and then you can do simple math on the variables you define, as follows:
a <- 10
b <- 2
a*b
b/a
a^b
sin(a)
(sqrt(b))/(b*a)
# better yet, we aren't limited to just numbers! the notation is a bit different...
# (careful: naming a variable 'c' shadows R's built-in c() combine function --
# fine for a demo, but avoid it in real scripts)
c <- c("cat")
# obviously we can't do math on these 'character strings' because, for instance, (cat*dog)
# is not well defined and we also can't learn much from it. but in our data, it can be useful
# to have characters, especially if we are using descriptive sample IDs (i.e., sample1)
# R is much more than a fancy calculator. you can write functions, assign variables to
# insert into said functions, input data, build graphs, do statistical analyses, so on and
# so forth. there are a million applications and the more you work with R, the more you
# realize how much there is to learn!
## THE WORKSPACE ----
# there is a lot that you can do with R, tinkering around with the basic calculator functions
# or building your own variables. but for ecology, one of the prime uses of R is to bring
# in your data such that you can analyze and visualize it. that's what we will focus most of
# our energy towards next
# when working in R, it is best to start off by setting up and defining your workspace,
# especially when you are bringing in your own data files
# the first step in this is to set a working directory, or where you will calling on your
# files or datasheets in order to bring them into the R program. a single argument will
# get you set up, as follows:
# (note: the path below is machine-specific -- change it to wherever your week1 folder
# lives. in production scripts, prefer project-relative paths over setwd())
setwd("/Users/abbeyyatsko/Desktop/week1")
# make sure you put your working directory in quotes such that R recognizes it!
# this basically tells R that this is where you want it to look to retrieve data or where
# to place and locally store output data / figures
# this step can be different for everyone as you have to make sure that your pathname
# correctly navigates to where your week1 folder for summerR is stored locally
# next, let's do some housecleaning to set up the workspace:
# this line clears your global environment (top right panel), basically ensuring that you begin
# with a clean slate as you bring in your data and functions:
# (note: rm(list=ls()) does NOT restart R or unload packages -- it only removes objects)
rm(list=ls())
# another part of setting up the workspace is installing and loading relevant 'packages' for
# your script. these packages are basically extensions of R that cater to a specific purpose,
# such as graphing or statistical packages. we will get more into these with specific applications,
# but I like to set them up in my R script early so that they are neatly organized and all
# loaded properly before I get going!
# here's an example package called 'ggplot2', which is used for generating graphs and very polished
# data viz. first we have to install the package, and then once it is installed, we call on it
# in the 'library' to make sure it is loaded and ready to go
# (install.packages() only needs to be run once per machine, not every session)
install.packages("ggplot2")
library(ggplot2)
# now, let's bring in some data. say you have a data set in excel that you want to do some basic
# stats on / exploration of content. since we already set our working directory in the week1
# folder, R knows to look there for the file that we want to bring in
# let's explore the doggies.csv data set! to do this, we need to 'read in' the file
read.csv("doggies.csv")
# let's put this dataset into an object so that we can look at it easier:
doggies <- read.csv("doggies.csv")
# note that .csv is easiest to work with / the convention for dataframes inputted to R. you can
# also use .txt and .xls but it's kinda a hassle and its best to just get in the habit of
# working with .csv
# THE DATA ----
# there are many different options for (pre)viewing your data, give em a try:
View(doggies)
head(doggies)
tail(doggies)
colnames(doggies)
length(doggies)
length(doggies$owner)
# you can also use indexing to identify a specific value (or range of values) within your
# data frame:
# what is the entry found in row 1, column 2?
doggies[1,2]
# what are all of the entries in the second column (breed)?
doggies[,2]
# to call on a specific column, we can use the '$' to link the dataframe to the column of
# interest. here's an example, where we look to find the average age of all of the dogs:
mean(doggies$age_yr)
# we can also do this for other descriptive statistics:
max(doggies$age_yr)
min(doggies$age_yr)
range(doggies$age_yr)
sd(doggies$age_yr)
# try this one out:
max(doggies$owner)
# but wait, does that make sense? what would the maximum value be for a character response?
# another thing that we can use R for is to understand the structure of the data. how are
# different entries being understood by R - are they numbers, values, characters...? let's
# as R about structure using the str() command
str(doggies)
# we see that there are characters, integers, and numerical values in this dataframe. we can
# transition the class of some of these variables if need be:
doggies$weight_lb <- as.numeric(doggies$weight_lb)
str(doggies)
# what changed?
# we can also add or rename columns in our dataframe. to rename a column:
colnames(doggies)[5] <-"treat"
View(doggies)
# to add a column:
doggies$species <- "Canis lupus familiaris"
View(doggies)
# we can also take a peek at what this data looks by using graphical visualization
# run these lines to get a deeper look into what the data shapes up to be
# distribution of dog ages
boxplot(doggies$age_yr)
# distribution of dog age based on females v. male owners
boxplot(doggies$age_yr~doggies$owner,col=heat.colors(2))
# see what the dog's treat preferences are
counts <- table(doggies$treat)
barplot(counts, xlab="preferred treat")
# see what the dog's treat preferences are by owner
counts <- table(doggies$owner, doggies$treat)
barplot(counts, xlab="preferred treat", legend = rownames(counts))
# doing more with boxplots to look at data distribution and summary statistics
boxplot(weight_lb ~ exercise, data = doggies, frame = FALSE)
# much more can be done with graphing outside of 'base R' functions (what we have
# been using above). for example, this is where the ggplot2 package can come in!
# explore the doggies dataset a bit more. see what the average tail length is,
# or make a graph showing the distribution of what states the dogs are from.
# have fun with it!
# since we added a 'species' column to our dataframe in R, we might want to
# export the dataframe now that it is updated with new information. to do this:
# (NOTE(review): the output filename below spells "dogggies" with three g's --
# likely a typo -- and the absolute path is machine-specific)
write.csv(doggies,"/Users/abbeyyatsko/Desktop/week1/dogggies_new.csv", row.names = FALSE)
# you can also save images that you generated in the viewer panel (bottom right)
# by exporting them as images or pdf files
# in R, you can always continually build your coding skills - more next time!
|
e014e8443ed637488265a2adfe89456859d761da
|
30d21f7163d8a0382e05566a48c0b042dc96ced6
|
/Hydro Functions/Joint_Prob.R
|
493c1e95a41028a0dcc93592d2a3818b29cce5ce
|
[] |
no_license
|
mchernos/R-Hydro-Library
|
123ad54592aa5e9b1fbb17d5dbe9a5e609c6b3d7
|
402c333918fb71736fc269667f4094f24d5881c2
|
refs/heads/master
| 2020-05-21T13:35:17.496809
| 2018-02-20T16:32:59
| 2018-02-20T16:32:59
| 45,942,745
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,929
|
r
|
Joint_Prob.R
|
# JOINT PROBABILITIES
# Clears all objects from the global environment so the script starts fresh.
# (Note: this does not unload packages or reset options; discouraged in
# scripts meant to be sourced by others.)
rm(list = ls())
# Mid- Sites (Lethbridge, Calgary, Drumheller)
#' Read one station's annual-extremes CSV and return its annual flow maxima.
#'
#' @param x Station file prefix; reads 'annualextremes/<x>_AnnualExtremes.csv'
#'   relative to the working directory.
#' @param name Column name to give the maximum-flow column in the result.
#' @return A data frame with columns 'year' and `name`, restricted to flow
#'   records (PARAM == 1; PARAM == 2 is water level) with non-missing MAX.
read.water.data <- function(x, name = 'MaxQ') {
  data <- read.csv(paste0('annualextremes/', x, '_AnnualExtremes.csv'))
  # Keep flow records only and drop missing maxima.
  # `!is.na(...)` replaces the fragile `is.na(...) == F` comparison
  # (F is an ordinary, reassignable variable in R).
  data <- data[data$PARAM == 1 & !is.na(data$MAX), ]
  data <- data.frame(data$Year, data$MAX)
  colnames(data) <- c('year', name)
  data
}
# Scatterplot of y against x with an OLS fit line, the fit's r-squared shown
# above the plot, and each point labelled with `labelname`.
# Default axis titles mirror the original call sites (one gauge vs another).
regplot <- function(x, y, labelname, vartitle = 'Maximum Annual Daily Flow (cms)',
                    ylab = 'Farm', xlab = 'YXC') {
  plot(x, y, main = vartitle, ylab = ylab, xlab = xlab,
       pch = 21, bg = rgb(0, 0, 0, 0.6))
  model <- lm(y ~ x)
  abline(model, col = 'red')
  r2 <- round(summary(model)$r.squared, 4)
  mtext(paste('r.squared = ', r2), cex = 0.6)
  text(x, y, labelname)
}
# Joint probability that all three correlated events occur.
# Applies the chain rule P{abc} = P{c|ab} * P{a|b} * P{a}, approximating each
# conditional probability from the pairwise correlations in `corx`
# (with P{c|ab} taken as P{a|c} * P{b|c}, as in the original model).
#   Pa, Pb, Pc : marginal exceedance probabilities for the three sites
#   corx       : 3x3 correlation matrix between the sites
Prob3 <- function(Pa, Pb, Pc, corx) {
  # Pairwise correlations between the three sites.
  r_ab <- corx[1, 2]
  r_ac <- corx[1, 3]
  r_bc <- corx[2, 3]
  # Correlation-adjusted conditional probability of p given q.
  cond <- function(p, q, r) p + r * sqrt((p / q) * (1 - p) * (1 - q))
  Pa_given_b <- cond(Pa, Pb, r_ab)
  Pa_given_c <- cond(Pa, Pc, r_ac)
  Pb_given_c <- cond(Pb, Pc, r_bc)
  # Chain-rule product.
  (Pa_given_c * Pb_given_c) * Pa_given_b * Pa
}
########################
# PRAIRIE RIVER GAUGES #
########################
# Read in data
leth = read.water.data('oldmanlethbridge', 'lethbridge')
yyc = read.water.data('bowcalgary', 'calgary')
drum = read.water.data('reddeerdrumheller', 'drumheller')
# Full outer join on year so gaps at one station don't drop the others.
# (Style note: all = T uses the reassignable shorthand; TRUE is safer.)
water = merge(leth, merge(yyc, drum, by = 'year', all = T), by = 'year', all = T)
# par(mfrow = c(1,3))
# regplot(water$lethbridge, water$calgary, water$year, ylab = 'Lethbridge', xlab = 'Calgary')
# regplot(water$lethbridge, water$drumheller,water$year, ylab = 'Lethbridge', xlab = 'Drumheller')
# regplot(water$calgary, water$drumheller, water$year, ylab = 'Calgary', xlab = 'Drumheller')
# Spearman rank correlations between stations, ignoring pairwise-missing years.
corx = cor(water[colnames(water)[-1]], use = "pairwise.complete.obs", method = 'spearman')
# Joint probability of simultaneous exceedance at the three prairie stations.
# NOTE(review): the hard-coded marginal probabilities (0.261, ...) are
# presumably site-specific exceedance probabilities -- source not shown here,
# confirm where they come from.
Prob3(0.261,0.261,0.183,corx)
Prob3(0.279,0.274,0.189,corx)
##########################
# HEADWATER RIVER GAUGES #
##########################
# Read in data
wald = read.water.data('oldmanwaldrons', 'waldrons')
banff = read.water.data('bowbanff', 'banff')
burnt = read.water.data('reddeerburnttimber', 'burnttimber')
headwater = merge(wald, merge(banff, burnt, by = 'year', all = T), by = 'year', all = T)
# # par(mfrow = c(1,3))
# regplot(headwater$wald, headwater$banff, headwater$year, ylab = 'Waldron\'s Corner', xlab = 'Banff')
# regplot(headwater$wald, headwater$burnt, headwater$year, ylab = 'Waldron\'s Corner', xlab = 'Burnt Timber')
# regplot(headwater$banff, headwater$burnt, headwater$year, ylab = 'Banff', xlab = 'Burnt Timber')
corx2 = cor(headwater[colnames(headwater)[-1]], use = "pairwise.complete.obs", method = 'spearman')
Prob3(0.218, 0.204, 0.278, corx2)
Prob3(0.216,0.239,0.269, corx2)
# http://www.real-statistics.com/correlation/multiple-correlation/
# use to get the multiple r
# whats the probability for 100 yr events in all 3 rivers?
# NOTE(review): this P is never used -- it is overwritten by P = 1/Tp below
# before any Prob3 call reads it.
P = 1/100 # 100 yr event
###########################
# CONFLUENCE RIVER GAUGES #
###########################
mouth = read.water.data('oldmanmouth', 'mouth')
bass = read.water.data('bowbassano', 'bassano')
bind = read.water.data('reddeerbindloss', 'bindloss')
# NOTE(review): this overwrites the `headwater` data frame built in the
# headwater section above -- a distinct name (e.g. `confluence`) would be
# less error-prone if sections are re-run out of order.
headwater = merge(mouth, merge(bass, bind, by = 'year', all = T), by = 'year', all = T)
corx3 = cor(headwater[colnames(headwater)[-1]], use = "pairwise.complete.obs", method = 'spearman')
Prob3(0.239,0.28,0.214, corx3)
Prob3(0.253,0.274, 0.244, corx3)
corx3
# Conditional Probability Graph
# Return periods from 1 to 200 years; P is the per-year exceedance probability.
# NOTE(review): at Tp = 1, log(log(1)) = -Inf, so the first point is dropped
# from the plot silently.
Tp = seq(1,200,0.01)
P = 1/Tp
prairie = Prob3(P,P,P,corx)
headwaters = Prob3(P,P,P,corx2)
confl = Prob3(P,P,P,corx3)
# Double-log transform of the return period for a Gumbel-style axis.
x = log(log(Tp))
# NOTE(review): "Probaility" in the axis label below is a typo in the
# displayed string ("Probability"); left as-is here since this pass only
# touches comments.
plot(x, prairie,type = 'l', xaxt = 'n', col = 'navy',
ylab = 'Conditional Probaility of Exceedance', xlab = 'Return Period (yrs)')
lines(x, headwaters, col = 'darkgreen')
lines(x, confl, col = 'brown')
# Custom axis ticks at familiar return periods, placed on the log-log scale.
name = c(1.01,1.1,2,5,10,20,50,100,200)
axis(1,at = log(log(name)), labels = as.character(name))
abline(v = log(log(name)), lty = 3, col = rgb(0,0,0,.5))
abline(h = seq(0.2,1,0.2), lty = 3,col = rgb(0,0,0,.5))
legend('topright', c('Headwaters', 'Mid-Plains/Prairie', 'Confluence'),
col = c('darkgreen','navy','brown'), lwd = 2, bg = 'white')
title('Likelihood All 3 Rivers in Site-Type Exceed Threshold')
library(lubridate)
# running.mean
# Trailing moving average: position i of the result is the mean of the `span`
# values of x ending at (and including) i. Positions with an incomplete
# window (the first span - 1) are NA.
#
# Fixes an off-by-one in the original, which averaged x[(i - span):i] -- a
# window of span + 1 values (and, at i == span, silently a span-wide window
# because x[0] drops) -- so the "7-day" average in genQ7 actually spanned
# 8 days. Extra arguments in `...` are accepted (and ignored) for
# backward compatibility with existing call sites.
running.mean <- function(x, span, ...) {
  n <- length(x)
  out <- rep(NA_real_, n)  # preallocate instead of growing inside the loop
  if (span <= n) {         # guard: span:n would count DOWN if span > n
    for (i in span:n) {
      out[i] <- mean(x[(i - span + 1):i], na.rm = TRUE)
    }
  }
  out
}
# Build a table of each year's minimum 7-day average flow (the "Q7" low-flow
# statistic) for one gauging station.
#   x: file prefix; reads 'daily/<x>_daily.csv' (real header is on line 2 of
#      the file, hence skip = 1)
#   y: column name to give the Q7 values in the returned data frame
# Returns one row per year with columns 'year' (factor of year-start dates
# from cut()) and y. Depends on running.mean() defined above for the
# 7-day trailing window.
genQ7 = function(x, y){
# Read in data
data = read.csv(paste('daily/',x,'_daily.csv', sep = ''), skip=1)
data = data[data$PARAM == 1,] # remove level data (param = 2)
# Find Minimum 7-day moving average from each year
Q7 = running.mean(data$Value, 7)
data = data.frame(date = data$Date, Q = data$Value, Q7)
# NOTE(review): strptime() returns POSIXlt; storing it in a data frame is
# fragile -- as.Date() would be safer. cut(..., breaks = 'years') still
# works here because cut() has a POSIXt method.
data$date = strptime(data$date, format = '%Y/%m/%d')
data2 = aggregate(data['Q7'], list(year = cut(data$date, breaks = 'years')), min)
# Drop years whose minimum is NA (e.g. fewer than 7 daily records).
# (Style note: is.na(...) == F would read better as !is.na(...).)
data2 = data2[is.na(data2$Q7) == F,]
colnames(data2) = c('year', y)
data2
}
## Q7s in Prairie
lethQ7 = genQ7('oldmanlethbridge', 'lethbridge')
yycQ7 = genQ7('bowcalgary', 'calgary')
drumQ7 = genQ7('reddeerdrumheller', 'drumheller')
water2 = merge(lethQ7, merge(yycQ7, drumQ7, by = 'year', all = T), by = 'year', all = T)
corx = cor(water2[colnames(water2)[-1]], use = "pairwise.complete.obs", method = 'spearman')
# NOTE(review): this reassigns `corx`, clobbering the annual-maxima
# correlation matrix computed in the prairie section above.
Prob3(0.245,0.198, 0.175, corx)
Prob3(0.257,0.207,0.187,corx)
## Q7s in Headwaters
# NOTE(review): the prefix here is 'oldmanwaldron' (singular) while the
# annual-extremes section used 'oldmanwaldrons' -- confirm both filenames
# actually exist, or one of the two is a typo.
waldQ7 = genQ7('oldmanwaldron', 'waldrons')
banffQ7 = genQ7('bowbanff', 'banff')
btQ7 = genQ7('reddeerburnttimber', 'burnttimber')
water3 = merge(waldQ7, merge(banffQ7, btQ7, by = 'year', all = T), by = 'year', all = T)
corx2 = cor(water3[colnames(water3)[-1]], use = "pairwise.complete.obs", method = 'spearman')
Prob3(0.219,0.271,0.243, corx2)
Prob3(0.273,0.226,0.232, corx2)
# Probability (in percent) of a simultaneous 100-yr low-flow event at all
# three stations of each site type.
P = 1/100
Prob3(P,P,P,corx)*100
Prob3(P,P,P,corx2)*100
|
0ec932528d761232acbddb1351e5d12da2623208
|
617bd80da2f7f605ab580dfc54465c2a83c84f8e
|
/vale_cambio_custo.R
|
7a704647010460dbb5baec99c2c77f7140f1a0b0
|
[] |
no_license
|
wemigliari/governance
|
65997565f0aca84dc1f61bb2075dffc390ad0a52
|
ebb58e6ccfe27f981cf7c0d6a06ce304452f3c4c
|
refs/heads/master
| 2023-07-05T11:57:48.998839
| 2021-08-13T21:13:20
| 2021-08-13T21:13:20
| 263,469,120
| 1
| 0
| null | 2021-01-13T23:22:21
| 2020-05-12T22:46:13
|
R
|
UTF-8
|
R
| false
| false
| 1,875
|
r
|
vale_cambio_custo.R
|
library(readxl)
library(dplyr)
library(ggplot2)
library(zoo)
# NOTE(review): theme_ipsum() used below comes from the hrbrthemes package,
# which is never loaded here -- this script errors unless hrbrthemes is
# attached elsewhere. Add library(hrbrthemes) when confirming dependencies.
# Load the exchange-rate / cost spreadsheet (machine-specific absolute path).
cambio_custo <- read_xlsx("/Users/wemigliari/Documents/R/tabelas/vale_cambio_custo.xlsx")
cambio_custo <- data.frame(cambio_custo)
# NOTE(review): the next three lines build a parsed-date column, cbind it on,
# then immediately delete it -- a net no-op. Only the as.Date() call on
# $Data two lines down has any effect.
cambio_custo2 <- as.Date(cambio_custo$Data, format = "%m/%d/%Y", tryFormats = c("%m-%d-%Y", "%Y/%m/%d"))
cambio_custo <- cbind(cambio_custo, cambio_custo2)
cambio_custo$cambio_custo2 <- NULL
cambio_custo$Data <- as.Date(cambio_custo$Data)
# Prints the class interactively; no effect on the data.
class(cambio_custo)
# Dual-series line chart: exchange rate vs. cost, with the two dam-collapse
# dates (Mariana 2015, Brumadinho 2019) marked by red dotted vertical lines.
ggplot(cambio_custo, aes(x=Data)) +
geom_line(aes(y = Cambio, color = "Câmbio (US$)"), size = 1, linetype = "F1") +
geom_line(aes(y = Custo, color = "Custo (R$)"), size = 1, linetype = "longdash") +
xlab("") +
theme_ipsum() +
theme(plot.caption = element_text(size = 10)) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1)) +
# NOTE(review): caption is set twice -- the second labs() below overrides
# this one (which is missing a period after "Banco Central"); the first
# caption argument is dead code.
labs(title="Gráfico B. Variação Cambial Real-Dólar e Diminuição de Custos da Vale S.A. no Brasil ",
y = "Câmbio Real-Dólar", caption = "Fonte: Banco Central Elaborado por Migliari, W. (2021).",
color = "") +
labs(caption = "Fonte: Banco Central. Elaborado por Migliari, W. (2021).",
color = "") +
scale_color_manual(values = c("steelblue", "#1c4966")) +
annotate(geom="text", x=as.Date("2015-11-05"), y=2, label="Rompimento da Barragem do Fundão, Mariana",
angle = 90,
size=2,
color = "#3E3E3E") +
annotate(geom="text", x=as.Date("2018-12-20"), y=2, label="Rompimento da Barragem da Mina Córrego do Feijão, Brumadinho",
angle = 90,
size=2,
color = "#3E3E3E")+
geom_vline(xintercept=as.Date("2015-12-05"), linetype="dotted", color = "red")+
geom_vline(xintercept=as.Date("2019-01-20"), linetype="dotted", color = "red")+
annotate("rect", xmin = as.Date("2015-01-01"), xmax = as.Date("2021-06-21"), ymin = 0, ymax = 6,
alpha = .2)
|
b095d3fa3c2496ab79331841787d799e1d63f5d5
|
f48e25ade098aef7aa6f9fde4927bbf2b2092d14
|
/man/dasl.nightmares.Rd
|
815b192f4368f31ba2d4bbaf84bf5fa9ba731a9a
|
[] |
no_license
|
sigbertklinke/mmstat.data
|
23fa7000d5a3f776daec8b96e54010d85515dc7d
|
90f698e09b4aac87329b0254db28d835014c5ecb
|
refs/heads/master
| 2020-08-18T04:29:05.613265
| 2019-10-17T11:44:57
| 2019-10-17T11:44:57
| 215,747,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,090
|
rd
|
dasl.nightmares.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.nightmares}
\alias{dasl.nightmares}
\title{Nightmares}
\format{63 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/nightmares/?sf_paged=28}{Nightmares}
}
\description{
Researchers interviewed participants to find some who reliably fell asleep and awoke on one side and who could remember their dreams. They found 63 participants, of whom 41 were right-side sleepers and 22 slept on their left side. Then they interviewed them about their dreams. Of the 41 right-side sleepers, only 6 reported often having nightmares. But of the 22 left-side sleepers 9 reported nightmares. Is the difference significant?
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
\url{http://www.sleepandhypnosis.org/pdf/323031765.pdf} “Sleeping Position, Dream Emotions, and Subjective Sleep Quality”, Mehmet Yucel Agargun, M.D., Murat Boysan, M.A., Lutfu Hanoglu, M.D.
}
\concept{Tables}
|
6a29bab2493e65af5f031d6373b9a1712b7b0b60
|
3430916a9c1ce99802d8c0afc0f063b9fccd4542
|
/Assignment2/Storm2.R
|
d641a7bbfacadc78c4c81646c9ca16ee486ab015
|
[] |
no_license
|
Fbarangan/Reproducible_Research
|
f0b3ed4cc809c0bc32604195d75e93727aff1609
|
191cc5d39bd3541980533f9e6235092320e10dd3
|
refs/heads/master
| 2020-12-24T18:55:54.787436
| 2016-05-28T16:20:31
| 2016-05-28T16:20:31
| 56,559,020
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,569
|
r
|
Storm2.R
|
# Course Assignment 2
# data is from https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2
#
# info https://d396qusza40orc.cloudfront.net/repdata%2Fpeer2_doc%2Fpd01016005curr.pdf
#
# FAQ https://d396qusza40orc.cloudfront.net/repdata%2Fpeer2_doc%2FNCDC%20Storm%20Events-FAQ%20Page.pdf
#
# Title
#
# NOTE(review): install.packages() inside a script re-installs on every run
# and prompts for a mirror in non-interactive sessions; install once outside
# the script instead.
install.packages("dplyr")
install.packages("reshape2")
library(dplyr)
library(ggplot2)
library(reshape2)
getwd()
# Set correct working directory
if (!file.exists("stormData")) {dir.create("stormData")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2"
# NOTE(review): the file is a .bz2, not a zip, and the path below
# ("storm_data.zip") does not match the path read.csv() reads two lines
# down ("storm_data") -- one of the two is wrong. read.csv() can read .bz2
# directly, so downloading to "./stormData/storm_data.csv.bz2" and reading
# that same path would fix both.
destfile <- "./stormData/storm_data.zip"
download.file(fileUrl, destfile = destfile)
dateDownloaded <- date()
# Locate file and unzip
rawStormData <- read.csv("./stormData/storm_data", header = TRUE)
# Set correct working directory
# Change to DPLYR
# NOTE(review): tbl_df() is deprecated in modern dplyr; as_tibble() is the
# current equivalent.
rawStormDataDF_ <- tbl_df(rawStormData)
rawStormDataDF_$EVTYPE <- tolower(rawStormDataDF_$EVTYPE)
# Select Variable
# Variables selected
rawStormDataDF_ <- rawStormDataDF_ %>%
select(State = STATE, Event_Type = EVTYPE, Fatalities = FATALITIES, Injuries = INJURIES,
Property_Damage = PROPDMG, Property_Expo = (PROPDMGEXP), Crop_Damage = CROPDMG,
Crop_Expo = (CROPDMGEXP))
# Event-type consolidation, categories 1-6. Each section (1) filters rows
# whose raw EVTYPE belongs to a hand-curated synonym list, (2) builds and
# prints a one-column data frame documenting that list, and (3) overwrites
# Event_Type with the canonical label. NOTE(review): the same pattern is
# repeated 12 times -- a helper function taking (label, synonyms) would
# remove ~100 lines of duplication.
# 1 Storm Surge
rawStormDataDF_stormSurge <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c ("storm surge","coastal storm","dust storm","tropical storm"))
# List of Event type : "Storm Surge"
Event_Type_Storm_Surge <- as.data.frame(c("storm surge","coastal storm","dust storm","tropical storm"))
colnames(Event_Type_Storm_Surge)[1] <- "Storm Surge Category"
Event_Type_Storm_Surge
# rename Event Type to Storm Surge
rawStormDataDF_stormSurge$Event_Type <- "Storm Surge"
# 2 Flood
rawStormDataDF_flood <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("flooding","flood","flash flood","flood/flash flood","urban/sml stream fld"))
# List of Event type : "Flood"
Event_Type_Flood <- as.data.frame(c("flooding","flood","flash flood","flood/flash flood","urban/sml stream fld"))
colnames(Event_Type_Flood)[1] <- "Flood Category"
Event_Type_Flood
# rename Event_Type to Flood
rawStormDataDF_flood$Event_Type <- "Flood"
# 3 Tornado
rawStormDataDF_tornado <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("tornado","waterspout/tornado"))
# List of Event type : "Tornado"
Event_Type_Tornado <- as.data.frame(c("tornado","waterspout/tornado"))
colnames(Event_Type_Tornado)[1] <- "Tornado Category"
Event_Type_Tornado
# Rename Event Type to tornado
rawStormDataDF_tornado$Event_Type <- "Tornado"
# 4 Wintry
rawStormDataDF_wintry <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("ice storm","ice","black ice","winter storm","winter storms","winter storm high winds","winter weather/mix","cold","thundersnow","heavy snow","blowing snow","snow","rain/snow","light snow","freezing rain","extreme windchill","blizzard","extreme cold","icy roads","avalanche","winter weather","extreme cold/wind chill","fog and cold temperatures","freezing drizzle","cold/wind chill"))
# List of Event type : "Wintry"
Event_Type_Wintry <- as.data.frame(c("ice storm","ice","black ice","winter storm","winter storms","winter storm high winds","winter weather/mix","cold","thundersnow","heavy snow","blowing snow","snow","rain/snow","light snow","freezing rain","extreme windchill","blizzard","extreme cold","icy roads","avalanche","winter weather","extreme cold/wind chill","fog and cold temperatures","freezing drizzle","cold/wind chill"))
colnames(Event_Type_Wintry)[1] <- "Wintry Category"
Event_Type_Wintry
# Rename Event Type to Wintry
rawStormDataDF_wintry$Event_Type <- "Wintry"
# 5 Rain
rawStormDataDF_Rain <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("rain","heavy rain","excessive rainfall","dense fog","fog"))
# Rename Event Type to rain
rawStormDataDF_Rain$Event_Type <- "Rain"
# List of Event type : "Rain"
Event_Type_Rain <- as.data.frame(c("rain","heavy rain","excessive rainfall","dense fog","fog"))
colnames(Event_Type_Rain)[1] <- "Rain Category"
Event_Type_Rain
# 6 Lightning
rawStormDataDF_lightning <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("lightning"))
# List of Event type : "Lightning"
Event_Type_Lightning <- as.data.frame(c("lightning"))
colnames(Event_Type_Lightning)[1] <- "Lightning Category"
Event_Type_Lightning
# Rename Event Type to Lightning
rawStormDataDF_lightning$Event_Type <- "Lightning"
# Event-type consolidation, categories 7-12 (same filter/document/relabel
# pattern as categories 1-6 above).
# 7 Wind
rawStormDataDF_wind <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("wind","tstm wind","high wind","high winds/snow","strong wind","strong winds","thunderstorm winds","marine strong wind","high winds","thunderstorm wind"))
# List of Event type : "Wind"
Event_Type_Wind <- as.data.frame(c("wind","tstm wind","high wind","high winds/snow","strong wind","strong winds","thunderstorm winds","marine strong wind","high winds","thunderstorm wind"))
colnames(Event_Type_Wind)[1] <- "Wind Category"
Event_Type_Wind
# Rename Event Type to Wind
rawStormDataDF_wind$Event_Type <- "Wind"
# 8 Hurricane
rawStormDataDF_Hurricane <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("hurricane","typhoon","hurricane/typhoon","tropical storm gordon"))
# List of Event type : "Hurricane"
Event_Type_Hurricane <- as.data.frame(c("hurricane","typhoon","hurricane/typhoon","tropical storm gordon"))
colnames(Event_Type_Hurricane)[1] <- "Hurricane category"
Event_Type_Hurricane
# Rename Event Type to Hurricane
rawStormDataDF_Hurricane$Event_Type <- "Hurricane"
# 9 Heat
rawStormDataDF_Heat <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("heat","heat wave","excessive heat","heat wave drought"))
# List of Event type : "Heat"
Event_Type_Heat <- as.data.frame(c("heat","heat wave","excessive heat","heat wave drought"))
colnames(Event_Type_Heat)[1] <- "Heat Category"
Event_Type_Heat
# Rename Event type to Heat
rawStormDataDF_Heat$Event_Type <- "Heat"
# 10 Hail
rawStormDataDF_Hail <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("hail","tstm wind/hail"))
# Rename Event Type to Hail
rawStormDataDF_Hail$Event_Type <- "Hail"
# List of Event type : "Hail"
Event_Type_Hail <- as.data.frame(c("hail","tstm wind/hail"))
colnames(Event_Type_Hail)[1] <- "Hail Category"
Event_Type_Hail
# 11 Fire
rawStormDataDF_Fire <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("wild fires","wild/forest fire","wildfire"))
# rename Event Type Fire
rawStormDataDF_Fire$Event_Type <- "Fire"
# List of Event type : "Fire"
Event_Type_Fire <- as.data.frame(c("wild fires","wild/forest fire","wildfire"))
colnames(Event_Type_Fire)[1] <- "Fire Category"
Event_Type_Fire
# 12 Sea Mishap
rawStormDataDF_Sea <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter(Event_Type %in% c("rip current","rough surf","tsunami","high surf","marine accident","marine thunderstorm wind","marine mishap","rip currents","high wind and seas","heavy surf","heavy surf/high surf"))
# List of Event type : "Sea Mishap"
Event_Type_Sea_Mishap <- as.data.frame(c("rip current","rough surf","tsunami","high surf","marine accident","marine thunderstorm wind","marine mishap","rip currents","high wind and seas","heavy surf","heavy surf/high surf"))
colnames(Event_Type_Sea_Mishap)[1] <- "Sea Mishap Category"
Event_Type_Sea_Mishap
# Rename Event type too Sea Mishap
rawStormDataDF_Sea$Event_Type <- "Sea Mishap"
# 13 Others
# Everything not caught by categories 1-12. NOTE(review): this giant chain
# of `Event_Type != ...` conditions must stay byte-for-byte in sync with the
# twelve synonym lists above -- one maintained vector plus
# filter(!(Event_Type %in% all_categorized)) would be far safer.
rawStormDataDF_Others <- rawStormDataDF_ %>%
select(State, Event_Type, Fatalities, Injuries, Property_Damage, Property_Expo, Crop_Damage, Crop_Expo) %>%
filter( Event_Type != "storm surge", Event_Type != "coastal storm", Event_Type != "dust storm",
Event_Type != "tropical storm",
Event_Type != "flooding", Event_Type != "flood", Event_Type != "flash flood",
Event_Type != "flood/flash flood", Event_Type != "urban/sml stream fld",
Event_Type != "tornado", Event_Type != "waterspout/tornado",
Event_Type != "ice storm",Event_Type != "ice",Event_Type != "black ice",
Event_Type != "winter storm",Event_Type != "winter storms",Event_Type != "winter storm high winds",
Event_Type != "winter weather/mix",Event_Type != "cold",Event_Type != "thundersnow",
Event_Type != "heavy snow",Event_Type != "blowing snow",Event_Type != "snow",
Event_Type != "rain/snow",Event_Type != "light snow",Event_Type != "freezing rain",
Event_Type != "extreme windchill",Event_Type != "blizzard",Event_Type != "extreme cold",
Event_Type != "icy roads", Event_Type != "avalanche",Event_Type != "winter weather",
Event_Type != "extreme cold/wind chill",Event_Type != "fog and cold temperatures",
Event_Type != "freezing drizzle",Event_Type != "cold/wind chill",
Event_Type != "rain",Event_Type != "heavy rain",Event_Type != "excessive rainfall",
Event_Type != "dense fog",Event_Type != "fog",
Event_Type != "lightning",
Event_Type != "wind",Event_Type != "tstm wind",Event_Type != "high wind",
Event_Type != "high winds/snow",Event_Type != "strong wind",Event_Type != "strong winds",
Event_Type != "thunderstorm winds",Event_Type != "marine strong wind",Event_Type != "high winds",
Event_Type != "thunderstorm wind",
Event_Type != "hurricane",Event_Type != "typhoon",Event_Type != "hurricane/typhoon",
Event_Type != "tropical storm gordon",
Event_Type != "heat",Event_Type != "heat wave",Event_Type != "excessive heat",
Event_Type != "heat wave drought",
Event_Type != "hail",Event_Type != "tstm wind/hail",
Event_Type != "wild fires",Event_Type != "wild/forest fire",Event_Type != "wildfire",
Event_Type != "rip current",Event_Type != "rough surf",Event_Type != "strong winds",
Event_Type != "tsunami",Event_Type != "high surf",Event_Type != "marine accident",
Event_Type != "marine thunderstorm wind",Event_Type != "marine mishap",
Event_Type != "rip currents",Event_Type != "high wind and seas",
Event_Type != "heavy surf",Event_Type != "heavy surf/high surf")
# List of Event Type categorize as "Others
# Frequency table of the residual raw event types, most common first.
Event_Type_Others <- rawStormDataDF_Others %>%
group_by(Event_Type) %>%
summarise(n=n()) %>%
arrange(desc(n))
colnames(Event_Type_Others)[1] <- "Others category"
Event_Type_Others
# rename Event type to Other
rawStormDataDF_Others$Event_Type <- "Others"
#Combine all cleaned tables
stormData <- rbind(rawStormDataDF_stormSurge,
rawStormDataDF_flood,
rawStormDataDF_tornado,
rawStormDataDF_wintry,
rawStormDataDF_Rain,
rawStormDataDF_lightning,
rawStormDataDF_wind,
rawStormDataDF_Hurricane,
rawStormDataDF_Heat,
rawStormDataDF_Hail,
rawStormDataDF_Fire,
rawStormDataDF_Sea,
rawStormDataDF_Others)
# 0 observations for Fatalities and Injuries removed
# NOTE(review): `Fatalities > 0 & Injuries > 0` keeps only events with BOTH
# a fatality and an injury, silently dropping e.g. fatal events with no
# injuries. If the intent was "any casualty", this should be `|` -- confirm.
StormDataDF_Fatalities_Injury <- stormData %>%
select(State = State, Event_Type = Event_Type, Fatalities = Fatalities, Injuries = Injuries) %>%
filter( Fatalities > 0 & Injuries > 0)
# Across the United States, which types of events (as indicated in the EVTYPE variable) are most harmful with respect to population health?
# NOTE(review): `arrange(Fatalities = desc(Fatalities))` passes a named
# argument to arrange(); the name is meaningless here and plain
# `arrange(desc(Fatalities))` is the intended idiom.
Population_Health_Fatalities <- StormDataDF_Fatalities_Injury %>%
group_by(Event_Type) %>%
summarise(Fatalities = sum(Fatalities), n = n()) %>%
arrange(Fatalities = desc(Fatalities))
# top 10
top_Population_Health_Fatalities <- Population_Health_Fatalities[c(1:10),]
Population_Health_Injuries <- StormDataDF_Fatalities_Injury %>%
group_by(Event_Type) %>%
summarise(Injuries = sum(Injuries), n = n()) %>%
arrange(Injuries = desc(Injuries))
# top 10
top_Population_Health_Injuries <- Population_Health_Injuries[c(1:10),]
# Question 2
# Across the United States, which types of events have the greatest economic consequences?
# Select Variable exluding thse with 0 value for Property Damage and Crop Damage
# NOTE(review): `select(Property_Damage = as.integer(Property_Damage), ...)`
# is not valid dplyr -- select() only renames/reorders and cannot compute;
# this should be a mutate() before (or instead of) the select. As written it
# errors on modern dplyr.
StormDataDF_Economic_ <- stormData %>%
select (State, Event_Type, Property_Damage = as.integer(Property_Damage),
Property_Expo, Crop_Damage = as.integer(Crop_Damage), Crop_Expo) %>%
filter( Property_Damage > 0 & Crop_Damage > 0)
StormDataDF_Economic_$Property_Expo <- as.character(StormDataDF_Economic_$Property_Expo)
StormDataDF_Economic_$Crop_Expo <- as.character(StormDataDF_Economic_$Crop_Expo)
# Change Exponent to all uppercase
StormDataDF_Economic_$Property_Expo <- toupper(StormDataDF_Economic_$Property_Expo)
StormDataDF_Economic_$Crop_Expo <- toupper(StormDataDF_Economic_$Crop_Expo )
# selected and Replace Property damge Expo "K", "M", and "B" with 1,000, ; 1,000,000 and 1,000,000,000 respectively
# NOTE(review): `filter(Property_Expo = (Property_Expo == "K"))` passes a
# NAMED argument to filter(), which modern dplyr rejects ("must not be
# named"); the intended call is filter(Property_Expo == "K"). The same
# misuse repeats in every branch below.
StormDataDF_Economic_Property_Values_K <- StormDataDF_Economic_ %>%
filter (Property_Expo = (Property_Expo == "K")) %>%
select(State, Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo ) %>%
mutate(Property_Expo = 10^3 )
StormDataDF_Economic_Property_Values_M <- StormDataDF_Economic_ %>%
filter (Property_Expo = (Property_Expo == "M")) %>%
select(State,Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo ) %>%
mutate(Property_Expo = 10^6 )
StormDataDF_Economic_Property_Values_B <- StormDataDF_Economic_ %>%
filter (Property_Expo = (Property_Expo == "B")) %>%
select(State, Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo ) %>%
mutate(Property_Expo = 10^9 )
# select columns excluding those with 1,1000, 1000000, 1000000000
StormDataDF_Economic_Property_Values_others <- StormDataDF_Economic_ %>%
filter (Property_Expo != "",
Property_Expo != "K",
Property_Expo != "M",
Property_Expo != "B") %>%
select(State, Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo )
# Combined all
stormProperty_value_Cleaned <- rbind ( StormDataDF_Economic_Property_Values_K,
StormDataDF_Economic_Property_Values_M,
StormDataDF_Economic_Property_Values_B,
# StormDataDF_Economic_Property_Values_Special_Char,
StormDataDF_Economic_Property_Values_others)
# Non-K/M/B exponent codes stay as character and become NA here.
stormProperty_value_Cleaned$Property_Expo <- as.numeric(stormProperty_value_Cleaned$Property_Expo)
# selected and Replace Crop damge Expo "K", "M", and "B" with 1,000, ; 1,000,000 and 1,000,000,000 respectively
# (Same named-argument filter() misuse as the property section above.)
StormDataDF_Economic_Crop_Values_K <- stormProperty_value_Cleaned %>%
filter (Crop_Expo = (Crop_Expo == "K")) %>%
select(State, Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo ) %>%
mutate(Crop_Expo = 10^3 )
StormDataDF_Economic_Crop_Values_M <- stormProperty_value_Cleaned %>%
filter (Crop_Expo = (Crop_Expo == "M")) %>%
select(State,Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo ) %>%
mutate(Crop_Expo = 10^6 )
StormDataDF_Economic_Crop_Values_B <- stormProperty_value_Cleaned %>%
filter (Crop_Expo = (Crop_Expo == "B")) %>%
select(State, Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo ) %>%
mutate(Crop_Expo = 10^9 )
# select columns excluding those with 1,1000, 1000000, 1000000000
# NOTE(review): unlike the property branch, this one does not exclude
# Crop_Expo != "" -- empty exponents fall into "others" here; confirm that
# asymmetry is intended.
StormDataDF_Economic_Crop_Values_others <- stormProperty_value_Cleaned %>%
filter (Crop_Expo != "K",
Crop_Expo != "M",
Crop_Expo != "B") %>%
select(State, Event_Type, Property_Damage, Property_Expo,
Crop_Damage, Crop_Expo )
# Combined all
stormProperty_Crop_Value_ <- rbind ( StormDataDF_Economic_Crop_Values_K,
StormDataDF_Economic_Crop_Values_M,
StormDataDF_Economic_Crop_Values_B,
StormDataDF_Economic_Crop_Values_others)
# force Crop_Expo to numeric
stormProperty_Crop_Value_$Crop_Expo <- as.numeric(stormProperty_Crop_Value_$Crop_Expo)
# Top Events that affected Property and Crop Damage
# Total dollar damage per event type = mantissa * exponent multiplier,
# summed within category and sorted descending.
Property_Damage <- stormProperty_Crop_Value_ %>%
group_by(Event_Type) %>%
mutate(Property_Value = Property_Damage * Property_Expo) %>%
summarise(Property_Value = sum(Property_Value), n = n()) %>%
arrange(Propert_Value = desc(Property_Value))
# Select top 10
top_Property_Damage <- Property_Damage[c(1:10),]
Crop_Damage <- stormProperty_Crop_Value_ %>%
group_by(Event_Type) %>%
mutate(Crop_Value = Crop_Damage * Crop_Expo) %>%
summarise(Crop_Value = sum(Crop_Value), n = n()) %>%
arrange(Crop_Value = desc(Crop_Value))
# Select top 10
top_Crop_Damage <- Crop_Damage[c(1:10),]
# Side-by-side crop/property totals per event type, NA totals dropped.
mergeProperty_Crop_Value <- merge(Crop_Damage, Property_Damage, by = "Event_Type")
mergeProperty_Crop_ValueDF_ <- mergeProperty_Crop_Value %>%
select(Event_Type, Crop_Value, Property_Value) %>%
filter(!is.na(Crop_Value))
# Long format for ggplot: one row per (event type, cost variable).
melt_mergeProperty_Crop_ValueDF_ <- melt(mergeProperty_Crop_ValueDF_)
colnames(melt_mergeProperty_Crop_ValueDF_)[2] <- "Economic_Variables"
colnames(melt_mergeProperty_Crop_ValueDF_)[3] <- "Costs"
plot1 <- qplot(log(Costs), data = melt_mergeProperty_Crop_ValueDF_, fill= Economic_Variables, binwidth = 1)
plot2 <- qplot(log(Costs), data = melt_mergeProperty_Crop_ValueDF_, geom= "density" ,color = Economic_Variables)
# Same reshape-and-plot treatment for the health totals.
mergeFatalities_Injury <- merge(Population_Health_Fatalities, Population_Health_Injuries, by = "Event_Type")
mergeFatalities_InjuryDF_ <- mergeFatalities_Injury %>%
select(Event_Type, Fatalities, Injuries) %>%
filter(!is.na(Fatalities), !is.na(Injuries))
melt_mergeFatalities_InjuryDF_ <- melt(mergeFatalities_InjuryDF_)
colnames(melt_mergeFatalities_InjuryDF_)[2] <- "Health_Variables"
colnames(melt_mergeFatalities_InjuryDF_)[3] <- "Costs"
plot3 <- qplot(log(Costs), data = melt_mergeFatalities_InjuryDF_, fill= Health_Variables, binwidth = 1)
plot4 <- qplot(log(Costs), data = melt_mergeFatalities_InjuryDF_, geom= "density" ,color = Health_Variables)
|
95293d97859646a9ff831f230b5acd868cf5e305
|
1a7ca7e8718daf545ac7c19804a1c814504dec28
|
/app/assault-weapons.R
|
bf116ec621095c95712cf99c313ebe906911105f
|
[
"MIT"
] |
permissive
|
diegovalle/Assault-Weapons
|
f933bd8d4a010ea0ca914e4904c383e29b9b1b08
|
4147a5a96f8cb94eba8ff8435f9b1ae8d992b905
|
refs/heads/master
| 2021-01-01T05:34:04.062048
| 2010-11-12T01:03:00
| 2010-11-12T01:03:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,550
|
r
|
assault-weapons.R
|
########################################################
##### Author: Diego Valle Jones
##### Website: www.diegovalle.net
##### Date Created: Fri Jun 11 12:22:27 2010
########################################################
#The effect of the assault weapons ban on the proportion of Firearms
#used to kill people
#Structural Change models and Graphics
# Loads shared helpers (defmacro, savePlotAA, data objects such as hom/sui/
# pop2/ban are expected to come from here or from sibling scripts).
# Path is relative to the project root working directory.
source("app/functions.R")
#Macro (gtools::defmacro) that adds a deseasonalized `trend` column to
#`df`: the trend component of an STL decomposition ("per" = periodic
#seasonal window) of column `col`, treated as a monthly time series
#starting in year `start`
addTrend <- defmacro(df, col, start, expr={
  df$trend <- data.frame(stl(ts(df[[col]], start = start, freq = 12), "per")$time.series)$trend
})
#Test for cointegration
#Engle-Granger style cointegration check: regress firearm murders on
#total murders, then run an augmented Dickey-Fuller test (tseries) on
#the regression residuals. Returns the htest object.
unitRoot <- function(df){
  coint.resid <- residuals(lm(df$Murders.with.Firearm ~ df$Murders))
  adf.test(coint.resid)
}
#Looks ok
dlply(hom, .(State), unitRoot)
########################################################
#Suicides and Suicides with a Firearm
########################################################
addTrend(sui, "prop", 1998)
#sui$trend2 <- data.frame(stl(ts(sui$prop, start = 1998, freq = 12), "per")$time.series)$trend
p <- ggplot(sui, aes(date, prop)) +
geom_line(size = .2) +
geom_line(data = sui, aes(date, trend), color = "blue") +
scale_x_date() +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
opts(title = "Suicides by Firearm Discharge as a Proportion of all Suicides") +
scale_y_continuous(limits = c(0, .27)) +
annotate("text", x = ban, y = .05, hjust = 1.01,
label = "Assault weapon ban expiration") +
xlab("date") + ylab("annualized monthly suicide rate")
savePlotAA(p, "graphs/mexico-sui-firearm-prop.png")
sui$trend <- NULL
sui.both <- sui
sui.both[3:4] <- data.frame(sapply(sui.both[3:4],
function(x) x / pop2$Monthly[1:nrow(sui.both)] *
100000 * 12))
sstl <- apply(sui.both[3:4], 2,
function(x) {
stl(ts(x, start = 1998, freq = 12), "per")
})
sstl <- lapply(sstl, function(x)
cbind(sui.both, data.frame(x$time.series)))
sui.both <- melt(sui.both, id = c("Year", "m"),
measure.var = c("Firearm.Suicides",
"Suicides"))
sui.both$date <- as.Date(paste(sui.both$Year,
sui.both$m,"15", sep = "/"), "%Y/%m/%d")
#Necessary for ggplot
sstl$Firearm.Suicides$date <- sui.both$date[1:132]
sstl$Suicides$date <- sui.both$date[1:132]
sstl$Firearm.Suicides$variable <- "foo"
sstl$Suicides$variable <- "foo"
p <- ggplot(sui.both, aes(date, value, group = variable)) +
geom_line(size = .2) +
geom_line(data = sstl$Firearm.Suicides, aes(date, trend),
color = "blue") +
geom_line(data = sstl$Suicides, aes(date, trend), color = "blue") +
scale_x_date() +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
opts(title = "Monthly Suicide and Suicide by Firearm Discharge Rates with Trends") +
annotate("text", x= as.Date("1999-01-15"), y = 4.2,
label ="Suicides") +
annotate("text", x= as.Date("2000-01-15"), y = 1.1,
label ="Suicides with a Firearm") +
annotate("text", x = ban, y = 2.5, hjust = 1.01,
label = "Assault weapon ban expiration") +
xlab("date") + ylab("annualized monthly suicide rate")
savePlotAA(p, "graphs/mexico-sui-firearm.png")
########################################################
#Homicide and Firearm Homicide rates
########################################################
hom.both <- ddply(hom, .(Year, m), function(df)
c(firearm = sum(df$Murders.with.Firearm),
homicides = sum(df$Murders)))
hom.both[3:4] <- data.frame(sapply(hom.both[3:4],
function(x) x / pop2$Monthly[1:nrow(hom.both)] *
100000 * 12))
#STL Decomposition
stl <- apply(hom.both[3:4], 2,
function(x) {
stl(ts(x, start = 1998, freq = 12), "per")
})
stl <- lapply(stl, function(x)
cbind(hom.both, data.frame(x$time.series)))
hom.both <- melt(hom.both, id = c("Year", "m"))
hom.both$date <- as.Date(paste(hom.both$Year,
hom.both$m,"15", sep = "/"), "%Y/%m/%d")
#Necessary for ggplot
stl$firearm$date <- hom.both$date[1:131]
stl$homicide$date <- hom.both$date[1:131]
stl$homicide$variable <- "foo"
stl$firearm$variable <- "foo"
p <- ggplot(hom.both, aes(date, value, group = variable)) +
geom_line(size = .2) +
geom_line(data = stl$firearm, aes(date, trend), color = "blue") +
geom_line(data = stl$homicide, aes(date, trend), color = "blue") +
scale_x_date() +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
opts(title = bothtitle) +
scale_y_continuous(limits = c(0, 17)) +
annotate("text", x= as.Date("2001-06-15"), y = 12.5,
label ="Homicides") +
annotate("text", x= as.Date("2002-01-15"), y = 3.5,
label ="Homicides by Firearm") +
annotate("text", x = ban, y = 16, hjust = 1.01,
label = "Assault weapon ban expiration") +
xlab("date") + ylab("annualized monthly homicide rate")
savePlotAA(p, "graphs/mexico-hom-firearm.png")
########################################################
#For all of Mexico
########################################################
hom.mx <- ddply(hom, .(Year, m), function(df)
sum(df$Murders.with.Firearm) / sum(df$Murders))
hom.mx$date <- as.Date(paste(hom.mx$Year,
hom.mx$m,"15", sep = "/"), "%Y/%m/%d")
addTrend(hom.mx, "V1", 1998)
rate <- ts(hom.mx$V1, start = kstart.year, freq = 12)
breakmx <- breakpoints(rate ~ 1, h = 12, breaks = 1)
breakconf <- confint(breakmx, breaks = 1)$confint
aw.break <- sapply(breakconf, convertToDate)
p <- ggplot(hom.mx, aes(date, V1)) +
geom_line(size = .2) +
geom_line(aes(date, trend), color = "blue") +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
opts(title = "Homicides by Firearm Discharge as a Proportion of all Homicides in Mexico") +
annotate("text", x = ban, y = .35, hjust = 1.01,
label = "Assault weapon ban expiration") +
xlab("date") + ylab("proportion") +
scale_y_continuous(formatter = "percent", limits = c(0, .8))
savePlotAA(p, "graphs/mexico-prop.png", width = 800,
height = 600)
########################################################
#For the different regions (SW NW, etc)
########################################################
x <- dlply(hom.region, .(Region), transform, trend = data.frame(stl(ts(prop, start = 2001, freq = 12), "per")$time.series)$trend )
hom.region <- rbind.fill(x)
#hom.region$trend <- data.frame(stl(ts(hom.region$prop, start = 1998, freq = 12), "per")$time.series)$trend
hom.region <- ddply(hom.region, .(Region), transform,
order = mean(prop))
hom.region$Region <- reorder(hom.region$Region, -hom.region$order)
p <- ggplot(hom.region, aes(date, prop)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
geom_line(size = 0.2) +
geom_line(aes(date, trend), color = "blue") +
scale_x_date(major = "2 years") +
facet_wrap(~ Region) +
xlab("date") + ylab("proportion of homicides by firearm") +
opts(title="Proportion of Homicides by Firearm Discharge in the Crazy Wrong Regions of Mexico")+
scale_y_continuous(formatter = "percent", limits = c(0, .8))
savePlotAA(p, "graphs/regions.png")
########################################################
#The Regions according to INEGI look crazy, let's do them manually
#http://es.wikipedia.org/wiki/Archivo:800px-Mexico_map_of_regionsfr.png
########################################################
hom$region <- 0
#Macro (gtools::defmacro): label the rows of `df` whose State appears in
#`states` with `region.name` in the `region` column (the column must
#already exist -- see `hom$region <- 0` above)
setRegion <- defmacro(df, states, region.name, expr={
  df[df$State %in% states,]$region <- region.name
})
regions <- list()
regions[["North West"]] <- c("Durango", "Sinaloa", "Chihuahua", "Sonora", "Baja California Sur", "Baja California")
regions[["West"]] <- c("Nayarit", "Jalisco", "Colima", "Michoacán")
regions[["East"]] <- c("Puebla", "Veracruz Llave", "Tlaxcala", "Hidalgo")
regions[["North East"]] <- c("Coahuila", "Nuevo León", "Tamaulipas")
regions[["South East"]] <- c("Tabasco", "Campeche", "Quintana Roo", "Yucatán")
regions[["South West"]] <- c("Guerrero", "Oaxaca","Chiapas")
regions[["Center North"]] <- c("Aguascalientes", "Guanajuato", "Querétaro", "San Luis Potosí", "Zacatecas")
regions[["Center South"]] <- c("Morelos", "México", "Distrito Federal")
for(i in 1:length(regions))
setRegion(hom, regions[[i]], names(regions)[i])
hom.region <- ddply(hom, .(region, date), function (df) sum(df$Murders.with.Firearm) / sum(df$Murders))
x <- dlply(hom.region, .(region), transform, trend = data.frame(stl(ts(V1, start = 2001, freq = 12), "per")$time.series)$trend )
hom.region <- rbind.fill(x)
hom.region <- ddply(hom.region, .(region), transform,
order = mean(V1))
hom.region$region <- reorder(hom.region$region, -hom.region$order)
p <- ggplot(hom.region, aes(date, V1)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
geom_line(size = 0.4) +
geom_line(aes(date, trend), size = .3, color = "blue") +
scale_x_date(major = "2 years") +
facet_wrap(~ region) +
xlab("date") + ylab("proportion of homicides by firearm") +
opts(title="Proportion of Homicides by Firearm Discharge in the Different Regions of Mexico")+
scale_y_continuous(formatter = "percent", limits = c(0, .8))
savePlotAA(p, "graphs/regions2.png")
########################################################
#For Municipalities near the US Border
########################################################
addTrend(hom.border, "prop", 1998)
p <- ggplot(hom.border, aes(date, prop)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
geom_line(size = 0.2) +
geom_line(aes(date, trend), color = "blue") +
scale_x_date(major = "2 years") +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
annotate("text", x = ban, y = .37, hjust = 1.03,
label = "Assault weapon ban expiration") +
xlab("date") + ylab("proportion of homicides by firearm") +
opts(title="Homicides by Firearm Discharge as a Proportion of all Homicides in Mexican Municipalities that Border the US") +
scale_y_continuous(formatter = "percent", limits = c(0, .9))
savePlotAA(p, "graphs/us-border.png")
########################################################
#For the big border cities
########################################################
x <- dlply(hom.borderct, .(Municipality2), transform, trendP = data.frame(stl(ts(prop, start = 2001, freq = 12), "per")$time.series)$trend )
hom.borderct <- rbind.fill(x)
x <- dlply(hom.borderct, .(Municipality2), transform, trendH = data.frame(stl(ts(Homicides, start = 2001, freq = 12), "per")$time.series)$trend )
hom.borderct <- rbind.fill(x)
hom.borderct <- ddply(hom.borderct, .(Municipality2), transform,
order = mean(prop))
hom.borderct$Municipality2 <- reorder(hom.borderct$Municipality2,
-hom.borderct$order)
p <- ggplot(hom.borderct, aes(date, prop)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
geom_line(size = 0.2) +
geom_line(aes(date, trendP), color = "blue") +
# geom_line(aes(date, trend), color = "blue") +
facet_wrap(~Municipality2) +
scale_y_continuous(formatter = "percent") +
opts(title = "Effect of the Expiration of the Assault Weapon Ban on the Proportion of Homicides\nCommited by Firearm in Mexican Cities that Border the US") +
opts(axis.text.x=theme_text(angle=60, hjust=1.2 )) +
ylab("proportion of homicides by firearm")
savePlotAA(p, "graphs/us-cities-prop.png")
hom.borderct <- ddply(hom.borderct, .(Municipality2), transform,
order = mean(Homicides))
hom.borderct$Municipality2 <- reorder(hom.borderct$Municipality2,
-hom.borderct$order)
p <- ggplot(hom.borderct, aes(date, Homicides)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
geom_line(size = 0.2) +
geom_line(aes(date, trendH), color = "blue") +
# geom_line(aes(date, Firearm.Homicides), color = "#2BC258") +
facet_wrap(~Municipality2)+
opts(title = "Effect of the Expiration of the Assault Weapon Ban on the Number of Homicides Commited\nby Firearm in Mexican Cities that Border the US") +
ylab("number of homicides by firearm")
savePlotAA(p, "graphs/us-cities-number.png")
p <- ggplot(subset(hom.borderct, Municipality2 == "NUEVO LAREDO"),
aes(date, Homicides)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
geom_vline(aes(xintercept = fox.troops),
color = "#000000",
linetype = 2) +
geom_vline(aes(xintercept = osiel.captured),
color = "#000000",
linetype = 2) +
annotate("text", x = fox.troops, y = 23, label = "Troops\nin NL",
hjust = 1.03) +
annotate("text", x = ban, y = 17, hjust = 1.03,
label = "Assault weapon ban\nexpiration") +
annotate("text", x = osiel.captured, y = 12, hjust = 1.03,
label = "Leader of Gulf Cartel\nCaptured") +
#geom_line(color = "#F67D75") +
geom_line(aes(date, Firearm.Homicides)) +
ylab("number of homicides by firearm") +
opts(title = "Number of Homicides by Firearm Discharge in Nuevo Laredo")
savePlotAA(p, "graphs/us-nuevo-laredo.png")
########################################################
#Small Multiples of all the states with breakpoints
########################################################
homsub <- subset(hom, Year >= kstart.year & Year <= kend.year)
breaks.df <- ddply(homsub, .(State), findBreakAW)
names(breaks.df) <- c("State", "low", "breakpoints", "up")
breaks.df$low[breaks.df$low < 0] <- 0
breaks.df$up[breaks.df$up > (knum.years * 12-1)] <- knum.years * 12-1
breaks.df$date <- convertToDate(as.numeric(breaks.df$breakpoints))
breaks.df$min <- convertToDate(as.numeric(breaks.df$low))
breaks.df$max <- convertToDate(as.numeric(breaks.df$up))
breaks.df$prop <- 0
x <- dlply(homsub, .(State), transform, trend = data.frame(stl(ts(prop, start = 2001, freq = 12), "per")$time.series)$trend )
homsub <- rbind.fill(x)
dts <- c("Chihuahua", "Sinaloa", "Durango", "Sonora",
"Guerrero", "Baja California","Michoacán", "Tamaulipas")
st <- c("México", "Chiapas", "Puebla", "Nuevo León",
"Quintana Roo")
filenames <- c("all", "chihuahua", "nuevo-leon", "michoacan","sonora", "dts", "interesting")
filenames <- sapply(filenames,
function(x) paste("graphs/", x,
".png", sep = ""))
widths <- c(960, 640, 640, 640, 640, 700, 700)
heights <- c(600, 480, 480, 480, 480, 525, 525)
mapply(function(x, y, z, height, width)
savePlotAA(plotAsWeBreaks(homsub, breaks.df, ban, x, z), y,
height, width),
list(NULL, "Chihuahua", "Nuevo León", "Michoacán","Sonora",
dts, st), filenames,
list(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE), widths,
heights)
########################################################
#Number of Homicides by Firearm in Nuevo Leon
########################################################
nuevo.leon00 <- subset(homsub, homsub$State == "Nuevo León")
addTrend(nuevo.leon00, "Murders.with.Firearm", 2000)
p <- ggplot(nuevo.leon00, aes(date, Murders)) +
# geom_line(color = "red") +
geom_line(aes(date, trend), color = "blue") +
geom_line(aes(date, Murders.with.Firearm)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
annotate("text", x = ban, y = 16, hjust = 1.05,
label = "Assault weapon ban expiration") +
ylab("number of homicides by firearm discharge") +
opts(title = "Number of Homicides by Firearm Discharge in Nuevo Leon")
savePlotAA(p, "graphs/guns-num-nuevo-leon.png")
########################################################
#Number of Homicides by Firearm in Chihuahua
########################################################
chi00 <- subset(homsub, homsub$State == "Chihuahua")
addTrend(chi00, "Murders.with.Firearm", 2000)
p <- ggplot(chi00) +
# geom_line(color = "red") +
geom_line(aes(date, trend), color = "blue") +
geom_line(aes(date, Murders.with.Firearm)) +
geom_vline(aes(xintercept = as.Date(chapo.escape)), color = "#000000",
linetype = 2) +
annotate("text", x = chapo.escape, y = 47, hjust = 1.05,
label = "\"El Chapo\" Escapes\nfrom Prision") +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
annotate("text", x = ban, y = 3, hjust = 1.05,
label = "Assault weapon ban expiration\n(2004-Sep-14)") +
geom_vline(aes(xintercept = as.Date(rodolfo.death)),
color = "#000000",
linetype = 2) +
annotate("text", x = rodolfo.death, y = 47, hjust = 1.05,
label = "Brother of the leader\nof the Juarez Cartel\nKilled by the Sinaloa Cartel\n(2004-Sep-11)") +
ylab("number of homicides by firearm discharge") +
scale_y_continuous(limits = c(0, 50)) +
opts(title = "Number of Homicides by Firearm Discharge in Chihuahua")
savePlotAA(p, "graphs/guns-num-chihuahua.png", width = 800,
height = 600)
########################################################
#Number of Homicides by Firearm in Chihuahua (including Calderon's
#drug war)
########################################################
p <- ggplot(chihuahua) +
# geom_line(color = "red") +
geom_line(aes(date, Murders.with.Firearm)) +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
annotate("text", x = ban, y = 220, hjust = 1.05,
label = "Assault weapon ban expiration\n(2004-Sep-14)") +
ylab("number of homicides by firearm discharge") +
scale_y_continuous(limits =
c(0, max(chihuahua$Murders.with.Firearm))) +
opts(title = "Monthly Number of Homicides by Firearm Discharge in Chihuahua(1998-2008)")
savePlotAA(p, "graphs/guns-num-chihuahua-1998-2008.png", width = 640, height = 480)
########################################################
#Number of Homicides in Nuevo Leon
########################################################
nuevo.leon00 <- subset(hom, hom$State == "Nuevo León")
homicides.nl <- c(nuevo.leon00$Murders, 8,
c(20, 16, 7, 26, 15, 13, 24, 32, 31,
26, 15, 42, 23, 29, 73, 101, 58, 102, 123))
p <- qplot(seq(as.Date("1998-01-15"), by='1 month',length=12*13-5),
homicides.nl, geom = "line") +
scale_x_date() +
xlab("date") + ylab("number of homicides") +
opts(title = "Monthly Number of Homicides in Nuevo Leon (1998-July 2010)\n(1998-2008 data from INEGI, 2009-2010 from the State Police)") +
#geom_vline(aes(xintercept = as.Date(concord)), color = "#000000",
# linetype = 2) +
#annotate("text", x = concord, y = 102, hjust = 1.05,
# label = "Zetas split\nwith Gulf Cartel") +
geom_vline(aes(xintercept = as.Date(ban)), color = "#000000",
linetype = 2) +
annotate("text", x = ban, y = 42, hjust = 1.05,
label = "Assault weapon ban expiration")
#geom_vline(aes(xintercept = as.Date(op.tam.nl)),
# color = "#000000",
# linetype = 2) +
#annotate("text", x = op.tam.nl, y = 62, hjust = 1.05,
# label = "Joint\nOperation\nTamaulipas-\nNuevo Leon")
savePlotAA(p, "graphs/homicides-num-nl-1998-2008.png", width = 640, height = 480)
|
010e9293a521f9fb96dc0d2b5362d3fc6fc60b09
|
99b5eff4ec20e62f531f7f1aa9429ce311fbac6b
|
/R/cols.R
|
9cc047b86eb565b3b6c209de94302fdf5c3fad7e
|
[] |
no_license
|
Binary117736/RNAdecay
|
1e84079da8b54be04224b0ded528a7bb6edfbf72
|
5eef364514cb76f59ac8af1718b3bd0fd9b125ec
|
refs/heads/master
| 2023-07-10T22:43:08.887802
| 2020-04-20T15:54:52
| 2020-04-20T15:54:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,324
|
r
|
cols.R
|
#' Indexes column names of a data.frame matching multiple patterns (i.e., multigrep)
#'
#' Identifies dataframe column names that have all of the pattern arguments .
#'
#' Be aware that column data labels that are part of another data label are not advisable (e.g. mut1, mut2, mut1.mut2; cols(df,'mut1') will return indices for both 'mut1' and 'mut1.mut2' labeled columns
#'
#' @param df a dataframe with column names to index
#' @param patterns character vector or vector of regular expressions passed to grep pattern argument
#' @param w,x,y,z (for backwards compatibility) separate arguments for patterns, if used patterns argument will be ignored
#'
#' @return returns a vector of integer indices of the column names of \code{df} that match to all of \code{patterns}
#'
#' @export
#'
#' @examples
#' cols(df=data.frame(xyz=1:5,zay=6:10,ybz=11:15,tuv=16:20),patterns = c('y','z')) ## returns 1 2 3
#' cols(df=data.frame(xyz=1:5,zay=6:10,ybz=11:15,tuv=16:20), w = 'y', x = 'z') ## returns 1 2 3
#'
cols <- function(patterns,df,w=NA,x=NA,y=NA,z=NA) {
  # Return the (named) integer indices of the columns of `df` whose
  # names match ALL of the regular expressions in `patterns`.
  #
  # For backwards compatibility: if any of w/x/y/z is supplied, those
  # values replace `patterns` entirely.
  if(any(!is.na(c(w,x,y,z)))){
    patterns <- c(w,x,y,z)
    patterns <- patterns[!is.na(patterns)]
  }
  column_names <- colnames(df)
  # vapply (not sapply) so a data frame with zero columns yields
  # logical(0) -> integer(0) instead of a list that makes which() error
  matches_all <- vapply(
    column_names,
    function(nm) all(vapply(patterns, grepl, logical(1), x = nm)),
    logical(1)
  )
  which(matches_all)
}
|
3e1a1eef1362ef352289215f7a881e625a6404c3
|
d661fcf03bf858e856c175898c4c7ca60b655763
|
/correlation.R
|
ce467baff008b7ed7ca06b3b0d2c566d87848f6c
|
[] |
no_license
|
saifraider/R-programming
|
4d1c1cdb1857ee9f666000481410545bace822e8
|
c903df061dd34ba24fcdb8824b29541c54003462
|
refs/heads/master
| 2021-01-09T20:57:24.069511
| 2016-07-05T03:41:05
| 2016-07-05T03:41:05
| 62,603,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 867
|
r
|
correlation.R
|
correlation <- function(directory, threshold = 100, id = 1:332){
  # For each monitor in `id`, read `<directory>/<i-th csv file>` and, if
  # it has more than `threshold` complete rows, compute the correlation
  # between its sulfate and nitrate columns.
  #
  # Returns a numeric vector of correlations (in `id` order), or NULL
  # when no monitor exceeds the threshold -- same as the original.
  #
  # list.files() interprets `pattern` as a regex, so anchor the
  # extension ("*.csv" only worked by accident).
  filenames <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  corrs <- lapply(id, function(i){
    mydata <- read.csv(file = filenames[i], header = TRUE)
    complete <- complete.cases(mydata)
    if(sum(complete) > threshold){
      cor(mydata[complete, "sulfate"], mydata[complete, "nitrate"])
    } else {
      NULL  # below threshold: contributes nothing to the result
    }
  })
  # unlist() drops the NULLs and avoids growing a vector with c() in a loop
  unlist(corrs)
}
}
# Driver code (assignment output): requires a "specdata" directory of
# monitor CSV files in the working directory.
ans <- correlation("specdata", 2000)
n <- length(ans)
cr <- correlation("specdata", 1000)
cr <- sort(cr)
# Print the monitor count for threshold 2000, then the sorted
# correlations for threshold 1000, rounded to 4 decimals
print(c(n, round(cr, 4)))
|
4e154bbdd51ec89e65725b0e425ab8ce53f5040a
|
7183bc44b60bb14b23869560c7937f765e62bb06
|
/man/cimiss_get_obs_latest_time.Rd
|
68e95a3ea5fb2f6a56e10db979fcf84756eb8d1b
|
[
"MIT"
] |
permissive
|
nmcdev/nmcMetIO
|
f0f025d52f6b84cfcc035788fd3fa9c35530e8b2
|
6e549f61049cc09f1ab4bfe37a9e8d1a5608b7f3
|
refs/heads/master
| 2022-02-05T23:24:41.219260
| 2022-01-29T09:14:26
| 2022-01-29T09:14:26
| 207,302,340
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 769
|
rd
|
cimiss_get_obs_latest_time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrieve_cimiss_server.R
\name{cimiss_get_obs_latest_time}
\alias{cimiss_get_obs_latest_time}
\title{Get the observation latest time.}
\usage{
cimiss_get_obs_latest_time(dataCode = "SURF_CHN_MUL_HOR", latestTime = 6)
}
\arguments{
\item{dataCode}{: dataset code, like "SURF_CHN_MUL_HOR", "SURF_CHN_MUL_HOR_N", and so on.}
\item{latestTime}{: latestTime > 0, like 2 is return the latest time in 2 hours.}
}
\value{
the latest time, like '20200216020000'
}
\description{
\preformatted{Retrieve data latest times by cimiss music REST API.
Refer to http://10.20.76.55/cimissapiweb/index_index.action
}
}
\examples{
data <- cimiss_get_obs_latest_time('SURF_CHN_MUL_HOR_N', latestTime=12)
}
|
308c41caf591fb1cb7de3b5ad88bea8b72d8514a
|
eef234939eeebc2e5dcf2ad9cfd1888ce36259df
|
/stat312/project2/zheng001/man/new_gaussian.Rd
|
edb4c1f5b2edf081c596dea2fb85a0e47c5f5cd8
|
[] |
no_license
|
snarles/misc
|
5d4e138cbb17bfd08143fc4c097fb84417446990
|
246f9fac0130340e44837b528a2f59e9256f2711
|
refs/heads/master
| 2023-06-26T06:53:53.933752
| 2023-06-12T13:29:17
| 2023-06-12T13:29:17
| 18,860,939
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 456
|
rd
|
new_gaussian.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/gauss_class_initialize.R
\name{new_gaussian}
\alias{new_gaussian}
\title{Create an instance of the gaussian dist}
\usage{
new_gaussian(covariance)
}
\arguments{
\item{covariance}{The covariance matrix}
}
\description{
Create an instance of the gaussian dist
}
\examples{
cm <- diag(rep(1, 3))
gd <- new_gaussian(cm)
x <- sample_points(gd, 10)
de <- density_at(gd, x)
}
|
93e78d71278b2b008d9c1c3ad580e9a981c7e679
|
aa102054184db05b3d59fc6247776158eb3e6596
|
/plot3.R
|
22b74a8bd5a077a93dc91642db67ddd27b480e98
|
[
"MIT"
] |
permissive
|
pparacch/ExData_Plotting2
|
c5ba8ffd9e0b31316d9d500a1215602af5c9dab4
|
fc84975422d7694246a337eae544da628239cd19
|
refs/heads/master
| 2020-06-20T17:51:31.609236
| 2016-11-26T21:36:16
| 2016-11-26T21:36:16
| 74,851,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,809
|
r
|
plot3.R
|
library(ggplot2) #load ggplot2 package for the plotting
## Read the datasource
#Assumption:
#the files are located in the current working directory together with the script
NEI <- readRDS("summarySCC_PM25.rds")
#Relevant Question (Question 3)
#Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
#variable,
# Which of these four sources have seen decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008?
#Subsetting original data in order to consider only Baltimore relevant data
#(fips code "24510" identifies Baltimore City, Maryland).
NEI_baltimore <- subset(NEI, fips == "24510")
#type values
#unique(NEI_baltimore$type)
#[1] "POINT" "NONPOINT" "ON-ROAD" "NON-ROAD"
#Aggregate the Baltimore data by year & type, summing total emissions
NEI_baltimore_aggregate <- aggregate(NEI_baltimore$Emissions, list(Year = NEI_baltimore$year, Type = NEI_baltimore$type), sum)
names(NEI_baltimore_aggregate) <- c("Year", "Type", "TotalEmissions")
#Use the ggplot2 plotting system to make a plot answering this question:
#one facet per source type, points per year, dotted connecting line
g <- ggplot(NEI_baltimore_aggregate, aes(Year, TotalEmissions))
p <- g + geom_point(aes(color = Type)) +
    facet_grid(. ~ Type) +
    geom_line(linetype=3) +
    labs(x = "Year") +
    labs(y = "Total Emission (ton)") +
    labs(title = "PM2.5 per Year in Baltimore City, Maryland")
print(p)
#Save the plot as a png using ggsave{ggplot2}
ggsave("plot3.png")
#Answer
#The total emission of PM2.5 in Baltimore City, Maryland has the following trends
#from 1999 to 2008
#NON-ROAD -> decreased from 522.94 ton (1999) to 55.82356 ton (2008)
#NONPOINT -> decreased from 2107.625 ton (1999) to 1373.207 ton (2008)
#ON-ROAD -> decreased from 346.82 ton (1999) to 88.27546 ton (2008)
#POINT -> increased from 296.7950 ton (1999) to 344.9752 ton (2008)
|
adbdab955668bfe367f55ec066780156a5b19c9b
|
1b5e359af6dd0cdc3435fd968c3476d0d4f88945
|
/man/print_table.Rd
|
dff5fb0c22b7afc0ec93cf831d78bf7e106bd9da
|
[] |
no_license
|
sirh-hug/outilsSIRH
|
8ab8945a1af4ba7cbf292cd331f3860b7c36132c
|
1c0045e866f63e74311cb387284c59952aa0a6ec
|
refs/heads/master
| 2020-12-05T09:43:40.675525
| 2020-04-26T09:09:10
| 2020-04-26T09:09:10
| 232,071,677
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
print_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_table.R
\name{print_table}
\alias{print_table}
\title{Formater un tableau croise}
\usage{
print_table(data, bg.col = "#a0d0ab", title = NULL, caption = "")
}
\arguments{
\item{bg.col}{color. Background color}
\item{title}{character.}
\item{caption}{character.}
\item{data}{a table to format.}
}
\description{
Formater un tableau croise
}
|
bb5cf56a9b1a316caef1eb5679784b2272ce10a5
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/valgrind_test_dir/integrate-test.R
|
be3fdf92ea19105e3044cc588c049a85238b156b
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 180
|
r
|
integrate-test.R
|
function (x, fx)
{
    # Test-harness wrapper: log every (x, fx) input pair into the global
    # `data.env` environment so the inputs seen during a run can be
    # inspected/replayed later
    e <- get("data.env", .GlobalEnv)
    e[["integrate"]][[length(e[["integrate"]]) + 1]] <- list(x = x,
        fx = fx)
    # Delegate to the compiled routine under test
    .Call("_irt_integrate", x, fx)
}
|
0c073a3774789aa96e3b088367cba5b7f7f8a29c
|
4f522d2d8d9d0a54ec6340f94ee66f74b1022050
|
/man/MicrobenchmarkSparseMatrixKernel.Rd
|
dd887e3a1bdf9289b8fcb7edff09c009987e7588
|
[
"Apache-2.0"
] |
permissive
|
cran/RHPCBenchmark
|
2b69e57b644651eb3034b23bd8ff9df5a1ca1c84
|
c1335a4fcb14e6b871d73e3929f188a92a273308
|
refs/heads/master
| 2021-01-21T20:11:16.606699
| 2017-05-23T16:26:28
| 2017-05-23T16:26:28
| 92,201,283
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,893
|
rd
|
MicrobenchmarkSparseMatrixKernel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/microbenchmark_sparse_matrix_kernel.R
\name{MicrobenchmarkSparseMatrixKernel}
\alias{MicrobenchmarkSparseMatrixKernel}
\title{Performs microbenchmarking of a sparse matrix linear algebra kernel}
\usage{
MicrobenchmarkSparseMatrixKernel(benchmarkParameters, numberOfThreads,
resultsDirectory, runIdentifier)
}
\arguments{
\item{benchmarkParameters}{an object of type
\code{\link{SparseMatrixMicrobenchmark}} specifying the matrix
dimensions of matrices to be tested and the number of performance trials
to perform for each matrix dimension.}
\item{numberOfThreads}{the number of threads the microbenchmark is being
performed with. The value is for informational purposes only and does not
effect the number threads the kernel is executed with.}
\item{resultsDirectory}{a character string specifying the directory
where all of the CSV performance results files will be saved}
\item{runIdentifier}{a character string specifying the suffix to be
appended to the base of the file name of the output CSV format files}
}
\value{
a dataframe containing the performance trial times for each matrix
tested, that is the raw performance data before averaging. The columns
of the data frame are the following:
\describe{
\item{BenchmarkName}{The name of the microbenchmark}
\item{NumberOfRows}{An integer specifying the expected number of rows in
the input sparse matrix}
\item{NumberOfColumns}{An integer specifying the expected number of
columns in the input sparse matrix}
\item{UserTime}{The amount of time spent in user-mode code within the
microbenchmarked code}
\item{SystemTime}{The amount of time spent in the kernel within the
process}
\item{WallClockTime}{The total time spent to complete the performance
trial}
\item{DateStarted}{The date and time the performance trial was commenced}
\item{DateFinished}{The date and time the performance trial ended}
}
}
\description{
\code{MicrobenchmarkSparseMatrixKernel} performs microbenchmarking of a
sparse matrix linear algebra kernel for several matrix dimensions
}
\details{
This function performs microbenchmarking of a sparse matrix linear algebra
kernel for several matrix dimensions and a given number of threads. The
kernel to be performance tested, the matrix dimensions to be tested, and
other parameters specifying how the kernel is to be benchmarked are given in
the input object \code{benchmarkParameters} which is an instance of
the class \code{\link{SparseMatrixMicrobenchmark}}.
For each matrix dimension to be tested, the run time performance of the
kernel is averaged over multiple runs. The kernel can also be executed with
multiple threads if the kernel supports multithreading.
See \code{\link{SparseMatrixMicrobenchmark}}
for more details on the benchmarking parameters.
}
|
adb920e9806658237914938cafb479a696067a96
|
5e8036a92450788ed33cf05a59463eb7e69fea99
|
/20131030/getFileName.r
|
da9a111d40c0fc5dd893cefc7ea9ac29dac0c2c5
|
[] |
no_license
|
Chillangri/QCData
|
42f7a41c98efc7784b99363d0ea3e55743050b8f
|
d67f999158d0aa815ac38662aca8e31486188605
|
refs/heads/master
| 2021-01-20T05:04:48.540686
| 2015-04-28T03:36:52
| 2015-04-28T03:36:52
| 34,706,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
getFileName.r
|
getFileName <- function() {
  # Interactively prompt for the name of a data file that must live in
  # the current working directory.
  #
  # Returns: the full path (directory + file name) as a character
  # string, or NULL if the user says the working directory is wrong.
  currentWD <- getwd()
  cat("Your DATA file must be at ", currentWD, "\n", sep="")
  ANS <- readline("Is it right? (Y/y or N/n): ")
  # Any answer starting with n/N aborts; everything else counts as yes
  if (toupper(substr(ANS, 1, 1)) == "N") {
    return(NULL)
  }
  fileName <- readline("What is the file Name? ")
  # file.path() joins the pieces portably (same result as the old
  # paste(currentWD, "/", fileName, sep = ""))
  file.path(currentWD, fileName)
}
|
edfa1fd8b6868d7b0c7d1c72dc5c89abc34a613d
|
83ae0b50fcbda4bdf17f4bad11070f759f903e69
|
/man/hackint_lm.Rd
|
29d85968ec34b56d4778d30e9cd2960852521426
|
[
"MIT"
] |
permissive
|
beauCoker/hacking
|
d7fee8d0d412eb2f3d17049fdf31b09d9ba4492d
|
57b76dd127f1b4cd2cfcd19fe464246eb8e3ee1e
|
refs/heads/master
| 2020-06-25T14:45:55.412973
| 2019-09-09T02:20:12
| 2019-09-09T02:20:12
| 199,341,352
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,572
|
rd
|
hackint_lm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hackint.R
\name{hackint_lm}
\alias{hackint_lm}
\title{Hacking intervals for linear models}
\usage{
hackint_lm(mdl, data, treatment, theta = 0.1, frac_remove_obs = 1,
verbose = TRUE)
}
\arguments{
\item{mdl}{\code{lm} object representing the "base" model}
\item{data}{\code{data.frame} used to fit \code{mdl}}
\item{treatment}{name of binary treatment variable (inputted as \code{character})}
\item{theta}{loss tolerance for tethered hacking (default = 0.1)}
\item{frac_remove_obs}{fraction of observations to consider for removal (default = 1, meaning all observations considered)}
\item{verbose}{whether or not to print summary}
}
\value{
\code{list} containing all hacking intervals (\code{tethered}, \code{constrained}, \code{tethered_and_constrained})
as well as complete list of all manipulations applied to the base model (\code{hacks_all})
}
\description{
Computes tethered and constraint-based hacking intervals of the coefficient of a binary treatment variable in a linear model.
See \url{https://github.com/beauCoker/hacking} for examples.
}
\examples{
N = 50 # Number of observations
data <- data.frame(
y = rnorm(N), # Response variable (continuous)
w = rbinom(N, 1, .5), # Treatment variable (binary)
X = matrix(rnorm(N*3), nrow=N), # Covariates included in base model
Z = matrix(rnorm(N*3), nrow=N) # Covariates excluded from base model
)
mdl <- lm(y ~ w + X.1*X.2, data=data) # fit linear "base" model
output <- hackint_lm(mdl, data, theta=0.1, treatment = 'w')
}
|
d18559ca4696b11c446cfcb1855aba2beafccf17
|
b13e7df0f6829c0b5cb880abfb02376f833152e8
|
/R/utils.R
|
7213d1bb4e6f189d0800d0ee4ffe452da55c98d2
|
[] |
no_license
|
randrescastaneda/mld_simulation
|
828f0ece8b7e1b850d5d6bad05f112cf2bea11bd
|
4f5c224677f885d2f4b145403dc8e3a31360ddce
|
refs/heads/master
| 2023-03-11T23:06:49.181725
| 2021-03-04T18:50:26
| 2021-03-04T18:50:26
| 344,145,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 973
|
r
|
utils.R
|
## Simulate the MLD under scaling and bottom-share replacement.
##
## Scales `welfare` by `sc`, overwrites the bottom `sh` share of
## observations according to the rule `rp`, and returns a one-row
## data.table with the resulting mean log deviation.
##
## rp: "ignore" (set to 0), "1" (set to 1), "min" (smallest positive
##     value), or "shr" (smallest positive value scaled by the ratio of
##     the two smallest positive values).
new_mld <- function(welfare, weight, sh, sc, rp) {
  scaled <- welfare * sc
  bottom <- 1:round(length(scaled) * sh)
  ## switch() evaluates only the matched branch, so the min-based rules
  ## are computed on the scaled vector before any replacement happens.
  scaled[bottom] <- switch(
    rp,
    "ignore" = 0,
    "1"      = 1,
    "min"    = fmin(scaled[scaled > 0]),
    "shr"    = {
      lo1 <- fmin(scaled[scaled > 0])
      lo2 <- fmin(scaled[scaled > lo1])
      lo1 * (lo1 / lo2)
    },
    stop("replace not valid")
  )
  data.table(share   = sh,
             scale   = sc,
             replace = rp,
             mld     = fmld(scaled, weight = weight))
}
## Weighted mean log deviation over the strictly positive welfare values.
fmld <- function (welfare, weight) {
  pos <- welfare > 0
  w  <- weight[pos]
  wf <- welfare[pos]
  ## MLD = weighted mean of log(mean(welfare) / welfare).
  mu <- collapse::fmean(x = wf, w = w)
  collapse::fmean(x = log(mu / wf), w = w)
}
|
0d35bfdccae9097483bf62eef2ca85617440cacf
|
e7708dd3d11b442a39848719fb03e420196b3f0c
|
/02-consolidate-after-openrefine.R
|
00ac5c3d02be3e5e7ba3ddae75c8e773857de0e0
|
[
"MIT"
] |
permissive
|
jmcastagnetto/sunedu-licenciamiento
|
ca6d5c965d1981aea4048f73dc0112b0afc5b6d1
|
5e5ffadf1d37794f8b669627dce78aead6558c31
|
refs/heads/main
| 2023-02-12T00:53:25.380741
| 2021-01-14T21:40:16
| 2021-01-14T21:40:16
| 329,703,191
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,186
|
r
|
02-consolidate-after-openrefine.R
|
library(tidyverse)

## Consolidate the SUNEDU licensing data after manual cleaning in
## OpenRefine: harmonise status/management labels, coalesce duplicated
## columns, standardise names, and save the tidy result as CSV and RDS.
raw_df <- read_csv("proc/sunedu-datos-combinados-crudo-csv-openrefine-editado.csv")

df <- raw_df %>%
  mutate(
    # Unify the licensing-status label.
    Estado = str_replace(Estado, "LICENCIADA", "LICENCIA OTORGADA"),
    # Harmonise gender endings of the management-type labels
    # ("...DO" -> "...DA", trailing "CO" -> "CA").
    TIPO_GESTION = str_replace_all(TIPO_GESTION,
                                   c("DO" = "DA", "CO$" = "CA")),
    # Coalesce the duplicated columns: prefer `Tipo de Gestión` /
    # `Región` when present, fall back to the upper-case variants.
    gestion = if_else(
      is.na(`Tipo de Gestión`),
      TIPO_GESTION,
      `Tipo de Gestión`
    ),
    departamento = if_else(
      is.na(DEPARTAMENTO_LOCAL),
      Región,
      DEPARTAMENTO_LOCAL
    )
  ) %>%
  rename(
    nota_estado_abril_2020 = ESTADO_LICENCIAMIENTO
  ) %>%
  # Standardise all column names to snake_case.
  janitor::clean_names() %>%
  rename(
    provincia = provincia_local,
    distrito = distrito_local,
    nombre = universidad,
    codigo = codigo_entidad,
    latitud = latitud_ubicacion,
    longitud = longitud_ubicacion
  ) %>%
  # Keep only the columns of interest, in presentation order.
  select(
    nombre,
    codigo,
    gestion,
    departamento,
    provincia,
    distrito,
    latitud,
    longitud,
    estado,
    periodo_licenciamiento,
    grupo,
    nota_estado_abril_2020
  )

## Persist the tidy data set in both formats.
write_csv(
  df,
  file = "datos/sunedu-licenciamiento-20210114.csv"
)

saveRDS(
  df,
  file = "datos/sunedu-licenciamiento-20210114.rds"
)
|
04dfe60641626021d9e9d4f4aff79741550715f7
|
702b9b9133319efabe3d78cfc54e51ed2da8e879
|
/in-class/8-3 Class Material/app08 ext- answer (for and geoms).R
|
528f2e5138b002b506eb0d0316c5ee3d3e45b104
|
[] |
no_license
|
QFCatMSU/GGPlot-Class-Material
|
b7280ad76fcbba6418741cea4d60bfbcfde2fd95
|
5dcb3ccabda5dbb2dfb04fe65d08edfd8642557b
|
refs/heads/master
| 2022-03-25T11:18:23.218395
| 2022-03-09T21:35:33
| 2022-03-09T21:35:33
| 238,790,531
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,419
|
r
|
app08 ext- answer (for and geoms).R
|
{
  ## Teaching script: sum heating/cooling degree days per month and draw
  ## the same grouped bar chart four ways (geom_col vs geom_bar, melted
  ## vs unmelted data frame).
  source( file="scripts/reference.R" );
  library(reshape);

  weatherData = read.csv( file="data/LansingNOAA2016-3.csv",
                          stringsAsFactors = FALSE );

  # format the dateYr column as a date with just the abbreviated month (%b)
  months = format.Date(weatherData$dateYr, format="%b");
  weatherData$month = months; # save months to data frame as new column

  # 3 vectors we need: months, sum of heatDays, sum of coolDays
  month_unique = unique(months); # could also use month.abb
  heatDays = rep(0, times=12);
  coolDays = rep(0, times=12);

  # sum up the heatDays and coolDays for each month
  for(i in 1:nrow(weatherData)) # for each day (row)
  {
    for(j in 1:length(month_unique)) # for each month
    {
      # check if the month on the row is the same and the indexed month
      if(weatherData$month[i] == month_unique[j])
      {
        # add the day's value to the total value
        heatDays[j] = heatDays[j] + weatherData$heatDays[i];
        coolDays[j] = coolDays[j] + weatherData$coolDays[i];
        break; # break out of the for loop (don't need to check other months)
      }
    }
  }

  ## create the data frames
  # 3 columns: month, total heatdays, total cooldays
  heatCoolByMonth = data.frame("month" = month_unique, heatDays, coolDays);

  # melted df, 3 columns: month, heatdays/cooldays factor, total value
  meltedDF = data.frame("month" = c(month_unique, month_unique),
                        "variable" = c(rep("heatDays",12), rep("coolDays", 12)),
                        "value" = c(heatDays, coolDays));

  #### Four different plots -- same output
  # 1) geom_col() with regular data frame
  # 2) geom_col() with melted data frame
  # 3) geom_bar() with regular data frame
  # 4) geom_bar() with melted data frame

  # geom_col method -- unmelted dataframe
  thePlot = ggplot(heatCoolByMonth) +
    # plot the two column separately, nudge them so they are not stacked
    geom_col(mapping=aes(x=month, y=heatDays),
             width=0.4,
             fill= "red",
             position=position_nudge(x=-0.2)) +
    geom_col(mapping=aes(x=month, y=coolDays),
             width=0.4,
             fill= "blue",
             position=position_nudge(x=0.2)) +
    scale_x_discrete(limits = month.abb) +
    theme_bw() +
    labs(title = "Heating and Cooling Days",
         subtitle = "geom_col() -- unmelted dataframe",
         x = "Month",
         y = "Cumulative Heat/Cool Days");
  plot(thePlot);

  # geom_col method -- melted dataframe
  thePlot = ggplot(meltedDF) +
    # plot values and fill with variable, use dodge so they do not stack
    geom_col(mapping=aes(x=month, y=value, fill=variable),
             width=0.4,
             position=position_dodge()) +
    scale_x_discrete(limits = month.abb) +
    theme_bw() +
    labs(title = "Heating and Cooling Days",
         subtitle = "geom_col() -- melted dataframe",
         x = "Month",
         y = "Cumulative Heat/Cool Days");
  plot(thePlot);

  # geom_bar method -- unmelted dataframe
  thePlot = ggplot(data=heatCoolByMonth) +
    geom_bar(mapping=aes(x=month, y=heatDays),
             stat="identity", # without this, geom_bar does a count
             width=0.4,
             fill="red",
             position = position_nudge(x=0.2)) +
    geom_bar(mapping=aes(x=month, y=coolDays),
             stat="identity",
             width=0.4,
             fill="blue",
             position = position_nudge(x=-0.2)) +
    scale_x_discrete(limits = month.abb) +
    scale_fill_manual(values=c("red", "blue")) +
    theme_bw() +
    labs(title = "Heating and Cooling Days",
         subtitle = "geom_bar() -- unmelted dataframe",
         x = "Month",
         y = "Cumulative Heat/Cool Days");
  plot(thePlot);

  # geom_bar method -- melted dataframe
  thePlot = ggplot(data=meltedDF) +
    geom_bar(mapping=aes(x=month, y=value, fill=variable),
             stat="identity", # without this, it will do a count (on X or Y)
             width=0.5,
             position=position_dodge()) + # without this, it will stack
    scale_x_discrete(limits = month.abb) +
    scale_fill_manual(values=c("red", "blue")) +
    theme_bw() +
    labs(title = "Heating and Cooling Days",
         subtitle = "geom_bar() -- melted dataframe",
         x = "Month",
         y = "Cumulative Heat/Cool Days");
  plot(thePlot);
}
|
0851f8aaa5474d67379bb611a1d689666b4fcfe3
|
3307c40d08b8d82f526a4ce69317b8b45562299a
|
/R/gdls.R
|
d9298e8d4bd10d8aae0349d2563b27018ba49d68
|
[] |
no_license
|
helixcn/cmna
|
d2f725124fa46879e87be947c3034dfd99b07339
|
4ea309bd2edac241e23c94ba4176b0724cf3b57f
|
refs/heads/master
| 2020-03-25T18:33:43.747394
| 2017-06-13T12:26:47
| 2017-06-13T12:26:47
| 144,037,107
| 1
| 0
| null | 2018-08-08T15:59:47
| 2018-08-08T15:59:47
| null |
UTF-8
|
R
| false
| false
| 2,604
|
r
|
gdls.R
|
## Copyright (c) 2016, James P. Howard, II <jh@jameshoward.us>
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#' @name gdls
#'
#' @title Least squares with gradient descent
#'
#' @description
#' Solve least squares with gradient descent
#'
#' @param A a matrix of predictors (one column per coefficient, including
#'   an intercept column if desired)
#' @param b a vector representing the right-hand side of the linear system
#' @param alpha the learning rate
#' @param tol the expected error tolerance
#' @param m the maximum number of iterations
#'
#' @details
#'
#' \code{gdls} solves a least squares problem using gradient descent.
#'
#' @return the vector of estimated coefficients
#'
#' @family linear
#'
#' @examples
#' head(b <- iris$Sepal.Length)
#' head(A <- matrix(cbind(1, iris$Sepal.Width, iris$Petal.Length, iris$Petal.Width), ncol = 4))
#' gdls(A, b, alpha = 0.05, m = 10000)
#'
#' @export
gdls <- function(A, b, alpha = 0.05, tol = 1e-6, m = 1e5) {
  iter <- 0
  n <- ncol(A)
  theta <- matrix(rep(0, n))
  ## Seed the previous iterate far from theta so the loop runs at least once.
  oldtheta <- theta + 10 * tol
  while (vecnorm(oldtheta - theta) > tol) {
    ## Give up (with a warning) once the iteration budget is exhausted.
    if ((iter <- iter + 1) > m) {
      warning("iterations maximum exceeded")
      return(theta)
    }
    ## Gradient of the squared-error loss, averaged over the observations.
    e <- (A %*% theta - b)
    d <- (t(A) %*% e) / length(b)
    oldtheta <- theta
    theta <- theta - alpha * d
  }
  return(theta)
}
|
619bb5d1403655d3a1fb1fa52b6e9792647ffbdc
|
dbad5cd8845d3972efea526ddee80b0a052716e4
|
/R_Start.r
|
e620166e92763b12682c22b59863480b0ccf28a3
|
[] |
no_license
|
zzeddo/work
|
6cb10e7adb871b2f530358c77e2b979c8378d093
|
a283a78b985ec784642ac32e27d2fb0f61ecf7cb
|
refs/heads/master
| 2021-01-21T12:53:32.714733
| 2016-05-22T08:51:30
| 2016-05-22T08:51:30
| 35,747,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,805
|
r
|
R_Start.r
|
#----------------
# <I-5>
#----------------
mean(abs(rnorm(100)))
rnorm(10) # fixed typo: was rnorn(10), an undefined function
setwd("d:\\Work")
pdf("aa.pdf")
hist(rnorm(100))
dev.off()
rnorm(10) # draws from the standard normal distribution
data() # data sets bundled with R

#----------------
# <I-8>
#----------------
help.start() # open the HTML help (official documentation)
help(seq) # help for the seq function (sequence generation)
?seq # same as help(seq)
help(abs)
RSiteSearch("lm") # search the R site for linear models

# History features
history()
setwd("C:\\Work")
savehistory(file = "history_150515.log")
loadhistory(file = "history_150515.log")

#----------------
# <I-8>
#----------------
help(rnorm)
rnorm(10) # generate random numbers (mean = 0, sd = 1)
mean(abs(rnorm(100)))
hist(rnorm(10)) # draw a histogram

#----------
getwd() # get the working directory
setwd("c:/Work") # set the working directory
dir.create("c:/Work/Rtraining")
getwd()
setwd("c:/Work/Rtraining")

data() # sample data sets that ship with R
BOD # biochemical oxygen demand example data
BJsales # Sales Data with Leading Indicator
help(BJsales)
options() # options of the current working environment

#----------------
# <I-11>
#----------------
mtcars
lm(mpg~wt, data=mtcars) # linear model of mpg on wt (weight) in mtcars
fit <- lm(mpg~wt, data=mtcars) # store the fitted model in a variable
str(fit) # inspect the structure of the fitted object
help(str) # help for str(), used to display the stored values
#----------------
# <I-13>
#----------------
install.packages("vcd") # Visualizing Categorical Data
help(package="vcd") # fixed typo: was help(pakage="vcd")
library(vcd) # attach the package with library()
help(Arthritis) # arthritis clinical-trial data
Arthritis
example(Arthritis) # usage examples
c95bfae8eaa35b7908d0c6a99d4f94ef8ced6128
|
e40c1a68216ba0bbec58448694a55b1267771b7a
|
/man/removeNonBin.Rd
|
f35b3d284bd855825cd98167a5378fa4ffb3d919
|
[] |
no_license
|
cran/motmot.2.0
|
a97817dee6def95a73e0a441bf54a54e7fa32267
|
6d279d2a3382011d8438e53dfa4eb39afc1c83c3
|
refs/heads/master
| 2021-05-12T16:02:16.675411
| 2018-01-10T17:51:44
| 2018-01-10T17:51:44
| 116,998,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 934
|
rd
|
removeNonBin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/removeNonBin.R
\name{removeNonBin}
\alias{removeNonBin}
\title{Remove species occurring before a time in the past (internal function)}
\usage{
removeNonBin(phy, traitData, keepByTime = 0, tol = 1e-08)
}
\arguments{
\item{phy}{An object of class "phylo" (see ape package).}
\item{traitData}{data associated with the species}
\item{keepByTime}{an age at which to preserve tips before the present (time = 0)}
\item{tol}{edge length precision in time cut (default = 1e-08)}
}
\value{
a list with the prunedPhylogeny and a prunedData
}
\description{
removes tips and lineages after a time in the past
}
\references{
Puttick, M. N., Kriwet, J., Wen, W., Hu, S., Thomas, G. H., & Benton, M. J. (2017). Body length of bony fishes was not a selective factor during the biggest mass extinction of all time. Palaeontology, 60, 727-741.
}
\author{
Mark Puttick
}
|
27ec75e9a1b02d091f1ac9ae9e75b91e34411e7e
|
c842ad249301396870d26039aea09fa7cebcb0f9
|
/R/func-NA.R
|
8e31621468431335130550a411cfe5af0d30cdd7
|
[] |
no_license
|
bkkkk/creditScorer
|
9517b2a4e1ef44f4a5b96f3598bba6959268e1f1
|
cb346ccb7fcc7e9d06e290c9f64c28f3a9e3193c
|
refs/heads/master
| 2021-07-07T18:07:00.957275
| 2020-06-19T04:05:59
| 2020-06-19T04:05:59
| 71,555,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,333
|
r
|
func-NA.R
|
#' Flag data-frame columns that contain NA values
#'
#' @param df data frame
#'
#' @return named logical vector, TRUE for each column with at least one NA
#' @export
nacols <- function(df) {
  unlist(lapply(df, anyNA))
}
#' Names of the data-frame columns that contain NA elements
#'
#' @param df a data frame
#'
#' @return character vector with the names of columns containing NAs
#' @export
nacols_name <- function(df) {
  names(df)[nacols(df)]
}
#' Percentage of elements in a vector that are NA
#'
#' @param col vector column
#'
#' @return numeric scalar: the share of NA elements in `col`, in percent
#' @export
nacol_percent <- function(col) {
  # mean() of a logical mask is the fraction of TRUEs.
  100 * mean(is.na(col))
}
#' Replace every NA in a vector with a given value
#'
#' @param x vector possibly containing NAs
#' @param fill value substituted for each NA (default 0)
#'
#' @return `x` with every NA replaced by `fill`
#' @export
replace_na_with <- function(x, fill = 0) {
  # base::replace() performs the subset-assignment and returns the vector.
  replace(x, is.na(x), fill)
}
#' Replace selected values in a vector with NA
#'
#' Useful when collected data encodes missing observations with sentinel
#' status values.
#'
#' @param x vector
#' @param replace vector of values to be turned into NA
#'
#' @return `x` with every element found in `replace` set to NA
#' @export
replace_with_na <- function(x, replace) {
  # %in% never yields NA, so the mask is always TRUE/FALSE.
  hits <- x %in% replace
  ifelse(hits, NA, x)
}
|
16a6827aa5aee293e8d9d6cf5bd02cd7174dc608
|
5984ceda2dec1d78ab7dd7112085534c6493668d
|
/R/ls_param.R
|
7f73236ee9c799450130efe384e452ee64dde1a2
|
[] |
no_license
|
dpastoor/sinew
|
596bc547708925d8a4c2fc101eda0631089b38ae
|
cc363a508f009c22730c942e05eb006e153d5d61
|
refs/heads/master
| 2021-06-23T02:59:34.371814
| 2017-07-19T23:08:49
| 2017-07-19T23:08:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,344
|
r
|
ls_param.R
|
#' @title Return roxygen2 parameter calls from parameter dictionary
#' @description Return roxygen2 parameter calls from the intersection of the parameters listed in
#' the package dictionary and the formals of a function
#' @param obj function or name of function
#' @param dictionary character, path_to_dictionary, Default: 'roxygen-man/Dictionary-1.R'
#' @param print boolean print output to console, Default: TRUE
#' @return character vector
#' @examples
#' repo='https://raw.githubusercontent.com/metrumresearchgroup/sinew/master/'
#' dict_loc=file.path(repo,'man-roxygen/Dictionary-1.R')
#' ls_param(sinew::makeOxygen,dictionary=dict_loc)
#' @export
ls_param=function(obj,dictionary='man-roxygen/Dictionary-1.R',print=TRUE){
  # Read the dictionary file and keep only the "#' @param " lines.
  dictionary_lines=readLines(dictionary,warn = FALSE)
  dictionary_lines=grep("#' @param ",dictionary_lines,value=TRUE)
  # The parameter name is the first word after the "#' @param " prefix.
  dictionary_params=sapply(strsplit(gsub("#' @param ",'',dictionary_lines),' '),'[',1)
  names(dictionary_lines)=dictionary_params
  # NOTE(review): eval(parse(...)) is normally discouraged, but here it
  # also resolves namespaced strings such as "pkg::fun" (as used in the
  # example above), which match.fun() would not handle.
  if(is.character(obj)) obj=eval(parse(text=obj))
  nm=names(formals(obj))
  # Keep only the dictionary entries matching the function's formal args.
  out=dictionary_lines[intersect(nm,dictionary_params)]
  if(print) cat(out,sep='\n')
  # Strip the roxygen prefix and any ", Default:" suffix, leaving the bare
  # description text for each parameter.
  out=mapply(function(nm,out) gsub(sprintf("^#' @param %s\\s+|, Default:.*$",nm),'',out),
             nm=names(out),out=out)
  #out=gsub("^#' @param(.*?),|, Default:.*$",'',out)
  invisible(out)
}
|
b43dfe8b4d29f629d19b755c0cdc001e85bbc219
|
9d59520955aec6e0d572d737900da5464f9c1cc6
|
/man/print_file_loading_info.Rd
|
d5918080e953181f19eae820e3919cb6c6070284
|
[] |
no_license
|
LisaHopcroft/CTutils
|
f4d40692364431d8a4c8183f8faf712564c83790
|
f8c052f3e4d54038da644446fb3d8909cf64df49
|
refs/heads/master
| 2023-09-01T15:43:54.850949
| 2021-10-04T07:35:36
| 2021-10-04T07:35:36
| 294,407,600
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 400
|
rd
|
print_file_loading_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_file_loading_info.R
\name{print_file_loading_info}
\alias{print_file_loading_info}
\title{Print information about which files were loaded.}
\usage{
print_file_loading_info(file_list)
}
\arguments{
\item{file_list}{A list of files that were loaded.}
}
\description{
Print information about which files were loaded.
}
|
95359142d4dc588339bd70be5f6059bed622d092
|
fb7655e2bcfc5ee8c228eed0684e7516eee432f8
|
/02_build/03e_elevation_ruggedness_block.R
|
3f6b2d79fe558919cfe4a52b4dd78a81a472b8d3
|
[] |
no_license
|
galsk223/tribalclimate
|
738e7ea2e4c74b142d84f3e00f4eb7575e8f89dd
|
bced46be1953ae06b54a1b7a9bda48523b98fff8
|
refs/heads/main
| 2023-07-24T07:12:27.477008
| 2021-08-30T20:16:10
| 2021-08-30T20:16:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,835
|
r
|
03e_elevation_ruggedness_block.R
|
setwd("/home/sambk223/tribalclimate")
library(raster)
library(tidyverse)
library(data.table)
library(conflicted)
library(lubridate)
library(sf)
library(tigris)
library(RPostgreSQL)
library(dbplyr)
library(RcppRoll)
library(scales)
library(broom)
library(dotwhisker)
library(janitor)
library(directlabels)
library(googledrive)
library(cowplot)
library(plotly)
library(rmarkdown)
library(readxl)
library(SafeGraphR)
library(vroom)
library(ncdf4)
library(furrr)
library(elevatr)
conflict_prefer("filter", "dplyr")
conflict_prefer("between", "dplyr")
conflict_prefer("last", "dplyr")
conflict_prefer("lag", "dplyr")
conflict_prefer("select", "dplyr")
conflict_prefer("year", "lubridate")
conflict_prefer("ggsave", "ggplot2")
Sys.setenv(RSTUDIO_PANDOC="/usr/lib/rstudio-server/bin/pandoc")
rm(list = ls())
if(!dir.exists("01_data/cache/e_elevrugg_blocks")){
dir.create("01_data/cache/e_elevrugg_blocks")
}
if(!dir.exists("01_data/clean/e_elevrugg_blocks")){
dir.create("01_data/clean/e_elevrugg_blocks")
}
tribedf <- read_rds("01_data/cache/tribe_shapefiles_micro.rds") %>%
filter(!state %in% c("02","15"))
#Begin loop over counties
done <- tibble(files = list.files("01_data/cache/e_elevrugg_blocks/")) %>%
mutate(GEOID_county = str_sub(files,1,-5))
tribedo <- tribedf %>%
filter(!GEOID_county %in% done$GEOID_county) %>%
group_split(GEOID_county)
# t <- unique(tribedo$GEOID_county)[1]
if (length(tribedo)>0){
plan(multisession, workers = 4)
future_map(tribedo,function(t){
#Generate DEM for county
elev.raster <- get_elev_raster(t,z=9,expand = 1)
names(elev.raster) <- "elevation"
#Calculating topographic information and appending to elevation
topo.raster <- stack(elev.raster,terrain(elev.raster,opt = c("slope","aspect","TRI")))
#Extract elevation data
elev.temp <- terra::extract(topo.raster,
t)
write_rds(elev.temp,paste0("01_data/cache/e_elevrugg_blocks/",unique(t$GEOID_county),".rds"))
})
plan(sequential)
gc()
}
fl_cache <- list.files("01_data/cache/e_elevrugg_blocks", full.names = T)
# f <- fl_cache[10]
# et <- 1
map(fl_cache,function(f){
elev.temp <- read_rds(f)
t <- str_sub(f,-9,-5)
geodf <- tribedf %>%
filter(GEOID_county == t)
out <- map_dfr(1:length(elev.temp),function(et){
r <- as_tibble(elev.temp[[et]]) %>%
summarize_all(funs(mean=mean(.,na.rm=T),
sd=sd(.,na.rm=T))) %>%
mutate(GEOID10 = geodf$GEOID10[et])
})
write_rds(out, paste0("01_data/clean/e_elevrugg_blocks/",t,".rds"))
})
fl_final <- list.files("01_data/clean/e_elevrugg_blocks/", full.names = T)
wire.save <- map_dfr(fl_final,function(f){
r <- read_rds(f)
})
write_rds(wire.save,"01_data/clean/e_elevrugg_blocks.rds")
|
5feede52219dfe5bf9f5f273139b7e86b66bd57d
|
f95aadca2d622ccfdd593396a91bae330931f81d
|
/negbin/code/analyze_result.R
|
b4048becb3de6f2118c921db20c22fc098e01b7a
|
[] |
no_license
|
blayes/location-scatter-wasp
|
9948c14e188db5a098a0cb7b6b46fa9c84d2043a
|
04f9ac0cf100fc1809abe18b5876278e1a3944a9
|
refs/heads/master
| 2022-12-01T17:17:07.275272
| 2020-08-09T17:08:49
| 2020-08-09T17:08:49
| 286,288,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,309
|
r
|
analyze_result.R
|
setwd("~/negbin/code/")
rm(list = ls())
library(expm)
library(matrixStats)
library(xtable)
## Wasserstein-2-style distance between the Gaussian approximations
## (sample mean + covariance) of two posterior sample matrices.
##
## fullSamps, approxSamps: matrices with one sampled parameter vector per
## row. Returns a non-negative scalar; 0 when both sample sets share the
## same mean and covariance.
##
## Fix: the original called library(expm) inside the function body (a
## side effect on the search path); the matrix square root is now called
## via expm::sqrtm instead.
calcAccuracy <- function (fullSamps, approxSamps) {
  fullMean <- colMeans(fullSamps)
  approxMean <- colMeans(approxSamps)
  fullCov <- cov(fullSamps)
  approxCov <- cov(approxSamps)
  ## Squared distance between the means ...
  trm1 <- sum((fullMean - approxMean)^2)
  ## ... plus the covariance (trace) term; abs() guards the diagonal
  ## against small negative numerical noise from sqrtm().
  trm2 <- sum(abs(diag(fullCov + approxCov -
                         2 * (expm::sqrtm(expm::sqrtm(approxCov) %*% fullCov %*% expm::sqrtm(approxCov))))))
  sqrt(trm1 + trm2)
}
## Accuracy (distance to the full-data posterior, via calcAccuracy) and
## efficiency (full-data runtime / subset-pipeline runtime) arrays.
## Dimensions:
##   n   : sample-size setting (index 1 filled from the *_n_1e5 files,
##         index 2 from the *_n_1e4 files -- see the two loops below),
##   k   : number of subsets (index 1 = 20 partitions, 2 = 50),
##   cv  : replication 1..10,
##   dim : dimension setting 1..2,
##   method: "wasp" or "xl" (the xueLiang combination).
acc <- array(NA, dim = c(2, 2, 10, 2, 2),
             dimnames = list(paste0("n", 1:2),
                             paste0("k", 1:2),
                             paste0("cv", 1:10),
                             paste0("dim", 1:2),
                             c("wasp", "xl")
                             ))

eff <- array(NA, dim = c(2, 2, 10, 2, 2),
             dimnames = list(paste0("n", 1:2),
                             paste0("k", 1:2),
                             paste0("cv", 1:10),
                             paste0("dim", 1:2),
                             c("wasp", "xl")
                             ))

## Results for the n = 1e5 runs (stored at n-index 1).
for (cc in 1:10) {
  for (pp in 1:2) {
    # Full-data draws (columns bound into one matrix) and total runtime.
    fres <- readRDS(paste0("/Shared/ssrivastva/negbin/result/full/rep_", cc, "_ndim_", pp, "_n_1e5.rds"))
    fbeta <- do.call(cbind, fres$res)
    ftime <- fres$time
    for (kk in 1:2) {
      npart <- c(20, 50)[kk]
      fname <- paste0("/Shared/ssrivastva/negbin/result/subs/comb_rep_", cc, "_ndim_", pp, "_k_", npart, "_n_1e5.rds")
      pres <- readRDS(fname)
      # Per-subset runtimes; the subset pipeline cost below is the mean
      # subset time plus the combination time (wTime / xlTime).
      pt <- rep(NA, npart)
      for (tt in 1:npart) {
        tmp <- readRDS(paste0("/Shared/ssrivastva/negbin/result/subs/rep_", cc, "_ndim_", pp, "_nsub_", tt, "_k_", npart, "_n_1e5.rds"))
        pt[tt] <- tmp$time
      }
      acc[1, kk, cc, pp, "wasp"] <- calcAccuracy(fbeta, pres$res$wasp)
      acc[1, kk, cc, pp, "xl"] <- calcAccuracy(fbeta, pres$res$xueLiang)
      eff[1, kk, cc, pp, "wasp"] <- ftime / (mean(pt) + pres$res$wTime)
      eff[1, kk, cc, pp, "xl"] <- ftime / (mean(pt) + pres$res$xlTime)
    }
  }
}

## Results for the n = 1e4 runs (stored at n-index 2); mirrors the loop
## above with only the file suffix changed.
for (cc in 1:10) {
  for (pp in 1:2) {
    fres <- readRDS(paste0("/Shared/ssrivastva/negbin/result/full/rep_", cc, "_ndim_", pp, "_n_1e4.rds"))
    fbeta <- do.call(cbind, fres$res)
    ftime <- fres$time
    for (kk in 1:2) {
      npart <- c(20, 50)[kk]
      fname <- paste0("/Shared/ssrivastva/negbin/result/subs/comb_rep_", cc, "_ndim_", pp, "_k_", npart, "_n_1e4.rds")
      pres <- readRDS(fname)
      pt <- rep(NA, npart)
      for (tt in 1:npart) {
        tmp <- readRDS(paste0("/Shared/ssrivastva/negbin/result/subs/rep_", cc, "_ndim_", pp, "_nsub_", tt, "_k_", npart, "_n_1e4.rds"))
        pt[tt] <- tmp$time
      }
      acc[2, kk, cc, pp, "wasp"] <- calcAccuracy(fbeta, pres$res$wasp)
      acc[2, kk, cc, pp, "xl"] <- calcAccuracy(fbeta, pres$res$xueLiang)
      eff[2, kk, cc, pp, "wasp"] <- ftime / (mean(pt) + pres$res$wTime)
      eff[2, kk, cc, pp, "xl"] <- ftime / (mean(pt) + pres$res$xlTime)
    }
  }
}

## Average over the 10 replications (margin 3 is dropped).
accSumm <- round(apply(acc, c(1, 2, 4, 5), mean), 4)
effSumm <- round(apply(eff, c(1, 2, 4, 5), mean), 4)

## LaTeX table of accuracies. Row 1 uses n-index 2 (the 1e4 runs), row 2
## uses n-index 1 (the 1e5 runs); within a row the columns alternate
## wasp/xl for dim 1 then dim 2, first for k = 20 and then for k = 50.
xtable(format(
    rbind(c(c(accSumm[2, 1, 1, "wasp"], accSumm[2, 1, 1, "xl"], accSumm[2, 1, 2, "wasp"], accSumm[2, 1, 2, "xl"]),
            c(accSumm[2, 2, 1, "wasp"], accSumm[2, 2, 1, "xl"], accSumm[2, 2, 2, "wasp"], accSumm[2, 2, 2, "xl"])),
          c(c(accSumm[1, 1, 1, "wasp"], accSumm[1, 1, 1, "xl"], accSumm[1, 1, 2, "wasp"], accSumm[1, 1, 2, "xl"]),
            c(accSumm[1, 2, 1, "wasp"], accSumm[1, 2, 1, "xl"], accSumm[1, 2, 2, "wasp"], accSumm[1, 2, 2, "xl"])
            )
          ), nsmall = 4
)
)

## Same layout for the efficiency summaries.
xtable(format(
    rbind(c(c(effSumm[2, 1, 1, "wasp"], effSumm[2, 1, 1, "xl"], effSumm[2, 1, 2, "wasp"], effSumm[2, 1, 2, "xl"]),
            c(effSumm[2, 2, 1, "wasp"], effSumm[2, 2, 1, "xl"], effSumm[2, 2, 2, "wasp"], effSumm[2, 2, 2, "xl"])),
          c(c(effSumm[1, 1, 1, "wasp"], effSumm[1, 1, 1, "xl"], effSumm[1, 1, 2, "wasp"], effSumm[1, 1, 2, "xl"]),
            c(effSumm[1, 2, 1, "wasp"], effSumm[1, 2, 1, "xl"], effSumm[1, 2, 2, "wasp"], effSumm[1, 2, 2, "xl"])
            )
          ), nsmall = 4
)
)
|
ad429c7bf1a5d88ee4b7db40570ba13684707f93
|
5e2fba91bcb5074f731160ad6771552d8f6e2c67
|
/WorkingWithDates.R
|
9a055253e2155454103de336049c4e167d2244a7
|
[] |
no_license
|
TheMrCSC/GettingAndCleaningData-Wk4
|
11b052ec3e230fae3e4e21db07c5a224327c0219
|
28dcc0cba4596381b5018f4bce81a8a9db509bd5
|
refs/heads/master
| 2022-04-28T03:19:01.886009
| 2020-04-13T16:46:08
| 2020-04-13T16:46:08
| 255,264,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 615
|
r
|
WorkingWithDates.R
|
# starting simple: date() returns the current date-time as a character string
d1 = date()
class(d1)

# Date class: Sys.Date() returns an object of class "Date"
d2 = Sys.Date()
class(d2)

# formatting dates (abbreviated weekday, month, day of month, 4-digit year)
format(d2, "%a %b %d %Y")

# creating dates from "day-month-year" strings
x = c("1jan1960","2jan1960", "31mar1960", "30jul1960")
z = as.Date(x, "%d%b%Y")
z[1]-z[2]               # difference between Dates is a "difftime" in days
as.numeric((z[1]-z[2])) # coerce the difference to a plain number

# converting to julian (days since the 1970-01-01 origin) and extracting parts
weekdays(d2)
months(d2)
julian(d2)

# lubridate: flexible date-time parsing helpers
install.packages("lubridate")
library(lubridate)
ymd("20140108")
mdy("08/04/2013")
dmy("03/04/2018")
ymd_hms("2011-08-03 10:15:30")
ymd_hms("2011-08-03 10:15:30", tz= "Pacific/Auckland") # with an explicit time zone
x = dmy(c("1jan2013", "2jan2013", "31mar2013","30jul2013"))
wday(x[1])               # day of the week as a number
wday(x[1], label = TRUE) # day of the week as a labelled factor
|
df1bbbe8d146a3646234ddc8f367ef7333865973
|
c1034eb8f34b18105acf3244bf9a0b0339d6ca8d
|
/man/getVIPScores.pls.Rd
|
87ad047813e0a94651eda8eba08b93997ae836a8
|
[
"MIT"
] |
permissive
|
svkucheryavski/mdatools
|
f8d4eafbb34d57283ee753eceea1584aed6da3b9
|
2e3d262e8ac272c254325a0a56e067ebf02beb59
|
refs/heads/master
| 2023-08-17T16:11:14.122769
| 2023-08-12T16:58:49
| 2023-08-12T16:58:49
| 11,718,739
| 31
| 11
|
NOASSERTION
| 2020-07-23T18:50:22
| 2013-07-28T11:10:36
|
R
|
UTF-8
|
R
| false
| true
| 586
|
rd
|
getVIPScores.pls.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pls.R
\name{getVIPScores.pls}
\alias{getVIPScores.pls}
\title{VIP scores for PLS model}
\usage{
\method{getVIPScores}{pls}(obj, ncomp = obj$ncomp.selected, ...)
}
\arguments{
\item{obj}{a PLS model (object of class \code{pls})}
\item{ncomp}{number of components to count}
\item{...}{other parameters}
}
\value{
matrix \code{nvar x 1} with VIP score values
}
\description{
Returns vector with VIP scores values. This function is a proxy for \code{\link{vipscores}}
and will be removed in future releases.
}
|
710866b2ad2f0f34693b2589679b019fcd13a3e4
|
6ae53579a4fb61221365e75ee31c9b2f75369c98
|
/server.R
|
5bf227cf878588b86d2ed9377105eed780d4d065
|
[] |
no_license
|
kaiqiong/predictionShinyApp
|
8139aa8de7a91661d5af0c81bd9f5e875153be3f
|
a0014755c67087575041b0830bf1376ebf6afa4b
|
refs/heads/master
| 2023-01-24T11:37:06.673063
| 2020-12-05T01:47:58
| 2020-12-05T01:47:58
| 318,674,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,064
|
r
|
server.R
|
library(ggeffects)
library(sjPlot)
# pacman::p_load(sjPlot)
# pacman::p_load(cowplot)
library(ggplot2)
#library(plogr)
fit <- readRDS("data/Pred_Model.rds")
#load packages and data, although i could be missing some
shinyServer(function(input, output, session) {
predicted_prob <- reactive({
req(input$Age, input$Length_Of_Stay, input$Operative_Time)
pred_prob_all <- ggpredict(fit, terms = "Age [18:95]", condition = c( DIABETES= input$DIABETES, Hypertension_On_Medication = input$Hypertension_On_Medication,
SEX = input$SEX, SMOKing = input$SMOKing, Length_Of_Stay = input$Length_Of_Stay,
Major_Morbidity = input$Major_Morbidity, Elective_Surgery = input$Elective_Surgery, Resection = input$Resection,
Disease = input$Disease, ASA_Class = input$ASA_Class, Operative_Time = input$Operative_Time))
pred_prob_all[which(pred_prob_all$x == input$Age),, drop = FALSE]
})
predicted_prob_los <- reactive({
req(input$Age, input$Length_Of_Stay, input$Operative_Time)
pred_prob_all_los <- ggpredict(fit, terms = "Length_Of_Stay [1:395]", condition = c(Age = input$Age, DIABETES= input$DIABETES, Hypertension_On_Medication = input$Hypertension_On_Medication,
SEX = input$SEX, SMOKing = input$SMOKing,
Major_Morbidity = input$Major_Morbidity, Elective_Surgery = input$Elective_Surgery, Resection = input$Resection,
Disease = input$Disease, ASA_Class = input$ASA_Class, Operative_Time = input$Operative_Time))
pred_prob_all_los[which(pred_prob_all_los$x == input$Length_Of_Stay),, drop = FALSE]
})
predicted_prob_opt <- reactive({
req(input$Age, input$Length_Of_Stay, input$Operative_Time)
pred_prob_all_opt <- ggpredict(fit, terms = "Operative_Time[5:700]", condition = c(Age = input$Age, DIABETES= input$DIABETES, Hypertension_On_Medication = input$Hypertension_On_Medication,
SEX = input$SEX, SMOKing = input$SMOKing, Length_Of_Stay = input$Length_Of_Stay,
Major_Morbidity = input$Major_Morbidity, Elective_Surgery = input$Elective_Surgery, Resection = input$Resection,
Disease = input$Disease, ASA_Class = input$ASA_Class))
pred_prob_all_opt[which(pred_prob_all_opt$x == input$Operative_Time),, drop = FALSE]
})
plot_data <- reactive({
res <- ggpredict(fit, terms = "Age [all]", condition = c( DIABETES= input$DIABETES, Hypertension_On_Medication = input$Hypertension_On_Medication,
SEX = input$SEX, SMOKing = input$SMOKing, Length_Of_Stay = input$Length_Of_Stay,
Major_Morbidity = input$Major_Morbidity, Elective_Surgery = input$Elective_Surgery, Resection = input$Resection,
Disease = input$Disease, ASA_Class = input$ASA_Class, Operative_Time = input$Operative_Time))
res
})
plot_data_los <- reactive({
res <- ggpredict(fit, terms = "Length_Of_Stay [all]", condition = c( DIABETES= input$DIABETES, Hypertension_On_Medication = input$Hypertension_On_Medication,
SEX = input$SEX, SMOKing = input$SMOKing, Age = input$Age,
Major_Morbidity = input$Major_Morbidity, Elective_Surgery = input$Elective_Surgery, Resection = input$Resection,
Disease = input$Disease, ASA_Class = input$ASA_Class, Operative_Time = input$Operative_Time))
res
})
plot_data_opt <- reactive({
res <- ggpredict(fit, terms = "Operative_Time [all]", condition = c( DIABETES= input$DIABETES, Hypertension_On_Medication = input$Hypertension_On_Medication,
SEX = input$SEX, SMOKing = input$SMOKing, Length_Of_Stay = input$Length_Of_Stay,
Major_Morbidity = input$Major_Morbidity, Elective_Surgery = input$Elective_Surgery, Resection = input$Resection,
Disease = input$Disease, ASA_Class = input$ASA_Class, Age = input$Age))
res
})
# Generate an HTML table view of the data ----
output$table <- renderTable({
#
sjPlot::tab_model(fit)
# pander::pander(summary(fit)$coefficients, split.table = Inf)
})
output$print_pred = renderPrint({
sprintf("Probability of readmission: %.2f,\n 95%% Confidence Interval: [%.2f, %.2f]",
predicted_prob()[,"predicted"],predicted_prob()[,"conf.low"],predicted_prob()[,"conf.high"])
# predicted_prob()
# predicted_prob[,"prob"]
# input$preopventilat
})
output$prob1 <- renderText({
paste("Probability of Readmission:",round(predicted_prob()[,"predicted"],3))
})
output$CI <- renderText({
paste("95% Confidence Interval: [",round(predicted_prob()[,"conf.low"],3), ", ",
round(predicted_prob()[,"conf.high"],3) ,"]")
})
output$plot <- renderPlot({
trop <- c("darkorange", "dodgerblue", "hotpink" , "limegreen" , "yellow")
par(mfrow = c(3, 1))
par(mai=c(0.65,0.9,0.1,0.1))
par(oma = c(4, 1, 1, 1))
plot(plot_data()[,"x"], plot_data()[,"predicted"], lwd = 4, type = "l", ylab = "Probability of Readmission", xlab = "Age", col = trop[2],
bty="n", xaxt="n", cex.lab = 2.4, xlim = c(20,100), ylim = range(plot_data()[,c("predicted","conf.low","conf.high")]), cex.axis = 2)
axis(1, labels = T, at = seq(20,90,10),cex.axis=2)
lines(plot_data()[,"x"], plot_data()[,"conf.low"], lty = 2, col = "grey", lwd=3)
lines(plot_data()[,"x"], plot_data()[,"conf.high"], lty = 2, col = "grey", lwd=3)
points(x = input$Age,y =predicted_prob()[,"predicted"], pch = 19, col = "red", cex = 2)
plot(plot_data_los()[,"x"], plot_data_los()[,"predicted"], lwd = 4, type = "l", ylab = "Probability of Readmission", xlab = "Length of Stay", col = trop[2],
bty="n", xaxt="n", cex.lab = 2.4, xlim = c(0,80), ylim = range(plot_data_los()[,c("predicted","conf.low","conf.high")]), cex.axis = 2)
axis(1, labels = T, at = seq(1,400,10), cex.axis=2)
lines(plot_data_los()[,"x"], plot_data_los()[,"conf.low"], lty = 2, col = "grey", lwd=3)
lines(plot_data_los()[,"x"], plot_data_los()[,"conf.high"], lty = 2, col = "grey", lwd=3)
points(x = input$Length_Of_Stay,y =predicted_prob_los()[,"predicted"], pch = 19, col = 'red', cex = 2)
plot(plot_data_opt()[,"x"], plot_data_opt()[,"predicted"], lwd = 4, type = "l", ylab = "Probability of Readmission", xlab = "Operation Time", col = trop[2],
bty="n", xaxt="n", cex.lab = 2.4, xlim = c(0,700), ylim = range(plot_data_opt()[,c("predicted","conf.low","conf.high")]), cex.axis = 2)
axis(1, labels = T, at = seq(0,700,50), cex.axis=2)
lines(plot_data_opt()[,"x"], plot_data_opt()[,"conf.low"], lty = 2, col = "grey", lwd=3)
lines(plot_data_opt()[,"x"], plot_data_opt()[,"conf.high"], lty = 2, col = "grey", lwd=3)
points(x = input$Operative_Time,y =predicted_prob_opt()[,"predicted"], pch = 19, col = "red", cex = 2)
# trop <- c("darkorange", "dodgerblue", "hotpink" , "limegreen" , "yellow")
#par(mai=c(0.85,0.9,0.1,0.2))
#par(oma = c(4, 1, 1, 1))
#plot(plot_data()[,"numage"], plot_data()[,"prob"], lwd = 4, type = "l", ylab = "Probability of Death", xlab = "Age", col = trop[2],
# bty="n", xaxt="n", cex.lab = 1.4, xlim = c(20,100), ylim = range(plot_data()[,c("prob","lower","upper")]))
# axis(1, labels = T, at = seq(20,90,10))
# lines(plot_data()[,"numage"], plot_data()[,"lower"], lty = 2, col = "grey")
# lines(plot_data()[,"numage"], plot_data()[,"upper"], lty = 2, col = "grey")
# points(x = input$numage,y = predicted_prob()[,"prob"], pch = 19, col = "red", cex = 2)
#plot(plot_data(), grid=F) +
# theme(legend.position = "bottom",title = element_text(size = 20),
# axis.text.x = element_text(angle = 0, hjust = 1, size = 16),
# axis.text.y = element_text(size = 16),
# legend.text = element_text(size = 16), legend.title = element_text(size = 16),
# strip.text = element_text(size = 18)) +
# labs(x = "Age", y = " ")
}, height = 900, width = 600)
})
|
92341e8921ebe90a5290b6835d6378ef45c34327
|
ca8dd4c043368c43cc42aafcba8bf8f3a6ac77b5
|
/R/byrnes.R
|
77b6f1c442ff28afd2b4861e277214d27220125c
|
[] |
no_license
|
gudaleon/semeco
|
47a01d82df04b88a740e4d4c9195e5aea7632292
|
1b2491ffe83c18303d8df91b1628ffecf0479d7d
|
refs/heads/master
| 2020-03-20T05:34:54.292407
| 2016-09-23T11:09:49
| 2016-09-23T11:09:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
r
|
byrnes.R
|
#' Effects of storm frequency on kelp forest community structure and food web
#' complexity
#'
#' @docType data
#' @format Model specification and data frame with 253 observations of 15 variables
#' @aliases byrnes byrnes.model
#' @usage
#' data(byrnes)
#' byrnes
#' byrnes.model
#' @name byrnes
#' @source
#' Article:
#'
#' \url{http://onlinelibrary.wiley.com/doi/10.1111/2041-210X.12512/abstract}
#' @references
#' Byrnes J. et al. 2011. Climate-driven increases in storm frequency simplify
#' kelp forest food webs.
#'
#' @examples
#' \dontrun{
#' data(byrnes)
#' summary(sem(byrnes.model, byrnes, estimator = "MLM"), standardized = TRUE)
#' }
|
90a6f23ca9b8808b399be7c49b1b96aabe6c0846
|
8bcdf49de7b0a84de66ab21d4e05d1be5a8e1a95
|
/SWing MCD CA.R
|
cc37d29485abfd832cff341fb765b3e28cb178c6
|
[] |
no_license
|
crystalptacek/SouthPavilionWing
|
89a467d439291d701297688cadcff027e78ca6c9
|
7be59db5b6da0ebbce44d30c2ac85386bd956c86
|
refs/heads/master
| 2020-04-20T21:18:13.557140
| 2020-02-03T14:13:52
| 2020-02-03T14:13:52
| 169,105,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,973
|
r
|
SWing MCD CA.R
|
# wareTypeCAandMCD.R
# Establish a DBI connection to DAACS PostgreSQL database and submnit SQL queries
# Created by: FDN 8.5.2014
# Previous update: EAB 3.24.2015 To add MCDs and TPQs by Phase
# Last update: LAB 9.5.2017 to add List of Contexts with Phase Assignments for database updates
# Updated to South Wing CLP 10.20.2018
setwd("P:/Reports/South Pavilion and South Wing/R Code")
#load the library
require(RPostgreSQL)
library(plyr)
library(dplyr)
source('credentials.R')
# get the table with the ware type date ranges
MCDTypeTable<- dbGetQuery(DRCcon,'
SELECT *
FROM "tblCeramicWare"
')
# submit a SQL query: note the use of \ as an escape sequence
# note the LEFT JOIN on the Feature table retains non-feature contexts
#Fill in your appropriate projectID
wareTypeData<-dbGetQuery(DRCcon,'
SELECT
"public"."tblCeramic"."Quantity",
"public"."tblCeramicWare"."Ware",
"public"."tblCeramicWare"."BeginDate",
"public"."tblCeramicWare"."EndDate",
"public"."tblContextFeatureType"."FeatureType",
"public"."tblCeramicGenre"."CeramicGenre",
"public"."tblContext"."QuadratID",
"public"."tblContext"."ProjectID",
"public"."tblContext"."Context",
"public"."tblContextDepositType"."DepositType",
"public"."tblContext"."DAACSStratigraphicGroup",
"public"."tblContext"."FeatureNumber"
FROM
"public"."tblContext"
INNER JOIN "public"."tblContextSample" ON "public"."tblContextSample"."ContextAutoID" = "public"."tblContext"."ContextAutoID"
INNER JOIN "public"."tblGenerateContextArtifactID" ON "public"."tblContextSample"."ContextSampleID" = "public"."tblGenerateContextArtifactID"."ContextSampleID"
LEFT JOIN "public"."tblContextDepositType" ON "public"."tblContext"."DepositTypeID" = "public"."tblContextDepositType"."DepositTypeID"
INNER JOIN "public"."tblCeramic" ON "public"."tblCeramic"."GenerateContextArtifactID" = "public"."tblGenerateContextArtifactID"."GenerateContextArtifactID"
INNER JOIN "public"."tblCeramicWare" ON "public"."tblCeramic"."WareID" = "public"."tblCeramicWare"."WareID"
LEFT JOIN "public"."tblContextFeatureType" ON "public"."tblContext"."FeatureTypeID" = "public"."tblContextFeatureType"."FeatureTypeID"
LEFT JOIN "public"."tblCeramicGenre" ON "public"."tblCeramic"."CeramicGenreID" = "public"."tblCeramicGenre"."CeramicGenreID"
WHERE
"public"."tblContext"."ProjectID" = \'68\'
')
#Remove contexts with deposit type cleanup and surface collection
#wareTypeData <- subset(csr1410, ! csr1410$DepositType %in% c('Clean-Up/Out-of-Stratigraphic Context',
# 'Surface Collection'))
# Section 2:Create the UNIT Variable ######################
# This is the level at which assemblages are aggregated
# in the analysis
# compute new numeric variables from original ones, which we will need to compute the MCDs
MCDTypeTable<-within(MCDTypeTable, { # Notice that multiple vars can be changed
midPoint <- (EndDate+BeginDate)/2
span <- EndDate - BeginDate
inverseVar <- 1/(span/6)^2
})
# let's see what we have for ware types and counts
#help(aggregate)
require(plyr)
summary2<-ddply(wareTypeData, .(Ware), summarise, Count=sum(Quantity))
summary2
# Now we do some ware type recoding if necessary
# For example if "American Stoneware" is William Rogers, we might recode it as "Fulham Type"
# wareTypeData$Ware[wareTypeData$Ware =='American Stoneware'] <- 'Fulham Type'
# get rid of types with no dates
typesWithNoDates <- MCDTypeTable$Ware[(is.na(MCDTypeTable$midPoint))]
wareTypeDataY<- wareTypeData[!wareTypeData$Ware %in% typesWithNoDates,]
# Take out anamolous contexts. Can do here or in line 156.
# wareTypeData<- subset(wareTypeData, ! wareTypeData$ContextID %in%
# c('67-2589B'))
#wareTypeData1<- subset(wareTypeData, ! wareTypeData$FeatureNumber %in%
# c('F01', 'F03'))
#Take out utility lines
# wareTypeData<- subset(wareTypeData, ! wareTypeData$FeatureType %in%
# c('Trench, utility'))
#Replace blanks in SG and Feature Number to NA
wareTypeData1 <-
mutate(wareTypeDataY, unit=ifelse((FeatureNumber == '' & DAACSStratigraphicGroup == ''),
paste(Context),
ifelse((FeatureNumber != '' & DAACSStratigraphicGroup == ''),
paste(Context, FeatureNumber),
ifelse((FeatureNumber == '' & DAACSStratigraphicGroup != ''),
paste(DAACSStratigraphicGroup),
ifelse((FeatureNumber != '' & DAACSStratigraphicGroup != ''),
paste(FeatureNumber, DAACSStratigraphicGroup),
paste(Context)
)))))
#Removing ware types with less than 5 sherds total. Will also do below in line 153.
#Can do it here or down further.
#wareTypeData2<- subset(wareTypeData1, ! wareTypeData1$Ware %in% c('Astbury Type',
# 'Black Basalt',
# 'Bristol Glaze Stoneware'))
# 'Buckley-type',
# 'Canary Ware'))
## Section 3:Transpose the Data ######################
# lets get a data frame with contexts as rows and type as cols, with the
# entries as counts
WareByUnit <- ddply(wareTypeData1, .(unit, Ware), summarise, Count=sum(Quantity))
# now we transpose the data so that we end up with a context (rows) x type
# (cols) data matrix; unit ~ ware formula syntax, left side = row, right side = column, to fill in
# body of table with the counts, fill rest with zeros
require(reshape2)
WareByUnitT <- dcast(WareByUnit, unit ~ Ware, value.var='Count', fill=0 )
# lets compute the totals for each context i.e. row
# Note the use of column numbers as index values to get the type counts, which are
# assumed to start iin col 2.
WareByUnitTTotals<- rowSums(WareByUnitT[,2:ncol(WareByUnitT)])
# OK now let's get rid of all the rows where totals are <= 5
WareByUnitT0 <-WareByUnitT[WareByUnitTTotals>5,]
#delete any outliers
WareByUnitT1 <-subset(WareByUnitT0, !WareByUnitT0$unit %in% c('F11 SG18'))
#Ok now let's get rid of all the columns (ware types) where totals < 0
#WareByUnitT2<-WareByUnitT0[, colSums(WareByUnitT1 != 0) > 0]
WareByUnitT2<-WareByUnitT1[, colSums(WareByUnitT1 != 0) > 0]
##Section 4: Define an MCD function and Function to Remove Types w/o Dates ######################
# now we build a function that computes MCDs
# two arguments: 1. unitData: a dataframe with the counts of ware types in units. We assume
# the left variable IDs the units, while the rest of the varaibles are types
# 2. typeData: a dataframe with at least two variables named 'midPoint' and 'inversevar'
# containing the manufacturing midpoints and inverse variances for the types.
# retruns a list comprise of two dataframes:
# MCDs has units and the vanilla and BLUE MCDs
# midPoints has the types and manufacturing midpoints, in the order they appeaed in the input
# unitData dataframe.
EstimateMCD<- function(unitData,typeData){
#for debugging
#unitData<- WareByUnitT1
#typeData <-mcdTypes
countMatrix<- as.matrix(unitData[,2:ncol(unitData)])
unitNames <- (unitData[,1])
nUnits <- nrow(unitData)
nTypes<- nrow(typeData)
nTypesFnd <-ncol(countMatrix)
typeNames<- colnames(countMatrix)
# create two col vectors to hold inverse variances and midpoints
# _in the order in which the type variables occur in the data_.
invVar<-matrix(data=0,nrow=nTypesFnd, ncol=1)
mPoint <- matrix(data=0,nrow=nTypesFnd, ncol=1)
for (i in (1:nTypes)){
for (j in (1:nTypesFnd)){
if (typeData$Ware[i]==typeNames[j]) {
invVar[j,]<-typeData$inverseVar[i]
mPoint[j,] <-typeData$midPoint[i]
}
}
}
# replace NAs for types with no dates with 0s -- so they do not count
# compute the blue MCDs
# get a unit by type matrix of inverse variances
invVarMat<-matrix(t(invVar),nUnits,nTypesFnd, byrow=T)
# a matrix of weights
blueWtMat<- countMatrix * invVarMat
# sums of the weight
sumBlueWts <- rowSums(blueWtMat)
# the BLUE MCDs
blueMCD<-(blueWtMat %*% mPoint) / sumBlueWts
# compute the vanilla MCDs
sumWts <- rowSums(countMatrix)
# the vanilla MCDs
MCD<-(countMatrix %*% mPoint) / sumWts
# now for the TPQs
meltedUnitData<- melt(unitData, id.vars='unit', variable.name = 'Ware', value.name='count')
meltedUnitData <- subset(meltedUnitData, count > 0)
mergedUnitData <- merge(x = meltedUnitData, y = typeData, by.x='Ware', by.y='Ware')
# the trick is that to figure out the tpq it's best to have each record (row) represent an individual sherd
# but in its current state, each record has a count that is likely more than 1 so it's necessary to break them up
# use rep and rownames - rowname is a unique number for each row, kind of link an index
# rep goes through dataframe mergedUnitData and replicates based on the count column, i.e. if count is
# 5 it will create 5 records or rows and only replicates columns 2 and 6 (2 is unit name and 6 is begin date)
repUnitData <- mergedUnitData[rep(rownames(mergedUnitData),mergedUnitData$count),c(2,6)]
#once all the rows have a count of one, then can run the quantile function
TPQ <- tapply(repUnitData$BeginDate,repUnitData$unit,
function(x) quantile(x, probs =1.0, type=3 ))
TPQp95 <- tapply(repUnitData$BeginDate,repUnitData$unit,
function(x) quantile(x, probs = .95 , type=3 ))
TPQp90 <- tapply(repUnitData$BeginDate,repUnitData$unit,
function(x) quantile(x, probs = .90, , type=3 ))
# Finally we assemble the results in to a list
MCDs<-data.frame(unitNames,MCD,blueMCD, TPQ, TPQp95, TPQp90, sumWts )
colnames(MCDs)<- c('unit','MCD','blueMCD', 'TPQ', 'TPQp95', 'TPQp90', 'Count')
midPoints <- data.frame(typeNames,mPoint)
MCDs <- list('MCDs'=MCDs,'midPoints'=midPoints)
return(MCDs)
}
#end of function EstimateMCD
# apply the function
MCDByUnit<-EstimateMCD(WareByUnitT2,MCDTypeTable)
MCDByUnit
# a function to sort the rows and cols of a matrix based on the
# orders from two arguments (e.g. MCDs and midpoints)
# arguments: the name of the variable that contains the unit scores (e.g. MCDs)
# the name of the variable that contains the type score (e.g. the midpoints)
# the name of the dataframe that contains the counts of ware types in units
# returns: the sorted dataframe
sortData<- function(unitScores,typeScores,unitData){
#unitScores<-U3MCDByUnit$MCDs$blueMCD
#typeScores<-U3MCDByUnit$midPoints$mPoint
#unitData<- U3WareByUnitT1
sortedData<-unitData[order(unitScores),]
sortedData<-sortedData[,c(1,order(typeScores)+1)]
return(sortedData)
}
# apply the function
WareByUnitT2Sorted<-sortData(MCDByUnit$MCDs$blueMCD,
MCDByUnit$midPoints$mPoint,
WareByUnitT2)
WareByUnitT2Sorted
# now we prep the sorted dataframe to make a Bertin plot
# convert to a matrix, whose cols are the counts
# make the unit name a 'rowname" of the matrix
Mat<-as.matrix(WareByUnitT2Sorted[,2:ncol(WareByUnitT2Sorted)])
rownames(Mat)<-WareByUnitT2Sorted$unit
rSums<- matrix (rowSums(Mat),nrow(Mat),ncol(Mat), byrow=F)
MatProp<-Mat/rSums
# do the plot
#(package for seriation)
library(plotrix)
battleship.plot(MatProp,
mar=c(2,5,5,1),
#main = 'Seriation',
#xlab='ManuTech',
ylab= 'Context',
col='grey')
# now let's try some Correspondence Analysis
Matx<-as.matrix(WareByUnitT2[,2:ncol(WareByUnitT2)])
rownames(Matx)<-WareByUnitT2$unit
require(ca)
ca3<-ca(Matx)
#summary(ca3)
#Scree plot
plot(1:(length(ca3$sv)), ca3$sv^2 / sum(ca3$sv^2), cex=1.25)
#ca3$sv
#default plot
plot(ca3, cex.lab=1.25, cex.axis=1.25)
#create dataframe of unit/context dimension 1 and 2 scores for ggplot
rowscores <- data.frame(ca3$rowcoord[,1], ca3$rowcoord[,2])
colnames(rowscores) <- c("Dim1", "Dim2")
#create dataframe of ware type dimension 1 and 2 scores for ggplot
colscores <- data.frame(ca3$colcoord[,1], ca3$colcoord[,2])
colnames(colscores) <- c("Dim1", "Dim2")
# plot the row scores on dim1 and dim2
# plot(ca3$rowcoord[,1], ca3$rowcoord[,2], pch=21, bg="cornflower blue", cex=1.25,
# xlab="Dimension 1", ylab="Dimension 2", cex.lab=1.25, cex.axis=1.25)
# text(ca3$rowcoord[,1],ca3$rowcoord[,2],rownames(ca3$rowcoord),
# pos=4, cex=1.0, col="black", cex.lab=1.5)
#ggplot version of row scores dim 1 and dim 2
require(ggplot2)
library(ggrepel)
p1 <- ggplot(rowscores, aes(x=rowscores$Dim1,y=rowscores$Dim2))+
geom_point(shape=21, size=5, colour="black", fill="cornflower blue")+
#geom_text(aes(label=CA_MCD_Phase1$unit),vjust=-.6, cex=5)+
geom_text_repel(aes(label=rownames(rowscores)), cex=4) +
theme_classic()+
labs(title="South Wing", x="Dimension 1", y="Dimension 2")+
theme(plot.title=element_text(size=rel(2.25), hjust=0.5),axis.title=element_text(size=rel(1.75)),
axis.text=element_text(size=rel(1.5)))
p1
#save the plot for website chronology page/presentations
ggsave("SouthWing_Figure1Dim1Dim2_2018.png", p1, width=10, height=7.5, dpi=300)
# plot the col scores on dim1 and dim2, which types are important in which regions of the plot
#plot(ca3$colcoord[,1],ca3$colcoord[,2],pch=21, bg="cornflower blue",cex=1.25,
# xlab="Dimension 1", ylab="Dimension 2", asp=1, cex.lab=1.25, cex.axis=1.25)
#text(ca3$colcoord[,1],ca3$colcoord[,2],rownames(ca3$colcoord),
# pos=4 ,cex=1.25, col="black")
#ggplot version of col scores dim 1 and dim 2
p2 <- ggplot(colscores, aes(x=colscores$Dim1,y=colscores$Dim2))+
geom_point(shape=21, size=5, colour="black", fill="cornflower blue")+
#geom_text(aes(label=CA_MCD_Phase1$unit),vjust=-.6, cex=5)+
geom_text_repel(aes(label=rownames(colscores)), cex=5) +
theme_classic()+
labs(title="South Wing", x="Dimension 1", y="Dimension 2")+
theme(plot.title=element_text(size=rel(2.25), hjust=0.5),axis.title=element_text(size=rel(1.75)),
axis.text=element_text(size=rel(1.5)))
p2
#save the plot for website chronology page/presentations
ggsave("SouthWing_Figure2WareTypes_2018.png", p2, width=10, height=7.5, dpi=300)
# finally let's see what the relationship is between MCDs and CA scores
# CA Dim 1 vs. MCDs
#plot(ca3$rowcoord[,1], MCDByUnit$MCDs$blueMCD, pch=21, bg="black",cex=1.25,
# xlab="Dimension 1", ylab="BLUE MCD",cex.lab=1.5, cex.axis=1.5)
#text(ca3$rowcoord[,1],MCDByUnit$MCDs$blueMCD,rownames(ca3$rowcoord),
# pos=4, cex=1.25, col="black")
#ggplot version of CA Dim 1 vs. MCDs
p3 <- ggplot(rowscores, aes(x=rowscores$Dim1,y=MCDByUnit$MCDs$blueMCD))+
geom_point(shape=21, size=5, colour="black", fill="cornflower blue")+
#geom_text(aes(label=CA_MCD_Phase1$unit),vjust=-.6, cex=5)+
#geom_text_repel(aes(label=rownames(rowscores)), cex=6) +
theme_classic()+
labs(title="South Wing", x="Dimension 1", y="BLUE MCD")+
theme(plot.title=element_text(size=rel(2.25), hjust=0.5),axis.title=element_text(size=rel(1.75)),
axis.text=element_text(size=rel(1.5)))
p3 + geom_vline(xintercept=c(-1.6))
cor.test(ca3$rowcoord[,1],MCDByUnit$MCDs$blueMCD, method="kendall")
#save the plot for website chronology page/presentations
ggsave("SouthWing_Dim1BLUEMCD_2018.png", p3, width=10, height=7.5, dpi=300)
# CA Dim 2 vs. MCD
#plot(ca3$rowcoord[,2], MCDByUnit$MCDs$blueMCD, pch=21, bg="black", cex=1.25,
# xlab="Dimension 2", ylab="BLUE MCD", cex.lab=1.5, cex.axis=1.5)
#text(ca3$rowcoord[,2],MCDByUnit$MCDs$blueMCD,rownames(ca3$rowcoord),
# pos=4, cex=1.25, col="black")
p4 <- ggplot(rowscores, aes(x=rowscores$Dim2,y=MCDByUnit$MCDs$blueMCD))+
geom_point(shape=21, size=5, colour="black", fill="cornflower blue")+
#geom_text(aes(label=CA_MCD_Phase1$unit),vjust=-.6, cex=5)+
# geom_text_repel(aes(label=rownames(rowscores)), cex=6) +
theme_classic()+
labs(title="South Wing", x="Dimension 2", y="BLUE MCD")+
theme(plot.title=element_text(size=rel(2.25), hjust=0.5),axis.title=element_text(size=rel(1.75)),
axis.text=element_text(size=rel(1.5)))
p4
cor.test(ca3$rowcoord[,2],MCDByUnit$MCDs$blueMCD, method="kendall")
#ggsave("SouthWing_Dim2BLUEMCD.png", p4, width=10, height=7.5, dpi=300)
#create table of contexts, counts, and mcds
unit <- MCDByUnit$MCDs$unit
dim1Scores <- ca3$rowcoord[,1]
dim2Scores <- ca3$rowcoord[,2]
MCD<- MCDByUnit$MCDs$MCD
blueMCD <-MCDByUnit$MCDs$blueMCD
count<- MCDByUnit$MCDs$Count
CA_MCD<-data.frame(unit, dim1Scores,dim2Scores,MCD,blueMCD, count)
#Create weighted histogram for phasing
library(plotrix)
#Compares counts of sherds in all units with BLUE MCDs that fall within bin
#You may need to change sequence dates
weighted.hist(CA_MCD$blueMCD, CA_MCD$count, breaks=seq(1760,1870,10), col='lightblue')
#Dim 1 Scores Weighted Histogram, you may need to change scale
#Currently creates different plot than hist!!!!!!
p5 <- ggplot(CA_MCD, aes(x=CA_MCD$dim1Scores, weight=CA_MCD$count/sum(CA_MCD$count)))+
geom_histogram(aes(y=..density..), colour="gray", fill="tan", binwidth=0.1, boundary=0.5)+
#xlim(-4,3)+
#stat_function(fun = dnorm, colour = "blue")+
# scale_x_continuous(breaks=seq(-4, 2, 0.5), limits=c(-3.5,3))+
theme_classic()+
labs(title="South Wing", x="Dimension 1", y="Density")+
theme(plot.title=element_text(size=rel(2.25), hjust=0.5),axis.title=element_text(size=rel(1.75)),
axis.text=element_text(size=rel(1.5)))+
geom_density(fill=NA)
p5
p5a <- p5 + geom_vline(xintercept=c(-1.6))
p5a
ggsave("SouthWing_Histogram_2018.png", p5a, width=10, height=7.5, dpi=300)
#Add lines for phase breaks
#p5 + geom_vline(xintercept = 75, size = 1, colour = "gray", linetype = "dashed")
#save the plot for website chronology page/presentations
# ggsave("FirstHerm_Histogram.png", p5, width=10, height=7.5, dpi=300)
#
# #Dim 1 Scores Weighted Histogram, you may need to change scale
# #Lines step adds density curve to weighted histogram
# hist(rep(ca3$rowcoord[,1], MCDByUnit$MCDs$Count),col='tan',border='grey', breaks=seq(-6,2,.1),
# main='West Cabin',
# xlab="Dimension 1 Scores",
# freq=F, cex.lab=1.5, cex.axis=1.5, cex.main=1.5)
# lines(density(ca3$rowcoord[,1], weights=MCDByUnit$MCDs$Count/sum(MCDByUnit$MCDs$Count)),
# lwd=2)
# #Add line breaks to the plot for phases
# abline(v=-2, lty=1, col="grey")
# abline(v=0, lty=1, col="grey")
# create a vector for the phases with as many entries as assemblages
Phase <- rep(NA, length(ca3$rowcoord[,1]))
# do the phase assigments
Phase[(ca3$rowcoord[,1] <= -1.6)] <- 'P01'
#Phase[(ca3$rowcoord[,1] > -2.2) & (ca3$rowcoord[,1]) <= -1.2] <- 'P02'
Phase[(ca3$rowcoord[,1] > -1.6) ] <- 'P02'
Phase
#create df of contexts, counts, mcds and phases
unit <- MCDByUnit$MCDs$unit
dim1Scores <- ca3$rowcoord[,1]
dim2Scores <- ca3$rowcoord[,2]
MCD<- MCDByUnit$MCDs$MCD
blueMCD <-MCDByUnit$MCDs$blueMCD
count<- MCDByUnit$MCDs$Count
CA_MCD_Phase<-data.frame(unit, dim1Scores,dim2Scores,MCD,blueMCD, Phase, count)
#Order by dim1 score
CA_MCD_Phase1 <- CA_MCD_Phase[order(CA_MCD_Phase$dim1Scores),]
CA_MCD_Phase1
#weighted mean
#tapply function = applies whatever function you give it, x is object on which you calculate the function
#W is numerical weighted vector
tapply(CA_MCD_Phase1$blueMCD, CA_MCD_Phase1$Phase, weighted.mean)
#Export data
#write.csv(CA_MCD_Phase, file='CA_MCD_Phase_SouthWing.csv')
#BlueMCDByDim1 plot
#black border with unit labels can comment out geom_point and geom_text lines to add, situate, and remove labels
require(ggplot2)
library(ggrepel)
p6 <- ggplot(CA_MCD_Phase1,aes(x=CA_MCD_Phase1$dim1Scores,y=CA_MCD_Phase1$blueMCD))+
# scale_y_continuous(limits=c(1760, 1920))+
geom_point(aes(colour=CA_MCD_Phase1$Phase),size=5)+
geom_text_repel(aes(label=CA_MCD_Phase1$unit), cex=4) +
theme_classic()+
labs(title="South Wing", x="Dimension 1", y="BLUE MCD")+
theme(plot.title=element_text(size=rel(2), hjust=0.5),axis.title=element_text(size=rel(1.75)),
axis.text=element_text(size=rel(1.5)), legend.text=element_text(size=rel(1.75)),
legend.title=element_text(size=rel(1.5)), legend.position="bottom")+
scale_colour_manual(name="DAACS Phase",
labels=c("P01", "P02"),
values=c("skyblue", "blue", "darkblue"))
p6
#save the plot for website chronology page/presentations
ggsave("SouthWing_Dim1MCDcolor_2018.png", p6, width=10, height=7.5, dpi=300)
|
6e6e179dc78f86eeb7fc04055ac7cee30b47793f
|
26080c27d35e63e7b2ac501f65d3f606806b34a6
|
/R/kma_lifeIndex_type_check.R
|
dabcf6d1bc2d7474c98d2916760cd4c0388054ae
|
[
"MIT"
] |
permissive
|
lawine90/datagokR
|
1fb953a1a2ef91ee0300a3787c0903e3acf9695f
|
b4026238ab7b307c9d079f117c9412f3bbd12985
|
refs/heads/master
| 2021-07-03T23:33:50.207804
| 2020-09-23T07:46:11
| 2020-09-23T07:46:11
| 180,745,810
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 375
|
r
|
kma_lifeIndex_type_check.R
|
#' Data for kmaLifeIndex function.
#'
#' A matrix containing TRUE or FALSE which means the availability of indeces for each month.
#'
#' @docType data
#'
#' @usage data(kma_lifeIndex_type_check)
#'
#' @format A matrix of 8 by 12
#'
#' @keywords datasets
#'
#' @name kma_lifeIndex_type_check
#'
#' @examples
#' data(kma_lifeIndex_type_check)
"kma_lifeIndex_type_check"
|
b6e0a79280573a8f1082d787ed1653d32796d784
|
17521e848af63657da5f5c23cd1066513e8e5e2a
|
/R/plotManhattan.R
|
00e0d74f4fbf6e8839a2968ae355edf8ca4d6f9d
|
[
"MIT"
] |
permissive
|
oncogenetics/oncofunco
|
10730eec08498035162d235c8260367fab40e1dc
|
b17a663f0e207fd68f1a4365a14a40996b9f64a7
|
refs/heads/master
| 2022-07-25T13:07:37.996277
| 2020-10-27T12:54:15
| 2020-10-27T12:54:15
| 57,195,181
| 3
| 1
|
MIT
| 2022-07-05T07:29:01
| 2016-04-27T08:00:35
|
R
|
UTF-8
|
R
| false
| false
| 10,267
|
r
|
plotManhattan.R
|
#' LocusExplorer - Manhattan plot
#'
#' Manhattan plot for LocusExplorer.
#' @param assoc SNP association results, data.frame object with c("SNP","BP","P") columns. Required.
#' @param LD plink LD output format, data.frame object with c("BP_A","SNP_A","BP_B","SNP_B","R2") columns. Optional/recommended.
#' @param geneticMap Recombination map, data.frame object with c("BP", "RECOMB") columns. Subset of one of genetic_map_*_combined_b37.txt, at http://mathgen.stats.ox.ac.uk/impute/1000GP_Phase3/ . Optional.
#' @param suggestiveLine Suggestive line, default is 5.
#' @param genomewideLine Genomewide link, dafault is 8.
#' @param xStart,xEnd Region range, zoom, minimum BP and maximum BP, advised to keep this less than 5Mb.
#' @param hits SNP names to label in the plot. Must be present in assoc data.frame.
#' @param hitsName alternative SNP names to label in the plot. Default same as `hits`
#' @param hitsLabel Default is TRUE, set to FALSE not to show SNP names on the plot.
#' @param hitsColour Default NULL, uses ggplot colours.
#' @param pad Default is TRUE, to align plots pad strings with spaces, using oncofunco::strPadLeft().
#' @param postprob Default is FALSE, used for LocusExplorer to plot JAM PostProbs instead of Pvalues.
#' @param yRangeBy y-axis ticks, setting to 5 means ticks will be placed at `c(0, 5, 10, ...)`.
#' @param title character string for plot title. Default is NULL, i.e.: no plot title.
#' @param opts Default is c("Recombination","LD","LDSmooth","SuggestiveLine","GenomewideLine","Hits"), parts of plot to display.
#' @export plotManhattan
#' @author Tokhir Dadaev
#' @return a \code{ggplot} object
#' @keywords manhattan plot SNP genetics
plotManhattan <- function(
  assoc = NULL,
  LD = NULL,
  geneticMap = NULL,
  suggestiveLine = 5,
  genomewideLine = 8,
  xStart = NULL,
  xEnd = NULL,
  hits = NULL,
  hitsName = hits,
  hitsLabel = TRUE,
  hitsColour = NULL,
  pad = TRUE,
  postprob = FALSE,
  yRangeBy = NULL,
  title = NULL,
  opts = c("Recombination", "LD", "LDSmooth", "SuggestiveLine",
           "GenomewideLine", "Hits")) {
  # Check input - assoc -------------------------------------------------------
  if (is.null(assoc)) stop("assoc is missing, must provide assoc with columns: c('SNP','BP','P')")
  if (!all(c("SNP", "BP", "P") %in% colnames(assoc))) stop("assoc must have columns: c('SNP','BP','P')")
  # If SNP type is missing, mark all SNPs as typed (plot shape code 2).
  if (!"TYPED" %in% colnames(assoc)) { assoc$TYPED <- 2 }
  assoc <- setDT(assoc, key = "BP")
  # -log10 transformed p-values for the y-axis.
  assoc[, PLog := -log10(P)]
  # XY range ------------------------------------------------------------------
  if (is.null(xStart)) xStart <- min(assoc$BP, na.rm = TRUE)
  if (is.null(xEnd)) xEnd <- max(assoc$BP, na.rm = TRUE)
  yMax <- ceiling(max(c(10, assoc$PLog)))
  if (is.null(yRangeBy)) yRangeBy <- ifelse(yMax >= 90, 10, 5)
  yRange <- c(0, max(c(10, ceiling((yMax + 1) / yRangeBy) * yRangeBy)))
  xRange <- c(xStart, xEnd)
  # If Y is posterior probability (JAM PostProbs), fix the axis to 0-1.
  if (postprob) {
    assoc$PLog <- assoc$P
    yMax <- 1
    yRangeBy <- 0.25
    yRange <- c(0, 1)
  }
  # Check input - recomb -------------------------------------------------------
  if ("Recombination" %in% opts) {
    if (is.null(geneticMap)) stop("geneticMap data is missing for recombination, must have columns: c('BP', 'RECOMB')")
    if (!all(c("BP", "RECOMB") %in% colnames(geneticMap))) stop("geneticMap data must have columns: c('BP', 'RECOMB')")
    # Rescale recombination rate (percent) onto the plot's y-axis.
    geneticMap[, RECOMB_ADJ := RECOMB * yMax / 100]
  }
  # Plot all SNPs - background ------------------------------------------------
  gg_out <-
    ggplot(assoc, aes(x = BP, y = PLog)) +
    # all snps grey hollow shapes
    geom_point(size = 4, colour = "#B8B8B8", shape = assoc$TYPED, na.rm = TRUE) +
    geom_hline(yintercept = seq(0, yMax, yRangeBy),
               linetype = "dotted", col = "grey60")
  # Plot - Effect -------------------------------------------------------------
  # BUGFIX: use short-circuit && throughout the scalar if() conditions below.
  # With vectorized &, a NULL argument (e.g. geneticMap, suggestiveLine) makes
  # the condition logical(0) and if() fails with "argument is of length zero".
  if ("Effect" %in% opts && "EFFECT" %in% colnames(assoc)) {
    # Smooth the per-SNP effect sizes and lift them to the top quarter of plot.
    y_loess <- predict(loess(EFFECT ~ BP, data = assoc, span = 0.1))
    y_loess_adj <- scales::rescale(y_loess, to = c(yRange[2] / 4 * 3, yRange[2]))
    # TRUE where the smoothed effect changes sign - shading breakpoints.
    vline <- c(0, diff(sign(y_loess))) != 0
    datEffect <- data.table(
      BP = assoc$BP,
      log_OR = assoc$EFFECT,
      y_loess, y_loess_adj, vline)
    datEffect_shade <- datEffect[
      vline,
      .(xStart = BP,
        xEnd = data.table::shift(BP, type = "lead"),
        yStart = 0, yEnd = yRange[2],
        fill = rep_len(c("#996777", "#c5a8b1"),
                       length.out = sum(vline)))][!is.na(xEnd), ]
    gg_out <- gg_out +
      geom_line(data = datEffect, aes(BP, y_loess_adj), col = "#6E273D") +
      geom_rect(data = datEffect_shade,
                aes(xmin = xStart, xmax = xEnd, ymin = yStart, ymax = yEnd,
                    fill = fill, alpha = 0.5),
                inherit.aes = FALSE, alpha = 0.2) +
      scale_fill_identity()
  }
  # Plot - Recombination ------------------------------------------------------
  # && avoids evaluating nrow(NULL) > 2 when the option is switched off.
  if ("Recombination" %in% opts && nrow(geneticMap) > 2) {
    gg_out <- gg_out +
      geom_area(data = geneticMap,
                aes(BP, RECOMB_ADJ),
                fill = "#11d0ff", colour = "#00B4E0", alpha = 0.3)
  }
  # Check input - LD ----------------------------------------------------------
  if ("LD" %in% opts || "LDSmooth" %in% opts) {
    if (is.null(LD)) stop("LD is missing, must have columns: c('BP_A','SNP_A','BP_B','SNP_B','R2')")
    if (!all(c("BP_A", "SNP_A", "BP_B", "SNP_B", "R2") %in% colnames(LD)))
      stop("LD must have columns: c('BP_A','SNP_A','BP_B','SNP_B','R2')")
    LD <- setDT(LD)
    if (is.null(hits)) {
      # Default to (up to) the first 5 index SNPs found in the LD file.
      hits <- unique(LD$SNP_A)
      hits <- hits[1:min(5, length(hits))]
      warning(
        paste("hits missing, selected first <5 SNPs as hits from LD$SNP_A, n = :",
              length(unique(LD$SNP_A))))
    }
    if (is.null(hitsColour)) {
      colourLD <- oncofunco::colourHue(length(hits))
    } else {
      colourLD <- hitsColour
    }
    # One 100-step gradient (grey95 -> hit colour) per hit SNP.
    colourLDPalette <- unlist(lapply(colourLD, function(i) {
      colorRampPalette(c("grey95", i))(100)
    }))
    # Merge LD with assoc, to get R2 shades per point.
    plotDat <- merge(
      LD[SNP_A %in% hits, .(BP_A, SNP_A, BP_B, SNP_B, R2)],
      assoc[, .(BP, TYPED, PLog)],
      by.x = "BP_B", by.y = "BP", all = TRUE)[order(BP_A), ]
    # Map each SNP's R2 (and its index hit) into the colour gradient above.
    plotDat[, LDColIndex := ifelse(round(R2, 2) == 0, 1, round(R2, 2) * 100)]
    plotDat[, hitColIndex := as.numeric(factor(SNP_A, levels = hits))]
    plotDat[, hitCol := colourLD[hitColIndex]]
    plotDat[, LDCol := colourLDPalette[(hitColIndex - 1) * 100 + LDColIndex]]
    plotDat[, R2Adj := yMax * R2 * 0.8]
    # Plot - LD Fill & LD Smooth ----------------------------------------------
    # LD fill
    if ("LD" %in% opts) {
      gg_out <- gg_out +
        geom_point(data = plotDat, aes(BP_B, PLog),
                   size = 4,
                   shape = plotDat$TYPED + 15,
                   col = plotDat$LDCol,
                   alpha = 0.8, na.rm = TRUE, show.legend = FALSE)
    }
    # LDSmooth
    if ("LDSmooth" %in% opts) {
      gg_out <- gg_out +
        geom_smooth(data = plotDat, aes(x = BP_B, y = R2Adj, col = hitCol),
                    # lm is more stable than loess on very few points
                    method = ifelse(nrow(plotDat) <= 10, "lm", "loess"),
                    se = FALSE, na.rm = TRUE,
                    formula = "y ~ x",
                    show.legend = FALSE)
    }
  } # END if("LD" %in% opts || "LDSmooth" %in% opts)
  # Suggestiveline --------------------------------------------------------
  # && short-circuits at !is.null() when suggestiveLine is NULL,
  # so the !is.na(NULL) == logical(0) case is never reached.
  if ("SuggestiveLine" %in% opts &&
      !is.null(suggestiveLine) &&
      !is.na(suggestiveLine) &&
      is.numeric(suggestiveLine) &&
      suggestiveLine > 0) {
    gg_out <- gg_out +
      geom_hline(aes(yintercept = y), data = data.frame(y = suggestiveLine),
                 size = 0.5,
                 colour = "#1a9641")
  }
  # Genomewideline --------------------------------------------------------
  if ("GenomewideLine" %in% opts &&
      !is.null(genomewideLine) &&
      !is.na(genomewideLine) &&
      is.numeric(genomewideLine) &&
      genomewideLine > 0) {
    gg_out <- gg_out +
      geom_hline(aes(yintercept = y), data = data.frame(y = genomewideLine),
                 size = 0.5,
                 colour = "#ca0020")
  }
  # Mark Hits: shape and vline ----------------------------------------------
  if ("Hits" %in% opts && !is.null(hits) && any(hits %in% assoc$SNP)) {
    gg_out <- gg_out +
      # mark hit SNPs - outline shapes
      geom_point(data = assoc[SNP %in% hits, ],
                 aes(x = BP, y = PLog, shape = TYPED),
                 size = 4, colour = "black", na.rm = TRUE) +
      scale_shape_identity() +
      # mark hit SNPs - vertical lines
      geom_segment(data = assoc[SNP %in% hits, ],
                   aes(x = BP, y = 0, xend = BP, yend = PLog),
                   colour = "black",
                   linetype = "dashed")
  }
  # Mark Hits: Labels ---------------------------------------------------------
  # SNP names on the plot for hits; if alternative names given use those.
  if ("Hits" %in% opts && length(hits) > 0)
    if (!is.null(hitsLabel))
      if (hitsLabel) {
        plotDat <- assoc[SNP %in% hits, ]
        plotDat[, label := setNames(hitsName, hits)[hits]]
        gg_out <-
          gg_out +
          geom_text_repel(
            aes(BP, PLog, label = label),
            data = plotDat)
      }
  # Add title -----------------------------------------------------------------
  if (!is.null(title)) gg_out <- gg_out + ggtitle(title)
  # General options -------------------------------------------------------
  gg_out <- gg_out +
    coord_cartesian(
      xlim = xRange,
      ylim = yRange) +
    scale_y_continuous(
      breaks = seq(0, yRange[2], yRangeBy),
      # optionally left-pad tick labels so stacked LocusExplorer panels align
      labels = if (pad) {strPadLeft(seq(0, yRange[2], yRangeBy))} else {
        seq(0, yRange[2], yRangeBy)},
      name = if (postprob) {
        expression(PostProb[])
      } else {expression(-log[10](italic(p)))}
    ) +
    scale_colour_identity()
  # Output ----------------------------------------------------------------
  return(gg_out)
} #END plotManhattan
|
f5aed75e43454041f0aeb158c15f5775f23c4dd7
|
7c5caeca7735d7909c29ee3ed6074ad008320cf0
|
/misc/aqp2/man_deprecated/SPC-unique-methods.Rd
|
0bfc22bcf1326fdb31be6952ab18b0ae0e2f3d4a
|
[] |
no_license
|
ncss-tech/aqp
|
8063e800ed55458cfa7e74bc7e2ef60ac3b1e6f5
|
c80591ee6fe6f4f08b9ea1a5cd011fc6d02b5c4a
|
refs/heads/master
| 2023-09-02T07:45:34.769566
| 2023-08-31T00:14:22
| 2023-08-31T00:27:14
| 54,595,349
| 47
| 12
| null | 2023-08-17T15:33:59
| 2016-03-23T21:48:50
|
R
|
UTF-8
|
R
| false
| false
| 1,759
|
rd
|
SPC-unique-methods.Rd
|
\name{unique-methods}
\docType{methods}
\alias{unique}
\alias{uniqueSPC}
\alias{unique,SoilProfileCollection-method}
\title{Get Indices to Unique Soil Profiles Within a Collection}
\description{This function returns a set of indices to a subset of profiles within a \code{SoilProfileCollection} object that are uniquely defined by a named set of horizon and site level attributes.}
\section{Methods}{
\describe{
\item{\code{signature(x = "SoilProfileCollection")}}{
}
}}
\usage{
uniqueSPC(x, vars)
}
\arguments{
\item{x}{a SoilProfileCollection}
\item{vars}{a character vector naming those horizon and site level attributes that will be used to test for duplication}
}
\details{Duplicates are identified via MD5 hash of select horizon and site level attributes.}
\value{A vector of integer indices that can be used to subset unique profiles from the original \code{SoilProfileCollection} object.}
\author{D.E. Beaudette}
\examples{
# use the digest library to detect duplicate data
data(sp1)
sp1$soil_color <- with(sp1, munsell2rgb(hue, value, chroma))
# upgrade to SoilProfileCollection
depths(sp1) <- id ~ top + bottom
site(sp1) <- ~ group
# make copies
s.1 <- sp1
s.2 <- sp1
# update IDs in second copy
profile_id(s.2) <- sprintf('\%s-copy', profile_id(s.2))
# union SPCs
s <- union(list(s.1, s.2))
# check
plot(s)
# digests are computed from horizon-level data only
# horizon boundaries and 'prop'
# result is an index of unique profiles
u <- unique(s, vars=c('top', 'bottom', 'prop'))
# compare with and without dupes:
# note subsetting of SoilProfileCollection
cbind(dupes=length(s), no.dupes=length(s[u, ]))
# get unique profile by index
s.unique <- s[u, ]
# unique data
plot(s.unique)
}
\keyword{methods}
\keyword{manip}
|
7f3ef41ddf6729bd46a5241f246c8ee2d9d75dce
|
b1723af1c77ef4a447ad970a43948673640f3281
|
/R 데이터 분석/3주차꺼.R
|
45c4d788b9aa05324ebda18c1fd0e7af6034bf70
|
[] |
no_license
|
tjddms587/Portfolio
|
f25897177ff1736a9ab89b5c31aaade66e6d9456
|
365c53424a0c1ab21a9ce752c24d7efb6c544dc6
|
refs/heads/master
| 2020-12-15T21:34:29.041098
| 2020-01-21T05:59:38
| 2020-01-21T05:59:38
| 235,260,697
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 3,454
|
r
|
3주차꺼.R
|
# Week 3 recap
# Build a data frame with Korean column names: 소득 (income), 성별 (gender), 학점 (GPA).
x <- data.frame(소득=c("1,000만 원","2,000만 원","3,000만 원","4,000만 원"),
성별=c("남자","남자","여자","여자"),학점=c(3.8,4.2,2.6,4.5))
x
x <- cbind(x,국적=c("대한민국","대한민국","대한민국","대한민국"))
x # the vector passed via c() is appended as a new column
# Creating variables and arithmetic
var1=c(1,2,3,4,5)
var2=c(1:6)
var3=seq(1:5)
var4=seq(1,5,by=1)
var3
var4
var1+2 # adds +2 to every element (vectorised arithmetic)
var1
var2
var1+var2 # arithmetic misbehaves (warns/recycles) when vector lengths differ
# Creating character variables (strings must be wrapped in quotes "")
str1="a"
str1
str2 <- "eeeext"
str2
str3 = c("a","b","c")
str3
# Function usage example
# Create a test-score variable and print it
x <- data.frame(학생명단= c("학생1","학생2","학생3","학생4","학생5"),시험점수=
c(80,60,70,50,90))
y <- c(80,60,70,50,90)
x
y
# Compute the overall mean (students' average test score)
mean(y)
# Store the overall mean in a variable and print it
score <- mean(y)
score
data <- data.frame(성별=c("남자","여자","남자"),연령=c(26,42,35),학점=c(3.8,4.2,2.6),연봉=c("2,700만원","4,000만원","3,500만원"))
data
# Building a data frame
a = c(10,20,30,40)
b = c(59,59,596,49)
exam <- data.frame(a,b)
exam
mean(data$연령) # pull just the age column (연령) from the data frame and average it
# Example: create a data frame and apply functions to it
j <- data.frame(제품=c("사과","딸기","수박"),가격=c(1800,1500,3000),판매량=c(24,38,13))
j # printed as a data frame
# Mean of fruit prices and mean of sales volumes
mean(j$가격)
mean(j$판매량)
# Reading / writing CSV files
W <- read.csv("C:/Users/TEMP.PC16.000.001.002/Desktop/4. score.csv")
head(W) # first 6 rows only
W # all rows
View(W)
# Creating a file
one = c("동길","재건","성은")
two = c(30,40,50)
three = c(1,2,3)
data.frame(one,two,three)
four <- data.frame(one,two,three)
four
write.csv(four, file="ohama.csv") # this is the call that writes the file
age=c(10,20,30)
height=c(50,50,50)
weight=c(1,2,3)
p=c("age","height","weight")
p
avg=c(mean(age),mean(height),mean(weight))
avg
wow = data.frame(p,avg)
wow
write.csv(wow, file="ssss.csv")
# Export the file to Excel ??? how?? (open question from the notes)
# Inspecting and understanding data:
# head, tail, View, dim, str, summary
soft <- read.csv("C:/Users/TEMP.PC16.000.001.002/Desktop/3. students.csv")
soft
View(soft)
dim(soft) # shows the number of rows and columns
str(soft) # shows the structure (column types / attributes)
summary(soft)
# Conditionals (if) -----------------------------------------------------------
# if comes in three flavours: if, if/else, if/else if/else
x <- 5
if (x > 0) {
  print('true')
  print("hello")
} else {
  print("false")
  print("world")
}
# Even/odd checker (짝수 = even, 홀수 = odd)
if (x %% 2 == 0) {
  print("짝수")
} else {
  print("홀수")
}
# Grade: 90-100 -> A, 80-89 -> B, everything else -> C
# BUGFIX: the original assigned `X <- 85` (capital X) but compared lowercase
# `x`, so the branches below used the stale x <- 5. R is case-sensitive.
# Also use scalar && instead of vectorized & inside if().
x <- 85
if (x <= 100 && 90 <= x) {
  print("A")
} else if (x >= 80) {
  print("B")
} else {
  print("C")
}
# Model answer: grading function.
# Generalized: `score` may now be passed directly (backward-compatible - when
# called as myGrade() it still prompts interactively via readline()).
# Returns the grade ("A", "B" or "C") invisibly, after printing it.
myGrade <- function(score = NULL){
  if (is.null(score)) {
    print("0~100사이의 점수를 입력하세요.")
    score <- as.numeric(readline())
  }
  if (score >= 90 && score <= 100){
    grade <- "A"
  } else if (score >= 80){
    grade <- "B"
  } else {
    grade <- "C"
  }
  cat("당신의 학점은",grade,"입니다.")
  invisible(grade)
}
# BUGFIX: guard the demo call - readline() returns "" when the script is
# sourced non-interactively, which made the original call error on if(NA).
if (interactive()) myGrade()
# Loops (for, while) ----------------------------------------------------------
# Print the 2-times multiplication table (2x1 .. 2x9), one product per line.
# BUGFIX: the original printed the literal string "2*i" nine times instead of
# the computed product.
for (i in 1:9) {
  print(2*i)
}
|
2c61271d4a3ba832fcf39da19ab0e9494b5497e2
|
85999c77c90d12ce117f3b46c861d9d168ad1300
|
/timeseries/R/eggs.R
|
cae60a0ec081d1eb82bb21869ab9c7e157f4d62f
|
[] |
no_license
|
somnath1077/ML
|
42d1d03048b8143daa75ce3cddaf8527d7ed587d
|
48c7619ed90f8efa36d5b46403d097e77b7f3dc3
|
refs/heads/master
| 2021-05-23T05:37:12.900681
| 2021-04-29T06:33:14
| 2021-04-29T06:33:14
| 95,074,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
eggs.R
|
#' Price of eggs
#'
#' Price of dozen eggs in US, 1900--1993, in constant dollars.
#'
#'
#' @format Time series data
#' @source Makridakis, Wheelwright and Hyndman (1998) \emph{Forecasting:
#' methods and applications}, John Wiley & Sons: New York. Chapter 9.
#' @keywords datasets
#' @examples
#' plot(eggs)
#' @export
# FIX: spell out `start`/`frequency` instead of relying on partial argument
# matching (`s=`, `f=`), which is fragile and triggers R CMD check warnings.
# Annual series: one observation per year, 1900-1993 (94 values).
eggs <- stats::ts(c(276.79, 315.42, 314.87, 321.25, 314.54, 317.92,
  303.39, 288.62, 292.44, 320.92, 323.38, 270.77, 301.77, 282.99,
  295.06, 276.47, 292.80, 358.78, 345.82, 345.42, 314.10, 228.74,
  215.76, 224.67, 225.93, 250.87, 236.24, 209.12, 237.31, 251.67,
  205.36, 167.21, 150.42, 154.09, 183.67, 246.66, 227.58, 214.60,
  208.41, 181.21, 185.67, 230.86, 266.34, 310.29, 267.18, 303.03,
  278.46, 293.36, 283.62, 274.26, 218.12, 265.62, 226.70, 258.00,
  196.98, 213.38, 209.17, 184.50, 192.61, 155.83, 176.32, 172.13,
  161.63, 163.00, 157.63, 154.50, 174.28, 135.17, 141.36, 157.83,
  145.65, 112.06, 106.85, 170.91, 156.26, 140.78, 148.35, 132.61,
  115.72, 116.07, 98.76, 100.33, 89.12, 88.67, 100.58, 76.84, 81.10,
  69.60, 64.55, 80.36, 79.79, 74.79, 64.86, 62.27),
  start = 1900, frequency = 1)
|
2402c7cf4332c96ef7240d7fb2eab94a08e2bceb
|
849fa6771da77abee1629871e56ae010d7470501
|
/inst/doc/An_Introduction_to_excerptr.R
|
b3f08058ff4536c71ebb24e68bf8a4217803c19a
|
[] |
no_license
|
cran/excerptr
|
87d50dbf5dab05c17c71d9478fa7aa4e97def685
|
3598c254ba6f49535cbcef82624aeba6bcd23012
|
refs/heads/master
| 2021-08-28T19:46:52.687910
| 2021-08-04T23:20:02
| 2021-08-04T23:20:02
| 89,372,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,490
|
r
|
An_Introduction_to_excerptr.R
|
## ----setup, include=FALSE-----------------------------------------------------
# adapted from reticulate vignettes/python_packages.Rmd
# Gatekeeper for the whole vignette: later chunks are evaluated only if a
# Python interpreter is available AND the "excerpts" Python package installs
# cleanly. Otherwise every subsequent chunk is set to eval = FALSE.
if (!reticulate::py_available(initialize = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
} else {
  # py_install() errors when installation fails; capture the condition object
  # instead of aborting vignette builds (e.g. on CRAN machines without pip).
  inst <- tryCatch(reticulate::py_install("excerpts"), error = identity)
  if (inherits(inst, "error")) {
    knitr::opts_chunk$set(eval = FALSE)
  } else {
    knitr::opts_chunk$set(eval = TRUE)
  }
}
## ---- comment = ""------------------------------------------------------------
# path <- system.file("tests", "files", "some_file.R", package = "excerptr")
# cat(readLines(path), sep = "\n")
## -----------------------------------------------------------------------------
# excerptr::excerptr(file_name = path, run_pandoc = FALSE, output_path = tempdir())
## ---- comment = ""------------------------------------------------------------
# cat(readLines(file.path(tempdir(), sub("\\.R$", ".md", basename(path)))),
# sep = "\n")
## -----------------------------------------------------------------------------
# is_pandoc_installed <- nchar(Sys.which("pandoc")) > 0 &&
# nchar(Sys.which("pandoc-citeproc")) > 0
# is_pandoc_version_sufficient <- FALSE
# if (is_pandoc_installed) {
# reference <- "1.12.3"
# version <- strsplit(system2(Sys.which("pandoc"), "--version", stdout = TRUE),
# split = " ")[[1]][2]
# if (utils::compareVersion(version, reference) >= 0)
# is_pandoc_version_sufficient <- TRUE
# }
# if (is_pandoc_version_sufficient)
# excerptr::excerptr(file_name = path, pandoc_formats = "html",
# output_path = tempdir())
## ---- comment = ""------------------------------------------------------------
# if (is_pandoc_version_sufficient)
# cat(readLines(file.path(tempdir(), sub("\\.R$", ".html", basename(path)))),
# sep = "\n")
## ---- echo = FALSE, results = "hide"------------------------------------------
# if (is_pandoc_version_sufficient)
# excerptr::excerptr(file_name = path, pandoc_formats = "html",
# output_path = file.path(rprojroot::find_root(rprojroot::is_r_package),
# "inst", "tests", "files")
# )
## ---- eval = FALSE------------------------------------------------------------
# browseURL(file.path(tempdir(), sub("\\.R$", ".html", basename(path))))
#
|
dad055bb616cdd5180cca62c4a55b6add32fc2e0
|
2bf9a12e1d07eb63d89a74df4659f9bb3cab5be6
|
/Boosting.R
|
faba0255a5817b80277e9b1ae4ef60550497f76d
|
[] |
no_license
|
rdolia/All-my-codes
|
bac39b6f46ab5a4407cc3b9fc40bbc28485de0c8
|
30f5ded6f6d6a981798faf226879a94efaad578b
|
refs/heads/master
| 2020-04-09T18:01:17.598378
| 2019-03-12T09:50:07
| 2019-03-12T09:50:07
| 160,499,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,072
|
r
|
Boosting.R
|
# Boosting algorithm: multi-class classification with xgboost on admissions data.
library(xgboost)
library(magrittr)
library(Matrix)
library(dplyr)
# Data: interactive file picker; expects a CSV with an 'admit' outcome
# and a 'rank' column (treated as categorical).
Appdata <- read.csv(file.choose(),header=T)
str(Appdata)
Appdata$rank <- as.factor(Appdata$rank)
# Partitioning: random 80/20 train/test split, reproducible via seed.
set.seed(1234)
index <- sample(2,nrow(Appdata),replace = T,prob = c(0.8,0.2))
train <- Appdata[index == 1,]
test <- Appdata[index == 2,]
library(Matrix) # NOTE(review): Matrix was already loaded above; redundant.
# Create one-hot model matrices (no intercept, hence "-1") and wrap them
# in xgboost's DMatrix format together with the labels.
trainm <- sparse.model.matrix(admit ~ .-1,data = train)
head(trainm)
train_label <- train[,"admit"]
train_matrix <- xgb.DMatrix(data = as.matrix(trainm),label = train_label)
testm <- sparse.model.matrix(admit~.-1,data = test)
test_label <- test[,"admit"]
test_matrix <- xgb.DMatrix(data = as.matrix(testm),label = test_label)
# Additional parameters: softprob returns one probability per class;
# nc = number of distinct outcome classes.
nc <- length(unique(train_label))
xgb_params <- list("objective"="multi:softprob",
                   "eval_metric" = "mlogloss",
                   "num_class"= nc)
watchlist <- list(train = train_matrix,test = test_matrix)
# eXtreme Gradient Boosting model; the watchlist logs train/test
# log-loss at every boosting round.
Bst_model <- xgb.train(params = xgb_params,
                       data = train_matrix,
                       nrounds = 1000,
                       watchlist = watchlist,
                       eta = 0.001,max.depth = 3,
                       gamma = 0,
                       subsample = 1,
                       colsample_bytree = 1,
                       missing = NA,
                       seed = 333)
# by default eta = 0.3; lowered here for slower, more conservative learning
# Test and training error plot (log-loss per boosting round).
e<- data.frame(Bst_model$evaluation_log)
plot(e$iter,e$train_mlogloss,col = 'blue')
lines(e$iter,e$test_mlogloss,col = 'red')
min(e$test_mlogloss)
e[e$test_mlogloss == 0.62571,] # NOTE(review): hard-coded value from one run; exact == on floats is fragile
# Feature importance
imp <- xgb.importance(colnames(train_matrix),model = Bst_model)
xgb.plot.importance(imp)
# Prediction and accuracy: reshape the flat probability vector to one row per
# case, pick the most probable class (0-based via max.col - 1), and
# cross-tabulate predictions against the true labels.
p <- predict(Bst_model,newdata = test_matrix)
pred <- matrix(p,nrow = nc,ncol = length(p)/nc)%>%t()%>%data.frame()%>%
  mutate(label = test_label,max_prob = max.col(.,"last")-1)
table(Prediction = pred$max_prob,Actual = pred$label)
|
bcad26da8b106acda5c744544681eb015109d8fd
|
7eb4f4e8349622f2b318648d4e7aaf5cd02f1aed
|
/lab5/lab5.R
|
605100ecb6ed5d17618b06b879b4f3ddb7453960
|
[
"Unlicense"
] |
permissive
|
SergeyMirvoda/da2016
|
fd0d61c7aae089e1133ce40e83ce26bef6707eff
|
95cb5c78b3f5b09584f01bbd2146845ec6b468b9
|
refs/heads/master
| 2022-05-01T02:08:49.469864
| 2022-04-06T14:11:06
| 2022-04-06T14:11:06
| 68,286,082
| 0
| 21
| null | 2016-12-09T12:21:46
| 2016-09-15T10:47:27
|
HTML
|
UTF-8
|
R
| false
| false
| 115
|
r
|
lab5.R
|
# Method-of-moments estimates for a gamma distribution.
# Scale:  s = var(data) / mean(data)
# Shape:  a = mean(data) / s
# Returns a named numeric vector c(a = shape, s = scale).
gamma.estimate <- function(data) {
  sample_mean <- mean(data)
  sample_var <- var(data)
  scale_hat <- sample_var / sample_mean
  shape_hat <- sample_mean / scale_hat
  c(a = shape_hat, s = scale_hat)
}
|
0876211ed49ea68f4af836facbcb20353b3b8fed
|
3bd38faaf5d3d55339da23a448ad93c56dedc432
|
/First Decision Trees 20200109.R
|
a97d6c67f538cf2195adfa7a3b1718b538c7fe66
|
[] |
no_license
|
boxcarrovers/M2A2-R-Scripts
|
5fbd112bcd339cd12ba644d6381abb66176050b3
|
6474449df8fb4af4b531ef787b5d226b0a187812
|
refs/heads/master
| 2021-01-14T05:19:41.661792
| 2020-02-24T00:39:24
| 2020-02-24T00:39:24
| 242,610,625
| 0
| 0
| null | 2020-02-24T00:39:25
| 2020-02-23T23:53:27
|
R
|
UTF-8
|
R
| false
| false
| 1,538
|
r
|
First Decision Trees 20200109.R
|
# Jim's Notes on How to Build A Decision Tree
# Machine Learning / Classification Problem
# January 9 2020
# This example is from datacamp and uses the titanic dataset
titanic <- datasets::Titanic
# note - this will not work as is as the titanic dataset here is different than the one used
# by datacamp.. datacamp has the full 714 records...
set.seed(1)
# BUGFIX: library() loads exactly one package per call - the extra arguments
# of library(rpart, rattle, rpart.plot) were silently ignored, so rattle and
# rpart.plot were never attached.
library(rpart)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
# installed above packages; am having trouble with RColorBrewer...
# line of code below asks for prediction of Survived using all variables (.)
# with method 'class' since this is a classification problem.
# NOTE(review): `train` is not defined in this file - this errors unless a
# training data frame is created first.
tree <- rpart(Survived ~ ., train, method = 'class')
# (removed a stray bare `s` line here - it would error: object 's' not found)
# Notes from an LM Model
# world_bank_train and cgdp_afg is available for you to work with
# Plot urb_pop as function of cgdp
# this is a simple scatterplot with x = cgdp, y = urb_pop
plot(world_bank_train$cgdp,world_bank_train$urb_pop)
# Set up a linear model between the two variables: lm_wb
# note syntax lm(y ~ x, data_set)
lm_wb <- lm(urb_pop~cgdp,world_bank_train)
# Add a red regression line to the scatter plot.
# abline() draws a straight line from an intercept/slope pair, which is
# exactly what the fitted coefficients provide.
abline(lm_wb$coefficients, col= 'red')
# Summarize lm_wb and extract the R-squared component from the summary list.
summary(lm_wb)$r.squared
# Predict the urban population of afghanistan based on cgdp_afg
predict(lm_wb, cgdp_afg)
|
4e9512da0cbcd926303349f8307a63e1ba118ab1
|
d69ad2b566885772a7862fbdc830c9ff3ac5d684
|
/descriptive_stats.R
|
86d741e1d8864a92403e5078b0eaccfe9e48adf6
|
[] |
no_license
|
mraess/diss_r
|
267db67d36921d5c18a63546a693df1df8b9e476
|
7a69cc704b17e59b8d1ed85a29661ef8e848fe9c
|
refs/heads/master
| 2021-09-09T16:14:06.204873
| 2018-03-17T20:30:12
| 2018-03-17T20:30:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,110
|
r
|
descriptive_stats.R
|
# Descriptive stats -------------------------------------------------------
# NOTE(review): this script assumes a data frame `diss_data` (survey data with
# age, gender, BFI-10 personality scores e/a/c/n/o, etc.) already in the
# workspace - it is not loaded in this file.
# Age by gender: group means and SDs.
diss_data %>% group_by(gender) %>% summarise(mean = mean(age), sd = sd(age))
sd(diss_data$age)
# independent 2-group t-test
t.test(diss_data$age~diss_data$gender, paired = FALSE) # where y is numeric and x is a binary factor
library(effsize)
cohen.d(diss_data$age ~ diss_data$gender)
# Element-wise female-male age differences (assumes equal group sizes / pairing by position).
dif <- abs(diss_data[diss_data$gender == "female",]$age - diss_data[diss_data$gender == "male",]$age)
hist(dif) # difference is not normally distributed...
hist(diss_data$age)
# independent 2-group Mann-Whitney U Test (non-parametric alternative)
wilcox.test(diss_data$age~diss_data$gender)
# where y is numeric and x is a binary factor
# Relationship status
diss_data %>% group_by(gender, relationship) %>% summarise(n = n())
rel <- diss_data %>% filter(relationship == "In a relationship" | relationship == "Single") %>% droplevels()
rel$gender <- as.factor(rel$gender)
rel$relationship <- as.factor(rel$relationship)
rel <- table(rel$gender, rel$relationship)
chisq.test(rel, p = 0.05) # 2x2 - effect size:phi # weird result p = 1
library(vcd)
assocstats(rel)
# Fisher's exact test on the full gender x relationship table (small cell counts).
t2 <- table(diss_data$gender, diss_data$relationship)
fisher.test(t2)
# We do not reject H0 that gender and relationship are independent, they are thus not related in some way...
# Gender vs education / employment: exact tests + association measures.
e <- diss_data %>% select(gender, edu) %>% droplevels()
e <- table(e$gender, e$edu)
fisher.test(e)
assocstats(e)
e2 <- diss_data %>% select(gender, edu2) %>% droplevels()
e2 <- table(e2$gender, e2$edu2)
fisher.test(e2)
assocstats(e2)
e3 <- diss_data %>% select(gender, employ) %>% droplevels()
e3 <- table(e3$gender, e3$employ)
fisher.test(e3)
assocstats(e3)
## Twitter statistics: usage frequencies by gender.
diss_data %>% group_by(gender, years_twitter) %>% summarise(n = n())
diss_data %>% group_by(gender, check_twitter) %>% summarise(n = n())
diss_data %>% group_by(gender, function_hash) %>% summarise(n = n())
# Percentages out of the 62 participants.
k <- c(46, 1, 15)
round((k/62) * 100, 1)
tw <- diss_data %>% select(gender, years_twitter) %>% droplevels()
tw <- table(tw$gender, tw$years_twitter)
fisher.test(tw)
assocstats(tw)
tw2 <- diss_data %>% select(gender, check_twitter) %>% droplevels()
tw2 <- table(tw2$gender, tw2$check_twitter)
fisher.test(tw2)
assocstats(tw2)
tw3 <- diss_data %>% select(gender, function_hash) %>% droplevels()
tw3 <- table(tw3$gender, tw3$function_hash)
fisher.test(tw3)
assocstats(tw3)
## Time on Twitter t-test
diss_data %>% group_by(gender) %>% summarise(mean = mean(time_twitter_min),
sd = sd(time_twitter_min), med = median(time_twitter_min))
mean(diss_data$time_twitter_min)
sd(diss_data$time_twitter_min)
# independent 2-group t-test
t.test(diss_data$time_twitter_min~diss_data$gender) # where y is numeric and x is a binary factor
cohen.d(diss_data$time_twitter_min ~ diss_data$gender)
# Participants' personality scores (openness shown as example).
mean(diss_data$o)
sd(diss_data$o)
median(diss_data$o)
diss_data %>% group_by(gender) %>% summarise(mean = mean(o),
sd = sd(o), med = median(o))
# Anova to compare groups
# Reshape wide personality columns (e/a/c/n/o) to long format: one row per
# participant x trait, score in `bfi10_score`.
diss_data_gather <- diss_data %>% gather(`e`, `a`, `c`, `n`, `o`, key = "p_type", value = "bfi10_score") %>% arrange(part_id)
diss_data_gather$p_type <- as.factor(diss_data_gather$p_type)
diss_data_gather$p_type <- factor(diss_data_gather$p_type, levels = c("e", "a", "c", "n", "o"))
fit1 <- aov(bfi10_score ~ gender + p_type, data = diss_data_gather)
fit1
summary(fit1) # significant result for gender and p_type overall BUT low effect sizes (partial omegas) >
# higher risk of type II error = false negative (H0 accepted as true but shouldn't)
# Diagnostic plots: residuals vs fitted (1) and normal Q-Q (2).
plot(fit1, 1)
plot(fit1, 2)
library(car)
# Levene tests for homogeneity of variance across groups.
leveneTest(bfi10_score ~ gender, data = diss_data_gather)
leveneTest(bfi10_score ~ p_type, data = diss_data_gather)
leveneTest(bfi10_score ~ gender*p_type, data = diss_data_gather)
# From the output above we can see that the p-value is not less than the significance level of 0.05.
# This means that there is no evidence to suggest that the variance across groups is statistically significantly different.
# Therefore, we can assume the homogeneity of variances in the different treatment groups.
TukeyHSD(fit1)
pairwise.t.test(diss_data_gather$bfi10_score, diss_data_gather$gender, p.adjust.method = "bonferroni")
cohen.d(diss_data_gather$bfi10_score ~ diss_data_gather$gender)
pairwise.t.test(diss_data_gather$bfi10_score, diss_data_gather$p_type, p.adjust.method = "bonferroni")
library(lsr) # eta squared
library(broom)
tidy(fit1)
etaSquared(fit1, anova = TRUE)
library(sjstats)
omega_sq(fit1)
eta_sq(fit1)
## Alternative with slightly different function - results virtually the same
# Partial omega-squared effect sizes for every term of an ANOVA model.
#
# mod: an `aov` fit, or any model that aov() can refit (e.g. an lm formula fit).
# Returns a named numeric vector with one partial omega-squared per model term
# (Residuals row excluded), names taken from the ANOVA table rownames.
partialOmegas <- function(mod){
  # Coerce to an aov fit if something else (e.g. lm) was supplied.
  # inherits() is the robust class test (class(x) may be a vector).
  aovMod <- if (inherits(mod, "aov")) mod else aov(mod)
  sumAov <- summary(aovMod)[[1]]
  residRow <- nrow(sumAov)           # last row of the table is Residuals
  msError <- sumAov[residRow, 3]     # mean-square error
  nTotal <- nrow(model.frame(aovMod))
  # Df, Sum Sq and Mean Sq for the effect rows (all rows except Residuals).
  dfEffects <- sumAov[seq_len(residRow - 1), 1]
  ssEffects <- sumAov[seq_len(residRow - 1), 2]
  msEffects <- sumAov[seq_len(residRow - 1), 3]
  # Partial omega^2 = df*(MS - MSE) / (SS + (N - df)*MSE); abs() guards
  # against small negative estimates when MS < MSE.
  partOmegas <- abs((dfEffects * (msEffects - msError)) /
                      (ssEffects + (nTotal - dfEffects) * msError))
  names(partOmegas) <- rownames(sumAov)[seq_len(residRow - 1)]
  partOmegas
}
partialOmegas(fit1)
## Twitter corpus stats
tweet_data_ger <- readRDS("tweet_data_ger.rds")
## Make three age groups for diss_data...
library(Hmisc)
diss_data$age_group <- cut(diss_data$age, breaks = c(18,24,35,45),
labels = c("20 - 24 years", "25 - 35 years", "36 - 45 years"))
#intervals closed on the right by default = not included in the next interval
diss_data$age_group <- as.factor(diss_data$age_group)
levels(diss_data$age_group)
# Tweet totals per gender; k holds the three hard-coded group totals
diss_data %>% group_by(gender) %>% summarise(tweets = sum(tweet_num))
k <- c(2057, 10202, 7513)
round((k/19772) * 100, 1)
diss_data %>% summarise(mean = mean(tweet_num),
sd = sd(tweet_num), med = median(tweet_num))
## Sentiment scores
# Join per-tweet sentiment with participant-level traits by part_id
sent_data <- left_join(tweet_data_ger[,c("part_id", "sent_score", "statusSource")], diss_data[,c("part_id", "age", "age_group" ,"gender", "e", "a", "c", "n", "o", "emoji_dens")], by = "part_id")
sent_data <- sent_data %>% na.omit() # 1,789 observations, 45 participants
sent_data$part_id <- as.factor(sent_data$part_id)
sent_data %>% group_by(gender, age_group) %>% summarise(mean = round(mean(sent_score),2),
sd = round(sd(sent_score),2))
round(sd(sent_data$sent_score),2)
# ANOVA for age groups and gender
fit2 <- aov(tweet_num ~ gender + age_group, data = diss_data)
summary(fit2)
source("partial_omegas_anova.R")
partialOmegas(fit2)
TukeyHSD(fit2)
## Eg denn weil dens - male mean higher => females more informal??
# Modeling tweet number
### Poisson model produces statistically significant results
# OR NEG BINOMIAL
library(MASS)
ggplot(diss_data) + geom_histogram(aes(tweet_num)) # > count data
po <- glm(tweet_num ~ gender + age + e + a + c + n + o, data = diss_data, family = "poisson")
summary(po)
logLik(po)
tweet_num.nb <- glm.nb(tweet_num ~ gender + age, data = diss_data) # tweet num does follow a negative binomial dist.
summary(tweet_num.nb)
tidy(tweet_num.nb)
# Exponentiate coefficients/CIs to read them as incidence rate ratios
est <- cbind(Estimate = coef(tweet_num.nb), confint(tweet_num.nb))
exp(est)
logLik(tweet_num.nb) ## Fitted log-lik of NB is a lot larger/better using just one additional parameter (6 coefs and 1 theta)
library(car)
vif(tweet_num.nb) # no high variance inflation values for multicolinearity!! Good!!
# Perform likelihood ratio test: H0:theta = Inf (poisson model) against HA: theta < Inf (neg binomial model)
library(lmtest)
lrtest(po, tweet_num.nb)
# = very significant, we can reject H0 and assume the negative binomial model is the better fit.
# AIC for NB is also a lot lower
# Pseudo R2
library(pscl)
pR2(tweet_num.nb)
# Next neg.binom model with hour of day as predictor for tweet number
tweet_ger_date <- readRDS("tweet_ger_date.rds")
library(lubridate)
tweet_ger_date <- tweet_ger_date %>% mutate(hour = hour(created))
t <- as.data.frame(tweet_ger_date %>% group_by(hour) %>% summarise(n = n()))
cor.test(t$hour, t$n)
t2 <- as.data.frame(tweet_ger_date %>% group_by(hour, gender, age, age_group) %>% summarise(n = n()))
tweets_nour.po <- glm(n ~ hour + gender + age, data = t2, family = "poisson")
summary(tweets_nour.po)
tweets_nour.nb <- glm.nb(n ~ hour + gender + age, data = t2)
summary(tweets_nour.nb)
lrtest(tweets_nour.po, tweets_nour.nb)
# again NB model is better fit
library(pscl)
pR2(tweets_nour.nb)
### Participants' other tweet measures
diss_data %>%
summarise(meand = mean(weil_dens, na.rm = TRUE), sd = sd(weil_dens, na.rm = TRUE))
### Table for statusSource and participant gender/age
tweet_ger_date %>% group_by(gender, age_group, statusSource) %>% summarize(n = n()) %>% top_n(5)
## Model testing
# NOTE(review): gam() needs mgcv (or the gam package) attached -- confirm it
# is loaded earlier in this script.
summary(gam(tweet_num ~ gender + age, data = diss_data))
|
3e91f5ebad65e50addec535b476b92de3e3d86d1
|
c4310e70ab916777f23c392822cc5e0978ded772
|
/run_analysis.R
|
ec3af89cde0dd3e8400bf6f887daee33cde1eab5
|
[] |
no_license
|
chiahsun/cousera-data-cleaning
|
152eca6b2f4f0d95339c053647c703f9e2877e27
|
2bcc06f0007e340cfc46266876ec7a657e1809dc
|
refs/heads/main
| 2023-05-06T05:03:50.233404
| 2021-05-18T09:03:20
| 2021-05-18T09:03:20
| 368,460,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,199
|
r
|
run_analysis.R
|
## Read data
### Read metadata
# BUG FIX: '\ ' is not a recognized escape sequence in R string literals and
# causes a parse error; spaces inside paths need no escaping in R.
features <- read.csv('./UCI HAR Dataset/features.txt', sep="", header=FALSE)
dim(features)
head(features)
activity_labels <- read.csv('./UCI HAR Dataset/activity_labels.txt', sep="", header=FALSE)
activity_labels
### Read train and test data
# Define read function
# Read one partition (train or test) of the UCI HAR dataset and return a
# data frame holding the mean()/std() features plus `subject` and `activity`
# (numeric activity code) columns.
#
# xPath/yPath/subjectPath: paths to the X_*, y_* and subject_* files.
# features:        two-column data frame of feature indices and names.
# activity_labels: code/name lookup; accepted for API compatibility but
#                  currently unused here (activity stays numeric) --
#                  TODO confirm whether mapping was intended at this stage.
#
# Interior no-op dim()/table() calls from the original (invisible inside a
# function) have been removed as dead code.
read_data <- function(xPath, yPath, subjectPath, features, activity_labels) {
  X <- read.csv(xPath, sep = "", header = FALSE)
  Y <- read.csv(yPath, sep = "", header = FALSE)
  subjects <- read.csv(subjectPath, sep = "", header = FALSE)
  # Assign feature labels as column names
  featuredData <- X
  names(featuredData) <- features[, 2]
  # Keep only `mean()` and `std()` features
  filteredData <- featuredData[, c(grep('mean\\(\\)', features[, 2]), grep('std\\(\\)', features[, 2]))]
  # Assign subjects
  subjectAddedData <- cbind(filteredData, subjects)
  colnames(subjectAddedData)[length(colnames(subjectAddedData))] <- 'subject'
  # Add activity codes
  activityAddedData <- cbind(subjectAddedData, Y)
  colnames(activityAddedData)[length(colnames(activityAddedData))] <- 'activity'
  return(activityAddedData)
}
# Read both partitions and stack them.
# BUG FIX: the '\ ' escapes in the original paths are a parse error in R
# string literals; plain spaces are valid in paths.
trainData <- read_data('./UCI HAR Dataset/train/X_train.txt', './UCI HAR Dataset/train/Y_train.txt', './UCI HAR Dataset/train/subject_train.txt', features, activity_labels)
dim(trainData)
head(trainData)
testData <- read_data('./UCI HAR Dataset/test/X_test.txt', './UCI HAR Dataset/test/Y_test.txt', './UCI HAR Dataset/test/subject_test.txt', features, activity_labels)
dim(testData)
data <- rbind(trainData, testData)
## Tidy data
### Gather column features into rows
library(tidyr)
gatheredData <- gather(data, key=feature, value=value, -c('subject', 'activity'))
head(gatheredData)
### Separate feature into three columns, `feature`, `measurement` and `axis`
# Separate feature into three columns
separatedData <- separate(gatheredData, feature, c("feature", "measurement", "axis"))
# If axis is empty, make it MAG which means the magnitude(Euclidean norm of the X,Y,Z signals)
separatedData$axis[separatedData$axis == ''] <- 'MAG'
table(separatedData$axis)
# Make axis as factor variable
factoredData <- separatedData
factoredData$axis <- as.factor(factoredData$axis)
table(factoredData$axis)
# Make measurement as factor variable
factoredData$measurement = as.factor(factoredData$measurement)
table(factoredData$measurement)
# Make subject and activity as factor variables
factoredData$subject <- as.factor(factoredData$subject)
table(factoredData$subject)
factoredData$activity <- as.factor(factoredData$activity)
table(factoredData$activity)
head(factoredData)
### Add domain: features named f* are frequency-domain, the rest time-domain
domainedData <- factoredData
domainedData$domain <- 'time'
domainedData[grep('^f', domainedData$feature),]$domain <- 'frequency'
domainedData$domain <- as.factor(domainedData$domain)
table(domainedData$domain)
head(domainedData)
### Add sensor type ("Gyro" in the feature name marks the gyroscope)
sensoredData <- domainedData
sensoredData$sensor <- 'accelerometer'
sensoredData[grep('Gyro', sensoredData$feature),]$sensor <- 'gyroscope'
sensoredData$sensor <- as.factor(sensoredData$sensor)
table(sensoredData$sensor)
head(sensoredData)
### Add accelerometer type (body vs gravity component)
# NOTE(review): 'NA' here is the literal string, not a missing value --
# confirm this is intentional for non-accelerometer rows.
acceTypeData <- sensoredData
acceTypeData$acceType <- 'NA'
acceTypeData[acceTypeData$sensor == 'accelerometer',]$acceType = 'body'
acceTypeData[acceTypeData$sensor == 'accelerometer' & grepl('Gravity', acceTypeData$feature),]$acceType = 'gravity'
acceTypeData$acceType <- as.factor(acceTypeData$acceType)
table(acceTypeData$acceType)
head(acceTypeData)
### Add jerk label
jerkData <- acceTypeData
jerkData$jerk <- 'no'
jerkData[grep('Jerk', jerkData$feature),]$jerk <- 'yes'
jerkData$jerk <- as.factor(jerkData$jerk)
table(jerkData$jerk)
head(jerkData)
## Write data
tidyData <- jerkData
dim(tidyData)
# NOTE(review): `row.name` partially matches write.table's `row.names`
# argument -- spell it out for clarity.
write.table(tidyData, './tidy_data.csv', row.name=FALSE)
## Calculate average data: mean of `value` for every grouping combination
library(dplyr)
grouped <- group_by(tidyData, subject, activity, measurement, axis, domain, sensor, acceType, jerk)
meanGrouped <- summarise(grouped, mean=mean(value))
dim(meanGrouped)
head(meanGrouped)
write.table(meanGrouped, './tidy_data_grouped.csv', row.name=FALSE)
|
87cf1bc869a24294b5ae34a97017cc522f233721
|
f6461fc0f967a7d302d08f2c05f4f112eca7ec51
|
/cachematrix.R
|
06e970a086eb110961ea0cef0ec706a0b976f34f
|
[] |
no_license
|
jboludae/ProgrammingAssignment2
|
170ad80df91d337b968e229252736da815fc8223
|
6dac48a4e707b4aa96d7d4874ad3dd61317f8f8c
|
refs/heads/master
| 2021-01-18T06:20:49.604314
| 2015-02-22T21:27:56
| 2015-02-22T21:27:56
| 31,142,818
| 0
| 0
| null | 2015-02-21T22:04:03
| 2015-02-21T22:04:03
| null |
UTF-8
|
R
| false
| false
| 1,512
|
r
|
cachematrix.R
|
## cachematrix.R -- a pair of functions that cache the inverse of a matrix so
## that repeated requests do not recompute it.
## makeCacheMatrix wraps a matrix in a cache object (a list of four accessor
## functions); cacheSolve computes -- or retrieves from that cache -- the
## wrapped matrix's inverse.
## Build a caching wrapper around matrix `x`: a list exposing get/set for the
## matrix itself and getinv/setinv for its (lazily computed) inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                     # cached inverse; NULL until computed
  set <- function(y) {            # replace the matrix and drop any stale cache
    x <<- y
    inv <<- NULL
  }
  get <- function() x             # return the wrapped matrix
  setinv <- function(inverse) {   # store a freshly computed inverse
    inv <<- inverse
  }
  getinv <- function() inv        # return the cached inverse (or NULL)
  list(get = get, set = set, getinv = getinv, setinv = setinv)
}
## cacheSolve returns the inverse of the matrix wrapped by a makeCacheMatrix
## object: computed with solve() on the first call, served from cache after.
## Return the inverse of the matrix held in cache object `x` (as produced by
## makeCacheMatrix). Serves the cached inverse when present; otherwise
## computes it with solve(), memoizes it, and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {       # cache hit: skip the recomputation
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get())       # invert the wrapped matrix
  x$setinv(fresh)               # memoize for subsequent calls
  fresh
}
|
93100bb6b7e1b657cb6847c5c41367f8493420c6
|
df3144a971f165897af0cb3e14131e433e7cb397
|
/Week 4 assignment/rankhospital.R
|
1c1272535600d96160231f69b16b135db160f169
|
[] |
no_license
|
mfelz/R-Programming
|
7ebe8c3657486d73210280042d353f93f035bffc
|
4e4f4771b54a47a47a97d348e620190cfdcee68e
|
refs/heads/master
| 2021-01-10T06:34:16.580303
| 2015-12-25T18:43:44
| 2015-12-25T18:43:44
| 48,588,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
rankhospital.R
|
## Return the name of the hospital at rank `num` for the given 30-day
## mortality `outcome` within `state`. `num` may be an integer rank,
## "best" (lowest rate) or "worst" (highest rate); ranks beyond the number
## of hospitals yield NA.
rankhospital <- function(state,outcome, num = "best") {
  ## read outcome data
  ## NOTE(review): the original referenced a global `data` that is never
  ## defined in this file; the assignment's data file is read here instead.
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that state and outcome are valid (before any processing)
  states <- append(state.abb, "DC")
  if (is.na(match(state, states))) {
    stop("invalid state")
  }
  if (is.na(match(outcome, c("heart attack", "heart failure", "pneumonia")))){
    stop("invalid outcome")
  }
  ## Column holding the 30-day mortality rate for each outcome
  ind <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  col <- ind[outcome]
  ## Coerce the rate columns; "Not Available" becomes NA (with a warning)
  data[, 11] <- as.numeric(data[, 11])
  data[, 17] <- as.numeric(data[, 17])
  data[, 23] <- as.numeric(data[, 23])
  ## Keep the requested state's rows and drop missing values
  data <- data[data[, 7] == state, ]             ## clean data by state
  data <- data[!is.na(data[, col]), ]            ## drop NA mortality rates
  data <- data[!is.na(data[, 2]), ]              ## drop NA hospital names
  ## Sort by mortality, ties broken by hospital name (column 2).
  ## [[ ]] extracts plain vectors for order(); one-column data frames
  ## (data[col]) are not valid order() inputs.
  data <- data[order(data[[col]], data[[2]]), ]
  if (num == "best") {
    num <- 1
  } else if (num == "worst") {
    num <- nrow(data)
  }
  data[num, 2]  ## hospital name at the requested rank
}
|
1263cb1c33eab2cefcd96a8dbb34dec4b46270e5
|
08ef584027f0ebbc89e82ec3ff40e53e115293a7
|
/IFCanalysis/moran_global.r
|
11c91b6cc5d40473e39ebaf52ad0948c0863475c
|
[
"MIT"
] |
permissive
|
bimgissql/R
|
50fc323756eabad0c837c9fd09072548c90d4a41
|
0c1cb5ca849d0efb222ebfca43b62966c82743b9
|
refs/heads/master
| 2020-05-23T10:12:31.794027
| 2017-03-07T11:26:37
| 2017-03-07T11:26:37
| 80,403,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,224
|
r
|
moran_global.r
|
#
# Moran's test for spatial autocorrelation using a spatial weights matrix
#
# load PostgreSQL library
library(RPostgreSQL)
# connection parameters
host = "127.0.0.1"
port = 5432
dbname = "bimdb"
user = "username"
password = "password"
# IFC tables
tblSpaceGeometries = "ifc.spacegeometries"
# database driver
# BUG FIX: `drv` is used by dbConnect()/dbUnloadDriver() below but was
# never defined.
drv = dbDriver("PostgreSQL")
# connect database
con = dbConnect(drv, host=host, port=port, user=user, password=password, dbname=dbname)
# display connection parameters
summary(con)
# remove password
rm(password)
# id of floor to perform Moran's test
floorid <- '100'
# table of space centroids
tblCentroids <- dbGetQuery(con, paste("SELECT id, ST_X(ST_Centroid(spacegeometry)) AS x, ST_Y(ST_Centroid(spacegeometry)) AS y, noiseratio FROM", tblSpaceGeometries, 'WHERE floorid =', floorid, sep=" "))
# distance matrix over the (x, y) centroid coordinates
# BUG FIX: dist(x, y) passed y as dist()'s second argument `method`;
# dist() takes a single matrix/data frame of coordinates.
tblCentroids.dists <- as.matrix(dist(tblCentroids[, c("x", "y")]))
# inverted distance matrix
tblCentroids.dists.inv <- 1/tblCentroids.dists
# set diagonal to zero
diag(tblCentroids.dists.inv) <- 0
# calculate global Moran statistics
# NOTE(review): moran.test() is not base R. spdep::moran.test() expects a
# `listw` weights object (see spdep::mat2listw), while ape::Moran.I() accepts
# a weight matrix like the one built above -- attach one of those packages
# and adapt this call accordingly.
moran.test(tblCentroids$noiseratio, tblCentroids.dists.inv)
# close connection
dbDisconnect(con)
# unload PostgreSQL driver
dbUnloadDriver(drv)
|
7003aa7c3ee7ec6b12874048c22cd78842e91bb0
|
aeaa9c7e195ffe48b03c99e2c0b75af4c000edd3
|
/Functions.R
|
1e0b02ec9bdefcd63762465ad8b7819c88b2a365
|
[] |
no_license
|
RedArmy95/Database-Project
|
367cc2503f7d8f427e2978832bd91131a2fca374
|
a415dd716ab9d2c3dff2a88f9e57f223ef95c5eb
|
refs/heads/master
| 2020-12-14T21:40:33.326706
| 2020-01-19T09:54:38
| 2020-01-19T09:54:38
| 234,876,660
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,393
|
r
|
Functions.R
|
library(RODBC)
library(dbconnect)
library(DBI)
library(gWidgets)
library(RMySQL)
library(stringr)
library(dplyr)
# Open the MySQL connection used as a global (`con`) by every helper below.
# NOTE(review): `dbcon` presumably comes from the `dbconnect` package as a
# wrapper around DBI::dbConnect -- confirm. Credentials are hard-coded here;
# move them to a config or environment variables.
con = dbcon(RMySQL::MySQL(), dbname = "data_base",username = "root", password = "f129968890"
,host = "localhost", port = 3306)
dbListTables(con)
# Initial snapshots of the singer/song tables, plus a seed row for board
singer = dbGetQuery(con ,"select * from singer")
song = dbGetQuery(con ,"select * from song")
dbSendQuery(con, "insert into board(Board_ID,Bname) values ('100003', 'favorite')")
board = dbGetQuery(con ,"select * from board")
board
# Look up the songs that belong to the album named `x`.
# The original wrote str_c("...'", "')", sep = x), abusing `sep` to splice the
# album name between the two fragments; this spells the same query directly.
# NOTE(review): `x` is interpolated into the SQL unescaped (injection risk) --
# prefer a parameterized query if the DBI/RMySQL versions allow it.
select_al <- function(x){
  # "select name from song where AID = (select AID from albums where Aname = 'Suck')"
  dbGetQuery(con, str_c("select * from song where AID = (select AID from albums where Aname = '", x, "')"))
}
# Add a new song given its name, genre, SID (singer id) and AID (album id).
# The new numeric ID is max(existing ID) + 1 (race-prone without a
# transaction), and release_time is the current timestamp.
# NOTE(review): the singer name is spliced in via a nested dbGetQuery() whose
# data-frame result is passed straight into str_c() -- confirm it coerces to
# the intended single string. SQL is built by concatenation (injection risk).
# The final query result is assigned to a local `song` and returned invisibly.
insert_song <- function(name, genre, SID, AID){
song = dbGetQuery(con ,"select * from song")
dbSendQuery(con, str_c("insert into song(ID, name, release_time, genre, SID, Sname, AID) values ('",
as.character(max(song$ID)+1),"','", name,"','",Sys.time(),"','", genre,"','", SID,"','",
dbGetQuery(con, str_c("select Sname from Singer where SID =", SID)),"','", AID,"')"))
song = dbGetQuery(con ,"select * from song")
}
# Create a new playlist named `pname` for user `UID`. The new PID is one past
# the current maximum; the refreshed playlist table is returned invisibly.
insert_playlist <- function(pname, UID){
  current <- dbGetQuery(con, "select * from playlist")
  new_pid <- as.character(max(current$PID) + 1)
  insert_sql <- str_c("insert into playlist(PID, Pname, UID) values ('",
                      new_pid, "','", pname, "','", UID, "')")
  dbSendQuery(con, insert_sql)
  playlist <- dbGetQuery(con, "select * from playlist")
}
albums = dbGetQuery(con ,"select * from albums")
# Look up album rows (all columns) whose name -- column 2, Aname -- equals
# `albumName`, using the global `albums` snapshot.
# Rewritten from a grow-by-rbind loop (quadratic) to a vectorized subset;
# with no match this now returns a 0-row data frame that keeps the albums
# columns (the loop returned an empty, column-less data.frame()).
find_AID <- function(albumName){
  albums[albums[[2]] == albumName, , drop = FALSE]
}
# Look up song rows whose name (column 2) equals `songName`. The song table
# is refreshed from the database first, as in the original.
# Rewritten from a grow-by-rbind loop (quadratic) to a vectorized subset;
# a no-match lookup now yields a 0-row data frame with the song columns.
find_ID <- function(songName){
  song <- dbGetQuery(con, "select * from song")
  song[song[[2]] == songName, , drop = FALSE]
}
# Attach song `ID` to playlist `PID2`, then return that playlist's rows.
insert_song_to_playlist <- function(PID2, ID){
  dbSendQuery(con, str_c("insert into song_in_list(PID, ID) values ('", PID2, "','", ID, "')"))
  dbGetQuery(con, str_c("select * from song_in_list where PID = ", PID2))
}
|
fcf1b3e9b65e922bad5feb11f08a01a9c8fc816a
|
8050c1f20c57de8fc4e49d95e31cb220a3e9230b
|
/plot3.R
|
154085f43c815a814c68fece8b7a83fadc5fb9e1
|
[] |
no_license
|
Ramlimab/ExData_Plotting1
|
1f319c23ea5637c0e347473b2ddcfa1fd662f349
|
82ecf5d07fb001d5c6db7ed84f3c29495f8394b4
|
refs/heads/master
| 2021-01-11T18:21:31.320839
| 2016-10-03T09:51:58
| 2016-10-03T09:51:58
| 69,627,898
| 0
| 0
| null | 2016-09-30T02:51:57
| 2016-09-30T02:51:57
| null |
UTF-8
|
R
| false
| false
| 880
|
r
|
plot3.R
|
# Plot 3: energy sub-metering over 2007-02-01/02 from the household power
# dataset; '?' marks missing values.
rawdata<-read.csv('household_power_consumption.txt',sep=';',na.strings='?')
# PERF: parse dates once and subset to the two target days BEFORE building
# the per-row Datetime column -- the original ran strptime() over the whole
# file only to discard almost every row afterwards.
dates<-as.Date(rawdata$Date,format='%d/%m/%Y')
startdate<-as.Date('2007-02-01')
enddate<-as.Date('2007-02-02')
newdata<-rawdata[!is.na(dates)&(dates>=startdate)&(dates<=enddate),]
# Combine original Date/Time text into a timestamp column for plotting
newdata$Datetime<-strptime(paste(newdata$Date,newdata$Time),format='%d/%m/%Y %H:%M:%S')
newdata$Date<-as.Date(newdata$Date,format='%d/%m/%Y')
png('plot3.png',width=480,height=480)
# Empty frame first, then one line per sub-meter
with(newdata, plot(Datetime,Sub_metering_1,type='n',xlab='',ylab='Energy sub metering'))
with(newdata, lines(Datetime,Sub_metering_1,col='black'))
with(newdata, lines(Datetime,Sub_metering_2,col='red'))
with(newdata, lines(Datetime,Sub_metering_3,col='blue'))
legend('topright',legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),col=c('black','red','blue'),lty=c(1,1,1))
dev.off()
|
127691e74e0dd347c36ee7a9f9e73c0cdf8917d2
|
a1355acdd7419f9d0a844cf2524d68b9af0af193
|
/城市指标可视化/直接映射中国地图.R
|
7a45051c87c004ddf4e3dd28fca4259ef1366489
|
[] |
no_license
|
ShydowLi/data-view--with-R
|
d9ededa91f167e5abb70234a8e6b53273db5cb66
|
41148b22b57eb376a1c19179c55d08f4bafa7d01
|
refs/heads/master
| 2020-04-09T01:26:11.890871
| 2018-12-01T03:13:41
| 2018-12-01T03:13:41
| 159,904,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 170
|
r
|
直接映射中国地图.R
|
# Render a map of China using the 'china' polygon database from mapdata.
library(mapdata)
library(maps)
library(ggplot2)
china1 <- map_data('china')
# group = group keeps the separate administrative outlines as distinct polygons
p1 <- ggplot(china1) + geom_polygon(aes(x = long, y = lat, group = group))
|
a1eb17a7b574fc2a0b81c7e620cc3428adf7d5fd
|
cf000e9056040d8087067cf61247a582218bffe5
|
/shinyApp_demo1/My_new_app/server.R
|
c661bf408284016cf3887a58dea3644f06c079ed
|
[] |
no_license
|
abishekarun/Stat545
|
86c173786d088188469195313607cf7e8971173d
|
e2a3631fe51fae9038f50d57a02f1cba1fe5cd72
|
refs/heads/master
| 2021-08-14T06:06:43.237986
| 2017-11-14T18:52:33
| 2017-11-14T18:52:33
| 103,297,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
server.R
|
# Define server logic required to draw a histogram.
# NOTE(review): shiny, ggplot2 and the magrittr pipe (`%>%`) are assumed to
# be attached elsewhere (ui.R / global.R) -- confirm.
server <- function(input, output) {
# Read once when the server function is created, not on every render
bcl_data <- read.csv("Data/bcl-data.csv")
# Histogram of alcohol content, re-rendered by shiny on demand
output$Hist_AlcCount <- renderPlot({
bcl_data %>%
ggplot()+
aes(x = Alcohol_Content) +
geom_histogram(binwidth = 5)
})
}
|
b0ac527faf4729c9854620dc037b3de1a6ed72b0
|
4e751ea99ec33a74e76a691f81fd9b51090bb22c
|
/R/plot_histogram.R
|
a329f22b546b808dabdd1cf1cacdd48ed1a64f9a
|
[] |
no_license
|
slkarkar/RGCCA
|
cd621a66b045bcc2a7f89ac065d0eb4effbd1bc6
|
0c4894e6805097459e1d8c1b083984c9845f65bc
|
refs/heads/master
| 2023-02-03T09:39:24.266270
| 2020-04-17T12:12:18
| 2020-04-17T12:12:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,299
|
r
|
plot_histogram.R
|
#' Histogram settings
#'
#' Default font for a vertical barplot.
#'
#' @inheritParams plot2D
#' @param group A vector of character giving the group for the rows
#' @param cex_axis An integer for the size of the axis text
#' @param colors A vector of colors
plot_histogram <- function(
p,
df,
title = "",
group = NA,
colors = NULL,
cex = 1,
cex_main = 25 * cex,
cex_sub = 16 * cex,
cex_axis = 10 * cex
) {
# Validate sizing arguments and inputs (check_* are package-level helpers)
for (i in c("cex", "cex_main", "cex_sub", "cex_axis"))
check_integer(i, get(i))
stopifnot(is(p, "ggplot"))
check_colors(colors)
title <- paste0(title, collapse = " ")
group <- as.vector(group)
# Few rows (or a "d_ave" object): default bar width; full-width bars otherwise
if (NROW(df) <= 10 || is(df, "d_ave")) {
width <- NULL
if (is(df, "d_ave"))
cex_axis <- 12
} else
width <- 1
# Extra bottom margin when only a handful of bars are drawn
if (NROW(df) < 3)
mar <- 60
else if (NROW(df) < 5)
mar <- 30
else
mar <- 0
# Shared style for both axis texts; `margin` is accepted but unused --
# NOTE(review): consider dropping the parameter.
axis <- function(margin){
element_text(
size = cex_axis,
face = "italic",
color = "gray40"
)
}
# Horizontal bar chart (coord_flip) with the package's shared theme
p <- p + geom_bar(stat = "identity", width = width) +
coord_flip() + labs(title = title, x = "", y = "") +
theme_classic() +
theme_perso(cex, cex_main, cex_sub) +
theme(
axis.text.y = axis(),
axis.text.x = axis(),
axis.line = element_blank(),
axis.ticks = element_blank(),
plot.subtitle = element_text(
hjust = 0.5,
size = cex_sub,
face = "italic"
),
plot.margin = margin(0, 0, mar, 0, "mm")
)
# For non-"d_ave" data: label bars by row name and fill by group
if (!is(df, "d_ave")) {
p <- p +
scale_x_continuous(breaks = df$order, labels = rownames(df)) +
labs(fill = "Blocks")
if (length(group) == 1){
# Single group value: continuous gradient fill (defaults if none given)
if (is.null(colors))
colors <- c(color_group(seq(3))[3], "gray", color_group(seq(3))[1])
p <- p +
scale_fill_gradientn(colors = colors, na.value = "black")
} else if ((is.character2(group[!is.na(group)]) ||
length(unique(group)) <= 5 )) {
# Discrete groups (character-like, or few unique values): manual palette
p <- p + scale_fill_manual(values = color_group(group, colors))
}
}
return(p)
}
|
6b6fdaf1b7894ba437fda94ff6723663564664a3
|
b3d92487c861dc5132d860f5ab6e7e58d752eb26
|
/lab5/lab5_mecon.R
|
04c2160281b48d6387246baedbcfbf4bbd69b20d
|
[] |
no_license
|
codercola-mw/Computational-Stat
|
ccfca16bdb8ab2f5cd67a7cde379f8e99f21c276
|
7d8d513aa8167f38d5842fd4c38a4fcea6fe8d69
|
refs/heads/master
| 2022-04-08T12:17:10.046098
| 2020-03-06T16:03:58
| 2020-03-06T16:03:58
| 235,975,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,233
|
r
|
lab5_mecon.R
|
# Question 1: randomness of the 1970 draft lottery
data1 =readxl::read_xls("~/Desktop/732A90_VT2020_Materials/lottery.xls")
X = data1$Day_of_year
Y = data1$Draft_No
### 1.
plot(Y,X, "p")
# Yes, it looks random
### 2.
# Smooth Draft_No as a function of Day_of_year and overlay the fitted curve
estimate_Y = loess(Y~X, data=data1)
plot(Y,X, "p")
lines(X, estimate_Y$fitted,col="red")
# The fitted line trends slightly downward: draft numbers may be lower
# toward the end of the year.
### 3. use the test statistics to check whether the lottery is random
library(boot)
# Bootstrap statistic for boot(): slope between the loess extremes,
# T = (max(yhat) - min(yhat)) / (X_at_max - X_at_min), computed on the
# resampled rows indexed by `vn`.
stat1 <- function(data, vn) {
  resampled <- data[vn, ]
  smooth_fit <- loess(Draft_No ~ Day_of_year, data = resampled)
  fitted_vals <- smooth_fit$fitted
  day_at_max <- resampled$Day_of_year[which.max(fitted_vals)]
  day_at_min <- resampled$Day_of_year[which.min(fitted_vals)]
  (max(fitted_vals) - min(fitted_vals)) / (day_at_max - day_at_min)
}
set.seed(12345)
# Nonparametric bootstrap (2000 resamples) of the slope statistic stat1
res = boot(data=data1,stat=stat1, R=2000)
hist(res$t, breaks=50, main="Histogram of T-Statistics",
xlab="T Statistic")
# H0= T>0, H1 = T<=0
mean(res$t>0)  # share of positive bootstrap statistics (empirical p-value)
## From the histogram, we can see all the values are below zero.
## So, after we calculate the P-value = 0.001<0.05 which is true, so
## we reject H0, meaning that the data is not random
### 4.using a permutation test
# H0: it is random, H1: It is non-random
# Permutation test: shuffle Day_of_year B times, recompute the loess-slope
# statistic for each shuffle, and return the fraction of permuted statistics
# at least as extreme (in absolute value) as the observed one.
# RNG note: the single sample() call per iteration matches the original, so
# results under a fixed seed are unchanged.
permutation <- function(data, B) {
  perm_stats <- numeric(B)
  n_obs <- nrow(data)
  for (b in seq_len(B)) {
    Gb <- sample(data$Day_of_year, n_obs)
    fit_b <- loess(Draft_No ~ Gb, data = data)
    yhat_b <- fit_b$fitted
    day_hi <- Gb[which.max(yhat_b)]
    day_lo <- Gb[which.min(yhat_b)]
    perm_stats[b] <- (max(yhat_b) - min(yhat_b)) / (day_hi - day_lo)
  }
  # Observed statistic on the unshuffled data
  fit0 <- loess(Draft_No ~ Day_of_year, data = data)
  yhat0 <- fit0$fitted
  day_hi0 <- data$Day_of_year[which.max(yhat0)]
  day_lo0 <- data$Day_of_year[which.min(yhat0)]
  stat0 <- (max(yhat0) - min(yhat0)) / (day_hi0 - day_lo0)
  mean(abs(perm_stats) >= abs(stat0))
}
set.seed(12345)
permutation(data1, 2000)
#0.156 > 0.05, so we can't reject H0
### 5. make a crude estimate of the power in step4
# Generate a non-random alternative dataset
new_Y=c()
for( i in 1:366){
beta = rnorm(n=1, mean=183, sd=10)
# NOTE(review): min((0.1*X + beta), 366) reduces over the WHOLE vector X,
# not element i -- X[i] was probably intended; confirm before reuse.
new_Y[i] =max(0, min((0.1*X +beta), 366))
}
new_data= cbind("Day_of_year"=X,"Draft_No"=new_Y)
new_data= as.data.frame(new_data)
set.seed(12345)
permutation(new_data,200)
## repeat the step
# Question 2: bootstrap estimates for home prices
data =read.csv2("~/Desktop/732A90_VT2020_Materials/prices1.csv")
hist(data$Price)
cat("The mean of the housing price:\n")
mean(data$Price)
#1080.473
set.seed(12345)
# Manual nonparametric bootstrap of the mean price
B = 2000
n = dim(data)[1]
stat = c()
for (b in 1:B){
bootstrap_sample = sample(data$Price, n, replace=TRUE)
stat[b] = mean(bootstrap_sample)
}
# mean(stat)
#Bias-corrected estimator: 2*T(data) minus the mean of bootstrap replicates
T1 = 2*mean(data$Price)-mean(stat)
# > T1
# [1] 1080.819
#the variance of estimator
var_boot = sum((stat-mean(stat))^2)/(B-1)
#> var_boot
#[1] 1280.552
#### use the boot* find the bias-correction
# boot() statistic: mean of the matrix rows selected by index vector `vn`.
stat <- function(data, vn){
  mean(data[vn, ])
}
price = as.matrix(data$Price)
res <- boot(data=price, stat, R=1000)
# > mean(res$t)
# [1] 1079.24
#bias std. error
#0.4852364 34.71439
cat("95% confidence interval is: \n")
print(boot.ci(res))
### 3 . jackknife
# NOTE(review): `stat` below shadows the stat() function defined above --
# harmless here (boot already ran) but confusing; consider a different name.
B = 1000
n = dim(data)[1]
T_i = c()
stat = mean(data$Price)
# Leave-one-out pseudo-values: T_i = n*T - (n-1)*T_(-i)
for (b in 1:n ){
T_i[b] = n*stat-(n-1)*mean(data$Price[-b])
}
J_T = mean(T_i)
var_jack = sum((T_i-J_T)^2)/(n*(n-1))
### 4. compare with the C.I
|
89c18230f8ce0f8bd3ad76b18398c8dbe8944e3a
|
1372e37e4d022d254460d8c00b2077a983d7be1a
|
/pxlmatching.R
|
4bc1579ce3e0d03a48d7e43ec4fb563aec87ee39
|
[] |
no_license
|
andriizayac/sagebrush_spacetime_nlcd
|
6963649e36b99002051af9904ec6470e8767cfd7
|
cee78f1d29693c116e8bdb87b920e53a4dc50404
|
refs/heads/master
| 2023-04-12T00:10:11.642216
| 2022-12-21T21:59:26
| 2022-12-21T21:59:26
| 283,573,577
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,906
|
r
|
pxlmatching.R
|
# === prior: raster_processing.R
# === libraries + paths
pkgs <- c("cluster", "raster", "rgdal", "dplyr", "sf")
sapply(pkgs, require, character.only = T)
years <- c(1985:2018)
# === import data: these data represent wildfires that meet
# the following criteria: single fire, no treatment
fires <- readRDS("data/fpolygons.rds")
fires$FireYer <- as.numeric(fires$FireYer)
sage <- readRDS("data/sagelist.rds")
dfEnv <- readRDS("data/dfEnv_covars.rds")
pxldfEnv <- readRDS("data/pxlcovlist.rds")
N <- nrow(fires)
for(i in 1:3){
for(j in 1:N) {
r <- projectRaster(pxldfEnv[[i]][[j]], sage[[j]][[1]])
sage[[j]] <- addLayer(sage[[j]], r)
}
}
sagedf <- list()
covdf <- list()
xys <- list()
for(i in 1:N){
dat <- as.data.frame(sage[[i]], xy = TRUE, na.rm = T)
xys[[i]] <- dat[, 1:2]
sagedf[[i]] <- dat[, 3:35]
covdf[[i]] <- dat[, 36:38]
}
# === out of the larger pool of sites select those that
# after the fire have a decrease in average cover > 1%
ssize <- sapply(sagedf, nrow) # range of sample sizes
maxdiff <- rep(NA, N)
for(i in 1:N){
l <- as.numeric(apply(sagedf[[i]], 2, function(x) { mean(x[1:23]) } ))
maxdiff[i] <- min(diff(l))
}
diffins <- which(sapply(maxdiff, function(x) { -x > 1 }))
subid <- diffins
# === add pre-disturbance covariates to cov df
# assigns average stability to the pixels with no variation.
tfires <- fires[subid, ]
tsage <- sagedf[subid]
tdfEnv <- dfEnv[subid, ]
tpxlcov <- covdf[subid]
txys <- xys[subid]
# remove NORTH BLUE MOUNTAIN fire as there is no variation
outid <- which(tfires$FireNam == "NORTH BLUE MOUNTAIN")
tfires <- tfires[-outid, ]
tsage <- tsage[-outid]
tdfEnv <- tdfEnv[-outid, ]
tpxlcov <- tpxlcov[-outid]
txys <- txys[-outid]
for(i in 1:length(tpxlcov)){
var1 <- apply(tsage[[i]][, 1:(tfires$FireYer[i]-1985)], 1, mean)
var2 <- as.numeric(apply(tsage[[i]][, 1:(tfires$FireYer[i]-1985)], 1, function(x) { mean(x)/sd(x) } ))
tpxlcov[[i]]$prefire <- var1
stabmean <- mean(var2[is.finite(var2)], na.rm = T)
tpxlcov[[i]]$stab <- ifelse(!is.finite(var2), stabmean, var2)
}
# === import/calculate pixel-level covariates
# === apply kmeans clustering
library(cluster)
library(factoextra)
# add cluster
for(M in 2:15) {
for(i in 1:length(tpxlcov)){
set.seed(123)
# https://stackoverflow.com/questions/16274788/k-means-and-mahalanobis-distance
# cov(X) = R'R
# y = XR^-1
X <- as.matrix(tpxlcov[[i]][,1:5])
# Re-scale the data
# C <- chol(var(X))
# y <- X %*% qr.solve(C)
y = scale(X)
k2 <- kmeans(y, centers = M, iter.max = 200, algorithm = "MacQueen")
tpxlcov[[i]][, paste0("cluster", M)] <- as.numeric(k2$cluster)
}
}
# export data: these are the data that were used in the analysis
saveRDS(tfires, "data/tfires.rds")
saveRDS(tpxlcov, "data/tpxlcov.rds")
saveRDS(tsage, "data/tsage.rds")
saveRDS(tdfEnv, "data/tdfEnv_covars.rds")
saveRDS(txys, "data/txys.rds")
# === next: model_fit.R
|
6afc016823b2433a2a6fea13dfa4bb1ce6215f26
|
a20245ad9c9f1956a0bfb97d8e0a2802a8d8408c
|
/man/markers_for_heatmap.Rd
|
395b923d7d2d1e423673736d05ad40336130bf6f
|
[] |
no_license
|
brickyyyy/SC3min
|
881681954e93bfa4552dde9c793f110a66b2481d
|
b4c3d6dac5848ed93ba9eff209d8ffcea4c2438b
|
refs/heads/master
| 2020-04-07T19:44:36.417393
| 2019-03-06T18:45:17
| 2019-03-06T18:45:17
| 158,660,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 630
|
rd
|
markers_for_heatmap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ShinyFunctions.R
\name{markers_for_heatmap}
\alias{markers_for_heatmap}
\title{Reorder and subset gene markers for plotting on a heatmap}
\usage{
markers_for_heatmap(markers)
}
\arguments{
\item{markers}{a \code{data.frame} object with the following colnames:
\code{sc3min_k_markers_clusts}, \code{sc3min_k_markers_auroc}, \code{sc3min_k_markers_padj}.}
}
\description{
Reorders the rows of the input data.frame based on the \code{sc3min_k_markers_clusts}
column and also keeps only the top 10 genes for each value of \code{sc3min_k_markers_clusts}.
}
|
72337eb45295cd9b75b8b53e4ba3f78309615a28
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/storr/examples/storr_multistorr.Rd.R
|
f47a8282afae655befab0dd45c56b6c30b4b3189
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 564
|
r
|
storr_multistorr.Rd.R
|
library(storr)
### Name: storr_multistorr
### Title: Storr with multiple storage drivers
### Aliases: storr_multistorr
### ** Examples
# Create a storr that stores keys in an environment and data in
# an rds store on disk
path <- tempfile()
st <- storr::storr_multistorr(driver_environment(),
driver_rds(path))
st$set("a", runif(10))
st$get("a")
# The data can also be seen by connecting to the rds store directly
rds <- storr::storr_rds(path)
rds$list() # empty (the keys live in the environment driver)
rds$list_hashes() # here's the data
rds$get_value(rds$list_hashes())
st$destroy()
|
933d92cc27ddd2b20ffdc843cb805ad32492379b
|
66002e6645ac0707ff6b704cf550849da0affbc2
|
/Density Estimation and Vine Copula Model/section1&2/section1.r
|
a6008303d25b22ac3414b982f6f6006420a20159
|
[] |
no_license
|
relatecode/Revenue-Insurance
|
fef60050dcae93656af020a558dc31c6e85b5657
|
06786488caf537745531db36b9c083083b491f37
|
refs/heads/main
| 2023-08-14T20:12:11.061069
| 2021-09-25T00:59:11
| 2021-09-25T00:59:11
| 361,301,327
| 0
| 0
| null | null | null | null |
ISO-8859-13
|
R
| false
| false
| 4,008
|
r
|
section1.r
|
# R code for "Calculation of Crop Insurance Premium: Based on a Density
# Ratio Model and Vine Copula"
# section 1
# NOTE(review): the path below contains mojibake from a non-UTF-8 source
# encoding -- restore the original directory name when running.
dat=read.delim("D:/Y/“ó¶¹/shandong.txt",header= TRUE,sep="\t", stringsAsFactors = FALSE)
# load the following libraries and functions
library('logspline')
library('lme4')
library('fBasics')
# fit a simple model with spline time trend and county dummies
# use data for year 2006 and onward
dat=dat[dat$Year>2005,]
# two point quadratic spline for time trend (knots at the year terciles)
t2=quantile(dat$Year,c(1/3,2/3))
year=cbind(dat$Year,dat$Year^2,(dat$Year-t2[1])^2*(dat$Year>t2[1]),(dat$Year-t2[2])^2*(dat$Year>t2[2]))
City=dat$City
# first estimate at the district level
fit=glm(Yield~year+factor(City),data=dat)
# check the time trend
t0=unique(dat$Year)
t0=sort(t0)
tt0=cbind(t0,t0^2,(t0-t2[1])^2*(t0>t2[1]),(t0-t2[2])^2*((t0>t2[2])))
trend=tt0%*%coef(fit)[2:5]
plot(t0,trend,type='b')
summary(resid(fit))
plot(logspline(fit$res))
# studentize residuals by predicted value
summary(fitted(fit))
res=resid(fit)/fitted(fit)
summary(res)
plot(density(res))
# use logspline for the baseline
# as it is easier to calculate the normalizing factor here
f0=logspline(res,lbound=min(res),ubound=max(res))
y=plogspline(res,f0)
# divide 12 years of data into 6 cells
y2=cut(y,breaks=seq(0,1,length=7))
w=xtabs(~y2+dat$City.code)
w=unlist(w)
w=matrix(w,ncol=1)
cy=unique(dat$City.code)
cy=sort(cy)
cy=rep(cy,each=6)
temp=seq(0,1,length=7)
z=temp[-1]/2+temp[-7]/2
# dat2: cell counts (w) by city (cy) at the cell midpoints (z); 17 cities
dat2=data.frame(cbind(w,cy,rep(z,17)))
names(dat2)=c('w','cy','z')
dat2$cy=factor(dat2$cy)
# generate Legendre basis
zz=cbind(legendre(dat2$z,1,2),legendre(dat2$z,2,2),legendre(dat2$z,3,2),legendre(dat2$z,4,2),legendre(dat2$z,5,2),legendre(dat2$z,6,2),legendre(dat2$z,7,2),legendre(dat2$z,8,2))
# model specification via Poisson regression
# Table 1 in the text is based on these models
# Model 1 (fit1) is preferred according to AIC, but K = 1 is apparently too
# restrictive, so we still choose K = 2 (fit2).
fit1=glm(w~zz[,1]:factor(cy),data=dat2,family='poisson')
fit2=glm(w~zz[,1]:factor(cy)+zz[,2]:factor(cy),data=dat2,family='poisson')
fit3=glm(w~zz[,1]:factor(cy)+zz[,2]:factor(cy)+zz[,3]:factor(cy),data=dat2,family='poisson')
fit4=glm(w~zz[,1]:factor(cy)+zz[,2]:factor(cy)+zz[,3]:factor(cy)+zz[,4]:factor(cy),data=dat2,family='poisson')
fit5=glm(w~zz[,1]:factor(cy)+zz[,2]:factor(cy)+zz[,3]:factor(cy)+zz[,4]:factor(cy)+zz[,5]:factor(cy),data=dat2,family='poisson')
fit6=glm(w~zz[,1]:factor(cy)+zz[,2]:factor(cy)+zz[,3]:factor(cy)+zz[,4]:factor(cy)+zz[,5]:factor(cy)+zz[,6]:factor(cy),data=dat2,family='poisson')
c(fit1$aic,fit2$aic,fit3$aic,fit4$aic,fit5$aic,fit6$aic)
# calculate f0 (baseline density on a 300-point grid over the residual range)
x0=seq(min(res),max(res),length=300)
out0=dlogspline(x0,f0)
plot(x0,out0,type='l')
# Unnormalized tilted density: baseline logspline density multiplied by
# exp(sum_k b[k] * Legendre_{id[k]}(F0(x))), where F0 is the baseline CDF.
# Arguments:
#   x           - evaluation points
#   b           - tilt coefficients, one per entry of `id`
#   f.logspline - fitted logspline object (baseline density/CDF)
#   id          - Legendre polynomial orders matching `b`
ff=function(x,b,f.logspline,id)
{
dens0=dlogspline(x,f.logspline)
cdf0=plogspline(x,f.logspline)
expo=0
for (k in seq_along(id))
{
basis=legendre(cdf0,id[k],2)
expo=expo+b[k]*basis
}
dens0*exp(expo)
}
# normalize and evaluate the tilted density for each of the 17 cities
# (fit2 has K = 2 tilt terms per city)
a1=matrix(0,ncol=1,nrow=17)
out1=matrix(0,ncol=17,nrow=300)
id=c(1,2)
b1=coef(fit2)[-1]
b1=matrix(b1,ncol=2)
for (i in 1:17)
{
# a1[i]: normalizing constant of city i's tilted density
a1[i]=integrate(ff,lower=min(res),upper=max(res),b=b1[i,],f.logspline=f0,id=id)$value
temp=ff(x0,b1[i,],f0,id)
out1[,i]=temp/a1[i]
}
matplot(x0,out1,type='l')
lines(x0,out0,lwd=3)
# for each city, plot the district common density and individually estimated density
out.city=matrix(0,ncol=17,nrow=length(x0))
for (i in seq(1,17))
{
temp=res[dat$City.code==i]
f00=logspline(temp)
out.city[,i]=dlogspline(x0,f00)
}
matplot(x0,out.city,type='l')
lines(x0,out0,lwd=3)
## semi-parametric method to obtain the PIT of yield:
## YU[j, i] = tilted CDF of city i evaluated at the j-th residual of city i
## (residuals are stored in 12-year blocks per city)
YU=matrix(0,ncol=17,nrow=12)
for (i in 1:17)
for(j in 1:12){
YU[j,i]=integrate(ff,lower = min(res),upper = res[ j +12*(i-1)],b=b1[i,],f.logspline=f0,id=id)$value/a1[i]
}
|
a3bbec7319c3a8a2eabdb97f0910c5b052342262
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051218-test.R
|
672294b313a71e05b3469a5d4c10d6b69044cdf0
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 414
|
r
|
1610051218-test.R
|
# Auto-generated fuzzer (libFuzzer/RcppDeepState) regression input: replays a
# previously recorded argument list against grattan::IncomeTax and prints the
# structure of the result. Do not edit the literal values by hand.
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(3.41641322410626e-312, 1.08601191821704e-306, 1.76611980003167e-279, -7.31409635508499e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
828c09b0330d6dfcf21b637a37c073eac14529a2
|
2067ae0560a820f9a4af01bbfcfb8a4cc02fe129
|
/animate.R
|
32bda9b3ba310135eb4739d3717037bcaf3710d6
|
[
"MIT"
] |
permissive
|
joelonsql/sir
|
4f1c9e6c8581ddc3218a96af13a2a00e79f8fea3
|
43afe8e204069295a2086ee9a2d0a2829533c1c1
|
refs/heads/master
| 2021-04-15T02:34:37.088052
| 2020-03-28T12:51:23
| 2020-03-28T12:51:23
| 249,287,564
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,589
|
r
|
animate.R
|
library(plotly)
# Map p-values to the conventional significance codes:
#   (0, 0.001] -> "***", (0.001, 0.01] -> "**", (0.01, 0.05] -> "*",
#   (0.05, 0.1] -> ".",  (0.1, 1]      -> " "
# Returns the `noquote` symbol object produced by stats::symnum().
signif.num <- function(x) {
  breaks <- c(0, 0.001, 0.01, 0.05, 0.1, 1)
  codes <- c("***", "**", "*", ".", " ")
  symnum(x, corr = FALSE, na = FALSE, legend = FALSE,
         cutpoints = breaks, symbols = codes)
}
# Load the Johns Hopkins cumulative-deaths time series (wide format, one
# column per date), reshape to long, aggregate provinces to country level,
# join country populations, and keep countries above 1M inhabitants.
# NOTE(review): read_csv/rename/pivot_longer/mdy/%>% come from readr, dplyr,
# tidyr, lubridate and magrittr — only plotly is loaded above, so this script
# assumes the tidyverse is already attached in the session; confirm.
data <- read_csv("time_series_covid19_deaths_global.csv") %>%
  rename(Province = "Province/State",
         CountryRegion = "Country/Region") %>%
  pivot_longer(-c(Province,CountryRegion,Lat,Long), names_to = "Date", values_to = "NDeaths") %>%
  mutate(Date = mdy(Date)) %>%
  group_by(CountryRegion, Date) %>%
  summarise(NDeaths = sum(NDeaths)) %>%
  ungroup() %>%
  mutate(Day = as.integer(Date - min(Date))) %>%
  inner_join(
    read_csv("populations.csv", col_names=c("CountryRegion","Population")),
    by = "CountryRegion"
  ) %>%
  filter(Population > 1000000)
# per-capita cumulative deaths
data$Deaths <- data$NDeaths / data$Population
# For every day of the series and every country, fit log2(Deaths) ~ Day over
# the preceding 7-day window and record the estimated exponential growth rate
# together with the latest deaths-per-capita and raw death count.
# (The unused `i`, `all_data` and `skip` variables from the original were
# removed; nothing downstream reads them.)
days <- max(data$Date)-min(data$Date)-7
last_actual_date <- max(data$Date)
deaths_per_capita_and_rate <- data.frame(Date=character(),Country=character(),DeathsPerCapita=double(),DeathRate=double(),NDeaths=integer())
for (d in days:0) {
  for (country in unique(data$CountryRegion)) {
    end_date <- last_actual_date - d
    country_data <- data %>% filter(CountryRegion == country, Deaths > 0, Date > end_date - 7, Date < end_date)
    # Too few observations in the window: record a zero row and move on.
    # Fix: the original tested `count(country_data) < 3`, but count() returns
    # a data frame, which is not a valid scalar condition for if(); nrow()
    # is the intended row count.
    if (nrow(country_data) < 3) {
      deaths_per_capita_and_rate <- add_row(deaths_per_capita_and_rate,
        Date=as.character(end_date),
        Country=country,
        DeathsPerCapita=0,
        DeathRate=0,
        NDeaths=0
      )
      next
    }
    # A constant death series has no estimable slope: record a zero row too.
    if (min(country_data$Deaths) == max(country_data$Deaths)) {
      deaths_per_capita_and_rate <- add_row(deaths_per_capita_and_rate,
        Date=as.character(end_date),
        Country=country,
        DeathsPerCapita=0,
        DeathRate=0,
        NDeaths=0
      )
      next
    }
    # Re-zero Day inside the window so the intercept is the window start.
    country_data$Day <- country_data$Day - min(country_data$Day)
    model <- lm(log2(Deaths) ~ Day, country_data)
    model_summary <- summary(model)
    p_values <- sapply(model_summary$coefficients[,4],signif.num)
    print(paste(country, "R²adj", round(model_summary$adj.r.squared,2), "Coefficients",
      paste(
        names(p_values),
        model_summary$coefficients[,1],
        p_values,
        sep=" ",
        collapse=" ; "
      )
    ))
    # Fix: the original call had a trailing comma after the last argument,
    # which is an error in an R function call; it has been removed.
    deaths_per_capita_and_rate <- add_row(deaths_per_capita_and_rate,
      Date=as.character(end_date),
      Country=country,
      DeathsPerCapita=tail(country_data,1)$Deaths,
      DeathRate=model_summary$coefficients[,1]["Day"],
      NDeaths=tail(country_data,1)$NDeaths
    )
  }
}
# Animated scatter: deaths per capita (log x) vs. growth rate, one frame per
# date, countries drawn as text labels sized by raw death count.
deaths_per_capita_and_rate %>%
  plot_ly(
    x = ~DeathsPerCapita,
    y = ~DeathRate,
    color = ~Country,
    text = ~Country,
    size = ~NDeaths,
    frame = ~Date,
    type = "scatter",
    mode = "text"
  ) %>%
  layout(
    title = "COVID-19 death rate and deaths per capita",
    xaxis = list(type="log",range=c(log10(0.00000001),log10(0.001)))
  )
|
202065800d8ac87b68a47472f145d113f1cf9f85
|
187d170f88518defcceb303502419962b453fa71
|
/R/CriteriaManager.R
|
094efd4514b64853c3bd366e3e43c203691248fd
|
[] |
no_license
|
vubiostat/wfccmr
|
8cec84461825caae45fa19fd2372ce3d7eb33bb6
|
9a1a340ac5e93133eec781418206d0d1fc9e460e
|
refs/heads/master
| 2020-06-01T06:57:12.552197
| 2009-02-23T22:29:08
| 2009-02-23T22:29:08
| 115,112
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,040
|
r
|
CriteriaManager.R
|
# Virtual S4 base class for criteria managers: bundles a Criteria collection
# with a name, sign vector, WFCCM function, pre-filter and a distance
# permutation count. Validity requires the scalar slots to have length 1.
setClass("CriteriaManager",
    representation( criteria="Criteria",
                    name="character",
                    sign="character",
                    wfccmfunction="character",
                    prefilter="character",
                    permutations="numeric",
                    "VIRTUAL"),
    prototype(  criteria=Criteria(),
                name="criteria",
                sign="",
                wfccmfunction="",
                prefilter="",
                permutations=0),
    validity=function(object) {
        if (length(object@name) != 1)
            return("there can only be one name")
        if (length(object@wfccmfunction) != 1)
            return("there can only be one WFCCM function")
        if (length(object@prefilter) != 1)
            return("there can only be one pre-filter function")
        if (length(object@permutations) != 1)
            return("there can only be one distance permutations number")
        TRUE
    }
)
# Constructor for CriteriaManager objects. All arguments default to the
# values of the class prototype and are forwarded unchanged to new(), which
# runs the class validity checks.
CriteriaManager <- function(criteria=Criteria(),
                            name="criteria",
                            sign="",
                            wfccmfunction="",
                            prefilter="",
                            permutations=0) {
    new("CriteriaManager",
        criteria=criteria,
        name=name,
        sign=sign,
        wfccmfunction=wfccmfunction,
        prefilter=prefilter,
        permutations=permutations)
}
# Write a CriteriaManager to a file/connection, one field per line:
# name, pre-filter, WFCCM function, space-joined signs, permutation count,
# then the criteria coerced to character (blank lines separate the fields —
# the same layout read.CriteriaManager() below expects).
write.CriteriaManager <- function(x, file) {
    cat(x@name, "",
        x@prefilter, "",
        x@wfccmfunction, "",
        paste(x@sign, collapse=" "), "",
        x@permutations, "",
        as(x@criteria, "character"),
        file=file, sep="\n")
}
# Read the on-disk format produced by write.CriteriaManager() and return its
# fields as a plain list (not an object; the caller constructs one).
read.CriteriaManager <- function(file) {
    data <- scan(file, what=character(0), sep="\n", blank.lines.skip=FALSE, quiet=TRUE)
    # Legacy-name fixup: underscores become dots, and "value" suffixes get a
    # dot re-inserted (e.g. "pvalue" -> "p.value").
    fix <- function(x) { gsub("([^.])value", "\\1.value", gsub("_", ".", x)) }
    data <- scan(file, what=character(0), sep="\n", blank.lines.skip=FALSE, quiet=TRUE)
    name <- data[1]
    prefilter <- fix(data[3])
    wfccmfunction <- fix(data[5])
    sign <- fix(strsplit(data[7], " ")[[1]])
    # Silence the NA-coercion warning while probing whether line 9 holds a
    # permutation count (older files omit it).
    old.option <- options(warn = -1)
    on.exit(options(old.option))
    permutations <- as.numeric(data[9])
    next.line <- 11
    if (is.na(permutations)) {
        # No permutation field: criteria start two lines earlier.
        permutations <- 0
        next.line <- 9
    }
    # Remaining non-blank lines are individual criteria.
    criteria <- Criteria()
    for (line in next.line:length(data)) {
        if (nchar(data[line]) == 0) break
        criteria[line - next.line + 1] <- as.Criteria(data[line])
    }
    list(criteria=criteria, name=name, prefilter=prefilter, wfccmfunction=wfccmfunction, sign=sign, permutations=permutations)
}
# Tests
# TRUE if `x` is (or extends) the S4 class "CriteriaManager".
is.CriteriaManager <- function(x) {
    is(x, "CriteriaManager")
}
# Coercion to character: same field order as write.CriteriaManager(), joined
# with newlines.
# Fix: the original coercion body referenced an undefined variable `x`; the
# setAs() method argument is `from`, so every coercion attempt failed with
# "object 'x' not found".
setAs(from="CriteriaManager", to="character",
    function(from) {
        paste(from@name,
              from@prefilter,
              from@wfccmfunction,
              paste(from@sign, collapse=" "),
              from@permutations,
              paste(from@criteria, collapse="\n"),
              sep="\n")
    }
)
# as.character() delegates to the coercion above.
setMethod("as.character",
    signature( x="CriteriaManager"),
    function(x) as(x, "character")
)
# Show: pretty-print name, pre-filter, WFCCM function, signs, permutation
# count and the criteria list.
# Fix: the original body referenced an undefined variable `from`; the show()
# method argument is `object`, so printing failed with
# "object 'from' not found".
setMethod("show",
    signature( object="CriteriaManager"),
    function(object) {
        cat(paste("", paste("Criteria", object@name), "",
                  paste("Pre-filter:", object@prefilter), "",
                  paste("Function:", object@wfccmfunction), "",
                  paste("Sign:", paste(object@sign, collapse=" ")), "",
                  paste("Distance Permutations:", object@permutations), "",
                  "Criteria:",
                  as(object@criteria, "character"),
                  "", "",
                  sep="\n"))
    }
)
# Names: the manager's name slot doubles as its names() value.
setMethod("names",
    signature( x="CriteriaManager"),
    function(x) x@name
)
# names(x) <- value replaces the name slot (validity enforces length 1).
setReplaceMethod("names",
    signature( x="CriteriaManager",
               value="character"),
    function(x, value) {
        x@name <- value
        x
    }
)
# Length and [[ access are deliberately disabled for this virtual class:
# every method below stops with "operation not supported.".
setMethod("length",
    signature( x="CriteriaManager"),
    function(x) stop("operation not supported.")
)
setReplaceMethod("length",
    signature( x="CriteriaManager"),
    function(x, value) stop("operation not supported.")
)
# Get CriteriaSet X (1-based) — placeholder; extraction is not supported.
setMethod("[[",
    signature( x="CriteriaManager"),
    function(x, i, j) stop("operation not supported.")
)
|
6d38712a542681f208698a466dac295733ecc728
|
3561c3366a49b37383987a706fd6efef9472a061
|
/T7 - Local Regression&GAM/spam_gam.R
|
e762b7fd26dbacbfa47980fc1217495ac7d4e47a
|
[] |
no_license
|
zhuwzh/MATH4432-Tutorial
|
5a4ae2850f8e1e269baf2a24964fad353407b7b5
|
5637e49189ecc6b8e344befe5c02a3aa24fe3774
|
refs/heads/master
| 2018-09-06T18:55:48.572867
| 2018-06-04T17:15:25
| 2018-06-04T17:15:25
| 118,726,708
| 11
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,561
|
r
|
spam_gam.R
|
# spam_gam.R — two classifiers on the spam data:
#  (1) a binomial GAM with a smoothing spline per (log-transformed) predictor,
#  (2) a cost-complexity-pruned classification tree on the raw predictors.
library(ElemStatLearn)
library(gam)
library(tree)  # fix: tree(), cv.tree() and prune.misclass() below need this
data(spam)

# Log-transform the 57 numeric predictors; the +.1 offset avoids log(0).
spam_log <- spam
spam_log[, 1:57] <- log(spam_log[, 1:57] + .1)

# 2/3 training / 1/3 test split.
tr_idx <- sample(1:dim(spam)[1], round(2/3 * dim(spam)[1])) # 3067
ts_idx <- setdiff(1:dim(spam)[1], tr_idx) # 1534
spam_tr <- spam_log[tr_idx, ]
spam_ts <- spam_log[ts_idx, ]

# Build the GAM formula: cubic smoothing spline s(x, 3) for every predictor.
df <- 3 # 1 linear (without counting intercept) + 3 non-linear
f <- 'spam~'
for (i in 1:57) {
  f <- paste(f, 's(', names(spam)[i], ',', as.character(df), ')+', sep = '')
}
f <- substr(f, start = 1, stop = nchar(f) - 1) # drop the trailing '+'

gam_fit <- gam(as.formula(f), family = 'binomial', data = spam_tr)
summary(gam_fit)

# Test-set accuracy of the GAM at a 0.5 probability threshold.
gam_pred <- predict(gam_fit, newdata = spam_ts, type = 'response')
gam_acc <- sum((gam_pred > .5) == (spam_ts$spam == 'spam')) / dim(spam_ts)[1]
print(gam_acc)

## Pruning classification trees (raw, untransformed predictors).
spam_tr <- spam[tr_idx, ]
spam_ts <- spam[ts_idx, ]
tree_fit <- tree(spam ~ ., data = spam_tr, split = 'deviance')
summary(tree_fit) ## Residual mean deviance is minus log likelihood divided by N - size(T)
plot(tree_fit)
text(tree_fit, pretty = 0)

# Cross-validate the pruning sequence; pick the size minimizing CV deviance.
# Fix: which(dev == min(dev)) can return several tied sizes, making `best`
# a vector; which.min() always yields a single index.
cv_tree <- cv.tree(tree_fit, FUN = prune.misclass)
best_size <- cv_tree$size[which.min(cv_tree$dev)]

par(mfrow = c(1, 2))
plot(cv_tree$size, cv_tree$dev, type = "b") # number of terminal nodes
plot(cv_tree$k, cv_tree$dev, type = "b")    # \alpha in textbook

prune_tree <- prune.misclass(tree_fit, best = best_size)
plot(prune_tree)
text(prune_tree, pretty = 0)

# Test-set accuracy of the pruned tree.
tree_pred <- predict(prune_tree, newdata = spam_ts, type = 'class')
tree_acc <- sum(tree_pred == spam_ts$spam) / dim(spam_ts)[1]
print(tree_acc)
|
d8d0f57cd829862247b1cd9ce4e4fa8d521890ef
|
4bd57b8501d4326ecc06c1d1ea499935e1668d95
|
/MASH-dev/SeanWu/MBITES/man/Bionomics_vectorialCapacity.Rd
|
f46f293a6e7b4364d66d658a321996ea4ca00e1f
|
[] |
no_license
|
aucarter/MASH-Main
|
0a97eac24df1f7e6c4e01ceb4778088b2f00c194
|
d4ea6e89a9f00aa6327bed4762cba66298bb6027
|
refs/heads/master
| 2020-12-07T09:05:52.814249
| 2019-12-12T19:53:24
| 2019-12-12T19:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,810
|
rd
|
Bionomics_vectorialCapacity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MBITES-Bionomics.R
\name{Bionomics_vectorialCapacity}
\alias{Bionomics_vectorialCapacity}
\title{Bionomics: Compute Vectorial Capacity (Human-centric)}
\usage{
Bionomics_vectorialCapacity(mosquitos, humans, EIP, spatial = FALSE)
}
\arguments{
\item{mosquitos}{a data.frame of parsed JSON mosquito output}
\item{humans}{a data.frame of parsed JSON human output}
\item{EIP}{the length of EIP}
\item{spatial}{compute spatial dispersion of bites or not}
}
\value{
a list where each element corresponds to a human host.
Each host has \code{VC}, which is the total number of secondary bites arising from him or her, and
\code{spatialVC} which is a list of origin/destination pairs tracking dispersion of each initial bite.
}
\description{
Takes in JSON output parsed into a data.frame object from
an MBITES simulation run.
Computes vectorial capacity, as well as its spatial dispersion, from a human-centric (ego-centric)
algorithm, described as follows:
\enumerate{
\item For each mosquito iterate through all its bites:
}
\itemize{
\item If the bite had a successful blood meal (human to mosquito transmission only occurs during a blood meal)
find all pairs of bites separated by more than EIP days, where the other bites can be probing events or blood meal events.
\item Add these secondary bites to the initial bite's human host's individual vectorial capacity.
\item Optionally, record the sites where these secondary bites were dispersed to.
Mosquitoes that were still alive at the end of simulation are filtered out.
Please note that in order to reconstruct kernels for VC, the distance matrix between sites
must be preserved somewhere, as the mosquito only records the index of the site it visited, not the xy coordinates.
}
}
|
6e256fdc1405a77cb18aac54f3b32f5fbf10d848
|
a80c7ff6384c77d31f21b76377344d10c9b1efaa
|
/R/figures.R
|
23798ab99a27d4afae119a65d182e71ea2879f40
|
[
"MIT"
] |
permissive
|
smwindecker/vic.carbon
|
379f4ea47747dc9f1d3f0adf3c086989d74086ed
|
1b28c38ab9973169172ffcdb72817e9153567aac
|
refs/heads/master
| 2021-07-24T13:52:59.268245
| 2019-10-11T04:42:59
| 2019-10-11T04:42:59
| 143,502,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,131
|
r
|
figures.R
|
# Build a 100-point depth grid (0..depth) for one core and attach GAM
# predictions with approximate 95% bounds (fit +/- 2 SE, link scale).
# Returns the grid columns (core, SITE_CODE, depth) plus response/lwr/upr.
get_pred_df <- function (depth, core, model) {
  grid <- data.frame(core = core,
                     SITE_CODE = substr(core, 1, 4),
                     depth = seq(0, depth, length.out = 100))
  fit <- predict.gam(model, grid, se.fit = TRUE)
  half_width <- 2 * fit$se.fit
  cbind(grid,
        response = fit$fit,
        lwr = fit$fit - half_width,
        upr = fit$fit + half_width)
}
# Plot observed carbon density vs. depth for core DERE2 with the fitted GAM
# curve and a shaded area under it (predictions exponentiated from the log
# link). Axes and labels are suppressed — intended as an inset/subplot.
method_subplot <- function (df, mod) {
  subset_dere2 <- df[df$core == 'DERE2', ]
  plot_max_depth <- max(subset_dere2$depth)
  pred_dere2 <- get_pred_df(plot_max_depth, 'DERE2', mod)
  par(mar = c(2, 2, 1, 1))
  plot(subset_dere2$depth, subset_dere2$mgOC_cm3, col = 'red', pch = 20,
       cex = 2, xaxt = 'n', yaxt = 'n', xlab = '', ylab = '',
       cex.lab = 2, bty = 'n', xaxs = 'i', yaxs = 'i')
  lines(pred_dere2$depth, exp(pred_dere2$response), col = 'red', lwd = 4)
  polygon(x = c(pred_dere2$depth, rev(pred_dere2$depth)),
          y = c(rep(0, nrow(pred_dere2)), rev(exp(pred_dere2$response))),
          col = adjustcolor('red', alpha.f = 0.40),
          border = NA)
}
# Plot a covariate raster masked to the dissolved CMA boundary.
#   covariate     - base name of the GeoTIFF under shapefiles/processed/
#   file_location - path prefix for the shapefile/raster directories
# Fix: the raster() call referenced `fil_location` (undefined — a typo for
# the `file_location` parameter), so any non-empty prefix failed with
# "object 'fil_location' not found".
covariate_plot <- function (covariate, file_location = '') {
  r <- aggregate(shapefile(paste0(file_location,
                                  'shapefiles/processed/cma.shp')),
                 dissolve = TRUE) %>%
    as("SpatialPolygonsDataFrame")
  cov <- raster(x = paste0(file_location,
                           'shapefiles/processed/',
                           covariate, '.tif')) %>%
    mask(r)
  plot(cov, axes = FALSE, box = FALSE, legend = FALSE)
}
# Quick 3x3 grid of boxplots of covariates split by a grouping column.
# NOTE(review): this reads the global `model_df` rather than taking a data
# argument — confirm that is intentional.
# NOTE(review): the last five panels all plot `stock_Mg_h` while their ylab
# says temp/water_obs/natveg_prop/catchment_area/aridity — this looks like a
# copy-paste slip; presumably each should plot the matching column. Verify.
bp <- function (variable) {
  par(mfrow = c(3,3))
  boxplot(model_df$ndvi ~ model_df[, variable], ylab = 'ndvi')
  boxplot(model_df$annprecip ~ model_df[, variable], ylab = 'prec')
  boxplot(model_df$mvbf ~ model_df[, variable], ylab = 'mvbf')
  boxplot(model_df$twi ~ model_df[, variable], ylab = 'twi')
  boxplot(model_df$stock_Mg_h ~ model_df[, variable], ylab = 'temp')
  boxplot(model_df$stock_Mg_h ~ model_df[, variable], ylab = 'water_obs')
  boxplot(model_df$stock_Mg_h ~ model_df[, variable], ylab = 'natveg_prop')
  boxplot(model_df$stock_Mg_h ~ model_df[, variable], ylab = 'catchment_area')
  boxplot(model_df$stock_Mg_h ~ model_df[, variable], ylab = 'aridity')
}
# Produce pair plot of traits: lower triangle shows correlation coefficients
# (sized by |r|, starred by cor.test p-value), upper triangle shows scatter
# with an OLS line and 90% confidence band.
pair_plot <- function (df) {
  # Lower panel: print r, scale text by |r|, add stars for significance.
  panel.cor <- function (x, y, digits = 2, prefix = "", cex.cor = 1.8, ...)
  {
    usr <- par("usr"); on.exit(par(usr))
    par(usr = c(0, 1, 0, 1))
    r <- stats::cor(x, y)
    txt <- format(c(r, 0.123456789), digits = digits)[1]
    txt <- paste0(prefix, txt)
    if(missing(cex.cor)) cex.cor <- 0.7/strwidth(txt)
    text(0.5, 0.5, txt, cex = cex.cor * abs(r))
    # 1-3 stars at p < .05 / .01 / .001 (later assignments override).
    p <- stats::cor.test(x, y)$p.value
    if (p < 0.05) sym <- 8
    if (p < 0.01) sym <- c(8,8)
    if (p <0.001) sym <- c(8,8,8)
    if (p < 0.05) legend('topright', legend = '', pch = sym, bty = 'n')
  }
  # Customize upper panel: scatter + OLS fit + 90% confidence interval.
  upper.panel<-function(x, y){
    points(x, y, xlab = '', ylab = '', cex = 2.2)
    mylm <- lm(y ~ x)
    abline(mylm, col = 'red', cex = 2.2)
    newx <- seq(min(x), max(x), length.out = 500)
    prd <- predict(mylm, newdata = data.frame(x = newx), interval = c('confidence'),
                   level = 0.90, type = 'response')
    lines(newx, prd[, 2], col = 'black', lty = 2, cex = 2.2)
    lines(newx, prd[, 3], col = 'black', lty = 2, cex = 2.2)
  }
  # Create the plot
  par(cex.axis = 2.7)
  pairs(df,
        lower.panel = panel.cor,
        upper.panel = upper.panel,
        cex.labels = 5)
}
# Panel (a)-style plot of the fitted depth smooth: predictions for a single
# reference core (DERE1) over 0..max(depth), exponentiated from the log link,
# with +/- 2 SE bounds and a shaded band. `subplot` is the corner label.
s_depth <- function (df, model, subplot) {
  # Reference prediction grid (input df only supplies the depth range).
  df <- data.frame(core = 'DERE1',
                   SITE_CODE = 'DERE',
                   depth = seq(0, max(df$depth), length.out = 100))
  pred <- predict.gam(model, df, terms = 's(depth)', se.fit = TRUE)
  pred_mean <- exp(pred$fit)
  pred_lwr <- exp(pred$fit - 2*pred$se.fit)
  pred_upr <- exp(pred$fit + 2*pred$se.fit)
  plot(df$depth, pred_mean, type = 'l',
       bty = 'L', xaxs = 'i', yaxs = 'i', ylab = '', xlab = '',
       lwd = 2, ylim = c(0, 170), xlim = c(0, 130),
       yaxt = 'n', xaxt = 'n')
  lines(df$depth, pred_lwr, lty = 4, lwd = 2)
  lines(df$depth, pred_upr, lty = 4, lwd = 2)
  polygon(x = c(df$depth, rev(df$depth)),
          y = c(pred_lwr, rev(pred_upr)),
          col = adjustcolor('black', alpha.f = 0.10),
          border = NA)
  axis(side = 1, at = c(0, 65, 130), cex.axis = 2.5,
       labels = c(0, 65, 130),
       padj = .4)
  axis(side = 2, at = c(0, 85, 170), las = 2, cex.axis = 2.5,
       labels = c(0, 85, 170))
  legend('topleft', legend = subplot, bty = 'n', cex = 2.5)
}
# Panel (b)-style plot comparing the two DERE cores: observed points plus
# fitted curves with +/- 2 SE bands (DERE1 pink, DERE2 red). Shares axis
# limits with s_depth(); only the x axis is drawn.
s_cores_depth <- function (df, mod, subplot) {
  dere <- df[df$SITE_CODE == 'DERE', ]
  dere1 <- df[df$core == 'DERE1', ]
  dere2 <- df[df$core == 'DERE2', ]
  # Both cores predicted over the site-wide depth range.
  pred_dere1 <- get_pred_df(max(dere$depth), 'DERE1', mod)
  pred_dere2 <- get_pred_df(max(dere$depth), 'DERE2', mod)
  plot(dere1$depth, dere1$mgOC_cm3, col = 'palevioletred', pch = 20,
       bty = 'L', xaxs = 'i', yaxs = 'i', ylab = '', xlab = '',
       ylim = c(0, 170), xlim = c(0, 130),
       yaxt = 'n', xaxt = 'n')
  points(dere2$depth, dere2$mgOC_cm3, col = 'red', pch = 20)
  lines(pred_dere1$depth, exp(pred_dere1$response), col = 'palevioletred', lwd = 2)
  lines(pred_dere1$depth, exp(pred_dere1$lwr), col = 'palevioletred', lty = 4, lwd = 2)
  lines(pred_dere1$depth, exp(pred_dere1$upr), col = 'palevioletred', lty = 4, lwd = 2)
  polygon(x = c(pred_dere1$depth, rev(pred_dere1$depth)),
          y = c(exp(pred_dere1$lwr), rev(exp(pred_dere1$upr))),
          col = adjustcolor('palevioletred', alpha.f = 0.10),
          border = NA)
  lines(pred_dere2$depth, exp(pred_dere2$response), col = 'red', lwd = 2)
  lines(pred_dere2$depth, exp(pred_dere2$lwr), col = 'red', lty = 4, lwd = 2)
  lines(pred_dere2$depth, exp(pred_dere2$upr), col = 'red', lty = 4, lwd = 2)
  polygon(x = c(pred_dere2$depth, rev(pred_dere2$depth)),
          y = c(exp(pred_dere2$lwr), rev(exp(pred_dere2$upr))),
          col = adjustcolor('red', alpha.f = 0.10),
          border = NA)
  axis(side = 1, at = c(0, 65, 130), cex.axis = 2.5,
       labels = c(0, 65, 130),
       padj = .4)
  legend('topleft', legend = subplot, bty = 'n', cex = 2.5)
}
# Panel (c)-style plot comparing cores from two different sites (DERE2 red,
# EWIN3 blue): observed points plus fitted curves with +/- 2 SE bands.
# Same axis limits as the other panels; only the x axis is drawn.
s_sites_depth <- function (df, mod, subplot) {
  dere <- df[df$SITE_CODE == 'DERE', ]
  dere2 <- df[df$core == 'DERE2', ]
  ewin <- df[df$SITE_CODE == 'EWIN', ]
  ewin3 <- df[df$core == 'EWIN3', ]
  # Each core predicted over its own site's depth range.
  pred_dere2 <- get_pred_df(max(dere$depth), 'DERE2', mod)
  pred_ewin3 <- get_pred_df(max(ewin$depth), 'EWIN3', mod)
  plot(dere2$depth, dere2$mgOC_cm3, col = 'red', pch = 20,
       bty = 'L', xaxs = 'i', yaxs = 'i', ylab = '', xlab = '',
       ylim = c(0, 170), xlim = c(0, 130),
       yaxt = 'n', xaxt = 'n')
  points(ewin3$depth, ewin3$mgOC_cm3, col = 'blue', pch = 20)
  lines(pred_dere2$depth, exp(pred_dere2$response), col = 'red', lwd = 2)
  lines(pred_dere2$depth, exp(pred_dere2$lwr), col = 'red', lty = 4, lwd = 2)
  lines(pred_dere2$depth, exp(pred_dere2$upr), col = 'red', lty = 4, lwd = 2)
  polygon(x = c(pred_dere2$depth, rev(pred_dere2$depth)),
          y = c(exp(pred_dere2$lwr), rev(exp(pred_dere2$upr))),
          col = adjustcolor('red', alpha.f = 0.10),
          border = NA)
  lines(pred_ewin3$depth, exp(pred_ewin3$response), col = 'blue', lwd = 2)
  lines(pred_ewin3$depth, exp(pred_ewin3$lwr), col = 'blue', lty = 4, lwd = 2)
  lines(pred_ewin3$depth, exp(pred_ewin3$upr), col = 'blue', lty = 4, lwd = 2)
  polygon(x = c(pred_ewin3$depth, rev(pred_ewin3$depth)),
          y = c(exp(pred_ewin3$lwr), rev(exp(pred_ewin3$upr))),
          col = adjustcolor('blue', alpha.f = 0.10),
          border = NA)
  axis(side = 1, at = c(0, 65, 130), cex.axis = 2.5,
       labels = c(0, 65, 130),
       padj = .4)
  legend('topleft', legend = subplot, bty = 'n', cex = 2.5)
}
# Compose the three depth-smooth panels (a)-(c) into a 1x3 figure with
# shared outer-margin axis labels.
spline_mod_plot <- function (df, mod) {
  # 1x3 layout; wide outer margins leave room for the shared labels.
  par(mfrow = c(1, 3), mar = c(4, 4, 1, 1), oma = c(5, 5, 2, 2))
  s_depth(df, mod, '(a)')
  s_cores_depth(df, mod, '(b)')
  s_sites_depth(df, mod, '(c)')
  # Shared x-axis label.
  mtext('Depth (cm)', side = 1, outer = TRUE, line = 0.6, cex = 2.2)
  # Shared y-axis label.
  mtext(expression(paste('Organic carbon density (mg cm'^'-3', ')')),
        side = 2, outer = TRUE, line = 0.7, cex = 2.2)
}
# Partial-effect plot for one covariate of the stock GAM: all other
# covariates are fixed at their means, the chosen one sweeps its observed
# range, and predictions (+/- 2 SE) are drawn with a rug of the data values.
# catchment_area is shown on the log scale and left on the link scale;
# everything else is exponentiated.
variable_plot <- function (df, model, variable, subplot, ...) {
  variables <- c('annprecip', 'ndvi', 'twi', 'mvbf', 'water_obs', 'catchment_area', 'natveg_prop')
  remaining_vars <- variables[!variables %in% variable]
  # Prediction frame: 100 rows, later overwritten column by column.
  sim_df <- data.frame(annprecip = 1:100,
                       ndvi = 1:100,
                       twi = 1:100,
                       mvbf = 1:100,
                       water_obs = 1:100,
                       catchment_area = 1:100,
                       natveg_prop = 1:100)
  # Hold every other covariate at its observed mean.
  for (i in remaining_vars) {
    sim_df[,i] <- rep(mean(df[,i]), 100)
  }
  # Sweep the focal covariate over its observed range.
  sim_df[,variable] <- seq(min(df[,variable]), max(df[,variable]), length.out = 100)
  pred <- predict.gam(model, sim_df, se.fit = TRUE)
  if (variable != 'catchment_area') {
    pred_mean <- exp(pred$fit)
    pred_lwr <- exp(pred$fit - 2*pred$se.fit)
    pred_upr <- exp(pred$fit + 2*pred$se.fit)
    x <- sim_df[,variable]
  }
  # catchment area on log scale (and left on the link scale)
  if (variable == 'catchment_area') {
    pred_mean <- pred$fit
    pred_lwr <- pred$fit - 2*pred$se.fit
    pred_upr <- pred$fit + 2*pred$se.fit
    x <- log(sim_df[,variable])
  }
  # Choose axis limits and tick-label formatting by the x range
  # (later blocks override earlier ones for small ranges).
  if (max(x) < 10) {
    xhigh <- mceiling(max(x), .1)
    digits <- "%.1f"
    xlow <- 0
  }
  if (max(x) < 1) {
    xhigh <- mceiling(max(x), .01)
    digits <- "%.2f"
    xlow <- 0
  }
  if (max(x) > 2) {
    xhigh <- mceiling(max(x), 1)
    digits <- "%.0f"
    xlow <- mceiling(min(x), 1)
  }
  yhigh <- mceiling(max(pred_upr) + .1*max(pred_upr), 1)
  plot(x, pred_mean, type = 'l',
       bty = 'L', xaxs = 'i', yaxs = 'i', ylab = '', lwd = 2,
       ylim = c(0, yhigh), xlim = c(min(x), xhigh),
       yaxt = 'n', xaxt = 'n', cex.lab = 2.5, ...)
  lines(x, pred_lwr, lty = 4, lwd = 2)
  lines(x, pred_upr, lty = 4, lwd = 2)
  polygon(x = c(x, rev(x)),
          y = c(pred_lwr, rev(pred_upr)),
          col = adjustcolor('black', alpha.f = 0.10),
          border = NA)
  axis(side = 1, at = c(xlow, xhigh), cex.axis = 2.5,
       labels = sprintf(digits, c(xlow, xhigh)),
       padj = .4)
  axis(side = 2, at = c(0, yhigh), las = 2, cex.axis = 2.5,
       labels = sprintf("%.0f", c(0, yhigh)))
  # Rug: a short vertical tick at each observed data value.
  y2 <- max(pred_upr)*.035
  if (variable != 'catchment_area') {
    for (i in df[,variable]) {
      lines(c(i, i), c(0, y2))
    }
  }
  if (variable == 'catchment_area') {
    for (i in log(df[,variable])) {
      lines(c(i, i), c(0, y2))
    }
  }
  legend('topleft', legend = subplot, bty = 'n', cex = 2.5)
}
# 3x3 figure of partial-effect panels (a)-(g), one variable_plot() per
# covariate of the stock model, with a shared y-axis label in the outer
# margin (two grid cells stay empty).
stock_mod_plot <- function (df, model) {
  par(mfrow = c(3, 3), mar = c(7, 4, 1, 1), oma = c(2, 7, 2, 2))
  variable_plot(df, model, 'annprecip', '(a)', xlab = 'Annual precipitation (mm)')
  variable_plot(df, model, 'ndvi', '(b)', xlab = 'NDVI')
  variable_plot(df, model, 'twi', '(c)', xlab = 'TWI')
  variable_plot(df, model, 'mvbf', '(d)', xlab = 'MVBF')
  variable_plot(df, model, 'water_obs', '(e)', xlab = 'Water Obs. from Space (%)')
  variable_plot(df, model, 'natveg_prop', '(f)', xlab = 'Prop. of native vegetation')
  variable_plot(df, model, 'catchment_area', '(g)', xlab = expression(paste('log catchment area (m'^'2', ')')))
  mtext(text = expression(paste('Soil carbon stock (Mg h'^'-1', ')')),
        side = 2,
        line = 2,
        outer = TRUE,
        cex = 3,
        adj = 0.55)
}
# Rounding helpers for axis limits.
# Round x down to the nearest multiple of `base`.
mfloor <- function (x, base) {
  floor(x / base) * base
}
# Round x up to the nearest multiple of `base`.
mceiling <- function (x, base) {
  ceiling(x / base) * base
}
# Round x to the nearest multiple of `base`.
mround <- function (x, base) {
  round(x / base) * base
}
# Assemble a LaTeX-ready coefficient table from a mgcv GAM summary:
# parametric terms (section A) stacked over smooth terms (section B), with
# tiny p-values rendered as "< 0.0001".
# NOTE(review): the `mod` argument is ignored — the body reads the global
# `stock_mod_nore`; presumably `sum.gam <- summary(mod)` was intended. Verify.
# NOTE(review): the assembled `tab2` is never printed; the final block
# instead builds an xtable from `loadings` and writes to `output_file`,
# neither of which is defined here — this looks pasted from a PCA-loadings
# writer and is broken as written. Verify intended output.
stock_mod_table <- function (mod) {
  sum.gam <- stock_mod_nore
  ptab <- as.data.frame(sum.gam$p.table)
  stab <- as.data.frame(sum.gam$s.table)
  colnames(ptab)[4] <- "p-value"
  colnames(ptab)[3] <- "t-value"
  ptab.cnames = colnames(ptab)
  stab.cnames = colnames(stab)
  stab.cnames[3] = "F-value"
  # Temporary uniform names so the two tables can be rbind-ed.
  colnames(ptab) = c("A", "B", "C", "D")
  if (ncol(stab) != 0) {
    colnames(stab) = colnames(ptab)
  }
  tab = rbind(ptab, stab)
  colnames(tab) = ptab.cnames
  tab = round(tab, 4)
  # Format every cell as a string; p-values below 1e-4 become "< 0.0001".
  m = data.frame(matrix(0, nrow(tab), ncol(tab)))
  for (i in 1:nrow(tab)) {
    for (j in 1:4) {
      if ((j == 4) & (tab[i, j] < 1e-04)) {
        m[i, j] = "< 0.0001"
      }
      else {
        m[i, j] = sprintf("%3.4f", tab[i, j])
      }
    }
  }
  colnames(m) = colnames(tab)
  rownames(m) = rownames(tab)
  tab = m
  # Insert the two section header rows.
  tab2 = rbind(c(ptab.cnames), tab[1:nrow(ptab), ])
  if (nrow(stab) > 0) {
    tab2 = rbind(tab2, c(stab.cnames), tab[(nrow(ptab) +
                                              1):nrow(tab), ])
  }
  if (nrow(stab)) {
    rownames(tab2)[(nrow(ptab) + 2)] = "B. smooth terms"
  }
  rownames(tab2)[1] = "A. parametric coefficients"
  # Final cosmetics: pad exact zeros and integer-looking estimates.
  for (i in 1:nrow(tab2)) {
    if (tab2[i, 4] == "0")
      tab2[i, 4] = "< 0.0001"
    if (length(grep("\\.", tab2[i, 2])) == 0)
      tab2[i, 2] = paste(tab2[i, 2], ".0000", sep = "")
  }
  # create xtable
  pca_loadings <- xtable::xtable(loadings)
  print(pca_loadings,
        include.rownames = FALSE,
        include.colnames = FALSE,
        only.contents = TRUE,
        comment = FALSE,
        hline.after = NULL,
        file = output_file)
}
# Write a LaTeX table of a (summary of a) GAM: parametric coefficients
# (section A) over smooth terms (section B), printed via xtable to
# `output_file`. `pnames`/`snames` optionally relabel rows; `ptab`/`stab`
# optionally supply pre-extracted coefficient tables.
gamtabs <- function (model, output_file, caption = " ", label = "tab.gam", pnames = NA,
                     snames = NA, ptab = NA, stab = NA, ...)
{
  if (!requireNamespace("xtable", quietly = TRUE)) {
    stop("Package 'xtable' needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # Accept either a fitted model or an already-computed summary.
  sum.gam <- model
  if (!inherits(model, "summary.gam")) {
    sum.gam <- summary(model)
  }
  if (is.na(ptab[1])) {
    ptab = as.data.frame(sum.gam$p.table)
  }
  if (is.na(stab[1])) {
    stab = as.data.frame(sum.gam$s.table)
  }
  if (!is.na(pnames[1])) {
    rownames(ptab) = pnames
  }
  if (!is.na(snames[1])) {
    rownames(stab) = snames
  }
  colnames(ptab)[4] = "p-value"
  colnames(ptab)[3] = "t-value"
  ptab.cnames = colnames(ptab)
  stab.cnames = colnames(stab)
  stab.cnames[3] = "F-value"
  # Temporary uniform names so the two tables can be rbind-ed.
  colnames(ptab) = c("A", "B", "C", "D")
  if (ncol(stab) != 0) {
    colnames(stab) = colnames(ptab)
  }
  tab = rbind(ptab, stab)
  colnames(tab) = ptab.cnames
  tab = round(tab, 4)
  # Format every cell as a string; p-values below 1e-4 become "< 0.0001".
  m = data.frame(matrix(0, nrow(tab), ncol(tab)))
  for (i in 1:nrow(tab)) {
    for (j in 1:4) {
      if ((j == 4) & (tab[i, j] < 1e-04)) {
        m[i, j] = "< 0.0001"
      }
      else {
        m[i, j] = sprintf("%3.4f", tab[i, j])
      }
    }
  }
  colnames(m) = colnames(tab)
  rownames(m) = rownames(tab)
  tab = m
  # Insert the two section header rows.
  tab2 = rbind(c(ptab.cnames), tab[1:nrow(ptab), ])
  if (nrow(stab) > 0) {
    tab2 = rbind(tab2, c(stab.cnames), tab[(nrow(ptab) +
                                              1):nrow(tab), ])
  }
  if (nrow(stab)) {
    rownames(tab2)[(nrow(ptab) + 2)] = "B. smooth terms"
  }
  rownames(tab2)[1] = "A. parametric coefficients"
  # Final cosmetics: pad exact zeros and integer-looking estimates.
  for (i in 1:nrow(tab2)) {
    if (tab2[i, 4] == "0")
      tab2[i, 4] = "< 0.0001"
    if (length(grep("\\.", tab2[i, 2])) == 0)
      tab2[i, 2] = paste(tab2[i, 2], ".0000", sep = "")
  }
  table_tab2 <- xtable::xtable(tab2)
  print(table_tab2,
        include.rownames = TRUE,
        include.colnames = FALSE,
        only.contents = TRUE,
        comment = FALSE,
        hline.after = NULL,
        file = output_file)
  # print(xtable::xtable(tab2, caption = caption, label = label,
  #                      align = "lrrrr"), include.colnames = FALSE, hline.after = c(0,
  #                                                                                 (nrow(ptab) + 1), nrow(tab2)), ...)
}
|
5501153ebdc3ec0b9f32d056c76db344b2a183d2
|
8c43d49923d146e6a842b11846c66ff83d41ae70
|
/script/제13장_1 연습문제.R
|
2122dfc1296851c2e0ef2e57138cddcb0c2eb658
|
[] |
no_license
|
Sinhoon/rpro
|
ac5a9a6cc3ba8475581f3c59507e4c049876990d
|
1ffee9a42f4590d4ef4b59a0a6fabda9f97b046b
|
refs/heads/master
| 2020-11-30T13:07:26.763311
| 2020-01-13T08:24:02
| 2020-01-13T08:24:02
| 230,402,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,281
|
r
|
제13장_1 연습문제.R
|
#################################
## <Chapter 13 exercises>
#################################

# 01. Sample the mpg data set 7:3 into training and test data, then perform
#     a regression-tree analysis step by step.
#     Predictors (x): displ + cyl + year
#     Response  (y): cty
library(rpart)
library(rpart.plot) # fix: rpart.plot() is called below but was never loaded
library(ggplot2)
data(mpg)
str(mpg)

# Step 1: sample training and test data
idx <- sample(nrow(mpg),nrow(mpg)*0.7,replace = FALSE)
test <- mpg[-idx,]
train <- mpg[idx,]

# Step 2: build the model from the training data
# fix: the original fit on the full mpg data (data = mpg), leaking the test
# rows into training; the model must be fit on `train` only.
model <- rpart(cty ~ displ + cyl + year , data = train)

# Step 3: predict on the test data and evaluate
table (predict(model,test),test$cty)
cor(predict(model,test),test$cty)

# Step 4: visualize the fitted tree
rpart.plot(model)

# Step 5: interpret the result

# 02. Using the weather data, perform a decision-tree classification:
#  1) build the classification model with rpart()
#  2) y: RainTomorrow; x: all variables except Date and RainToday
#  3) use the tree plot to identify the x variables most influential on y
#  4) classify as 'Yes Rain' when the rain probability is >= 50%,
#     otherwise 'No Rain'

# Step 1: load the data
library(rpart) # model fitting
library(rpart.plot) # classification-tree visualization
setwd("c:/Rwork/data")
weather = read.csv("weather.csv", header=TRUE)

# Step 2: sample the data (drop column 1 = Date and column 14 = RainToday)
weather.df <- weather[, c(-1,-14)]
idx <- sample(1:nrow(weather.df), nrow(weather.df)*0.7)
weather_train <- weather.df[idx, ]
weather_test <- weather.df[-idx, ]
head(weather.df)

# Step 3: build the classification model
model <- rpart(RainTomorrow~ . , data=weather_train)

# Step 4: visualize the model — inspect the important variables
rpart.plot(model)

# Step 5: convert predicted probabilities to labels
# (column 1 of pred is P(no rain), so >= 0.5 means "no rain")
pred <- predict(model,weather_test)
pred <- ifelse(pred[,1] >= 0.5 , "no rain" , "yes rain")

# Step 6: confusion matrix and classification accuracy
table(pred,weather_test$RainTomorrow)
|
e6ab6e31c74f24a35fb6a9202d0243efa4a4f47e
|
7d5c32d6e586cf4abba4aaf2174bdf6fef85e606
|
/R/plot.R
|
e1d9a47c4d20baf72ca0ac11850704dc8a19db9e
|
[] |
no_license
|
thibautjombart/apex
|
68330ec4a58a4a04c6a05c2b528307e88340a955
|
588cb4f0561fb4aa47d11f8ce5179d5c1427edfb
|
refs/heads/master
| 2020-12-15T04:19:41.707765
| 2020-12-11T17:12:32
| 2020-12-11T17:12:32
| 32,546,406
| 5
| 8
| null | 2017-09-19T13:27:28
| 2015-03-19T21:11:43
|
R
|
UTF-8
|
R
| false
| false
| 1,428
|
r
|
plot.R
|
######################
#### PLOT METHOD ####
######################
#' Display multidna objects
#'
#' Default printing for multidna objects
#'
#' @export
#'
#' @author Thibaut Jombart \email{t.jombart@@imperial.ac.uk}
#'
#' @param x a multidna object
#' @param y an integer vector indicating the genes to plot
#' @param rows a logical indicating if different genes should be displayed in separate rows
#' @param ask a logical indicating if the user should be prompted between graphs
#' @param ... arguments passed to \code{\link{image.DNAbin}}
#' @author Thibaut Jombart \email{t.jombart@@imperial.ac.uk}
#'
#' @import ape
#'
#' @aliases plot,multidna-method
#' @aliases plot.multidna
#'
#' @importFrom graphics image mtext par
#'
#' @docType methods
#'
#' @examples
#' ## simple conversion with nicely ordered output
#' data(woodmouse)
#' genes <- list(gene1=woodmouse[,1:500], gene2=woodmouse[,501:965])
#' x <- new("multidna", genes)
#' x
#' plot(x)
#'
setMethod ("plot", "multidna", function(x, y, rows=TRUE, ask=FALSE, ...){
    ## HANDLE ARGUMENTS ##
    n.genes <- length(x@dna)
    if(missing(y)) y <- 1:n.genes
    y <- as.integer(y)
    ## keep only in-range gene indices
    ## fix: the original used `y>0 | y<=n.genes`, which is TRUE for every
    ## integer (any value is either positive or <= n.genes), so out-of-range
    ## indices were never filtered out and caused subscript errors below
    y <- y[y>0 & y<=n.genes]

    ## MAKE PLOT ##
    opar <- par(no.readonly=TRUE)
    on.exit(par(opar))
    par(ask=ask)
    ## one row per *requested* gene
    ## fix: was n.genes, which left empty panels when plotting a subset
    if(rows) par(mfrow=c(length(y),1))
    for(i in y){
        image(x@dna[[i]], ...)
        mtext(side=1, text=names(x@dna)[i], line=3, cex=2)
    }
})
|
7cbed88428c539259dc9d5bbf9021710065f09dc
|
106009081664e68cab404c1dfdb68d1e0856ee0d
|
/man/cnc_pessoas_infos.Rd
|
22b7b88e15c065cec4f261f26ff50777edbdf586
|
[] |
no_license
|
aassumpcao/cnc
|
c340880504652394c7b30372b237cc4b5ea307ef
|
616ad5d90ee7605549e5039cadd6a5d936f6782a
|
refs/heads/master
| 2020-04-02T10:42:10.276959
| 2018-10-25T20:03:52
| 2018-10-25T20:03:52
| 154,350,719
| 1
| 0
| null | 2018-10-23T15:17:01
| 2018-10-23T15:17:00
| null |
UTF-8
|
R
| false
| true
| 718
|
rd
|
cnc_pessoas_infos.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnc.R
\name{cnc_pessoas_infos}
\alias{cnc_pessoas_infos}
\title{Baixa infos de pessoas}
\usage{
cnc_pessoas_infos(d_pessoas, path = "data-raw/pessoas_infos")
}
\arguments{
\item{d_pessoas}{retornados pela função \code{\link{parse_pessoas}}.}
\item{path}{caminho da pasta onde os arquivos HTML serão salvos. Se a pasta não existir, será criada.}
}
\value{
\code{data.frame} indicando "OK" se baixou corretamente e "arquivo existe" se o arquivo já existe.
}
\description{
Baixa HTMLs de pesquisas do tipo
\url{http://www.cnj.jus.br/improbidade_adm/visualizar_condenacao.php?seq_condenacao=1&rs=getDadosParte&rst=&rsrnd=0&rsargs[]=1}
}
|
f04a45cafaa62b7d9220d41988a40afa739205ec
|
c902d3bb683c8d473272192732eed2f05ae2d2cc
|
/man/multinewton.Rd
|
44cf8a71878136f8983d3c94d40df4d4bda81c21
|
[] |
no_license
|
dkahle/kumerical
|
43210f99d5e4e2d733d2c9533bde28986912cf84
|
bb85c1eb777845ecddd1baf780a371528b4afa73
|
refs/heads/master
| 2020-03-21T03:55:20.270695
| 2018-06-22T01:17:36
| 2018-06-22T01:17:36
| 138,081,169
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,856
|
rd
|
multinewton.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multinewton.R
\name{multinewton}
\alias{multinewton}
\alias{simple_multinewton}
\title{Multivariate Newton method}
\usage{
multinewton(f, df, x0, tol = 10 * .Machine$double.eps, maxit = 100L)
simple_multinewton(f, df, x0, tol = 10 * .Machine$double.eps, maxit = 100L)
}
\arguments{
\item{f}{function}
\item{df}{function; Jacobian matrix of f}
\item{x0}{initial value}
\item{tol}{tolerance, defaults to 10*.Machine$double.eps}
\item{maxit}{maximum number of iterations}
}
\value{
a list
}
\description{
\code{multinewton()} assumes that f is a vector-valued function of vector
argument, although both can be one dimensional. It is therefore a
generalization of \code{uninewton()}, but has slightly different output.
}
\examples{
library("kumerical")
f <- function(x) x^2 - 2
df <- function(x) 2*x
x0 <- 2
uninewton(f, df, x0)
simple_multinewton(f, df, x0)
multinewton(f, df, x0)
str(multinewton(f, df, x0))
# this is easier with mpoly:
library("mpoly")
(p <- mp("x^2 - 2"))
f <- as.function(p)
df <- as.function(gradient(p))
x0 <- 2
simple_multinewton(f, df, x0)
multinewton(f, df, x0)
jacobian <- function(ps, varorder = vars(ps)) {
list_of_mpolyLists <- lapply(ps, deriv, var = varorder)
list_of_gradient_functions <- lapply(
list_of_mpolyLists, as.function,
varorder = varorder, silent = TRUE
)
J <- function(.) lapply(list_of_gradient_functions, function(f) f(.))
function(v) do.call(rbind, J(v))
}
# intersection of the parabola y = x^2 and circle x^2 + y^2 = 1
# algebraically, this is
# y + y^2 = 1 => y^2 + y - 1 = 0 =>
plus_y <- (-1 + sqrt(1 - 4*(1)*(-1))) / (2*1) # = 0.618034 and
minus_y <- (-1 - sqrt(1 - 4*(1)*(-1))) / (2*1) # = -1.618034
# so that
# x = sqrt( plus_y) = +-0.7861514 and
# x = sqrt(minus_y) = +-1.27202i
# for solutions (+-0.7861514, 0.618034) and (+-1.27202i, -1.618034)
theoretical_solns <- list(
c( sqrt(plus_y), plus_y), c( sqrt(-minus_y)*1i, minus_y),
c(-sqrt(plus_y), plus_y), c(-sqrt(-minus_y)*1i, minus_y)
)
ps <- mp(c("y - x^2", "x^2 + y^2 - 1"))
f <- as.function(ps, varorder = c("x", "y"))
lapply(theoretical_solns, f)
df <- jacobian(ps, varorder = c("x", "y"))
x0 <- c(2, 2)
f(x0)
df(x0)
simple_multinewton(f, df, x0)
out <- multinewton(f, df, x0)
str(out, 1)
str(out$evals, 1)
# intersection of a plane, hyperboloid, and cone
# true solutions =
# c(-3/sqrt(2), 0, 3/sqrt(2))
# c( 3/sqrt(2), 0, -3/sqrt(2))
# corresponding to the nonlinear system
# x + y + z = 0
# x^2 - y^2 + z^2 = 9,
# x^2 + y^2 - z^2 = 0
ps <- mp(c("x + y + z", "x^2 - y^2 + z^2 - 9", "x^2 + y^2 - z^2"))
f <- as.function(ps, varorder = c("x", "y", "z"))
df <- jacobian(ps, varorder = c("x", "y", "z"))
x0 <- c(2, 2, 2)
f(x0)
df(x0)
out <- multinewton(f, df, x0)
str(out, 1)
c( 3/sqrt(2), 0, -3/sqrt(2))
out$root
}
|
e6adf633162e3197b67f9e70237f87304ee10665
|
0458dbf12d50a2d1ae7ba3c4634a7147e7e538c6
|
/pathwayanalysis.R
|
f3128fd6432936ba57959f0546911a8284455200
|
[] |
no_license
|
YulongNiu/smuSeqSongYing
|
d03ea0a7a90cc4936b25a21c96906f139ee3b6c3
|
e7c717ef0e69114e32d44c0c8367ce568e7d0bc9
|
refs/heads/master
| 2021-03-22T05:25:28.566453
| 2018-11-20T08:17:20
| 2018-11-20T08:17:20
| 84,801,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,159
|
r
|
pathwayanalysis.R
|
#####################KEGG####################################
# Build smuKEGG: a named list mapping each S. mutans ('smu') KEGG pathway to
# the gene IDs it contains, restricted to genes present in the DEG table.
setwd('/extDisk1/RESEARCH/smuSeqSongYing/kallisto_results')
library('KEGGAPI')
# Raw pathway membership; entries come back formatted as "smu:<geneID>"
smuPathRaw <- getKEGGPathGenes('smu')
# Strip the "smu:" organism prefix, keeping only the bare gene IDs
smuPathRaw <- sapply(smuPathRaw, function(x) {
eachID <- sapply(strsplit(x, split = ':', fixed = TRUE), '[[', 2)
return(eachID)
})
# NOTE(review): `res` is only created later in this file (read from
# degseq4h_whole.csv); this section assumes it already exists in the session.
smuIDs <- res[, 1]
# Drop pathway genes that are absent from the DEG results
smuKEGG <- lapply(smuPathRaw, function(x) {
return(x[x %in% smuIDs])
})
save(smuKEGG, file = 'smuKEGG.RData')
#############################################################
##########################GO#################################
# Filter the precomputed GO -> gene lists (smuGO) down to genes that appear
# in the DEG table, then overwrite the saved RData file.
setwd('/extDisk1/RESEARCH/smuSeqSongYing/kallisto_results')
# NOTE(review): after the setwd() above this relative path resolves to
# kallisto_results/kallisto_results/smuGO.RData - confirm which copy is meant.
load('kallisto_results/smuGO.RData')
# NOTE(review): the argument is spelled `stringsAsFactor` (missing final 's');
# it still reaches read.table via partial matching, but should be spelled out.
res <- read.csv('degseq4h_whole.csv', stringsAsFactor = FALSE)
smuIDs <- res[, 1]
# Keep only GO-annotated genes that appear in the DEG results
smuGO <- lapply(smuGO, function(x) {
return(x[x %in% smuIDs])
})
save(smuGO, file = 'kallisto_results/smuGO.RData')
#############################################################
##########################GO analysis###########################
# Gene-set enrichment (goseq) of the DEGs against the GO and KEGG categories
# prepared above; writes one CSV of enriched terms per ontology.
setwd('/extDisk1/RESEARCH/smuSeqSongYing/kallisto_results')
library('goseq')
library('GO.db')
library('foreach')
library('doMC')
library('KEGGAPI')
library('magrittr')
registerDoMC(8)
load('smuGO.RData')
load('smuKEGG.RData')
res <- read.csv('degseq4h_whole.csv', stringsAsFactor = FALSE)
## remove categories with 0 remaining genes
smuGO %<>% `[`(sapply(smuGO, length) > 0)
smuKEGG %<>% `[`(sapply(smuKEGG, length) > 0)
## DEG indicator: padj < 0.01 & |log2FC| > 1; NA p-values count as not-DE
degVecLogic <- res$padj < 0.01 & abs(res$log2FoldChange) > log2(2)
degVecLogic[is.na(degVecLogic)] <- FALSE
degVec <- as.integer(degVecLogic)
names(degVec) <- res$GeneID
# Probability weighting function correcting for gene-length bias (goseq)
pwf <- nullp(degVec, bias.data = res$Length)
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~GO~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Long-format (gene, category) table expected by goseq's gene2cat argument
GOMat <- foreach(i = 1:length(smuGO), .combine = rbind) %dopar% {
eachMat <- cbind(smuGO[[i]], names(smuGO)[i])
return(eachMat)
}
GOMat <- as.data.frame(GOMat)
GOTestWithCat <- goseq(pwf, gene2cat = GOMat, use_genes_without_cat = FALSE)
GOTestWithCat <- GOTestWithCat[!is.na(GOTestWithCat$ontology), ]
## add ablog2FC: mean |log2FC| over each term's genes
goSub <- smuGO[match(GOTestWithCat[, 1], names(smuGO))]
abLogFC <- sapply(goSub, function(x) {
eachFC <- res[match(x, res$GeneID), 'log2FoldChange']
return(mean(abs(eachFC), na.rm = TRUE))
})
GOTestWithCat$abLogFC <- abLogFC
## write one CSV per ontology (BP, MF, CC)
termCat <- c('BP', 'MF', 'CC')
for (i in termCat) {
write.csv(GOTestWithCat[GOTestWithCat$ontology == i, ],
paste0('degseq4h_FC2_', i, '_withcat.csv'))
}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~KEGG~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Clean pathway names: drop the last 29 characters of each annotation
## (presumably a fixed-width organism suffix - TODO confirm against KEGGAPI)
pathAnno <- getKEGGPathAnno('smu')
pathAnno[, 2] <- sapply(pathAnno[, 2], function(x){
eachLen <- nchar(x)
x <- substr(x, 1, eachLen - 29)
return(x)
})
# Long-format (gene, pathway) table for goseq
KEGGMat <- foreach(i = 1:length(smuKEGG), .combine = rbind) %dopar% {
eachMat <- cbind(smuKEGG[[i]], names(smuKEGG)[i])
return(eachMat)
}
KEGGMat <- as.data.frame(KEGGMat)
KEGGTestWithCat <- goseq(pwf, gene2cat = KEGGMat, use_genes_without_cat = FALSE)
KEGGTestWithCat$term <- pathAnno[match(KEGGTestWithCat[, 'category'], pathAnno[, 1]), 2]
# Keep only the part of the name before the first " - " separator
KEGGTestWithCat$term %<>%
strsplit(., split = ' - ', fixed = TRUE) %>%
sapply(., `[`, 1)
KEGGTestWithCat$ontology <- 'KEGG'
# Mean |log2FC| per pathway (same computation as the GO branch above)
goSub <- smuKEGG[match(KEGGTestWithCat[, 1], names(smuKEGG))]
abLogFC <- sapply(goSub, function(x) {
eachFC <- res[match(x, res$GeneID), 'log2FoldChange']
return(mean(abs(eachFC), na.rm = TRUE))
})
KEGGTestWithCat$abLogFC <- abLogFC
write.csv(KEGGTestWithCat, file = 'degseq4h_FC2_KEGG_withcat.csv')
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
################################################################
########################geneset plot###########################
# Bubble plot of enriched terms (p < 0.05) for every *_withcat.csv produced
# above: x = mean |log2FC|, y = term, size = #DE genes, colour = -log10(p).
setwd('/extDisk1/RESEARCH/smuSeqSongYing/kallisto_results')
library('ggplot2')
library('RColorBrewer')
library('latex2exp')
library('magrittr')
fname <- dir(pattern = 'withcat')
# File basename without extension, used for the output PDF name
fbasename <- fname %>%
strsplit(., split = '.', fixed = TRUE) %>%
sapply(., `[`, 1)
for (i in seq_along(fname)) {
##for (i in 1:2) {
pMat <- read.csv(fname[i], row.names = 1, stringsAsFactor = FALSE)
## select plotting columns; p-values become -log10(p)
plotpMat <- pMat[, c('term', 'numDEInCat', 'over_represented_pvalue', 'abLogFC')]
plotpMat[, 3] <- -log10(plotpMat[, 3])
colnames(plotpMat) <- c('Name', 'Size', 'logpvalue', 'ablogFC')
colorPal <- colorRampPalette(rev(c('red', 'yellow', 'cyan', 'blue')), bias=1)(10)
# Plot only terms with p < 0.05 (i.e. -log10(p) >= -log10(0.05)).
# NOTE(review): the ggplot object is neither printed nor assigned; ggsave()
# below falls back to last_plot() - safer to assign it and pass plot= explicitly.
ggplot(plotpMat[plotpMat[, 'logpvalue'] >= -log10(0.05), ], aes(x = ablogFC, y = Name)) +
geom_point(aes(size = Size, colour = logpvalue)) +
scale_size_continuous(name = 'Number of significant genes', range = c(3,8)) +
scale_colour_gradientn(name = '-log10(P-value)', limits=c(0, max(plotpMat[, 3])), colours = colorPal) +
ylab('') +
xlab(TeX('Average |$\\log_{2}$FC|')) +
theme(legend.position = 'bottom',
axis.text.x = element_text(size = 12),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(size = 13, face = 'bold'))
ggsave(paste0(fbasename[i], '.pdf'), width = 13)
}
###############################################################
|
b4486e00a0709b5b26c3719132b81c05997b0a8d
|
40891dd2f3d0c050514050a4766d86a36a5aa0f9
|
/frankfunk.R
|
d083200bee23bd2c4149b557abcd318de44e46cf
|
[] |
no_license
|
frankpopham/frankfunk
|
ac30760430f3d0bab16414e2dfdd057162564be7
|
ee3efdb8d92beff466f62a9985d936e3f49818bf
|
refs/heads/master
| 2023-05-25T09:04:49.217325
| 2021-06-10T08:33:48
| 2021-06-10T08:33:48
| 375,033,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,394
|
r
|
frankfunk.R
|
### statr_tabulate - replicates Stata's one-way tabulate (requires tidyverse)
#
# Returns one row per level of `var` with Freq., Total, Percent and the
# cumulative percentage (Cum.), mirroring Stata's `tabulate` output.
# `miss = TRUE` keeps rows where `var` is NA (they form their own group);
# `miss = FALSE` drops them before tabulating.
#
# Fix: the original routed the missing-value switch through purrr::when()
# (superseded) and compared the *logical* `miss` against the *strings*
# "TRUE"/"FALSE", which relied on implicit coercion and errored for any
# other input; a plain if() is equivalent for TRUE/FALSE and robust.
statr_tabulate <- function(data, var, miss = TRUE) {
  if (!isTRUE(miss)) {
    data <- filter(data, !is.na({{var}}))
  }
  data %>%
    group_by({{var}}, .add = TRUE) %>%
    summarise(Freq. = n(), .groups = "drop_last") %>%
    mutate(Total = sum(Freq.)) %>%
    mutate(Percent = (Freq. / Total) * 100) %>%
    mutate(Cum. = cumsum(Percent))
}
#devtools::source_url("https://raw.githubusercontent.com/frankpopham/frankfunk/master/frankfunk.R")
#' Pare down (summarise) a data frame
#' @description Summarises a data frame (pares down). A count is given for each category of a
#' categorical variables (character, factor, logical) while for numeric variables the
#' mean is returned. Also for categorical variables percentage is given. Counts are given for
#' missing category in categorical variables but missing are not included in calculation of
#' percentage. Weights are allowed.
#' @param .data A data frame
#' @param .wt A weight variable in the data frame
#' @param ... grouping variable in the data frame
#'
#' @return A tibble with four columns 1) variable name, 2) Variable value (category) or
#' (mean) for numeric variables, 3) Number of cases (n) - for numeric variables this is number
#' of non-missing cases. 4) Percentage of non missing cases for each category or the mean.
#' @export
#' @importFrom magrittr %>%
#' @importFrom rlang .data
#' @examples
#' pare(iris)
#' iris_t <- iris
#' levels(iris_t$Species) <- c(NA, "versicolor", "virginica")
#' pare(iris_t)
# TODO(review): reconsider the .wt weight argument - possibly remove it?
pare <- function(.data , .wt=1) {
# --- categorical columns: weighted counts and percentages ----------------
# s1: the non-numeric (character/factor/logical) columns
s1 <- dplyr::select(.data, !where(is.numeric))
if(ncol(s1)==0) {df1 <- NULL}
else {
s1 <- names(s1)
# s2: the categorical columns plus the weight column .wt
s2 <- .data %>%
dplyr::mutate(.wt={{.wt}}) %>%
dplyr::select(c(tidyselect::all_of(s1), .wt))
# One weighted count() per categorical variable, stacked long.  NA
# categories keep their count (n) but get NA percentage, and are excluded
# from the percentage denominator via nsum.
df1 <- purrr::map_dfr(s1, ~dplyr::count(s2, .data[[.x]], wt=.wt) %>%
tidyr::pivot_longer(-.data$n, names_to="variable") %>%
dplyr::mutate(dplyr::across(c(.data$variable, .data$value),
as.character))) %>%
dplyr::group_by(.data$variable) %>%
dplyr::mutate(nsum=ifelse(is.na(.data$value), NA, .data$n)) %>%
dplyr::mutate(pc=ifelse(is.na(.data$value), NA,
(.data$n/ sum(.data$nsum, na.rm=TRUE))*100)) %>%
dplyr::ungroup()
}
# --- numeric columns: weighted non-missing n and weighted mean -----------
df2 <-.data %>%
dplyr::mutate(.wt={{.wt}}) %>%
dplyr::select(c(where(is.numeric), .wt))
# n_<col>: weighted count of non-missing values per numeric column
df21 <- df2 %>%
dplyr::summarise(dplyr::across(-.wt, ~sum(as.numeric(!is.na(.x))*.wt),
.names = "n_{.col}"))
# pc_<col>: weighted mean (reported in the "% or mean" column)
df22 <- df2 %>%
dplyr::summarise(dplyr::across(-.wt, ~weighted.mean(.x, .wt, na.rm=TRUE),
.names = "pc_{.col}"))
# Reshape to one row per numeric variable, flagged with value = "(mean)".
# NOTE(review): names_sep="_" with two names_to targets will misbehave for
# numeric columns whose own names contain "_" - verify with such data.
df2 <- dplyr::bind_cols(df21, df22) %>%
tidyr::pivot_longer(dplyr::everything(),
names_to=c(".value", "variable"),
names_sep="_") %>%
dplyr::mutate(value = "(mean)") %>%
dplyr::mutate(dplyr::across(c(.data$variable, .data$value), as.character))
# Categorical rows first, then the numeric summaries
dplyr::bind_rows(df1, df2) %>%
dplyr::select(.data$variable, .data$value, .data$n, "% or mean"=.data$pc)
}
|
dbe0bbdee087a00890f9b9fee9441dcc481067e1
|
d560e276190e401e341f522f5fbc276cd1a98dc0
|
/man/metric.select.UI.Rd
|
38289c2fe66cd3ee9fb4a7fabef90a65ece3bf4e
|
[
"MIT"
] |
permissive
|
p-schaefer/BenthicAnalysistesting
|
288a9093cb50abc512cea3b74278e8d0a8eae4e3
|
85a38ec40637ab1d6a0a77f0fd88825b973e6586
|
refs/heads/master
| 2023-05-26T00:31:29.222171
| 2023-05-01T13:59:42
| 2023-05-01T13:59:42
| 85,619,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,554
|
rd
|
metric.select.UI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric.select.UI.R
\name{metric.select.UI}
\alias{metric.select.UI}
\title{Indicator metric selection}
\usage{
metric.select.UI(Test, Reference, outlier.rem = T, rank = F,
outbound = 0.1)
}
\arguments{
\item{Test}{Vector containing metric scores at the test site. Should be a single row from \code{benth.met} or \code{add.met}.}
\item{Reference}{Data frame of metric scores at the reference sites. Should be output from \code{benth.met} or \code{add.met}.}
\item{outbound}{Used if outlier.rem=T A numeric value between 0 and 1 indicating the outlier boundary for defining values as final outliers (default to 0.1)}
\item{rank}{Use rank differences in metric selection}
}
\value{
$Best.Metrics - Vector containing the final selected indicator metrics
$Indicative.Metrics - Vector containing all metrics that indicate impairment
$raw.data - Data frame containing only selected best metrics
$ref.sites - Vector containing input reference site names
$outlier.ref.sites - Vector containing sites removed as potential outliers
}
\description{
Determines which indicator metrics best differentiate the test site from its nearest-neighbour reference sites. Metrics that indicate impairment will be
used preferentially.
}
\details{
A interative selection algorithm is used as follows:
1. The first metric selected for the final set is the one which displayes the greatest distance from the Reference condition mean
2. Metrics with a pearson correlation greater than 0.7 to (any of) the selected metric(s) are excluded from further steps
3. The ranked departure of remaining metrics is divided by the (maximum) correlation with the metric(s) previously included in the analysis
4. The metric with the greatest score is selected for inclusion in the final set
5. Return to step 2 until the number of selected metrics is equal to the greater of 4 or 1/5 the number of Reference sites
If no metrics or too few metrics demonstrate impairment, the following metrics are included until the maximum is reached:
Richness, Percent Dominance, HBI, Percent EPT.
}
\examples{
data(YKBioData,envir = environment())
bio.data<-benth.met(YKBioData,2,2)$Summary.Metrics
nn.refsites<- c("075-T-1", "019-T-1","003-T-1","076-T-1","071-T-1","022-T-1","074-T-1",
"002-T-1","004-T-1","073-T-1","186-T-1","062-T-1","005-T-1","025-T-1",
"187-T-1","023-T-1","193-T-1","192-T-1","196-T-1","194-T-1")
metric.select(bio.data[201,],bio.data[nn.refsites,])
}
\keyword{Benthic}
\keyword{Metrics}
|
54a10d002a0613433f94f9f91195fcc86ff86e0a
|
e7adc9d2497308ad762fb9e78c192e68d16a1a88
|
/man/SocCMap.Rd
|
bd824119058127e005a962717e27355252c90bdd
|
[] |
no_license
|
cran/cogmapr
|
dbf58edf7afb43f0abf2d8a3e18c93ab0f587e9a
|
abae44a6c2095302ca7204ec8f9e3d1d007db4bf
|
refs/heads/master
| 2022-01-09T10:21:02.221757
| 2022-01-04T14:40:07
| 2022-01-04T14:40:07
| 180,386,150
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,820
|
rd
|
SocCMap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cogmapr.R
\name{SocCMap}
\alias{SocCMap}
\title{Social Cognitive Mapping}
\usage{
SocCMap(
data.edges.soc,
project,
label = "num",
minlen = 1,
fontsize = 16,
shape = "box"
)
}
\arguments{
\item{data.edges.soc}{A data.frame as produced by the EdgSocCMap function}
\item{project}{A QDA project, a list as generated by the ProjectCMap function.}
\item{label}{A character string that defines the text that will be print in the variables (vertex) of the cognitive maps. It can be "num", "name" or "numname" (which is of the form "NUM - Name"). The default is "num"}
\item{minlen}{A graphical parameter that defines a relative length between the variables of the cognitive maps. See help from RGraphViz package.}
\item{fontsize}{The fontsize of vertices (concepts), in r-base plot}
\item{shape}{The shape of the vertices (concepts), in r-base plot}
}
\value{
a 'SocCMap' object, a list containing various information that could be use for plotting an Individual Cognitive Map. The most important elements are :
\describe{
\item{"vertex"}{A list of information on Cognitive Map's variables (i.e. vertices)}
\item{"edg"}{A list of information about relationships}
\item{"graph"}{A graphNEL object}
\item{"eAttrs"}{A list of graphical attributes of edges}
\item{"nAttrs"}{A list of graphical attributes of nodes (vertices)}
\item{"gAttrs"}{A list of graphical attributes of the whole graph}
}
}
\description{
Formatting the data for plotting an Social Cognitive Map
}
\examples{
project_name <- "a_new_project"
main_path <- paste0(system.file("testdata", package = "cogmapr"), '/')
my.project <- ProjectCMap(main_path, project_name)
edg.scm <- EdgSocCMap(my.project, min.weight=6, weighted.icm=TRUE)
SocCMap(edg.scm, my.project)
}
|
7db892c2acdb03de7035624682e1874128502ed1
|
ae2e3d4df5fb6d2c431f47c186dc5cbf51beda47
|
/scripts/2_recovery_year_model.R
|
ef42135514d28eea9f7f9084d547f80ab8f82463
|
[] |
no_license
|
jpwrobinson/coral-bleach-recovery
|
865140fa9cdf7a46ce5c4a68d09852b5f452fa85
|
7dcb0deed5f9bd8ea3f77faa6a14c1971e21c1c7
|
refs/heads/master
| 2020-03-26T16:37:59.353771
| 2019-06-18T09:25:55
| 2019-06-18T09:25:55
| 145,114,409
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,381
|
r
|
2_recovery_year_model.R
|
#!/bin/env Rscript
# Fit a Bayesian linear model (rethinking::map2stan) relating the year of
# coral-reef recovery to scaled ecological/environmental predictors.
library(rethinking)
library(here)
setwd(here())
source('scripts/scaling_function.R')

## ------- ------- ------- ------- ------- ------- ------- ##
### Fit model to estimate recovery year  ###
## ------- ------- ------- ------- ------- ------- ------- ##

## load predictors (can be provided by request to the authors)
load(file='data/recovery_predictors_clean.Rdata') ## data frame is 'rates'

## change recovery year to real time (shift by 6; offset meaning defined
## upstream where recoveryyear is created - see data prep scripts)
rates$recoveryyear<-rates$recoveryyear+6

## scale exp. covariates to mean = 0; ID columns are excluded from scaling
scaled<-scaler(rates, ID = c('location', 'recoveryyear'))

### rec.year ~ predictors; linear (Gaussian) model
m <- map2stan(
	alist(
	## response distribution
    recoveryyear ~ dnorm( mu , sigma) ,
    ## model structure: intercept plus one slope per scaled covariate
    mu <- ratea + rateb*Herb_biomass +
    			rated*Depth +
    			ratec*Coral_Juv_dens +
    			ratee*Init_complex +
    			ratef*Init_totalcoral +
    			rateg*Wave_exposure_joules +
    			rateh*N_percent +
    			ratej*Manage,
    ## fixed priors for exp. covariates (weakly informative, mean 0)
    c(rateb, ratec, rated, ratee, ratef, rateg, rateh, ratej) ~ dnorm(0, 2),
    ## intercept prior centred on the mean observed recovery year
    c(ratea) ~ dnorm(17, 5),
	## error priors (half-Cauchy via positive support of sigma)
    c(sigma) ~ dcauchy(0 , 2 )
), data=scaled, warmup=1500, iter=7000, chains = 1)

rec.year.m<-m
save(rec.year.m, file='data/recovery_year_model.Rdata')
|
b572d98899d4598addfdc4754901d8e78a330255
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlefirebaserulesv1.auto/man/TestRulesetRequest.Rd
|
ce28b0dd79ff9b1936aa65c1f7392e8f6b00c531
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 585
|
rd
|
TestRulesetRequest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/firebaserules_objects.R
\name{TestRulesetRequest}
\alias{TestRulesetRequest}
\title{TestRulesetRequest Object}
\usage{
TestRulesetRequest(source = NULL)
}
\arguments{
\item{source}{`Source` to be checked for correctness}
}
\value{
TestRulesetRequest object
}
\description{
TestRulesetRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The request for FirebaseRulesService.TestRuleset.
}
\seealso{
Other TestRulesetRequest functions: \code{\link{projects.test}}
}
|
d79168fc677ed8510a8fb2009856363be1163d9f
|
374e89a23262a647aa2506c494d704024e2e9089
|
/ModelFitting/poiszeroinfl.R
|
4a192758bebe8babc1aa01e7defec80b289f0300
|
[] |
no_license
|
carliedario/Miscellaneous-R-Code
|
e70cb2f38c6c8a9973feda97db0eb908624257d3
|
4c0fa04adfbd97b3ca62a49ca69a39ff31c6f61f
|
refs/heads/master
| 2023-01-19T11:32:00.198051
| 2020-11-25T22:08:35
| 2020-11-25T22:08:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,533
|
r
|
poiszeroinfl.R
|
#' ---
#' title: "Zero-inflated Poisson Model"
#' author: "Michael Clark"
#' date: ""
#' ---
#'
#' Log likelihood function to estimate parameters for a Zero-inflated Poisson model. With examples
#' and comparison to pscl package output. Also includes approach based on Hilbe GLM text.
#' see also: https://github.com/m-clark/Miscellaneous-R-Code/blob/master/ModelFitting/NBzeroinfl.R
ZIP = function(y, X, par) {
  # Negative log-likelihood of a zero-inflated Poisson model.
  #
  # y   - vector of observed counts
  # X   - design matrix, used for BOTH the zero-inflation (logit) part and
  #       the count (Poisson) part
  # par - named parameter vector; names containing 'logit' are the logit
  #       coefficients, names containing 'pois' the Poisson coefficients
  #
  # Returns the negative log-likelihood (suitable for optim()).

  coef_zero  <- par[grep('logit', names(par))]
  coef_count <- par[grep('pois', names(par))]

  # Zero-inflation probability pi_i = logistic(X %*% beta_logit)
  p_zero <- plogis(X %*% coef_zero)

  # Poisson mean lambda_i = exp(X %*% beta_pois)
  lambda <- exp(X %*% coef_count)

  # y == 0 can arise from the structural-zero state or the Poisson state:
  #   log( pi + (1 - pi) * exp(-lambda) ), written to stay on the log scale
  ll_when_zero <- log(p_zero + exp(log(1 - p_zero) - lambda))

  # y > 0 must come from the Poisson state
  ll_when_pos <- log(1 - p_zero) + dpois(y, lambda = lambda, log = TRUE)

  zero_obs <- y == 0
  pos_obs  <- y > 0

  # Negative log-likelihood over the appropriate contribution for each case
  -(sum(ll_when_zero[zero_obs]) + sum(ll_when_pos[pos_obs]))
}
#' Get the data (fish counts example from the Stata press site)
library(haven)
library(pscl)
fish = read_dta("http://www.stata-press.com/data/r11/fish.dta")

#' Get starting values or simply do zeros
#' for this function, a named vector for the starting values
#' for zip: need 'logit', 'pois'
# A plain Poisson GLM is fitted only to obtain x/y matrices and coefficient
# names; the optimisation below starts from all-zero coefficients.
init.mod = glm(
  count ~ persons + livebait,
  data = fish,
  x = TRUE,
  y = TRUE,
  "poisson"
)

# starts = c(logit = coef(init.mod), pois = coef(init.mod))
starts  = c(rep(0, 3), rep(0, 3))
names(starts) = c(paste0('pois.', names(coef(init.mod))),
                  paste0('logit.', names(coef(init.mod))))

#' Estimate with optim function (minimises the ZIP negative log-likelihood)
optPois1 = optim(
  par = starts ,
  fn  = ZIP,
  X   = init.mod$x,
  y   = init.mod$y,
  method  = "BFGS",
  control = list(maxit = 5000, reltol = 1e-12),
  hessian = TRUE
)
# optPois1

#' Comparison
# Extract for clean display: coefficients, standard errors (from the inverse
# Hessian), Wald z statistics and two-sided p-values.
B  = optPois1$par
se = sqrt(diag(solve((optPois1$hessian))))
Z  = B/se
# NOTE(review): `lower` partially matches pnorm's `lower.tail` - spell it out.
p  = pnorm(abs(Z), lower = FALSE)*2

# pscl results (reference implementation to validate the hand-rolled fit)
zipoismod = zeroinfl(count ~ persons + livebait, data = fish, dist = "poisson")
summary(zipoismod)
round(data.frame(B, se, Z, p), 4)
|
002694db3d6780da8fd70f00711ad65d813ccfce
|
eaed00793361c447c234c00825dfc52b562d9260
|
/ComplexityEntropy.R
|
a660a6f7b06497781aefec0a81e28f29d332e230
|
[] |
no_license
|
OminiaVincit/SmartTabAno
|
633258895aaae60b2c34100119e461c87a2834c5
|
0d605cbb268ee115cd22c1026eff1aa273cabfbf
|
refs/heads/master
| 2021-01-20T12:37:31.979369
| 2014-07-05T17:59:43
| 2014-07-05T17:59:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,747
|
r
|
ComplexityEntropy.R
|
# Project: SmartTab Anonymization
# Author: Tran Quoc Hoan
# Start date: 2013-Nov-14
# File: ComplexityEntropy.R
# Do: get complexity : ApEntropy and SaEntropy
# Last edited on: 2014-January-01
library(ggplot2)
library(tseriesChaos)
library(scatterplot3d)
#######################################################################################
# function to get time lag
# (1) the first time lag of zero-crossing auto-correlation or first minimum correlation
# if value in (1) > lagMax then return lagMax
#######################################################################################
getTimeLag <- function(x, lagMax) {
  # Estimate the embedding time lag of series `x` as the smaller of
  #  (a) the lag at which the autocorrelation function (ACF) is minimal, and
  #  (b) the first zero-crossing lag of the ACF (only considered when the
  #      ACF actually goes negative within `lagMax`).
  # A constant series carries no lag information, so return 0.
  my.ts = ts(x)
  if (min(x) == max(x)) return (0)
  # acfCo[k] holds the ACF at lag k-1 (index 1 is lag 0, always 1)
  acfCo <- acf(my.ts, lag.max = lagMax, plot = F)$acf
  result <- which.min(acfCo) - 1
  if (acfCo[result + 1] < 0) {
    negative <- which(acfCo < 0)
    prev <- negative[1] - 1
    after <- negative[1]
    # The zero crossing lies between indices `prev` and `after`; pick the
    # side whose ACF is closer to zero.  Bug fix: convert the chosen *index*
    # to a *lag* by subtracting 1 - the original compared a raw index
    # against `result` (a lag), an off-by-one that biased the answer upward.
    if (abs(acfCo[prev]) < abs(acfCo[after])) lag2 <- prev - 1
    else lag2 <- after - 1
    result <- min(result, lag2)
  }
  return (result)
}
getTimeLagPart <- function(x) {
  # Time lag of each of five (slightly overlapping, boundary-sharing)
  # segments of `x`, each estimated with getTimeLag() and a maximum lag
  # of 500.  Returns the five lags as a numeric vector.
  n <- length(x)
  seg <- n %/% 5
  lo <- c(1, seg, 2 * seg, 3 * seg, 4 * seg)
  hi <- c(seg, 2 * seg, 3 * seg, 4 * seg, n)
  vapply(1:5, function(k) getTimeLag(x[lo[k]:hi[k]], 500), numeric(1))
}
####################################################################
# distance function using in appEn and samEn
####################################################################
distance <- function(x, len, lag, index1, index2) {
  # Chebyshev (max-norm) distance between the two delay-embedded vectors of
  # length `len` (delay `lag`) starting at index1 and index2 of series x.
  offs <- (0:(len - 1)) * lag
  max(abs(x[index1 + offs] - x[index2 + offs]))
}
###########################################################################
# Calculate Sample Entropy
###########################################################################
sampleEn <- function(x, Num, len, lag, tolerance) {
# Sample Entropy (SampEn) of x[1..Num]:
#   len       - template length m
#   lag       - delay between template points (lag == 0 is undefined -> 0)
#   tolerance - similarity radius r, compared against the Chebyshev
#               distance computed by distance()
# Self-matches (j == i) are excluded, as SampEn requires.
if (lag == 0) return (0)
a <- NULL
b <- NULL
for (i in 1:(Num-len*lag)) {
b[i] <- 0
a[i] <- 0
# b counts matches of length-len templates, a of length-(len+1) templates
for (j in 1:(Num-len*lag)) {
if ( (j != i) && (distance(x, len, lag, i, j) <= tolerance) ) b[i] <- b[i]+1
if ( (j != i) && (distance(x, len+1, lag, i, j) <= tolerance) ) a[i] <- a[i]+1
# Notice b[i] >= a[i]
}
# Normalise each count; NOTE(review): the denominator uses (len-1) while
# the loops run to Num-len*lag - confirm this asymmetry is intentional.
b[i] <- b[i]/(Num-(len-1)*lag)
a[i] <- a[i]/(Num-(len-1)*lag)
}
A <- sum(a[1:(Num-len*lag)])/(Num-len*lag)
B <- sum(b[1:(Num-len*lag)])/(Num-len*lag)
# SampEn = -log2(A/B); 1000 is a sentinel "undefined" when no matches exist
if (A*B == 0) sampEnResult <- 1000
else sampEnResult <- -log(A/B)/log(2)
return (sampEnResult)
}
##################################################################################################
# Calculate Approximate Entropy (including self-matched)
##################################################################################################
appEn <- function(x, Num, len, lag, tolerance) {
# Approximate Entropy (ApEn) of x[1..Num] with template length `len`,
# delay `lag` and similarity radius `tolerance` (result in log base 2).
# Self-matches ARE counted here (no j != i guard), unlike sampleEn() above,
# so b[i] >= 1 and the ratio a[i]/b[i] is always defined when a[i] > 0.
if (lag == 0) return (0)
a <- NULL
b <- NULL
c <- NULL
for (i in 1:(Num-len*lag)) {
b[i] <- 0
a[i] <- 0
# b: matches of length-len templates; a: matches of length-(len+1) templates
for (j in 1:(Num-len*lag)) {
if ( distance(x, len, lag, i, j) <= tolerance ) b[i] <- b[i]+1
if ( distance(x, len+1, lag, i, j) <= tolerance ) a[i] <- a[i]+1
}
# Per-template conditional "surprise" in bits
c[i] <- -log(a[i]/b[i])/log(2)
}
# ApEn is the mean per-template surprise
appEnResult <- sum(c[1:(Num-len*lag)])/(Num-len*lag)
return (appEnResult)
}
##################################################################################################
# Calculate Approximate Entropy (including self-matched) & Sample Entropy (excluding self-matched)
##################################################################################################
AppSamEn <- function(x, Num, len, lag, tolerance) {
# Computes both entropies in one O(n^2) pass and returns
# c(appEnResult, sampEnResult).
# Both counts exclude self-matches (j != i); the ApEn part therefore adds 1
# to numerator and denominator of the ratio (avoids log(0)), which differs
# slightly from the self-match-counting appEn() above.
if (lag == 0) return (c(0,0))
a <- NULL
b <- NULL
c <- NULL
for (i in 1:(Num-len*lag)) {
b[i] <- 0
a[i] <- 0
# b: matches of length-len templates; a: matches of length-(len+1) templates
for (j in 1:(Num-len*lag)) {
if ( (j != i) && (distance(x, len, lag, i, j) <= tolerance) ) b[i] <- b[i]+1
if ( (j != i) && (distance(x, len+1, lag, i, j) <= tolerance) ) a[i] <- a[i]+1
}
# ApEn contribution for template i, in bits, with +1 smoothing
c[i] <- -log((a[i]+1)/(b[i]+1))/log(2)
# Normalised match probabilities for the SampEn part
b[i] <- b[i]/(Num-(len-1)*lag)
a[i] <- a[i]/(Num-(len-1)*lag)
}
A <- sum(a[1:(Num-len*lag)])/(Num-len*lag)
B <- sum(b[1:(Num-len*lag)])/(Num-len*lag)
# 1000 is a sentinel "undefined" value when no matches exist
if (A*B==0) sampEnResult <- 1000 # NA value
else sampEnResult <- -log(A/B)/log(2)
appEnResult <- sum(c[1:(Num-len*lag)])/(Num-len*lag)
return (c(appEnResult,sampEnResult))
}
#################################################################################################
# Fast algorithm of ApEn & SaEn
#################################################################################################
repmat <- function(X, m, n) {
  # R analogue of MATLAB's repmat() for a vector input: X is first shaped
  # into a length(X) x 1 column, then tiled m times vertically and n times
  # horizontally (matrix() recycles the column-major data to fill the tile).
  dim(X) <- c(length(X), 1)
  matrix(X, nrow(X) * m, ncol(X) * n)
}
ApEnSaFast <- function(data, N, dim, lag, tolerance) {
# Vectorised computation of both Approximate Entropy (self-matches counted)
# and Sample Entropy (self-matches excluded) for template length `dim`,
# delay `lag` and similarity radius `tolerance`.  Returns c(apen, saen),
# both converted to log base 2.
if (lag == 0) return (c(0,0))
apResult <- c(0,0)
saResult <- c(0,0)
# Pass j = 1 uses templates of length dim, pass j = 2 of length dim + 1
for (j in 1:2) {
m <- dim+j-1
phi <- c(1:(N-(m-1)*lag))
teta <- c(1:(N-dim*lag))
dataMat <- mat.or.vec(m,(N-(m-1)*lag))
# setting up data matrix: column i holds the delay-embedded template at i
for (i in 1:m) {
dataMat[i,] <- data[(i-1)*lag + (1:(N-(m-1)*lag))]
}
# counting similar patterns using distance calculation (Chebyshev norm:
# a column matches when every coordinate difference is <= tolerance)
for (i in 1:(N-(m-1)*lag)) {
tempMat <- abs(dataMat - repmat(dataMat[,i],1,N-(m-1)*lag))
boolMat <- colSums(tempMat > tolerance)
phi[i] <- sum(boolMat<1)/(N-(m-1)*lag)
if (i <= N-dim*lag) teta[i] <- sum(boolMat<1)-1
}
# summing over the counts: phi feeds ApEn, teta (self-match removed) SampEn
apResult[j] <- sum(log(phi))/(N-(m-1)*lag)
saResult[j] <- sum(teta)
}
apen <- (apResult[1]-apResult[2])/log(2)
saen <- log(saResult[1]/saResult[2])/log(2)
return (c(apen,saen))
}
###########################################################################
# Euclid distance function
###########################################################################
sqrtDist <- function(x, len, lag, index1, index2) {
  # Squared Euclidean distance between the two delay-embedded vectors of
  # length `len` (delay `lag`) beginning at index1 and index2 of series x.
  # (Despite the name, no square root is taken.)
  a <- x[index1 + (0:(len - 1)) * lag]
  b <- x[index2 + (0:(len - 1)) * lag]
  sum((a - b)^2)
}
###########################################################################
# Find the index of nearest neighbor
###########################################################################
indexNN <- function(x, len, lag, index) {
  # Index of the nearest neighbour of point `index` among all delay-embedded
  # vectors of length `len` (delay `lag`), under the squared Euclidean
  # distance of sqrtDist().  Returns `index` itself when no other candidate
  # exists.
  #
  # Fixes: the best-distance accumulator now starts at Inf (the original
  # hard-coded 10000 silently returned the wrong neighbour for series whose
  # squared distances all exceed 10000), and seq_len() avoids a backwards
  # 1:0 loop when n - lag*len < 1.
  n <- length(x)
  best <- Inf
  minIndex <- index
  for (k in seq_len(n - lag * len)) {
    if (k != index) {
      d <- sqrtDist(x, len, lag, index, k)
      if (d < best) {
        best <- d
        minIndex <- k
      }
    }
  }
  return (minIndex)
}
###########################################################################
#
# Select parameter m (for calculating entropy)
# determines the length of the sequences
# Method: False Nearest Neighbor
#
###########################################################################
getFNNrate <- function(x, len, lag, tolerance) {
# Fraction of false nearest neighbours (FNN) of series `x` for embedding
# dimension `len` and delay `lag` - used to choose the template length m
# for the entropy functions above (see the banner comment).
N <- length(x)
numFNN <- 0
std <- sd(x) # NOTE(review): computed but never used below
for (i in 1:(N-lag*len)) {
j <- indexNN(x, len, lag, i) # nearest neighbour in the len-dim embedding
rd <- sqrtDist(x, len, lag, i, j) # squared distance in len dimensions
rd1 <- sqrtDist(x, len+1, lag, i, j) # NOTE(review): computed but never used
diff <- abs(x[i+len*lag]-x[j+len*lag]) # separation along the added coordinate
# NOTE(review): `rd` is a *squared* distance while `diff` is linear; the
# usual FNN criterion compares diff / sqrt(rd) with the tolerance - verify
# this mixed-scale test is intentional before reusing elsewhere.
if (diff > rd*tolerance) numFNN <- numFNN+1
}
result <- numFNN/(N-lag*len)
return (result)
}
# Device-index groups (presumably laptops / refrigerators - confirm against
# the trace dataset's numbering)
laptop <- c(6,57,72,78,111,116,121,66,112,125)
refri <- c(21, 87, 103)
#knownDv <- c(3, 6, 7, 9, 21, 24, 25, 28, 47, 57, 72, 74, 87, 89, 92, 93, 95, 103, 109, 111, 115, 122, 123, 131)
# Baseline ApEn/SampEn of pure Gaussian noise (m = 2, lag = 1, tolerance 0.2);
# the commented code further below divides device entropies by this baseline
set.seed(983423)
N <- 25000
ranData <- rnorm(N)
#raEn1 <- ApEnSaFast(ranData, N, 2, 1, 0.1)
raEn2 <- ApEnSaFast(ranData, N, 2, 1, 0.2)
print(raEn2)
#raEn3 <- ApEnSaFast(ranData, N, 2, 1, 0.3)
#raEn4 <- ApEnSaFast(ranData, N, 2, 1, 0.4)
#print(c(raEn1, raEn2, raEn3, raEn4))
# Tracebase CSV file names grouped by appliance type (laptops, refrigerators,
# desktops, TVs, plus an extra laptop/ad-hoc set)
traceLap <- c("dev_5AE2CA.csv","dev_B1B603.csv","dev_B7BEA1.csv","dev_B8148C.csv", "dev_D320C8.csv")
traceRef <- c("dev_76C07F2.csv","dev_98C08A.csv","dev_98C08A2.csv","dev_599393.csv","dev_B7BE29.csv","dev_B7E6F4.csv","dev_B83B9E.csv","dev_D325D9.csv","dev_D325D9B.csv","dev_D331DA.csv", "dev_D331DA2.csv", "dev_D32131A.csv", "dev_D32131A.csv", "dev_D32131B.csv", "dev_D32131C.csv", "dev_undef1.csv", "dev_undef2.csv")
traceDesk <- c("dev_11F01E.csv","dev_59AC8E.csv","dev_59AC89.csv","dev_59ADA7.csv","dev_7296D7.csv","dev_B7E6FA.csv","dev_D35C05A.csv","dev_D35C05B.csv","dev_D337C9.csv","dev_D337C9B.csv","dev_D337C9C.csv","dev_schalli.csv", "dev_schalli2.csv", "dev_denis.csv")
traceTV <- c("dev_330A3.csv", "dev_B80E51.csv", "dev_B80E51B.csv", "dev_B8121D.csv", "dev_B81116.csv", "dev_B83416.csv", "dev_C3E6D1.csv", "dev_D35F73.csv", "dev_D369E0.csv", "dev_D33097.csv")
laptopad <- c("dev_D320C8.csv", "dev_B8198B1.csv", "dev_B8198B2.csv", "testData.csv")
# Main analysis loop: for each target trace, load its spline CSV, take the
# first N samples, estimate the embedding time lag, and compute approximate/
# sample entropy (ApEnSaFast), normalised against the Gaussian baseline raEn2.
# Depends on globals defined earlier in the file: target_names, target_length,
# fromDay, toDay, N, raEn2, getTimeLag(), ApEnSaFast().
# Fixes: removed a stray `?try` help call that opened the documentation
# viewer on every iteration, and the unused local `k`.
for (i in 1:target_length) {
target <- target_names[i]
# Find id name: second-to-last path component of the target path.
splitID <- strsplit(target,"/",fixed=TRUE)
splitLength <- length(splitID[[1]])
idName <- splitID[[1]][splitLength-1]
fileName <- paste("../Data/IREF/November_splines/",idName, "-",fromDay,"-",toDay,".csv",sep="")
#fileName <- paste("../Data/Tracebase/Laptop/", i,sep="")
# open file and output it plot
# Skip targets whose CSV is missing or unreadable instead of aborting.
devData <- try(read.csv(fileName))
if (inherits(devData, 'try-error')) next
numberDataPoints <- length(devData[,2])
if (numberDataPoints >= N) {
# sData <- devData$Value[1:numberDataPoints]
sData <- devData[,2][1:N]
std <- sd(sData)
# print("out")
# maxData <- max(sData)
# minData <- min(sData)
# meanData <- mean(sData)
timeLag <- getTimeLag(sData, N%/%10)
# print(c(i, std, timeLag, numberDataPoints))
# Degenerate single-value loops: scaffolding kept from earlier parameter
# sweeps over embedding dimension m and tolerance r (as fraction of std).
for (m in 2:2) {
for (r in 2:2) {
result <- ApEnSaFast(sData, N, m, timeLag, r*std/10)
# if (r == 1) rate <- result/raEn1
# if (r == 2) rate <- result/raEn2
# if (r == 3) rate <- result/raEn3
# if (r == 4) rate <- result/raEn4
rate <- result/raEn2
print(c(i, idName, std, timeLag, r/10, result, rate))
}
}
# plot two dimensions data
# timeLag <- 20
# x <- sData[1:25000]
# y <- sData[timeLag+(1:25000)]
# z <- sData[2*timeLag+(1:25000)]
# #plot (1:25200, sData[1:25200], type="l")
# plot3d(x,y, z, type="l")
# # filter data
# minData <- min(sData)
# sData <- sData[sData > minData]
# numberNonzeroDataPoints <- length(sData)
# std <- sd(sData)
# print predicted time lag for all devices
# timeLag <- getTimeLag(sData,1000)
# print(c(i, idName, timeLag, std))
#mtout <- mutual(sData, 50, lag.max=2000, plot=TRUE)
# x <- window(rossler.ts, start=90)
# xyz <- embedd(sData, m=3, d=20)
# plot(xyz,type="l",main="")
# scatterplot3d(xyz,type="l")
# recurr(sData, m=3, d=2, start.time=1, end.time=500)
#stplot(sData, m=3, d=8, idt=1, mdt=250)
#fn.out <- false.nearest(sData,m=10,d=timeLag,t=180,rt=10)
#plot(fn.out)
# m <- 2
# for (k in 1:4) {
# saEn <- ApEnSaFast(sData, N, m, timeLag, k*std/10)
# print(c(i, idName, timeLag, std, k/10, saEn))
# }
# r <- std*0.2
# for (lag in 1:(N%/%10)) {
# saEn <- ApEnSaFast(sData, N, 2, lag, r)
# print(c(lag,saEn,timeLag,N))
# }
# minData <- min(sData)
# filter data
# sData <- sData[sData > minData]
# numberDataPointsNonzero <- length(sData)
# print(c(i,idName,numberDataPoints, numberDataPointsNonzero))
# if (numberDataPointsNonzero >= N) {
# sData <- sData[1:N]
# timeLag <- getTimeLag(sData,N%/%10)
# std <- sd(sData)
# ApSa1 <- AppSamEn(sData, N, 2, timeLag, std*0.01)
# ApSa2 <- AppSamEn(sData, N, 2, timeLag, std*0.1)
# ApSa3 <- AppSamEn(sData, N, 2, timeLag, std*0.2)
# ApSa4 <- AppSamEn(sData, N, 2, timeLag, std*0.3)
# ApSa5 <- AppSamEn(sData, N, 2, timeLag, std*0.4)
# print(c(i,idName, N, std, timeLag, ApSa1, ApSa2, ApSa3, ApSa4, ApSa5))
# #print(c(i,idName,timeLag, numberDataPoints, numberDataPointsNonzero))
# }
#minData <- min(sData)
#maxData <- max(sData)
#meanData <- mean(sData)
# print(c(i,idName,minData, numberDataPoints, numberDataPointsNonzero))
# sData2 <- devData$Value[2:N]-devData$Value[1:(N-1)]
# std2 <- sd(sData2)
# rmax2 <- 0
# rmax3 <- 0
# rmax4 <- 0
# rmax5 <- 0
# rmax6 <- 0
# rmax7 <- 0
#
# if (std2 > 0) {
# rmax2 <- (-0.036+0.26*sqrt(std/std2))/((numberDataPoints/1000)^(1/4))
# rmax3 <- (-0.08+0.46*sqrt(std/std2))/((numberDataPoints/1000)^(1/4))
# rmax4 <- (-0.12+0.62*sqrt(std/std2))/((numberDataPoints/1000)^(1/4))
# rmax5 <- (-0.16+0.78*sqrt(std/std2))/((numberDataPoints/1000)^(1/4))
# rmax6 <- (-0.19+0.91*sqrt(std/std2))/((numberDataPoints/1000)^(1/4))
# rmax7 <- (-0.2+1.0*sqrt(std/std2))/((numberDataPoints/1000)^(1/4))
# }
# # print predicted time lag for all devices
# timeLag <- getTimeLag(sData,numberDataPoints%/%2-1)
# print(c(i,idName,numberDataPoints, timeLag))
# if (minData < maxData) {
# tsData <- ts(sData)
# acfFileName <- paste("../Data/IREF/November_acf2/",idName, "-",fromDay,"-",toDay,".png",sep="")
# dev.copy(png,acfFileName,width=1200,height=600)
# acf(tsData,lag.max=numberDataPoints/2)
# dev.off()
# timeLag <- getTimeLag(sData,100)
# saEn <- sampleEn(sData, numberDataPoints, 2, timeLag, r)
# print(c(timeLag,saEn))
# }
#test for time lag
# r <- std*0.2
# lag <- getTimeLag(sData,numberDataPoints%/%5)
# saEn <- AppSamEn(sData, numberDataPoints, 2, lag, r)
# print(c(i, idName, lag,saEn))
#
# for (lag in 1:499) {
# saEn <- AppSamEn(sData, numberDataPoints, 2, lag, r)
# print(c(lag,saEn))
# }
# print to csv file
# resultFile <- paste("../Data/IREF/November_result/",idName, "-",fromDay,"-",toDay,"-",N,".csv",sep="")
# timeLag <- getTimeLag(sData,N%/%10)
#
# sink(file=resultFile, type="output")
# print(c(i,idName))
# # test for tolerance
# for (m in 2:9) {
# for (k in 1:100) {
# r <- k*std/100
# ApSa <- ApEnSaFast(sData, N, m, timeLag, r)
# print(c(m, timeLag, k/100, ApSa))
# }
# }
# sink()
# # embedding dimension variability
# for (m in 2:50) {
# r <- 0.2*std
# ApSa <- AppSamEn(sData, numberDataPoints, m, timeLag, r)
# print(c(m, ApSa))
# }
# test for embedding dimension
# for (tol in 1:10) {
# for (dim in 1:50) {
# rate <- getFNNrate(sData, dim, timeLag, 5*tol)
# if (rate < 0.005) break
# print(c(5*tol,dim,timeLag,rate))
# }
# }
}
}
#dev.off()
|
e60430f453a52ecc1e22b6c45b08d067fd26493f
|
0a206350604c3af56d089664576600ae1e7762b3
|
/man/portfolio.diversification.Rd
|
e554977bcfb22fb2a79506bd1a70ed5bc8986475
|
[] |
no_license
|
cran/rportfolios
|
6728d5639684b8e95235791ebb10fcea75a5fc08
|
3b1a6b3b562e044d84c2d832e7a0e802d4fa3aa3
|
refs/heads/master
| 2020-04-28T23:24:24.367409
| 2016-08-19T13:37:41
| 2016-08-19T13:37:41
| 17,699,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,659
|
rd
|
portfolio.diversification.Rd
|
\name{portfolio.diversification}
\alias{portfolio.diversification}
\title{ Portfolio Diversification Measure }
\description{
This function computes one of several portfolio diversification measures
for a single portfolio or a collection of portfolios.
}
\usage{
portfolio.diversification(portfolios, method = c("naive", "herfindahl",
"herfindahl-hirschman", "hannah-kay", "shannon"), alpha = 2)
}
\arguments{
\item{portfolios}{ a vector or matrix of portfolio exposures }
\item{method}{ a character value for the method used to compute the measure }
\item{alpha}{ a numeric value for parameter required for the Hannah-Kay measure }
}
\details{
The function computes a portfolio diversification measure for a single portfolio or
for a collection of portfolios organized as a matrix.
}
\value{
A vector with one or more values.
}
\references{
Worthington, A. C., 2009. Household Asset Portfolio
Diversification: Evidence from the Household, Income and Labour
Dynamics in Australia (Hilda) Survey, Working Paper, Available at
SSRN: \code{http://ssrn.com/abstract=1421567}.
}
\author{ Frederick Novomestky \email{fn334@nyu.edu} }
\examples{
onePortfolio <- random.longonly( 100, 75 )
naive <- portfolio.diversification( onePortfolio, method = "naive" )
herfindahl <- portfolio.diversification( onePortfolio, method = "herfindahl" )
herfindahl.hirschman <- portfolio.diversification( onePortfolio, method = "herfindahl-hirschman" )
hannah.kay <- portfolio.diversification( onePortfolio, method = "hannah-kay" )
shannon <- portfolio.diversification( onePortfolio, method = "shannon" )
}
\keyword{ math }
|
18a34ad1d237efe0ee9783d56d90cad5b6775dbb
|
40a1d5d15a66717840fb0929bdb4e24dfbd42d75
|
/lib/replace.R
|
0ebd702b93da1a517cd02fb6fdff9acef0d69690
|
[] |
no_license
|
jiayi-cui/Fall2018-Project4-sec2-grp2
|
60e8294dd16dbf56896b61a9998535c5229f435e
|
736659e98f518351d0095f6c52c575cd6d721c65
|
refs/heads/master
| 2020-04-11T10:54:40.393531
| 2018-12-03T16:42:21
| 2018-12-03T16:42:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,096
|
r
|
replace.R
|
# Original Replaces2():Replaces_ori
#
# Replaces1 <- function(word, range) {
# N <- nchar(word)
# out <- list()
# for (letter in letters) {
# out[[letter]] <- rep(word, N)
# for (i in range) {
# substr(out[[letter]][i], i, i) <- letter
# }
# }
# out <- unique(unlist(out))
# return(out)
# }
#
# Replaces_ori <- function(word) {
# N <- nchar(word)
# word.new <- Replaces1(word, 1:N)
# out <- lapply(word.new, Replaces1,
# which(unlist(strsplit(word,"")) %in% unlist(strsplit(word.new,""))))
# out <- unique(unlist(out))
# return(out)
# }
# letters_rep[k] is the first letter of the k-th (first, second) pair when
# crossed element-wise with `letters` (i.e. "a" x 26, "b" x 26, ...).
letters_rep <- rep(letters, each = length(letters))

# Generate every candidate word obtained by substituting letters at two
# positions i < j of `word`, covering all 26 x 26 letter pairs for each
# position pair. A single-letter word returns the 26 letters. Duplicates
# (e.g. substituting a letter with itself) are NOT removed, matching the
# original behaviour.
# Fix: the previous version grew `can_list` with c() inside a nested loop
# (quadratic copying); results are now collected in a preallocated list and
# flattened once.
Replaces2 <- function(word) {
  N <- nchar(word)
  if (N == 1)
    return(letters)
  parts <- vector("list", N * (N - 1) / 2)  # one slot per (i, j) pair
  slot <- 0L
  for (i in 1:(N - 1)) {
    for (j in (i + 1):N) {
      slot <- slot + 1L
      parts[[slot]] <- paste0(substr(word, 1, i - 1), letters_rep,
                              substr(word, i + 1, j - 1), letters,
                              substr(word, j + 1, N))
    }
  }
  unlist(parts, use.names = FALSE)
}
# system.time(temp<-Replaces_ori("STAFF"))
# user system elapsed
# 2.47 0.00 2.50
# system.time (for(i in 1:100) {temp<-Replaces2("STAFF")} )
# user system elapsed
# 4.26 0.14 4.52
# Prunning Version, which is much fewer(16 to 10000 times) than candidates.
# Pruned variant of Replaces2(): for each position of `word`, only substitute
# letters that the confusion matrix marks as plausible OCR confusions for the
# observed letter (row entries > 0), yielding far fewer candidates.
# NOTE(review): relies on the global `Cfs_matrix`, indexed by letter row
# names -- confirm it is loaded before calling.
# Fix: replaced c()-growth of the candidate vector inside the nested loop
# (quadratic copying) with per-pair list accumulation flattened once.
Prun_Replaces2 <- function(word) {
  N <- nchar(word)
  if (N == 1)
    return(letters)
  # Candidate replacement letters per position, restricted by the
  # confusion matrix row for the letter observed at that position.
  letter_pos <- vector("list", length = N)
  for (i in 1:N) {
    letter_pos[[i]] <- letters[Cfs_matrix[substr(word, i, i), ] > 0]
  }
  parts <- vector("list", N * (N - 1) / 2)  # one slot per (i, j) pair
  slot <- 0L
  for (i in 1:(N - 1)) {
    for (j in (i + 1):N) {
      # Prefix + candidate at i; middle + candidate at j + suffix; then
      # cross the two halves to enumerate all combinations.
      part1 <- paste0(substr(word, 1, i - 1), letter_pos[[i]])
      part2 <- paste0(substr(word, i + 1, j - 1), letter_pos[[j]], substr(word, j + 1, N))
      slot <- slot + 1L
      parts[[slot]] <- c(outer(part1, part2, FUN = paste0))
    }
  }
  unlist(parts, use.names = FALSE)
}
|
bb6c7f0e03a54b0172945bf7dd3de7762576b319
|
4f8aebafb84b9046bcf2a7ee38eabf6438e05ee1
|
/2021/Sub 5- 2-2-21 HBCU Enrollment/HBCU_Enrollment_Process.R
|
78cb9c84e1e062cc240e8773633969536a1b1da5
|
[] |
no_license
|
geoffreymsmith/TidyTuesday
|
4459eebb7eae32f7d94041176ad4b8727861d276
|
da1afb8a037c18941a6ba3ee421d40c95eb65140
|
refs/heads/master
| 2023-09-02T15:40:11.654588
| 2021-10-27T11:41:43
| 2021-10-27T11:41:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,766
|
r
|
HBCU_Enrollment_Process.R
|
#===== Tidy Tuesday - HBCU - 2-2-21 =====#
# Library Load-in====
# Install any packages not yet present, then attach everything quietly.
`Required Packages` <- c("tidytuesdayR","knitr","ggplot2","tidyverse","extrafont","glue","ggtext","textclean","scales","gganimate", "magick")
`New Packages` <- `Required Packages`[!(`Required Packages` %in% installed.packages()[,"Package"])]
if (length(`New Packages`) > 0) install.packages(`New Packages`)
# library() rather than require() so a failed attach errors immediately
# instead of silently returning FALSE; TRUE spelled out instead of the
# reassignable shorthand T.
invisible(suppressPackageStartupMessages(suppressWarnings(
  lapply(`Required Packages`, library, character.only = TRUE)
)))
# Data Load-in====
# Pull the 2021-02-02 TidyTuesday release; keep the Black HBCU enrollment table.
TTdata <- tidytuesdayR::tt_load('2021-02-02')
HBCUdata <- TTdata$hbcu_black
#Font Load-in===
loadfonts()
windowsFonts()  # lists fonts registered for the Windows graphics device
#Have the idea to make an animated donut chart comparing enrollment total between females and males across the years. Maybe an animated donut chart?.#
#Data Carpentry====
# Reshape enrollment by sex and derive all donut-chart geometry -- per-year
# share, stacked segment bounds, and label midpoints -- in one pipeline.
HBCUgenderenroll <- HBCUdata %>%
  select(Year:Females) %>%
  pivot_longer(cols = Males:Females, names_to = "Sex", values_to = "Enrollment") %>%
  mutate(Percentage = Enrollment / `Total enrollment`) %>%
  group_by(Year) %>%  # deliberately left grouped by Year for the plotting code
  mutate(
    ymax = cumsum(Percentage),        # upper bound of each stacked segment
    ymin = c(0, head(ymax, n = -1)),  # lower bound = previous segment's top
    donutlabels = (ymax + ymin) / 2   # mid-segment position for labels
  )
# Making the donut gif====
# Animated donut chart: one ring per year, segments coloured by Sex, with
# gganimate stepping through the years. Rectangles in (ymin, ymax) become
# donut segments once wrapped into polar coordinates below.
donutgif <- ggplot(HBCUgenderenroll, aes(ymax=ymax, ymin=ymin, xmax=5.5, xmin=3, fill=Sex)) +
scale_fill_manual(values = c("#a34986","#2396d9"))+
geom_rect() +
theme_void() +
theme(plot.background = element_rect(fill ="#000000"),
text = element_text(family = 'AvantGarde Bk BT'),
axis.title=element_blank(),
legend.position = "none")+
coord_polar(theta="y") +
xlim(c(0,6)) +
# Year label in the donut hole.
geom_text(data=HBCUgenderenroll,
aes(x=0, y=.75, label=Year),
fontface = "bold",
size=13,
family = "AvantGarde Bk BT",
color = "#ffffff") +
# Per-segment "Sex: xx.x%" labels at the precomputed midpoints.
geom_text(data = HBCUgenderenroll,
aes(x=4.2, y=donutlabels, label = paste0(Sex,":\n", percent(Percentage, accuracy = .1))),
fontface = "bold",
size = 3,
color = "#ffffff")+
transition_states(Year,
transition_length = 3,
state_length = 20,
wrap = TRUE)+
ease_aes('circular-in-out')
#Saving the gif===
anim_save("donutgif.gif",donutgif)
#Reading in the gif (as a magick object for compositing later)===
donut <- image_read("donutgif.gif")
# Static backdrop plot: black background, HTML-styled title/subtitle/caption
# (via ggtext textboxes) and a citation footnote. The animated donut gif is
# composited onto this image in the final step below.
plotbase <- ggplot() +
theme_void() +
coord_cartesian(clip = "off")+
xlim(0,15)+
ylim(-40,15)+
theme(plot.background = element_rect(fill ="#000000"),
text = element_text(family = 'AvantGarde Bk BT'),
axis.title=element_blank(),
legend.position = "none",
plot.title = element_textbox_simple(
size = 23,
color = "#ffffff",
padding = margin(5,5,5,5),
margin = margin(19,0,10,0),
halign = .5,
family = "The Bambank Script"),
plot.subtitle = element_textbox_simple(
size = 12,
color = "#ffffff",
padding = margin(5,5,5,5),
margin = margin(20,0,20,0),
halign = .5),
plot.caption = element_textbox_simple(
size = 12,
color = "#ffffff",
padding = margin(5,5,5,5),
margin = margin(20,0,0,0),
halign = 1))+
labs(title = "<b> <span style = 'color:#a34986'>Battle</span> of the <span style = 'color:#2396d9'>Sexes</span></b>",
subtitle = paste0('<i><b>"Female & Male Proportions of Black HBCU Enrollees Through The Years"</i></b><br><br>',"From<b> ",min(HBCUgenderenroll$Year)," </b>to<b> ", max(HBCUgenderenroll$Year),"</b> an increasing shift in the reported amount of HBCU enrollees that identified as <span style = 'color:#a34986'><b>Female</b></span> has been observed in data provided by <span style = 'color:#2396d9'><b>Data.World</b></span> and the <span style = 'color:#2396d9'><b>National Center for Education Statistics (NCES)</b></span>. Previous literature suggests that this gap may be due to males having a lower academic performance in high school as well as other environmental factors.[1,2]"), caption = "Data Source: Data.World/NCES | Created By: @meghansharris")+
# References [1,2] cited in the subtitle, pinned to the bottom-left corner.
annotate(geom = "text",
label = "1) Gasman, M., Abiola, U., & Freeman, A. (2014). Gender Disparities at Historically Black Colleges and Universities.\n Higher Education Review, 47 (1), 56-76. Retrieved from http://repository.upenn.edu/gse_pubs/351 \n\n2) Richardson, S., Jones-Fosu, S., & Lewis, C. W. (2019). Black Men are Present: Examining Enrollment Patterns\nin Education Degree Programs. Journal of African American Males in Education, 10(1), spring 2019, 20-36.",
color = "#ffffff",
fontface = "italic",
size = 3,
family = 'AvantGarde Bk BT',
x = -Inf,
y = -Inf,
hjust = 0,
vjust = 0)
#Saving the plot===
ggsave("plotbase.png", dpi = 100, width = 7 , height = 9.7, units = "in")
#Loading in the plot (as a magick object for compositing)===
base <- image_read("plotbase.png")
# Making the final image====
#Overlaying the gif and plot: blend the donut frames onto the static backdrop===
finalframes <- image_composite(base, donut, operator = "blend", offset = "+0,+700", gravity = "center")
#Compiling the composited frames back into an animated gif===
HBCUgif <- image_animate(finalframes , fps = 10)
#Final write-out===
image_write(HBCUgif, "HBCU.gif")
#Sound to let me know it's done because this is taking too long and I have a migraine and need to lay down :p
beepr::beep(sound = "Time-Bomb.wav" )
|
dbf52a9ac92af34d6a688ffae671feb7cc56ebe7
|
042873fd08a8aa3f934ac3649cd28393d6409b78
|
/man/simfixoutbreak.Rd
|
8bbb160ccba0e5b67a0eeb61940fd7faa9501bee
|
[] |
no_license
|
cran/seedy
|
4daeb506c8e247a12b8b834e1a03efd5fe3980f7
|
534000e6a1992a7fa78553281d7cc76e7663fbeb
|
refs/heads/master
| 2018-12-28T22:58:26.130196
| 2015-11-06T23:58:19
| 2015-11-06T23:58:19
| 21,715,650
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,993
|
rd
|
simfixoutbreak.Rd
|
\name{simfixoutbreak}
\alias{simfixoutbreak}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Simulate evolutionary dynamics on a given transmission tree
}
\description{
Simulate within-host evolutionary dynamics on top of an existing transmission tree and generate genomic samples.
}
\usage{
simfixoutbreak(ID,inf.times, rec.times, inf.source, mut.rate, equi.pop=10000, shape=flat,
inoc.size=1, imp.var=25, samples.per.time=1, samp.schedule="random",
samp.freq=500, full=FALSE, feedback=500, glen=100000,
ref.strain=NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ID}{
Vector of unique IDs.
}
\item{inf.times}{
Vector of (integer) infection times.
}
\item{rec.times}{
Vector of (integer) removal times.
}
\item{inf.source}{
Vector of infection sources. The \code{i}th entry corresponds to the ID of the source of infection. For importations, the source should be 0.
}
\item{mut.rate}{
Mutation rate (per genome per generation).
}
\item{equi.pop}{
Equilibrium effective population size of pathogens within-host.
}
\item{shape}{
Function describing the population growth dynamics. See Details.
}
\item{inoc.size}{
Size of pathogen inoculum at transmission.
}
\item{imp.var}{
The expected number of mutations separating unconnected importations.
}
\item{samples.per.time}{
Number of samples taken at sampling times.
}
\item{samp.schedule}{
How should sampling be conducted? Accepted values are: "calendar" - samples are taken from all current infectives every \code{samp.freq} generations; "individual" - samples are taken from each infective at intervals of \code{samp.freq} after infection; "random" - samples are taken at one time between infection and removal for each infective.
}
\item{full}{Should `full' genomic sampling be returned? That is, should a vector of genotypes and their respective frequencies be stored from each individual's sampling times?
}
\item{samp.freq}{
Number of generations between each sampling time (see \code{samp.schedule}).
}
\item{feedback}{
Number of generations between simulation updates returned to R interface.
}
\item{glen}{
Length of genome.
}
\item{ref.strain}{
Initial sequence. By default, a random sequence of length \code{glen}.
}
\item{...}{
Additional arguments to be passed to the \code{shape} function.
}
}
\details{
Population growth dynamics are defined by the function called by 'shape'. This function returns the expected population size at each time step, given the total simulation time. By default, the population is expected to grow exponentially until reaching an equilibrium level, specified by \code{equi.pop} (\code{\link{flat}}). Alternatively, the population can follow a sinusoidal growth curve, peaking at \code{runtime}/2 (\code{\link{hump}}). User-defined functions should be of the form \code{function(time,span,equi.pop,...)}, where \code{span} is equal to the duration of infection in this setting.
}
\value{
Returns a list of outbreak data:
\item{epidata}{A matrix of epidemiological data with columns: person ID, infection time, removal time, source of infection.}
\item{sampledata}{A matrix of genome samples with columns: person ID, sampling time, genome ID.}
\item{libr}{A list with an entry for each unique genotype observed. Each entry is a vector of mutation positions relative to the reference genome.}
\item{nuc}{A list with an entry for each unique genotype observed. Each entry is a vector of nucleotide types (integer between 1 and 4).}
\item{librstrains}{A vector of unique genotype IDs corresponding to the \code{libr} object.}
\item{endtime}{End time of the outbreak.}
}
\examples{
# Simulate a transmission chain
inf.times <- (0:20)*100
rec.times <- inf.times + 100 + rpois(21,50)
inf.source <- 0:20
inf.source[c(3,11)] <- 0 # Two importations
mut.rate <- 0.001
# Now simulate evolutionary dynamics and samples on top of this tree
W <- simfixoutbreak(ID=1:21, inf.times, rec.times, inf.source, mut.rate, equi.pop=1000, shape=flat,
inoc.size=10, imp.var=25, samples.per.time=5, samp.schedule="random",
samp.freq=500, full=FALSE, feedback=100, glen=100000,
ref.strain=NULL)
sampledata <- W$sampledata
epidata <- W$epidata
# Calculate distance matrix for observed samples
distmat <- gd(sampledata[,3], W$libr, W$nuc, W$librstrains)
# Now pick colors for sampled isolates
colvec <- rainbow(1200)[1:1000] # Color palette
refnode <- 1 # Compare distance to which isolate?
colv <- NULL # Vector of colors for samples
maxD <- max(distmat[,refnode])
for (i in 1:nrow(sampledata)) {
colv <- c(colv,
colvec[floor((length(colvec)-1)*(distmat[refnode,i])/maxD)+1])
}
plotoutbreak(epidata, sampledata, col=colv, stack=TRUE, arr.len=0.1,
blockheight=0.5, hspace=60, label.pos="left", block.col="grey",
jitter=0.004, xlab="Time", pch=1)
}
|
8d36eb8bed227baf19550ae4539f93116cde65a9
|
ca7f37df9f06afadc5522f9b289d09d2e20c3364
|
/ptcgo/tests/testthat/test_ordering.R
|
0aed1daff7051745f1dceecbf8a41f0118c5e872
|
[
"MIT"
] |
permissive
|
linnykos/pokemon_decklist
|
fb1b0e3058e13bd4bd6e6d2c2207532280cb3763
|
45da459016a7867dda8e1d121721e6e089a772eb
|
refs/heads/master
| 2021-06-22T19:34:20.645125
| 2017-08-30T20:14:42
| 2017-08-30T20:14:42
| 101,420,628
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,943
|
r
|
test_ordering.R
|
context("Test ordering")
## .ordering_trainer is correct
# Expected contract (per the assertions below): trainer cards come back with
# all Supporters first, then Items, then Stadiums, each type block sorted
# alphabetically by card name. Fixture comes from a real deck file parsed
# against the project's card database.
test_that(".ordering_trainer works", {
deck_file = "../assets/greninja.txt"
txt <- .file_reader(deck_file)
txt <- txt[grep("^\\* ", txt)]
txt_mat <- t(sapply(txt, .line_parser)); n <- nrow(txt_mat)
txt_mat <- .format_information(txt_mat, db_path = "../../data-raw/db.sqlite")
idx <- which(txt_mat$Type %in% c("Item", "Stadium", "Supporter"))
txt_mat <- txt_mat[idx,]
res <- .ordering_trainer(txt_mat)
expect_true(all(dim(res) == dim(txt_mat)))
idx1 <- which(res$Type == "Supporter")
idx2 <- which(res$Type == "Item")
idx3 <- which(res$Type == "Stadium")
# Type blocks are contiguous and ordered Supporter < Item < Stadium.
expect_true(max(idx1) <= min(idx2))
expect_true(max(idx2) <= min(idx3))
expect_true(all(res$name[idx1] == sort(res$name[idx1])))
expect_true(all(res$name[idx2] == sort(res$name[idx2])))
expect_true(all(res$name[idx3] == sort(res$name[idx3])))
})
#######
## .order_number_name is correct
# Per the fixture: rows are sorted by count descending, ties broken
# alphabetically by name (the two 8s order Electric before Grass).
test_that(".order_number_name", {
txt_mat <- data.frame(num = c(2,8,10,8),
name = c("Water Energy", "Grass Energy",
"Dark Energy", "Electric Energy"))
res <- .order_number_name(txt_mat)
expect_true(all(res[,1] == c(10, 8, 8, 2)))
expect_true(all(res[,2] == c("Dark Energy", "Electric Energy", "Grass Energy",
"Water Energy")))
})
#######
## .ordering_energy is correct
# Per the fixture, energies appear alphabetically by name ("Splash" before
# "Water") regardless of count -- inferred from the expected output; confirm
# against .ordering_energy's implementation.
test_that(".ordering_energy works", {
deck_file = "../assets/greninja.txt"
txt <- .file_reader(deck_file)
txt <- txt[grep("^\\* ", txt)]
txt_mat <- t(sapply(txt, .line_parser)); n <- nrow(txt_mat)
txt_mat <- .format_information(txt_mat, db_path = "../../data-raw/db.sqlite")
idx <- which(txt_mat$Type == "Energy")
txt_mat <- txt_mat[idx,]
res <- .ordering_energy(txt_mat)
expect_true(all(dim(res) == dim(txt_mat)))
expect_true(all(res$num == c(3,10)))
expect_true(all(res$name == c("Splash Energy", "Water Energy")))
})
|
c38803e33d22ab63991f7cbc0dc98ae70db51f4c
|
0ddaea2c0031ae8f8b6cf98bf693e700fe36e263
|
/Cachematrix.R
|
30a0ee3432b094d407264e44393d484680638da0
|
[] |
no_license
|
surojeetsadhu/Programming_Assignment2
|
cfa21fec64b52fdd53082c7881063629a942c1a8
|
e00900b733c309f087401bba709a3140e06d2c95
|
refs/heads/master
| 2020-12-30T17:50:57.602226
| 2017-04-08T09:06:35
| 2017-04-08T09:06:35
| 87,619,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,624
|
r
|
Cachematrix.R
|
# Programming_Assignment2
#Function “makeCacheMatrix” creates a special “matrix” object that can cache its inverse. makeCacheMatrix #contains 4 functions: set, get, setinverse, getinverse.
# makeCacheMatrix: build a special "matrix" object that can cache its inverse.
# Fix: the pasted code carried R console continuation prompts ("+") at the
# start of every line, making the file unparseable; prompts removed.
# Returns a list of four accessors:
#   set(y)        -- replace the stored matrix and clear the cached inverse
#   get()         -- return the stored matrix
#   setinverse(v) -- store a computed inverse in the cache
#   getinverse()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  setinverse <- function(solve) m <<- solve
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# Function "cacheSolve" computes the inverse of the special "matrix" returned by makeCacheMatrix.
# If the inverse has already been calculated (and the matrix has not changed), then cacheSolve
# retrieves the inverse from the cache. If the inverse has not been calculated, `data` gets the
# matrix stored with makeCacheMatrix, `m` receives the computed inverse, and x$setinverse(m)
# stores it in the makeCacheMatrix object for later reuse.
# cacheSolve: return the inverse of the special "matrix" produced by
# makeCacheMatrix(). If an inverse is already cached it is returned directly
# (with a message); otherwise it is computed with solve(), cached via
# x$setinverse(), and returned. Extra arguments are forwarded to solve().
# Fix: removed the R console continuation prompts ("+") that made the
# pasted code unparseable.
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
# Example console session applying the functions above (commented out so the
# file parses as valid R):
# > suro <- diag(6, 3)
# > suro
#      [,1] [,2] [,3]
# [1,]    6    0    0
# [2,]    0    6    0
# [3,]    0    0    6
# > Cache_Marix <- makeCacheMatrix(suro)
# > cacheSolve(Cache_Marix)
#           [,1]      [,2]      [,3]
# [1,] 0.1666667 0.0000000 0.0000000
# [2,] 0.0000000 0.1666667 0.0000000
# [3,] 0.0000000 0.0000000 0.1666667
# On calling cacheSolve again, the cached data is returned. Thanks for reviewing.
|
635623f625679166cc47bcf262f5d1504a8f0f5e
|
fd0272f85cc81991dead59474fa1f39c3c6e8d50
|
/R/generation_genotype.R
|
aec754133ebdce6e91abb9e97c83210e694213db
|
[] |
no_license
|
jyc7385/infolab7
|
2f614d4f56697777504d15ab21fac703e0f243f0
|
7259038f8472e6e7af93017762043606a2f566f9
|
refs/heads/master
| 2020-07-08T08:50:21.859802
| 2019-08-23T13:34:31
| 2019-08-23T13:34:31
| 203,624,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
generation_genotype.R
|
#' Simulate a genotype matrix
#'
#' Draws an \code{n x samp} matrix of genotype codes (-1, 0, 1) with
#' probabilities \code{(1-p)^2}, \code{2p(1-p)}, \code{p^2} (Hardy-Weinberg
#' proportions for allele frequency \code{p}). As a side effect, the matrix
#' is written to \code{"sample_genotype.txt"} (tab-separated, no row or
#' column names) in the working directory.
#'
#' @param n Number of rows (markers) to simulate.
#' @param samp Number of columns (samples) per row.
#' @param p Allele frequency, a number in [0, 1].
#' @return An \code{n x samp} matrix with entries in \{-1, 0, 1\}.
#' @export
genotype <- function(n,samp,p)
{
  geno <- matrix(data=1, nrow=n, ncol=samp)
  # seq_len() instead of 1:n so n == 0 yields an empty loop rather than
  # the broken sequence c(1, 0); FALSE spelled out instead of F.
  for(i in seq_len(n))
    geno[i,] <- sample(-1:1, samp, replace=TRUE, prob=c((1-p)*(1-p), 2*p*(1-p), p*p))
  write.table(geno, file="sample_genotype.txt", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
  return(geno)
}
|
76879a1264db5d74430f6a90cd3d28fb19284326
|
4e27985b0d0eaeb9c7188883309f5c01b08f0827
|
/man/get_comps.Rd
|
02aab2d51e353abab278996d46ecd71b168f8632
|
[] |
no_license
|
nxskok/poistan
|
6064803d61785c7fb2ac2037ec602187d3766cc4
|
b352b66d79a16d305df6c3ba371874a7fc5c08f6
|
refs/heads/master
| 2020-06-01T07:19:27.814275
| 2015-05-17T17:26:10
| 2015-05-17T17:26:10
| 34,591,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 391
|
rd
|
get_comps.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_data.R
\name{get_comps}
\alias{get_comps}
\title{get several comps and glue results into one data frame}
\usage{
get_comps(v)
}
\arguments{
\item{v}{vector of competition IDs}
}
\value{
data frame of games like \code{get_comp}
}
\description{
get several comps and glue results into one data frame
}
|
ab8db2667af3334678d5f85b9959b05b2718b704
|
7155051ae17b48d508236e2163243f99ea8ffc1c
|
/man/is.posint.Rd
|
da51350077356e312fa64d7e156e8bbf4afc70a1
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
RInterested/R-numtheory
|
d1679d59814fc79619a2477ffac2e7d06c590464
|
65158ebf3d5958887f0ac9e8250712de57df0719
|
refs/heads/master
| 2023-03-18T00:10:01.227526
| 2018-06-17T14:10:39
| 2018-06-17T14:10:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
rd
|
is.posint.Rd
|
\name{is.posint}
\alias{is.posint}
\title{Check for Positive Integer Number}
\description{
Checks whether n is a positive integer number.
}
\usage{
is.posint(n)
}
\arguments{
\item{n}{Numeric}
}
\examples{
is.posint(2) # TRUE
is.posint(2.5) # FALSE
is.posint(-1) #FALSE
}
|
2352c15abc4b987ea7abf65d7e90883cbe295c17
|
4d66fc0072249d922788405f6cde901601ad6ee8
|
/proyecto/dimensionFractal.R
|
39907e0c8917f99ccf70c681dcd76c7b37178be3
|
[] |
no_license
|
eduardovaldesga/SimulacionSistemas
|
d2fc3cf2da7f5be0fee2ff1b8bd59a8a6805dde7
|
ef91a86ade3f02b4842274344373609dbc9d9edf
|
refs/heads/master
| 2021-01-15T10:58:00.480996
| 2017-12-12T06:05:52
| 2017-12-12T06:05:52
| 99,603,378
| 0
| 3
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,270
|
r
|
dimensionFractal.R
|
require(parallel)
require(data.table)
cuantos.cubos=function(tab,grid.x,grid.y,grid.z){
# Box-counting pass over a 3-D grid: for every occupied (x, y, z) cell,
# count the cell, accumulate the surface-area element of a plane fitted to
# the cell's points, and accumulate a local residual-spread term.
# `tab` must have columns x, y, z and residuals (added by dim.fractal()).
# Work is parallelised over the x-slices of the grid.
paso.x=grid.x[2]-grid.x[1]
paso.y=grid.y[2]-grid.y[1]
paso.z=grid.z[2]-grid.z[1]
eps=0.000001
# One worker per core minus one; grid.x values are dispatched by parSapply,
# everything else is exported to the workers explicitly.
cluster <- makeCluster(detectCores() - 1)
clusterExport(cluster,c("tab","grid.y","grid.z","eps","paso.x","paso.y","paso.z"),envir=environment())
dim=parSapply(cluster,grid.x,function(xx){
cuantos=0
area=0
var=0
# Points falling into this x-slice (eps guards floating-point cell edges).
orden.x=subset(tab,x+eps>=xx & x< xx+paso.x)
for(yy in grid.y){
orden.y=subset(orden.x,y+eps>=yy & y<yy+paso.y)
if(dim(orden.y)[1]>0){
for(zz in grid.z){
orden.z=subset(orden.y,z+eps>=zz & z<zz+paso.z)
if(dim(orden.z)[1]>0){
# Occupied cell: fit a local plane z ~ x + y and add its area element
# sqrt((1 + b_x^2)(1 + b_y^2)) * dx * dy.
lin=lm(data=orden.z,z~x+y)
coeff=lin$coefficients
n.area=sqrt((1+coeff[2]^2)*(1+coeff[3]^2))*paso.x*paso.y
if(is.na(n.area))n.area=0
area=area+n.area
cuantos=cuantos+1
# Residual spread needs more points than the 3 plane parameters.
# NOTE(review): this uses the `residuals` column (from the GLOBAL fit
# in dim.fractal), not this cell's lm() residuals -- confirm intended.
if(dim(orden.z)[1]>3){
n.var=sqrt(1/(dim(orden.z)[1]-3)*sum(orden.z$residuals^2))
if(is.na(n.var))n.var=0
}else{
n.var=0
}
var=var+n.var
}
}
}
}
return(c(cuantos,area,var))
})
stopCluster(cluster)
# Totals over slices: box count and area are summed; variation is averaged.
res=c(box.count=sum(dim[1,]),area=sum(dim[2,]),var=mean(dim[3,]))
return(res)
}
dim.fractal=function(tab,num.puntos=4,return.puntos=F,plot.log=T){
# Estimate the fractal dimension of a 3-D point cloud by three methods --
# box counting, Richardson area-scaling, and a variogram-style measure --
# each taken from a log-log regression over `num.puntos` successively
# doubled grid resolutions.
# NOTE(review): `tab` is assumed to have exactly three columns (renamed to
# x, y, z below); extra columns would break the names() assignment.
particiones=10
puntos=data.frame()
tab=as.data.table(tab)
names(tab)=c('x','y','z')
# Global plane fit; its residuals feed the per-cell spread in cuantos.cubos().
lin=lm(data=tab,z~x+y)
tab$residuals=lin$residuals
for(i in 1:num.puntos){
# Build the grid; the number of partitions doubles each iteration.
grid.x=seq(min(tab$x),max(tab$x),length.out = (2^(i-1))*particiones)
grid.y=seq(min(tab$y),max(tab$y),length.out = (2^(i-1))*particiones)
grid.z=seq(min(tab$z),max(tab$z),length.out = (2^(i-1))*particiones)
puntos=rbind(puntos,c((2^(i-1)*particiones),1/(2^(i-1)*particiones),cuantos.cubos(tab,grid.x,grid.y,grid.z)))
}
names(puntos)=c('particiones','escala','box.count','area','var')
# Compute the dimension: fit the three log-log regressions.
puntos.log=log2(puntos)
reg1=lm(box.count~particiones,puntos.log)
reg2=lm(area~escala,puntos.log)
reg3=lm(var~particiones,puntos.log)
if(plot.log){
# Kolmogorov / box-counting plot (labels are rendered in Spanish).
plot(puntos.log[,c(1,3)],main='Conteo de cajas',xlab='log particiones',ylab='log número de cajas',pch=19)
abline(reg1$coefficients)
legend("bottomright",paste("pendiente:",format(round(reg1$coefficients[[2]], 2), nsmall = 2)))
# Richardson area-vs-scale plot.
plot(puntos.log[,c(2,4)],main='Richardson',xlab='log escala',ylab='log area',pch=19)
abline(reg2$coefficients)
legend("bottomleft",paste("pendiente:",format(round(reg2$coefficients[[2]], 2), nsmall = 2)))
# Variogram plot.
plot(puntos.log[,c(1,5)],main='Variograma',xlab='log particiones',ylab='log variación',pch=19)
abline(reg3$coefficients)
legend("bottomright",paste("pendiente:",format(round(reg3$coefficients[[2]], 2), nsmall = 2)))
}
# Dimension estimates: slope, 2 + slope, and 3 - slope/2 respectively.
res=c(box.count=reg1$coefficients[2],area=2+reg2$coefficients[2],variograma=3-reg3$coefficients[2]/2)
if(!return.puntos){
return(res)
}else{
return(list(puntos,res))
}
}
|
d200090455fc5bdfef314c3161d320424faee625
|
3bede8db604c126812924f6fd581df8e0de92a80
|
/R/datasets.R
|
a3c2b3ef6c1f7057cc80cc39c08afbb92b773344
|
[] |
no_license
|
cran/rnaturalearth
|
6fdc30a296d8f1d090ac9106a861098ee78a6d58
|
e96cf44deb11aa52b9defa90d30875ad27def90e
|
refs/heads/master
| 2023-09-01T05:02:49.336943
| 2023-08-21T09:10:02
| 2023-08-21T10:32:49
| 85,758,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 925
|
r
|
datasets.R
|
#' world country polygons from Natural Earth
#'
#' at 1:110m scale (small). Other data and resolutions are in the packages
#' rnaturalearthdata and rnaturalearthhires.
#'
#' @format A \code{SpatialPolygonsDataFrame}
#'
#' @slot data A data frame with country attributes
#'
#' @aliases countries110
#'
#' @name countries
NULL
# NOTE(review): the @source URL below points at the 10m (large-scale)
# admin-0 countries zip, while countries110 is documented above as the
# 1:110m dataset — confirm this is the intended download link.
#'
#' @source
#' \url{https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/10m/cultural/ne_10m_admin_0_countries.zip}
#'
#' @rdname countries
"countries110"
#'
#' list of cultural layers available from Natural Earth
#'
#' @format A \code{DataFrame}
#' @aliases df_layers_cultural
#' @name df_layers_cultural
#'
NULL
#' @rdname df_layers_cultural
"df_layers_cultural"
#' list of physical layers available from Natural Earth
#'
#'
#' @format A \code{DataFrame}
#' @aliases df_layers_physical
#' @name df_layers_physical
#'
NULL
#' @rdname df_layers_physical
"df_layers_physical"
|
1887746d7e5fe5659db2c939c041d0cfafce163d
|
51b599c68b27793c1ab865f0c4ba1b2129dffbd0
|
/neural1.R
|
7c32eeda0758af68a3b03dc5807388083d19dfa1
|
[] |
no_license
|
joancardonasa/twidiction
|
5b815a5ae1b973dd18b0a966d1be2ed242659573
|
d25569fbe19d59cf59ecd1f2cb7d8c71eb58c96d
|
refs/heads/master
| 2021-01-17T08:08:45.774475
| 2016-06-27T20:01:34
| 2016-06-27T20:01:34
| 62,075,230
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 1,560
|
r
|
neural1.R
|
# First neural net: predict whether the Close price rises the next day
# from tweet-sentiment counts (positive/negative/neutral) and OHLC prices,
# training a fresh network on each sliding window and testing on the
# following 5 rows.
# NOTE(review): setwd() to a user-specific absolute path makes the script
# non-portable — consider running from the project directory instead.
#library(neuralnet)
setwd("C:/Users/Joan/Desktop/TFG")
library(dplyr)
data <- read.csv("dataset.csv")
data <- select(data, positive, negative, neutral, Open, High, Low, Close)# NOTE: select() is also exported by neuralnet — dplyr::select is the one intended here
# Binary target: 1 when Close rose with respect to the previous day.
y <- as.numeric(data$Close[2:length(data$Close)] > data$Close[1:(length(data$Close) - 1)])
#data$y <- 1:59
#data$y[2:59] <- y # output variable
data <- scale(data) # normalization (z-scores; scale() returns a matrix)
# m = 59 observations, n = 7 features + 1 output
output <- vector(length = 59)
output[1] <- 1 # from 16/2 to 17/2 the price rose by 300, hence class 1
output[2:59] <- y
data <- cbind(data, output)
summa <- 0 # running count of correct predictions across all windows
var <- 39 # 39 sliding-window iterations (NOTE(review): `var` shadows stats::var)
k <- 15 # training window spans rows i .. i+k (16 rows)
l <- 4 # test window spans rows i+k+1 .. i+k+1+l (5 rows)
# First attempt, without applying the 16-day lag
library(neuralnet)
for(i in 1:var){
train <- data[(i):(i+k),1:8] # feature dataset (includes the output column)
test <- data[(i+k+1):(i+k+1+l),1:7] # next 5 rows, features only
answers <- data[(i+k+1):(i+k+1+l),8] # true labels for the test rows
# fit the network: 7 inputs, one hidden layer of 7 units, sigmoid output
net <- neuralnet(output ~ positive + negative + neutral + Open + High + Low + Close, train,
hidden = 7,
lifesign = "minimal",
linear.output = FALSE, threshold = 0.1) # this trains the model
results <- compute(net, test)
results_fin <- data.frame(actual = answers, prediction = results$net.result)
# each of the 39 iterations has 5 test cases -> 39 * 5 = 195; the hit
# count is divided by 195 to obtain accuracy
# (an earlier variant used 49 iterations * 2 cases)
results_fin$prediction <- round(results_fin$prediction) # threshold at 0.5
summa <- summa + sum(results_fin[,1] == results_fin[,2])
}
detach(package:neuralnet)
detach(package:dplyr)
print(summa/195) # overall accuracy across all sliding windows
# summa/195 = 0.67 observed in the original run
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.